author      Tom Lane    2018-10-06 19:49:37 +0000
committer   Tom Lane    2018-10-06 19:49:37 +0000
commit      29ef2b310da9892fda075ff9ee12da7f92d5da6e (patch)
tree        9505320f23af01455ff4cde46bd33702b3ddf635
parent      f2343653f5b2aecfc759f36dbb3fd2a61f36853e (diff)
Restore sane locking behavior during parallel query.
Commit 9a3cebeaa changed things so that parallel workers didn't obtain
any lock of their own on tables they access.  That was clearly a bad
idea, but I'd mistakenly supposed that it was the intended end result
of the series of patches for simplifying the executor's lock
management.  Undo that change in relation_open(), and adjust
ExecOpenScanRelation() so that it gets the correct lock if inside a
parallel worker.

In passing, clean up some more obsolete comments about when locks are
acquired.

Discussion: https://postgr.es/m/468c85d9-540e-66a2-1dde-fec2b741e688@lab.ntt.co.jp
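In outline, the corrected rule in ExecGetRangeTableRelation() (the full hunk appears in execUtils.c below; this is a condensed sketch with the surrounding function omitted) is:

    /* Leader: open with NoLock and assert the proper lock is already
     * held.  Parallel worker: acquire its own lock, so it survives even
     * if the parent process exits first. */
    if (!IsParallelWorker())
    {
        rel = heap_open(rte->relid, NoLock);
        Assert(rte->rellockmode == AccessShareLock ||
               CheckRelationLockedByMe(rel, rte->rellockmode, false));
    }
    else
        rel = heap_open(rte->relid, rte->rellockmode);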
-rw-r--r--  src/backend/access/heap/heapam.c           |  4
-rw-r--r--  src/backend/executor/execMain.c            |  4
-rw-r--r--  src/backend/executor/execUtils.c           | 32
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c  |  6
-rw-r--r--  src/backend/executor/nodeCustom.c          |  2
-rw-r--r--  src/backend/executor/nodeForeignscan.c     |  4
-rw-r--r--  src/backend/executor/nodeIndexonlyscan.c   |  2
-rw-r--r--  src/backend/executor/nodeIndexscan.c       |  2
-rw-r--r--  src/backend/executor/nodeSamplescan.c      |  5
-rw-r--r--  src/backend/executor/nodeSeqscan.c         |  5
-rw-r--r--  src/backend/executor/nodeTidscan.c         |  2
11 files changed, 34 insertions(+), 34 deletions(-)
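For context, the invariant restored by the heapam.c change below is that a caller passing NoLock to relation_open() must already hold a suitable lock itself (bootstrap mode excepted); the IsParallelWorker() escape hatch is removed.  A minimal hypothetical caller, using the existing LockRelationOid() API (relid here is a placeholder Oid, not from this commit):

    /* Take our own lock first.  With the parallel-worker exemption
     * gone, the Assert in relation_open() demands this even when
     * running inside a worker process. */
    LockRelationOid(relid, AccessShareLock);
    rel = relation_open(relid, NoLock);    /* Assert is now satisfied */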
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 727d6776e13..5f1a69ca53c 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -1140,13 +1140,9 @@ relation_open(Oid relationId, LOCKMODE lockmode)
/*
* If we didn't get the lock ourselves, assert that caller holds one,
* except in bootstrap mode where no locks are used.
- *
- * Also, parallel workers currently assume that their parent holds locks
- * for tables used in the parallel query (a mighty shaky assumption).
*/
Assert(lockmode != NoLock ||
IsBootstrapProcessingMode() ||
- IsParallelWorker() ||
CheckRelationLockedByMe(r, AccessShareLock, true));
/* Make note that we've accessed a temporary relation */
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 23e6749920a..b6abad554a4 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1622,8 +1622,8 @@ ExecEndPlan(PlanState *planstate, EState *estate)
}
/*
- * close whatever rangetable Relations have been opened. We did not
- * acquire locks in ExecGetRangeTableRelation, so don't release 'em here.
+ * close whatever rangetable Relations have been opened. We do not
+ * release any locks we might hold on those rels.
*/
num_relations = estate->es_range_table_size;
for (i = 0; i < num_relations; i++)
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 650fd81ff17..d98e90e7117 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -732,16 +732,30 @@ ExecGetRangeTableRelation(EState *estate, Index rti)
Assert(rte->rtekind == RTE_RELATION);
- rel = estate->es_relations[rti - 1] = heap_open(rte->relid, NoLock);
+ if (!IsParallelWorker())
+ {
+ /*
+ * In a normal query, we should already have the appropriate lock,
+ * but verify that through an Assert. Since there's already an
+ * Assert inside heap_open that insists on holding some lock, it
+ * seems sufficient to check this only when rellockmode is higher
+ * than the minimum.
+ */
+ rel = heap_open(rte->relid, NoLock);
+ Assert(rte->rellockmode == AccessShareLock ||
+ CheckRelationLockedByMe(rel, rte->rellockmode, false));
+ }
+ else
+ {
+ /*
+ * If we are a parallel worker, we need to obtain our own local
+ * lock on the relation. This ensures sane behavior in case the
+ * parent process exits before we do.
+ */
+ rel = heap_open(rte->relid, rte->rellockmode);
+ }
- /*
- * Verify that appropriate lock was obtained before execution.
- *
- * In the case of parallel query, only the leader would've obtained
- * the lock (that needs to be fixed, though).
- */
- Assert(IsParallelWorker() ||
- CheckRelationLockedByMe(rel, rte->rellockmode, false));
+ estate->es_relations[rti - 1] = rel;
}
return rel;
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 5307cd1b870..304ef07f2cc 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -899,16 +899,12 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
ExecAssignExprContext(estate, &scanstate->ss.ps);
/*
- * open the base relation and acquire appropriate lock on it.
+ * open the scan relation
*/
currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
/*
* initialize child nodes
- *
- * We do this after ExecOpenScanRelation because the child nodes will open
- * indexscans on our relation's indexes, and we want to be sure we have
- * acquired a lock on the relation first.
*/
outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);
diff --git a/src/backend/executor/nodeCustom.c b/src/backend/executor/nodeCustom.c
index 9a33eda6887..7972d5a952a 100644
--- a/src/backend/executor/nodeCustom.c
+++ b/src/backend/executor/nodeCustom.c
@@ -55,7 +55,7 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags)
ExecAssignExprContext(estate, &css->ss.ps);
/*
- * open the base relation, if any, and acquire an appropriate lock on it
+ * open the scan relation, if any
*/
if (scanrelid > 0)
{
diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c
index cf7df72d8c2..2ec7fcb9621 100644
--- a/src/backend/executor/nodeForeignscan.c
+++ b/src/backend/executor/nodeForeignscan.c
@@ -156,8 +156,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
ExecAssignExprContext(estate, &scanstate->ss.ps);
/*
- * open the base relation, if any, and acquire an appropriate lock on it;
- * also acquire function pointers from the FDW's handler
+ * open the scan relation, if any; also acquire function pointers from the
+ * FDW's handler
*/
if (scanrelid > 0)
{
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index 1b530cea405..daedf342f72 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -511,7 +511,7 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
ExecAssignExprContext(estate, &indexstate->ss.ps);
/*
- * open the base relation and acquire appropriate lock on it.
+ * open the scan relation
*/
currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index d9527669f53..ba7821b0e2b 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -933,7 +933,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
ExecAssignExprContext(estate, &indexstate->ss.ps);
/*
- * open the base relation and acquire appropriate lock on it.
+ * open the scan relation
*/
currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c
index 70ae1bc7e46..f01fc3b62a0 100644
--- a/src/backend/executor/nodeSamplescan.c
+++ b/src/backend/executor/nodeSamplescan.c
@@ -134,10 +134,7 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags)
ExecAssignExprContext(estate, &scanstate->ss.ps);
/*
- * Initialize scan relation.
- *
- * Get the relation object id from the relid'th entry in the range table,
- * open that relation and acquire appropriate lock on it.
+ * open the scan relation
*/
scanstate->ss.ss_currentRelation =
ExecOpenScanRelation(estate,
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 5dede816c6a..79729dbbecb 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -163,10 +163,7 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
ExecAssignExprContext(estate, &scanstate->ss.ps);
/*
- * Initialize scan relation.
- *
- * Get the relation object id from the relid'th entry in the range table,
- * open that relation and acquire appropriate lock on it.
+ * open the scan relation
*/
scanstate->ss.ss_currentRelation =
ExecOpenScanRelation(estate,
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index 593185f726b..d21d6553e42 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -531,7 +531,7 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags)
tidstate->tss_TidPtr = -1;
/*
- * open the base relation and acquire appropriate lock on it.
+ * open the scan relation
*/
currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);