
Commit 29ef2b3

Restore sane locking behavior during parallel query.
Commit 9a3cebe changed things so that parallel workers didn't obtain any lock of their own on tables they access. That was clearly a bad idea, but I'd mistakenly supposed that it was the intended end result of the series of patches for simplifying the executor's lock management. Undo that change in relation_open(), and adjust ExecOpenScanRelation() so that it gets the correct lock if inside a parallel worker.

In passing, clean up some more obsolete comments about when locks are acquired.

Discussion: https://postgr.es/m/468c85d9-540e-66a2-1dde-fec2b741e688@lab.ntt.co.jp
1 parent f234365 commit 29ef2b3

11 files changed: +34 -34 lines changed
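The heart of the fix is the new branch in ExecGetRangeTableRelation() (see the execUtils.c hunk below). As a condensed sketch only, assuming the surrounding PostgreSQL executor context (the RangeTblEntry *rte and Relation rel locals, heap_open, CheckRelationLockedByMe, IsParallelWorker) rather than standalone, compilable code:

    if (!IsParallelWorker())
    {
        /* Leader or non-parallel backend: the lock was already taken
         * upstream (parse/plan time), so open with NoLock and just
         * assert that we really do hold it. */
        rel = heap_open(rte->relid, NoLock);
        Assert(rte->rellockmode == AccessShareLock ||
               CheckRelationLockedByMe(rel, rte->rellockmode, false));
    }
    else
    {
        /* Parallel worker: take our own local lock on the relation, so
         * behavior stays sane even if the leader exits before we do. */
        rel = heap_open(rte->relid, rte->rellockmode);
    }

Note that the worker branch uses rte->rellockmode, i.e. the same lock mode recorded in the range table that the leader already holds, per the hunk below.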

src/backend/access/heap/heapam.c

-4
@@ -1140,13 +1140,9 @@ relation_open(Oid relationId, LOCKMODE lockmode)
     /*
      * If we didn't get the lock ourselves, assert that caller holds one,
      * except in bootstrap mode where no locks are used.
-     *
-     * Also, parallel workers currently assume that their parent holds locks
-     * for tables used in the parallel query (a mighty shaky assumption).
      */
     Assert(lockmode != NoLock ||
            IsBootstrapProcessingMode() ||
-           IsParallelWorker() ||
            CheckRelationLockedByMe(r, AccessShareLock, true));
 
     /* Make note that we've accessed a temporary relation */
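For reference, with the IsParallelWorker() escape hatch gone, the assertion in relation_open() now reads (reassembled from the context lines kept by this hunk):

    Assert(lockmode != NoLock ||
           IsBootstrapProcessingMode() ||
           CheckRelationLockedByMe(r, AccessShareLock, true));

so a parallel worker that opens a relation with NoLock must itself hold at least AccessShareLock on it.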

src/backend/executor/execMain.c

+2 -2

@@ -1622,8 +1622,8 @@ ExecEndPlan(PlanState *planstate, EState *estate)
     }
 
     /*
-     * close whatever rangetable Relations have been opened. We did not
-     * acquire locks in ExecGetRangeTableRelation, so don't release 'em here.
+     * close whatever rangetable Relations have been opened. We do not
+     * release any locks we might hold on those rels.
      */
     num_relations = estate->es_range_table_size;
     for (i = 0; i < num_relations; i++)

src/backend/executor/execUtils.c

+23 -9

@@ -732,16 +732,30 @@ ExecGetRangeTableRelation(EState *estate, Index rti)
 
         Assert(rte->rtekind == RTE_RELATION);
 
-        rel = estate->es_relations[rti - 1] = heap_open(rte->relid, NoLock);
+        if (!IsParallelWorker())
+        {
+            /*
+             * In a normal query, we should already have the appropriate lock,
+             * but verify that through an Assert. Since there's already an
+             * Assert inside heap_open that insists on holding some lock, it
+             * seems sufficient to check this only when rellockmode is higher
+             * than the minimum.
+             */
+            rel = heap_open(rte->relid, NoLock);
+            Assert(rte->rellockmode == AccessShareLock ||
+                   CheckRelationLockedByMe(rel, rte->rellockmode, false));
+        }
+        else
+        {
+            /*
+             * If we are a parallel worker, we need to obtain our own local
+             * lock on the relation. This ensures sane behavior in case the
+             * parent process exits before we do.
+             */
+            rel = heap_open(rte->relid, rte->rellockmode);
+        }
 
-        /*
-         * Verify that appropriate lock was obtained before execution.
-         *
-         * In the case of parallel query, only the leader would've obtained
-         * the lock (that needs to be fixed, though).
-         */
-        Assert(IsParallelWorker() ||
-               CheckRelationLockedByMe(rel, rte->rellockmode, false));
+        estate->es_relations[rti - 1] = rel;
     }
 
     return rel;

src/backend/executor/nodeBitmapHeapscan.c

+1 -5

@@ -899,16 +899,12 @@ ExecInitBitmapHeapScan(BitmapHeapScan *node, EState *estate, int eflags)
     ExecAssignExprContext(estate, &scanstate->ss.ps);
 
     /*
-     * open the base relation and acquire appropriate lock on it.
+     * open the scan relation
      */
     currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
 
     /*
      * initialize child nodes
-     *
-     * We do this after ExecOpenScanRelation because the child nodes will open
-     * indexscans on our relation's indexes, and we want to be sure we have
-     * acquired a lock on the relation first.
      */
     outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);

src/backend/executor/nodeCustom.c

+1 -1

@@ -55,7 +55,7 @@ ExecInitCustomScan(CustomScan *cscan, EState *estate, int eflags)
     ExecAssignExprContext(estate, &css->ss.ps);
 
     /*
-     * open the base relation, if any, and acquire an appropriate lock on it
+     * open the scan relation, if any
      */
     if (scanrelid > 0)
     {

src/backend/executor/nodeForeignscan.c

+2 -2

@@ -156,8 +156,8 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags)
     ExecAssignExprContext(estate, &scanstate->ss.ps);
 
     /*
-     * open the base relation, if any, and acquire an appropriate lock on it;
-     * also acquire function pointers from the FDW's handler
+     * open the scan relation, if any; also acquire function pointers from the
+     * FDW's handler
      */
     if (scanrelid > 0)
     {

src/backend/executor/nodeIndexonlyscan.c

+1 -1

@@ -511,7 +511,7 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
     ExecAssignExprContext(estate, &indexstate->ss.ps);
 
     /*
-     * open the base relation and acquire appropriate lock on it.
+     * open the scan relation
      */
     currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);

src/backend/executor/nodeIndexscan.c

+1 -1

@@ -933,7 +933,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
     ExecAssignExprContext(estate, &indexstate->ss.ps);
 
     /*
-     * open the base relation and acquire appropriate lock on it.
+     * open the scan relation
      */
     currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);

src/backend/executor/nodeSamplescan.c

+1 -4

@@ -134,10 +134,7 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags)
     ExecAssignExprContext(estate, &scanstate->ss.ps);
 
     /*
-     * Initialize scan relation.
-     *
-     * Get the relation object id from the relid'th entry in the range table,
-     * open that relation and acquire appropriate lock on it.
+     * open the scan relation
      */
     scanstate->ss.ss_currentRelation =
         ExecOpenScanRelation(estate,

src/backend/executor/nodeSeqscan.c

+1 -4

@@ -163,10 +163,7 @@ ExecInitSeqScan(SeqScan *node, EState *estate, int eflags)
     ExecAssignExprContext(estate, &scanstate->ss.ps);
 
     /*
-     * Initialize scan relation.
-     *
-     * Get the relation object id from the relid'th entry in the range table,
-     * open that relation and acquire appropriate lock on it.
+     * open the scan relation
      */
     scanstate->ss.ss_currentRelation =
         ExecOpenScanRelation(estate,

src/backend/executor/nodeTidscan.c

+1 -1

@@ -531,7 +531,7 @@ ExecInitTidScan(TidScan *node, EState *estate, int eflags)
     tidstate->tss_TidPtr = -1;
 
     /*
-     * open the base relation and acquire appropriate lock on it.
+     * open the scan relation
      */
     currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);
