@@ -250,7 +250,8 @@ static HTAB *LockMethodProcLockHash;
 static HTAB *LockMethodLocalHash;
 
 
-/* private state for GrantAwaitedLock */
+/* private state for error cleanup */
+static LOCALLOCK *StrongLockInProgress;
 static LOCALLOCK *awaitedLock;
 static ResourceOwner awaitedOwner;
 
@@ -338,6 +339,8 @@ static void RemoveLocalLock(LOCALLOCK *locallock);
 static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
				 const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
 static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
+static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
+static void FinishStrongLockAcquire(void);
 static void WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner);
 static void ReleaseLockForOwner(LOCALLOCK *locallock, ResourceOwner owner);
 static bool UnGrantLock(LOCK *lock, LOCKMODE lockmode,
@@ -738,22 +741,11 @@ LockAcquireExtended(const LOCKTAG *locktag,
 	}
 	else if (FastPathStrongMode(lockmode))
 	{
-		/*
-		 * Adding to a memory location is not atomic, so we take a
-		 * spinlock to ensure we don't collide with someone else trying
-		 * to bump the count at the same time.
-		 *
-		 * XXX: It might be worth considering using an atomic fetch-and-add
-		 * instruction here, on architectures where that is supported.
-		 */
-		Assert(locallock->holdsStrongLockCount == FALSE);
-		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
-		FastPathStrongRelationLocks->count[fasthashcode]++;
-		locallock->holdsStrongLockCount = TRUE;
-		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
+		BeginStrongLockAcquire(locallock, fasthashcode);
 		if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
										   hashcode))
 		{
+			AbortStrongLockAcquire();
 			if (reportMemoryError)
 				ereport(ERROR,
 						(errcode(ERRCODE_OUT_OF_MEMORY),
@@ -779,6 +771,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
								hashcode, lockmode);
 	if (!proclock)
 	{
+		AbortStrongLockAcquire();
 		LWLockRelease(partitionLock);
 		if (reportMemoryError)
 			ereport(ERROR,
@@ -820,6 +813,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 	 */
 	if (dontWait)
 	{
+		AbortStrongLockAcquire();
 		if (proclock->holdMask == 0)
 		{
 			uint32		proclock_hashcode;
@@ -884,6 +878,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		 */
 		if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
 		{
+			AbortStrongLockAcquire();
 			PROCLOCK_PRINT("LockAcquire: INCONSISTENT", proclock);
 			LOCK_PRINT("LockAcquire: INCONSISTENT", lock, lockmode);
 			/* Should we retry? */
@@ -894,6 +889,12 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		LOCK_PRINT("LockAcquire: granted", lock, lockmode);
 	}
 
+	/*
+	 * Lock state is fully up-to-date now; if we error out after this, no
+	 * special error cleanup is required.
+	 */
+	FinishStrongLockAcquire();
+
 	LWLockRelease(partitionLock);
 
 	/*
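Taken together, the LockAcquireExtended hunks above implement a simple bracketing protocol: BeginStrongLockAcquire arms cleanup and bumps the shared counter, every failure exit before the lock state is consistent calls AbortStrongLockAcquire, and FinishStrongLockAcquire disarms the cleanup once nothing further can error out. A minimal sketch of that protocol, assuming a hypothetical do_shared_table_work() in place of the real hash-table steps (this is not the actual LockAcquireExtended body):

static bool do_shared_table_work(void);		/* hypothetical stand-in */

static bool
strong_lock_protocol_sketch(LOCALLOCK *locallock, uint32 fasthashcode)
{
	/* Arm error cleanup and bump the shared strong-lock counter. */
	BeginStrongLockAcquire(locallock, fasthashcode);

	if (!do_shared_table_work())
	{
		/* Any exit before the lock state is consistent must undo it. */
		AbortStrongLockAcquire();
		return false;
	}

	/* Lock state is fully up-to-date; disarm the pending cleanup. */
	FinishStrongLockAcquire();
	return true;
}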
@@ -1349,6 +1350,64 @@ GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
 	locallock->numLockOwners++;
 }
 
+/*
+ * BeginStrongLockAcquire - inhibit use of fastpath for a given LOCALLOCK,
+ * and arrange for error cleanup if it fails
+ */
+static void
+BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
+{
+	Assert(StrongLockInProgress == NULL);
+	Assert(locallock->holdsStrongLockCount == FALSE);
+
+	/*
+	 * Adding to a memory location is not atomic, so we take a
+	 * spinlock to ensure we don't collide with someone else trying
+	 * to bump the count at the same time.
+	 *
+	 * XXX: It might be worth considering using an atomic fetch-and-add
+	 * instruction here, on architectures where that is supported.
+	 */
+
+	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+	FastPathStrongRelationLocks->count[fasthashcode]++;
+	locallock->holdsStrongLockCount = TRUE;
+	StrongLockInProgress = locallock;
+	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
+}
+
+/*
+ * FinishStrongLockAcquire - cancel pending cleanup for a strong lock
+ * acquisition once it's no longer needed
+ */
+static void
+FinishStrongLockAcquire(void)
+{
+	StrongLockInProgress = NULL;
+}
+
+/*
+ * AbortStrongLockAcquire - undo strong lock state changes performed by
+ * BeginStrongLockAcquire.
+ */
+void
+AbortStrongLockAcquire(void)
+{
+	uint32		fasthashcode;
+	LOCALLOCK  *locallock = StrongLockInProgress;
+
+	if (locallock == NULL)
+		return;
+
+	fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
+	Assert(locallock->holdsStrongLockCount == TRUE);
+	SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+	FastPathStrongRelationLocks->count[fasthashcode]--;
+	locallock->holdsStrongLockCount = FALSE;
+	StrongLockInProgress = NULL;
+	SpinLockRelease(&FastPathStrongRelationLocks->mutex);
+}
+
 /*
  * GrantAwaitedLock -- call GrantLockLocal for the lock we are doing
  * WaitOnLock on.
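The XXX comment retained above suggests an atomic fetch-and-add could replace the spinlock. As a rough illustration of that idea only, using C11 <stdatomic.h> rather than the backend's spinlock and shared-memory machinery (strong_lock_counts and the partition count are hypothetical stand-ins for FastPathStrongRelationLocks->count, not the actual layout):

#include <stdatomic.h>
#include <stdint.h>

#define STRONG_LOCK_PARTITIONS 1024		/* hypothetical */

static _Atomic uint32_t strong_lock_counts[STRONG_LOCK_PARTITIONS];

static void
bump_strong_lock_count(uint32_t fasthashcode)
{
	/* One atomic read-modify-write replaces acquire/increment/release. */
	atomic_fetch_add(&strong_lock_counts[fasthashcode], 1);
}

static void
unbump_strong_lock_count(uint32_t fasthashcode)
{
	atomic_fetch_sub(&strong_lock_counts[fasthashcode], 1);
}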
@@ -1414,7 +1473,7 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
  * We can and do use a PG_TRY block to try to clean up after failure, but
  * this still has a major limitation: elog(FATAL) can occur while waiting
  * (eg, a "die" interrupt), and then control won't come back here. So all
- * cleanup of essential state should happen in LockWaitCancel, not here.
+ * cleanup of essential state should happen in LockErrorCleanup, not here.
  * We can use PG_TRY to clear the "waiting" status flags, since doing that
  * is unimportant if the process exits.
  */
@@ -1441,7 +1500,7 @@ WaitOnLock(LOCALLOCK *locallock, ResourceOwner owner)
 	}
 	PG_CATCH();
 	{
-		/* In this path, awaitedLock remains set until LockWaitCancel */
+		/* In this path, awaitedLock remains set until LockErrorCleanup */
 
 		/* Report change to non-waiting status */
 		pgstat_report_waiting(false);
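For context, the PG_TRY shape these rewritten comments refer to, reduced to a schematic (the sleep step is elided; this is not the actual WaitOnLock body):

	awaitedLock = locallock;
	PG_TRY();
	{
		/* ... sleep until the lock is granted or an error is raised ... */
		pgstat_report_waiting(false);	/* non-essential, safe to clear here */
	}
	PG_CATCH();
	{
		/*
		 * In this path, awaitedLock remains set until LockErrorCleanup; an
		 * elog(FATAL) while waiting never reaches this block, so only
		 * non-essential state may be cleaned up here.
		 */
		pgstat_report_waiting(false);
		PG_RE_THROW();
	}
	PG_END_TRY();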