@@ -155,11 +155,11 @@ static int FastPathLocalUseCount = 0;
 #define FastPathStrongMode(mode)		((mode) > ShareUpdateExclusiveLock)
 #define FastPathRelevantMode(mode)		((mode) != ShareUpdateExclusiveLock)
 
-static bool FastPathGrantLock(Oid relid, LOCKMODE lockmode);
-static bool FastPathUnGrantLock(Oid relid, LOCKMODE lockmode);
-static bool FastPathTransferLocks(LockMethod lockMethodTable,
+static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
+static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
+static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
 					  const LOCKTAG *locktag, uint32 hashcode);
-static PROCLOCK *FastPathGetLockEntry(LOCALLOCK *locallock);
+static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
 
 /*
  * To make the fast-path lock mechanism work, we must have some way of
@@ -186,9 +186,9 @@ typedef struct
 {
 	slock_t		mutex;
 	uint32		count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
-} FastPathStrongLockData;
+} FastPathStrongRelationLockData;
 
-FastPathStrongLockData *FastPathStrongLocks;
+FastPathStrongRelationLockData *FastPathStrongRelationLocks;
 
 #ifndef LOCK_DEBUG
 static bool Dummy_trace = false;
@@ -415,10 +415,11 @@ InitLocks(void)
 	/*
 	 * Allocate fast-path structures.
 	 */
-	FastPathStrongLocks = ShmemInitStruct("Fast Path Strong Lock Data",
-										  sizeof(FastPathStrongLockData), &found);
+	FastPathStrongRelationLocks =
+		ShmemInitStruct("Fast Path Strong Relation Lock Data",
+						sizeof(FastPathStrongRelationLockData), &found);
 	if (!found)
-		SpinLockInit(&FastPathStrongLocks->mutex);
+		SpinLockInit(&FastPathStrongRelationLocks->mutex);
 
 	/*
 	 * Allocate non-shared hash table for LOCALLOCK structs.  This stores lock
@@ -720,10 +721,11 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		 * yet to begin to transfer fast-path locks.
 		 */
 		LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
-		if (FastPathStrongLocks->count[fasthashcode] != 0)
+		if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
 			acquired = false;
 		else
-			acquired = FastPathGrantLock(locktag->locktag_field2, lockmode);
+			acquired = FastPathGrantRelationLock(locktag->locktag_field2,
+												 lockmode);
 		LWLockRelease(MyProc->backendLock);
 		if (acquired)
 		{
@@ -742,11 +744,12 @@ LockAcquireExtended(const LOCKTAG *locktag,
 		 * instruction here, on architectures where that is supported.
 		 */
 		Assert(locallock->holdsStrongLockCount == FALSE);
-		SpinLockAcquire(&FastPathStrongLocks->mutex);
-		FastPathStrongLocks->count[fasthashcode]++;
+		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+		FastPathStrongRelationLocks->count[fasthashcode]++;
 		locallock->holdsStrongLockCount = TRUE;
-		SpinLockRelease(&FastPathStrongLocks->mutex);
-		if (!FastPathTransferLocks(lockMethodTable, locktag, hashcode))
+		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
+		if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
+										   hashcode))
 		{
 			if (reportMemoryError)
 				ereport(ERROR,
@@ -1099,11 +1102,11 @@ RemoveLocalLock(LOCALLOCK *locallock)
 		uint32		fasthashcode;
 		fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
 
-		SpinLockAcquire(&FastPathStrongLocks->mutex);
-		Assert(FastPathStrongLocks->count[fasthashcode] > 0);
-		FastPathStrongLocks->count[fasthashcode]--;
+		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+		Assert(FastPathStrongRelationLocks->count[fasthashcode] > 0);
+		FastPathStrongRelationLocks->count[fasthashcode]--;
 		locallock->holdsStrongLockCount = FALSE;
-		SpinLockRelease(&FastPathStrongLocks->mutex);
+		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
 	}
 	if (!hash_search(LockMethodLocalHash,
 					 (void *) &(locallock->tag),
@@ -1642,7 +1645,8 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 		 * it here.  Another backend may have moved it to the main table.
 		 */
 		LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
-		released = FastPathUnGrantLock(locktag->locktag_field2, lockmode);
+		released = FastPathUnGrantRelationLock(locktag->locktag_field2,
+											   lockmode);
 		LWLockRelease(MyProc->backendLock);
 		if (released)
 		{
@@ -1825,7 +1829,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 
 			/* Attempt fast-path release. */
 			relid = locallock->tag.lock.locktag_field2;
-			if (FastPathUnGrantLock(relid, lockmode))
+			if (FastPathUnGrantRelationLock(relid, lockmode))
 			{
 				RemoveLocalLock(locallock);
 				continue;
@@ -2117,11 +2121,11 @@ LockReassignCurrentOwner(void)
 }
 
 /*
- * FastPathGrantLock
+ * FastPathGrantRelationLock
  *		Grant lock using per-backend fast-path array, if there is space.
  */
 static bool
-FastPathGrantLock(Oid relid, LOCKMODE lockmode)
+FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode)
 {
 	uint32		f;
 	uint32		unused_slot = FP_LOCK_SLOTS_PER_BACKEND;
@@ -2153,12 +2157,12 @@ FastPathGrantLock(Oid relid, LOCKMODE lockmode)
 }
 
 /*
- * FastPathUnGrantLock
+ * FastPathUnGrantRelationLock
  *		Release fast-path lock, if present.  Update backend-private local
  *		use count, while we're at it.
  */
 static bool
-FastPathUnGrantLock(Oid relid, LOCKMODE lockmode)
+FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
 {
 	uint32		f;
 	bool		result = false;
@@ -2180,12 +2184,12 @@ FastPathUnGrantLock(Oid relid, LOCKMODE lockmode)
 }
 
 /*
- * FastPathTransferLocks
+ * FastPathTransferRelationLocks
  *		Transfer locks matching the given lock tag from per-backend fast-path
  *		arrays to the shared hash table.
  */
 static bool
-FastPathTransferLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
+FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
 					  uint32 hashcode)
 {
 	LWLockId	partitionLock = LockHashPartitionLock(hashcode);
@@ -2267,7 +2271,7 @@ FastPathTransferLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
  * transferring it to the primary lock table if necessary.
  */
 static PROCLOCK *
-FastPathGetLockEntry(LOCALLOCK *locallock)
+FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 {
 	LockMethod	lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
 	LOCKTAG    *locktag = &locallock->tag.lock;
@@ -2650,9 +2654,9 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
 		&& FastPathTag(&lock->tag) && FastPathStrongMode(lockmode))
 	{
 		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);
-		SpinLockAcquire(&FastPathStrongLocks->mutex);
-		FastPathStrongLocks->count[fasthashcode]--;
-		SpinLockRelease(&FastPathStrongLocks->mutex);
+		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+		FastPathStrongRelationLocks->count[fasthashcode]--;
+		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
 	}
 }
 
@@ -2715,11 +2719,11 @@ AtPrepare_Locks(void)
 		/*
 		 * If the local lock was taken via the fast-path, we need to move it
 		 * to the primary lock table, or just get a pointer to the existing
-		 * primary lock table if by chance it's already been transferred.
+		 * primary lock table entry if by chance it's already been transferred.
 		 */
 		if (locallock->proclock == NULL)
 		{
-			locallock->proclock = FastPathGetLockEntry(locallock);
+			locallock->proclock = FastPathGetRelationLockEntry(locallock);
 			locallock->lock = locallock->proclock->tag.myLock;
 		}
 
@@ -3010,7 +3014,7 @@ GetLockStatusData(void)
 
 		for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
 		{
-			LockInstanceData *instance = &data->locks[el];
+			LockInstanceData *instance;
 			uint32		lockbits = FAST_PATH_GET_BITS(proc, f);
 
 			/* Skip unallocated slots. */
@@ -3024,6 +3028,7 @@ GetLockStatusData(void)
 					repalloc(data->locks, sizeof(LockInstanceData) * els);
 			}
 
+			instance = &data->locks[el];
 			SET_LOCKTAG_RELATION(instance->locktag, proc->databaseId,
 								 proc->fpRelId[f]);
 			instance->holdMask = lockbits << FAST_PATH_LOCKNUMBER_OFFSET;
@@ -3455,9 +3460,9 @@ lock_twophase_recover(TransactionId xid, uint16 info,
 	if (FastPathTag(&lock->tag) && FastPathStrongMode(lockmode))
 	{
 		uint32		fasthashcode = FastPathStrongLockHashPartition(hashcode);
-		SpinLockAcquire(&FastPathStrongLocks->mutex);
-		FastPathStrongLocks->count[fasthashcode]++;
-		SpinLockRelease(&FastPathStrongLocks->mutex);
+		SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
+		FastPathStrongRelationLocks->count[fasthashcode]++;
+		SpinLockRelease(&FastPathStrongRelationLocks->mutex);
 	}
 
 	LWLockRelease(partitionLock);
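
Reviewer note: every SpinLockAcquire/count/SpinLockRelease triple touched by this rename manipulates the same shared structure — a spinlock-protected array of per-partition counters recording how many strong relation locks exist, which weak lockers consult to decide whether their per-backend fast path is safe. (Separately, the GetLockStatusData hunks fix a stale pointer: instance is now computed only after the possible repalloc, so it can no longer point into the old allocation.) Below is a standalone sketch of that counter protocol, not PostgreSQL source: a pthread spinlock stands in for slock_t, the partition count is an assumed illustrative value, and the helper names are made up for this note.

/*
 * Minimal sketch of the strong-lock counter protocol (assumptions noted
 * above; compile with -pthread).
 */
#include <pthread.h>
#include <stdint.h>

#define STRONG_LOCK_HASH_PARTITIONS 1024	/* assumed power-of-two value */

typedef struct
{
	pthread_spinlock_t mutex;	/* slock_t in the real code */
	uint32_t	count[STRONG_LOCK_HASH_PARTITIONS];
} StrongRelationLockData;

static StrongRelationLockData strong_locks;

/* One-time setup, analogous to the SpinLockInit() call in InitLocks(). */
static void
strong_locks_init(void)
{
	pthread_spin_init(&strong_locks.mutex, PTHREAD_PROCESS_PRIVATE);
}

/*
 * A strong locker bumps its partition's counter first, then transfers any
 * existing fast-path locks into the main hash table; the counter makes the
 * strong lock visible to weak lockers before the transfer begins.
 */
static void
strong_lock_bump(uint32_t fasthashcode)
{
	pthread_spin_lock(&strong_locks.mutex);
	strong_locks.count[fasthashcode]++;
	pthread_spin_unlock(&strong_locks.mutex);
}

/*
 * A weak locker may use its per-backend fast-path array only while no
 * strong lock is recorded against the relation's partition.  As in the
 * patch, the read is done without the spinlock; in PostgreSQL it happens
 * under the backend's LWLock instead.
 */
static int
fast_path_usable(uint32_t fasthashcode)
{
	return strong_locks.count[fasthashcode] == 0;
}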