Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit 82e861f

Browse files
committed
Fix LWLockAssign() so that it can safely be executed after postmaster
initialization. Add spinlocking, fix EXEC_BACKEND unsafeness.
1 parent 77d1de3 commit 82e861f

File tree

2 files changed

+31
-15
lines changed

2 files changed

+31
-15
lines changed

src/backend/storage/ipc/shmem.c

+2-3
Original file line number | Diff line number | Diff line change
@@ -8,7 +8,7 @@
88
*
99
*
1010
* IDENTIFICATION
11-
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.85 2005/08/20 23:26:20 tgl Exp $
11+
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
1212
*
1313
*-------------------------------------------------------------------------
1414
*/
@@ -71,8 +71,7 @@ SHMEM_OFFSET ShmemBase; /* start address of shared memory */
7171

7272
static SHMEM_OFFSET ShmemEnd; /* end+1 address of shared memory */
7373

74-
NON_EXEC_STATIC slock_t *ShmemLock; /* spinlock for shared memory
75-
* allocation */
74+
slock_t *ShmemLock; /* spinlock for shared memory and LWLock allocation */
7675

7776
NON_EXEC_STATIC slock_t *ShmemIndexLock; /* spinlock for ShmemIndex */
7877

src/backend/storage/lmgr/lwlock.c

+29-12
Original file line number | Diff line number | Diff line change
@@ -15,7 +15,7 @@
1515
* Portions Copyright (c) 1994, Regents of the University of California
1616
*
1717
* IDENTIFICATION
18-
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.31 2005/10/07 20:11:03 tgl Exp $
18+
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.32 2005/10/07 21:42:38 tgl Exp $
1919
*
2020
*-------------------------------------------------------------------------
2121
*/
@@ -27,6 +27,10 @@
2727
#include "storage/spin.h"
2828

2929

30+
/* We use the ShmemLock spinlock to protect LWLockAssign */
31+
extern slock_t *ShmemLock;
32+
33+
3034
typedef struct LWLock
3135
{
3236
slock_t mutex; /* Protects LWLock and queue of PGPROCs */
@@ -65,9 +69,6 @@ typedef union LWLockPadded
6569
*/
6670
NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
6771

68-
/* shared counter for dynamic allocation of LWLockIds */
69-
static int *LWLockCounter;
70-
7172

7273
/*
7374
* We use this structure to keep track of locked LWLocks for release
@@ -159,7 +160,7 @@ LWLockShmemSize(void)
159160
/* Space for the LWLock array. */
160161
size = mul_size(numLocks, sizeof(LWLockPadded));
161162

162-
/* Space for shared allocation counter, plus room for alignment. */
163+
/* Space for dynamic allocation counter, plus room for alignment. */
163164
size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
164165

165166
return size;
@@ -175,12 +176,16 @@ CreateLWLocks(void)
175176
int numLocks = NumLWLocks();
176177
Size spaceLocks = LWLockShmemSize();
177178
LWLockPadded *lock;
179+
int *LWLockCounter;
178180
char *ptr;
179181
int id;
180182

181183
/* Allocate space */
182184
ptr = (char *) ShmemAlloc(spaceLocks);
183185

186+
/* Leave room for dynamic allocation counter */
187+
ptr += 2 * sizeof(int);
188+
184189
/* Ensure desired alignment of LWLock array */
185190
ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;
186191

@@ -200,9 +205,10 @@ CreateLWLocks(void)
200205
}
201206

202207
/*
203-
* Initialize the dynamic-allocation counter at the end of the array
208+
* Initialize the dynamic-allocation counter, which is stored just before
209+
* the first LWLock.
204210
*/
205-
LWLockCounter = (int *) lock;
211+
LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
206212
LWLockCounter[0] = (int) NumFixedLWLocks;
207213
LWLockCounter[1] = numLocks;
208214
}
@@ -211,16 +217,27 @@ CreateLWLocks(void)
211217
/*
212218
* LWLockAssign - assign a dynamically-allocated LWLock number
213219
*
214-
* NB: we do not currently try to interlock this. Could perhaps use
215-
* ShmemLock spinlock if there were any need to assign LWLockIds after
216-
* shmem setup.
220+
* We interlock this using the same spinlock that is used to protect
221+
* ShmemAlloc(). Interlocking is not really necessary during postmaster
222+
* startup, but it is needed if any user-defined code tries to allocate
223+
* LWLocks after startup.
217224
*/
218225
LWLockId
219226
LWLockAssign(void)
220227
{
228+
LWLockId result;
229+
int *LWLockCounter;
230+
231+
LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
232+
SpinLockAcquire(ShmemLock);
221233
if (LWLockCounter[0] >= LWLockCounter[1])
222-
elog(FATAL, "no more LWLockIds available");
223-
return (LWLockId) (LWLockCounter[0]++);
234+
{
235+
SpinLockRelease(ShmemLock);
236+
elog(ERROR, "no more LWLockIds available");
237+
}
238+
result = (LWLockId) (LWLockCounter[0]++);
239+
SpinLockRelease(ShmemLock);
240+
return result;
224241
}
225242

226243

0 commit comments

Comments (0)