My back-patch of the 9.4-era commit
44cd47c1d49655c5 into 9.2 and 9.3 fixed
HPPA builds as expected, but it broke --disable-spinlocks builds, because
the dummy spinlock is initialized before the underlying semaphore
infrastructure is alive. In 9.4 and up this works because of commit
daa7527afc227443, which decoupled initialization of an slock_t variable
from access to the actual system semaphore object. The best solution
seems to be to back-port that patch, which should be a net win anyway
because it improves the usability of --disable-spinlocks builds in the
older branches; and it's been out long enough now to not be worrisome
from a stability perspective.
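
The decoupling works by changing what an emulated slock_t is, as the s_lock.h hunk later in this patch shows. A simplified before/after contrast (the actual one-line declarations appear in the diff below):

    /* Before: an emulated spinlock *is* a semaphore, so initializing
     * one requires the semaphore infrastructure to already be alive. */
    typedef PGSemaphoreData slock_t;

    /* After: an emulated spinlock is just an index into a semaphore
     * array created once at shared-memory setup, so initializing one
     * is a plain integer assignment with no ordering dependency. */
    typedef int slock_t;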
slock_t *ShmemLock;
VariableCache ShmemVariableCache;
Backend *ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+ PGSemaphore SpinlockSemaArray;
+#endif
LWLock *LWLockArray;
slock_t *ProcStructLock;
PROC_HDR *ProcGlobal;
param->ShmemVariableCache = ShmemVariableCache;
param->ShmemBackendArray = ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+ param->SpinlockSemaArray = SpinlockSemaArray;
+#endif
param->LWLockArray = LWLockArray;
param->ProcStructLock = ProcStructLock;
param->ProcGlobal = ProcGlobal;
ShmemVariableCache = param->ShmemVariableCache;
ShmemBackendArray = param->ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+ SpinlockSemaArray = param->SpinlockSemaArray;
+#endif
LWLockArray = param->LWLockArray;
ProcStructLock = param->ProcStructLock;
ProcGlobal = param->ProcGlobal;
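
These three hunks are the EXEC_BACKEND plumbing: the new SpinlockSemaArray pointer joins the set of globals that the postmaster saves into its parameter block and each exec'd child restores (the pointer stays valid because the child reattaches shared memory at the same address). A minimal standalone sketch of that save/restore pattern, using hypothetical cut-down stand-ins for the real types:

    #include <stdio.h>

    /* Hypothetical, much-reduced stand-ins: the real BackendParameters
     * carries dozens of fields and is handed to the exec'd child through
     * a file or inherited memory. */
    typedef struct PGSemaphoreData
    {
        int dummy;
    } PGSemaphoreData;
    typedef PGSemaphoreData *PGSemaphore;

    typedef struct BackendParameters
    {
        PGSemaphore SpinlockSemaArray;   /* pointer into shared memory */
    } BackendParameters;

    PGSemaphore SpinlockSemaArray;       /* process-global, as in the patch */

    static void
    save_backend_variables(BackendParameters *param)
    {
        param->SpinlockSemaArray = SpinlockSemaArray;
    }

    static void
    restore_backend_variables(BackendParameters *param)
    {
        SpinlockSemaArray = param->SpinlockSemaArray;
    }

    int
    main(void)
    {
        static PGSemaphoreData semas[4];
        BackendParameters param;

        SpinlockSemaArray = semas;          /* postmaster-side setup */
        save_backend_variables(&param);     /* before spawning a child */

        SpinlockSemaArray = NULL;           /* fresh exec'd child: no globals */
        restore_backend_variables(&param);  /* child re-attaches */
        printf("restore %s\n", SpinlockSemaArray == semas ? "ok" : "mismatch");
        return 0;
    }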
* need to be so careful during the actual allocation phase.
*/
size = 100000;
+ size = add_size(size, SpinlockSemaSize());
size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
sizeof(ShmemIndexEnt)));
size = add_size(size, BufferShmemSize());
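
The size-estimation hunk reserves room for the semaphore array up front. add_size() is PostgreSQL's overflow-checked Size addition, which raises an error rather than silently wrapping; a self-contained sketch of the idiom, with exit() standing in for the real ereport() call:

    #include <stdio.h>
    #include <stdlib.h>

    typedef size_t Size;

    /* Overflow-checked addition in the style of add_size(). */
    static Size
    add_size(Size s1, Size s2)
    {
        Size result = s1 + s2;

        if (result < s1)   /* unsigned wraparound implies overflow */
        {
            fprintf(stderr, "requested shared memory size overflows size_t\n");
            exit(1);
        }
        return result;
    }

    int
    main(void)
    {
        Size size = 100000;

        /* e.g. 1024 emulated-spinlock semaphores at (say) 16 bytes each */
        size = add_size(size, (Size) 1024 * 16);
        printf("estimated size: %zu\n", size);
        return 0;
    }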
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the space
- * allocation the hard way, since obviously ShmemAlloc can't be called
- * yet.
+ * If spinlocks are disabled, initialize emulation layer. We have to do
+ * the space allocation the hard way, since obviously ShmemAlloc can't be
+ * called yet.
+ */
+#ifndef HAVE_SPINLOCKS
+ {
+ PGSemaphore spinsemas;
+
+ spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
+ shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
+ SpinlockSemaInit(spinsemas);
+ Assert(shmhdr->freeoffset <= shmhdr->totalsize);
+ }
+#endif
+
+ /*
+ * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
+ * way, too, for the same reasons as above.
*/
ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
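
Both of these early allocations use the same bump-pointer idiom: take the segment's current freeoffset as the result address, then advance the offset by a MAXALIGN'ed amount. A self-contained sketch of that idiom, with a hypothetical fixed 8-byte alignment standing in for the platform-dependent MAXALIGN:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical 8-byte alignment; the real MAXALIGN uses the
     * platform's maximum required alignment. */
    #define MY_MAXALIGN(len) (((uintptr_t) (len) + 7) & ~(uintptr_t) 7)

    /* Cut-down stand-in for PGShmemHeader: total size plus bump pointer. */
    typedef struct ShmemHeader
    {
        size_t totalsize;
        size_t freeoffset;
    } ShmemHeader;

    /* Carve nbytes off the front of the segment "the hard way", i.e.
     * with no allocator involved, mirroring the code above. */
    static void *
    carve(ShmemHeader *hdr, size_t nbytes)
    {
        void *result = ((char *) hdr) + hdr->freeoffset;

        hdr->freeoffset += MY_MAXALIGN(nbytes);
        return result;
    }

    int
    main(void)
    {
        static union
        {
            ShmemHeader hdr;
            char space[4096];
        } segment;
        ShmemHeader *hdr = &segment.hdr;
        void *spinsemas;

        hdr->totalsize = sizeof(segment);
        hdr->freeoffset = MY_MAXALIGN(sizeof(ShmemHeader));

        spinsemas = carve(hdr, 13);   /* 13 bytes requested, 16 consumed */
        printf("allocated at offset %zu, next free at %zu\n",
               (size_t) ((char *) spinsemas - (char *) hdr), hdr->freeoffset);
        return 0;
    }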
#include "miscadmin.h"
#include "replication/walsender.h"
#include "storage/lwlock.h"
+#include "storage/pg_sema.h"
#include "storage/spin.h"
+#ifndef HAVE_SPINLOCKS
+PGSemaphore SpinlockSemaArray;
+#endif
+
+/*
+ * Report the amount of shared memory needed to store semaphores for spinlock
+ * support.
+ */
+Size
+SpinlockSemaSize(void)
+{
+ return SpinlockSemas() * sizeof(PGSemaphoreData);
+}
+
#ifdef HAVE_SPINLOCKS
/*
int
SpinlockSemas(void)
{
- int nsemas;
-
- /*
- * It would be cleaner to distribute this logic into the affected modules,
- * similar to the way shmem space estimation is handled.
- *
- * For now, though, there are few enough users of spinlocks that we just
- * keep the knowledge here.
- */
- nsemas = NumLWLocks(); /* one for each lwlock */
- nsemas += NBuffers; /* one for each buffer header */
- nsemas += max_wal_senders; /* one for each wal sender process */
- nsemas += 30; /* plus a bunch for other small-scale use */
-
- return nsemas;
+ return NUM_SPINLOCK_SEMAPHORES;
+}
+
+/*
+ * Initialize semaphores.
+ */
+extern void
+SpinlockSemaInit(PGSemaphore spinsemas)
+{
+ int i;
+
+ for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
+ PGSemaphoreCreate(&spinsemas[i]);
+ SpinlockSemaArray = spinsemas;
}
/*
void
s_init_lock_sema(volatile slock_t *lock)
{
- PGSemaphoreCreate((PGSemaphore) lock);
+ static int counter = 0;
+
+ *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
}
void
s_unlock_sema(volatile slock_t *lock)
{
- PGSemaphoreUnlock((PGSemaphore) lock);
+ PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
}
bool
tas_sema(volatile slock_t *lock)
{
/* Note that TAS macros return 0 if *success* */
- return !PGSemaphoreTryLock((PGSemaphore) lock);
+ return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
}
#endif /* !HAVE_SPINLOCKS */
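
Taken together, the spin.c changes create a fixed pool of semaphores once, then hand out pool slots round-robin, so any number of spinlocks can share NUM_SPINLOCK_SEMAPHORES kernel objects. The following self-contained miniature reimplements the scheme with POSIX semaphores in place of PGSemaphore (a sketch only; compile with -pthread); note the TAS convention of returning 0 on success:

    #include <semaphore.h>
    #include <stdio.h>

    #define NUM_SPINLOCK_SEMAPHORES 4   /* 1024 in the patch; tiny here */

    typedef int slock_t;                /* an index, exactly as in the patch */

    static sem_t SpinlockSemaArray[NUM_SPINLOCK_SEMAPHORES];

    /* Counterpart of SpinlockSemaInit(): create the fixed pool once. */
    static void
    spinlock_sema_init(void)
    {
        int i;

        for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
            sem_init(&SpinlockSemaArray[i], 0, 1);  /* count 1 = unlocked */
    }

    /* Hand out pool slots round-robin; many locks may share one slot. */
    static void
    s_init_lock_sema(volatile slock_t *lock)
    {
        static int counter = 0;

        *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
    }

    /* TAS convention: return 0 on success (lock acquired), else nonzero. */
    static int
    tas_sema(volatile slock_t *lock)
    {
        return sem_trywait(&SpinlockSemaArray[*lock]) != 0;
    }

    static void
    s_unlock_sema(volatile slock_t *lock)
    {
        sem_post(&SpinlockSemaArray[*lock]);
    }

    int
    main(void)
    {
        slock_t a, b;

        spinlock_sema_init();
        s_init_lock_sema(&a);
        s_init_lock_sema(&b);
        printf("a -> slot %d, b -> slot %d\n", a, b);   /* 1 and 2 */

        printf("first TAS on a:  %d (got it)\n", tas_sema(&a));
        printf("second TAS on a: %d (busy)\n", tas_sema(&a));
        s_unlock_sema(&a);
        printf("TAS after unlock: %d (got it again)\n", tas_sema(&a));
        return 0;
    }

Two spinlocks that land on the same slot can contend with each other spuriously; that costs performance, never correctness, which is the trade-off the new pg_config_manual.h comment below describes.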
*/
#define NUM_USER_DEFINED_LWLOCKS 4
+/*
+ * When we don't have native spinlocks, we use semaphores to simulate them.
+ * Decreasing this value reduces consumption of OS resources; increasing it
+ * may improve performance, but supplying a real spinlock implementation is
+ * probably far better.
+ */
+#define NUM_SPINLOCK_SEMAPHORES 1024
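
For scale: under the deleted SpinlockSemas() logic above, the semaphore count grew with the configuration; with, say, 16384 buffers, a --disable-spinlocks build needed over 16384 semaphores before even counting LWLocks and WAL senders. The fixed pool caps that at 1024 regardless of settings.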
+
/*
* Define this if you want to allow the lo_import and lo_export SQL
* functions to be executed by ordinary users. By default these
#ifndef S_LOCK_H
#define S_LOCK_H
-#include "storage/pg_sema.h"
-
#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */
-
#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
* All the gcc inlines
* to fall foul of kernel limits on number of semaphores, so don't use this
* unless you must! The subroutines appear in spin.c.
*/
-typedef PGSemaphoreData slock_t;
+typedef int slock_t;
extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
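
For orientation, and recalled from the surrounding fallback section of s_lock.h rather than quoted from this patch, the generic spinlock macros map onto these functions roughly as follows, which is why no call sites need to change when the slock_t representation changes:

    #define S_INIT_LOCK(lock)   s_init_lock_sema(lock)
    #define TAS(lock)           tas_sema(lock)
    #define S_UNLOCK(lock)      s_unlock_sema(lock)
    #define S_LOCK_FREE(lock)   s_lock_free_sema(lock)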
#define SPIN_H
#include "storage/s_lock.h"
+#ifndef HAVE_SPINLOCKS
+#include "storage/pg_sema.h"
+#endif
#define SpinLockInit(lock) S_INIT_LOCK(lock)
extern int SpinlockSemas(void);
+extern Size SpinlockSemaSize(void);
+
+#ifndef HAVE_SPINLOCKS
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
+#endif
#endif /* SPIN_H */