commit (postgresql.git)
Fix --disable-spinlocks in 9.2 and 9.3 branches.
authorTom Lane <tgl@sss.pgh.pa.us>
Mon, 18 Apr 2016 17:19:52 +0000 (13:19 -0400)
committerTom Lane <tgl@sss.pgh.pa.us>
Mon, 18 Apr 2016 17:19:52 +0000 (13:19 -0400)
My back-patch of the 9.4-era commit 44cd47c1d49655c5 into 9.2 and 9.3 fixed
HPPA builds as expected, but it broke --disable-spinlocks builds, because
the dummy spinlock is initialized before the underlying semaphore
infrastructure is alive.  In 9.4 and up this works because of commit
daa7527afc227443, which decoupled initialization of an slock_t variable
from access to the actual system semaphore object.  The best solution
seems to be to back-port that patch, which should be a net win anyway
because it improves the usability of --disable-spinlocks builds in the
older branches; and it's been out long enough now to not be worrisome
from a stability perspective.

src/backend/postmaster/postmaster.c
src/backend/storage/ipc/ipci.c
src/backend/storage/ipc/shmem.c
src/backend/storage/lmgr/spin.c
src/include/pg_config_manual.h
src/include/storage/s_lock.h
src/include/storage/spin.h

index 38088360b8093884dbfa22e44bacf783ac5cacbe..fc4abbcb792c70331e7dbe57c8c910cc72d421f1 100644 (file)
@@ -500,6 +500,9 @@ typedef struct
    slock_t    *ShmemLock;
    VariableCache ShmemVariableCache;
    Backend    *ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+   PGSemaphore SpinlockSemaArray;
+#endif
    LWLock     *LWLockArray;
    slock_t    *ProcStructLock;
    PROC_HDR   *ProcGlobal;
@@ -6050,6 +6053,9 @@ save_backend_variables(BackendParameters *param, Port *port,
    param->ShmemVariableCache = ShmemVariableCache;
    param->ShmemBackendArray = ShmemBackendArray;
 
+#ifndef HAVE_SPINLOCKS
+   param->SpinlockSemaArray = SpinlockSemaArray;
+#endif
    param->LWLockArray = LWLockArray;
    param->ProcStructLock = ProcStructLock;
    param->ProcGlobal = ProcGlobal;
@@ -6278,6 +6284,9 @@ restore_backend_variables(BackendParameters *param, Port *port)
    ShmemVariableCache = param->ShmemVariableCache;
    ShmemBackendArray = param->ShmemBackendArray;
 
+#ifndef HAVE_SPINLOCKS
+   SpinlockSemaArray = param->SpinlockSemaArray;
+#endif
    LWLockArray = param->LWLockArray;
    ProcStructLock = param->ProcStructLock;
    ProcGlobal = param->ProcGlobal;
index 918ac51b194a44e67b7cf55ecaa145d0603d35fe..ec8e4f6373419f123f4353dc06c4e5c6e44f50bc 100644 (file)
@@ -103,6 +103,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
         * need to be so careful during the actual allocation phase.
         */
        size = 100000;
+       size = add_size(size, SpinlockSemaSize());
        size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
                                                 sizeof(ShmemIndexEnt)));
        size = add_size(size, BufferShmemSize());
index 129d9f83705f9340070ebadf215c62e8e9f553c9..4194db644d8b8d954d4025b56c8da980366a42fa 100644 (file)
@@ -116,9 +116,24 @@ InitShmemAllocation(void)
    Assert(shmhdr != NULL);
 
    /*
-    * Initialize the spinlock used by ShmemAlloc.  We have to do the space
-    * allocation the hard way, since obviously ShmemAlloc can't be called
-    * yet.
+    * If spinlocks are disabled, initialize emulation layer.  We have to do
+    * the space allocation the hard way, since obviously ShmemAlloc can't be
+    * called yet.
+    */
+#ifndef HAVE_SPINLOCKS
+   {
+       PGSemaphore spinsemas;
+
+       spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
+       shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
+       SpinlockSemaInit(spinsemas);
+       Assert(shmhdr->freeoffset <= shmhdr->totalsize);
+   }
+#endif
+
+   /*
+    * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
+    * way, too, for the same reasons as above.
     */
    ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
    shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
index 2864790d82d554a5d4c69f0429437bbd648e165e..874e3134b36a1fc546dbefeeb26838b348dee2f4 100644 (file)
 #include "miscadmin.h"
 #include "replication/walsender.h"
 #include "storage/lwlock.h"
+#include "storage/pg_sema.h"
 #include "storage/spin.h"
 
 
+#ifndef HAVE_SPINLOCKS
+PGSemaphore SpinlockSemaArray;
+#endif
+
+/*
+ * Report the amount of shared memory needed to store semaphores for spinlock
+ * support.
+ */
+Size
+SpinlockSemaSize(void)
+{
+   return SpinlockSemas() * sizeof(PGSemaphoreData);
+}
+
 #ifdef HAVE_SPINLOCKS
 
 /*
@@ -51,21 +66,20 @@ SpinlockSemas(void)
 int
 SpinlockSemas(void)
 {
-   int         nsemas;
-
-   /*
-    * It would be cleaner to distribute this logic into the affected modules,
-    * similar to the way shmem space estimation is handled.
-    *
-    * For now, though, there are few enough users of spinlocks that we just
-    * keep the knowledge here.
-    */
-   nsemas = NumLWLocks();      /* one for each lwlock */
-   nsemas += NBuffers;         /* one for each buffer header */
-   nsemas += max_wal_senders;  /* one for each wal sender process */
-   nsemas += 30;               /* plus a bunch for other small-scale use */
-
-   return nsemas;
+   return NUM_SPINLOCK_SEMAPHORES;
+}
+
+/*
+ * Initialize semaphores.
+ */
+extern void
+SpinlockSemaInit(PGSemaphore spinsemas)
+{
+   int         i;
+
+   for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
+       PGSemaphoreCreate(&spinsemas[i]);
+   SpinlockSemaArray = spinsemas;
 }
 
 /*
@@ -75,13 +89,15 @@ SpinlockSemas(void)
 void
 s_init_lock_sema(volatile slock_t *lock)
 {
-   PGSemaphoreCreate((PGSemaphore) lock);
+   static int  counter = 0;
+
+   *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
 }
 
 void
 s_unlock_sema(volatile slock_t *lock)
 {
-   PGSemaphoreUnlock((PGSemaphore) lock);
+   PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
 }
 
 bool
@@ -96,7 +112,7 @@ int
 tas_sema(volatile slock_t *lock)
 {
    /* Note that TAS macros return 0 if *success* */
-   return !PGSemaphoreTryLock((PGSemaphore) lock);
+   return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
 }
 
 #endif   /* !HAVE_SPINLOCKS */
index 24c5069949be90129fea25cb7047d045e7c640f1..74d7ba2010af8f7e9a13b08511a04b32b9b20942 100644 (file)
  */
 #define NUM_USER_DEFINED_LWLOCKS   4
 
+/*
+ * When we don't have native spinlocks, we use semaphores to simulate them.
+ * Decreasing this value reduces consumption of OS resources; increasing it
+ * may improve performance, but supplying a real spinlock implementation is
+ * probably far better.
+ */
+#define    NUM_SPINLOCK_SEMAPHORES     1024
+
 /*
  * Define this if you want to allow the lo_import and lo_export SQL
  * functions to be executed by ordinary users.  By default these
index f869cb0824bf05caec5269beb1ac051db9f7b298..b2605f99f38c327758c5b9c77bb9a8c616b79554 100644 (file)
 #ifndef S_LOCK_H
 #define S_LOCK_H
 
-#include "storage/pg_sema.h"
-
 #ifdef HAVE_SPINLOCKS  /* skip spinlocks if requested */
 
-
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
 /*************************************************************************
  * All the gcc inlines
@@ -1032,7 +1029,7 @@ spin_delay(void)
  * to fall foul of kernel limits on number of semaphores, so don't use this
  * unless you must!  The subroutines appear in spin.c.
  */
-typedef PGSemaphoreData slock_t;
+typedef int slock_t;
 
 extern bool s_lock_free_sema(volatile slock_t *lock);
 extern void s_unlock_sema(volatile slock_t *lock);
index f459b909fefbfdcbad534a131124ccea71ad6890..b2b13398e8a454cf005ad0ac9b855959f32e9d83 100644 (file)
@@ -57,6 +57,9 @@
 #define SPIN_H
 
 #include "storage/s_lock.h"
+#ifndef HAVE_SPINLOCKS
+#include "storage/pg_sema.h"
+#endif
 
 
 #define SpinLockInit(lock) S_INIT_LOCK(lock)
 
 
 extern int SpinlockSemas(void);
+extern Size SpinlockSemaSize(void);
+
+#ifndef HAVE_SPINLOCKS
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
+#endif
 
 #endif   /* SPIN_H */