Fix --disable-spinlocks in 9.2 and 9.3 branches.
author: Tom Lane <tgl@sss.pgh.pa.us>
Mon, 18 Apr 2016 17:19:52 +0000 (13:19 -0400)
committer: Tom Lane <tgl@sss.pgh.pa.us>
Mon, 18 Apr 2016 17:19:52 +0000 (13:19 -0400)
My back-patch of the 9.4-era commit 44cd47c1d49655c5 into 9.2 and 9.3 fixed
HPPA builds as expected, but it broke --disable-spinlocks builds, because
the dummy spinlock is initialized before the underlying semaphore
infrastructure is alive.  In 9.4 and up this works because of commit
daa7527afc227443, which decoupled initialization of an slock_t variable
from access to the actual system semaphore object.  The best solution
seems to be to back-port that patch, which should be a net win anyway
because it improves the usability of --disable-spinlocks builds in the
older branches; and it's been out long enough now to not be worrisome
from a stability perspective.

src/backend/postmaster/postmaster.c
src/backend/storage/ipc/ipci.c
src/backend/storage/ipc/shmem.c
src/backend/storage/lmgr/spin.c
src/include/pg_config_manual.h
src/include/storage/s_lock.h
src/include/storage/spin.h

index 504d98e2b35b4dd265da77b67831bae350419407..b8b51f1e6350473d7f7cc375aa04882ff733bd11 100644 (file)
@@ -440,6 +440,9 @@ typedef struct
    slock_t    *ShmemLock;
    VariableCache ShmemVariableCache;
    Backend    *ShmemBackendArray;
+#ifndef HAVE_SPINLOCKS
+   PGSemaphore SpinlockSemaArray;
+#endif
    LWLock     *LWLockArray;
    slock_t    *ProcStructLock;
    PROC_HDR   *ProcGlobal;
@@ -4927,6 +4930,9 @@ save_backend_variables(BackendParameters *param, Port *port,
    param->ShmemVariableCache = ShmemVariableCache;
    param->ShmemBackendArray = ShmemBackendArray;
 
+#ifndef HAVE_SPINLOCKS
+   param->SpinlockSemaArray = SpinlockSemaArray;
+#endif
    param->LWLockArray = LWLockArray;
    param->ProcStructLock = ProcStructLock;
    param->ProcGlobal = ProcGlobal;
@@ -5153,6 +5159,9 @@ restore_backend_variables(BackendParameters *param, Port *port)
    ShmemVariableCache = param->ShmemVariableCache;
    ShmemBackendArray = param->ShmemBackendArray;
 
+#ifndef HAVE_SPINLOCKS
+   SpinlockSemaArray = param->SpinlockSemaArray;
+#endif
    LWLockArray = param->LWLockArray;
    ProcStructLock = param->ProcStructLock;
    ProcGlobal = param->ProcGlobal;
index dded397a6179d08a80715c17636480c1553c229d..d50a870d11c2c39f31f3059c3c4d64b6bf05f67d 100644 (file)
@@ -102,6 +102,7 @@ CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
         * need to be so careful during the actual allocation phase.
         */
        size = 100000;
+       size = add_size(size, SpinlockSemaSize());
        size = add_size(size, hash_estimate_size(SHMEM_INDEX_SIZE,
                                                 sizeof(ShmemIndexEnt)));
        size = add_size(size, BufferShmemSize());
index 5868405d0636781d39985339762e459c9d209b51..aa23162bc7ff3ccb339d9b9d2545623d8841bb5d 100644 (file)
@@ -116,9 +116,24 @@ InitShmemAllocation(void)
    Assert(shmhdr != NULL);
 
    /*
-    * Initialize the spinlock used by ShmemAlloc.  We have to do the space
-    * allocation the hard way, since obviously ShmemAlloc can't be called
-    * yet.
+    * If spinlocks are disabled, initialize emulation layer.  We have to do
+    * the space allocation the hard way, since obviously ShmemAlloc can't be
+    * called yet.
+    */
+#ifndef HAVE_SPINLOCKS
+   {
+       PGSemaphore spinsemas;
+
+       spinsemas = (PGSemaphore) (((char *) shmhdr) + shmhdr->freeoffset);
+       shmhdr->freeoffset += MAXALIGN(SpinlockSemaSize());
+       SpinlockSemaInit(spinsemas);
+       Assert(shmhdr->freeoffset <= shmhdr->totalsize);
+   }
+#endif
+
+   /*
+    * Initialize the spinlock used by ShmemAlloc; we have to do this the hard
+    * way, too, for the same reasons as above.
     */
    ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
    shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
index 479e71225f99a22277de35adae637ab1e7f3b1d6..df3378fb9899124b38e35c67a4f8f949b9f27e1f 100644 (file)
 #include "miscadmin.h"
 #include "replication/walsender.h"
 #include "storage/lwlock.h"
+#include "storage/pg_sema.h"
 #include "storage/spin.h"
 
 
+#ifndef HAVE_SPINLOCKS
+PGSemaphore SpinlockSemaArray;
+#endif
+
+/*
+ * Report the amount of shared memory needed to store semaphores for spinlock
+ * support.
+ */
+Size
+SpinlockSemaSize(void)
+{
+   return SpinlockSemas() * sizeof(PGSemaphoreData);
+}
+
 #ifdef HAVE_SPINLOCKS
 
 /*
@@ -51,21 +66,20 @@ SpinlockSemas(void)
 int
 SpinlockSemas(void)
 {
-   int     nsemas;
-
-   /*
-    * It would be cleaner to distribute this logic into the affected modules,
-    * similar to the way shmem space estimation is handled.
-    *
-    * For now, though, there are few enough users of spinlocks that we just
-    * keep the knowledge here.
-    */
-   nsemas = NumLWLocks();      /* one for each lwlock */
-   nsemas += NBuffers;         /* one for each buffer header */
-   nsemas += max_wal_senders;  /* one for each wal sender process */
-   nsemas += 30;               /* plus a bunch for other small-scale use */
-
-   return nsemas;
+   return NUM_SPINLOCK_SEMAPHORES;
+}
+
+/*
+ * Initialize semaphores.
+ */
+extern void
+SpinlockSemaInit(PGSemaphore spinsemas)
+{
+   int         i;
+
+   for (i = 0; i < NUM_SPINLOCK_SEMAPHORES; ++i)
+       PGSemaphoreCreate(&spinsemas[i]);
+   SpinlockSemaArray = spinsemas;
 }
 
 /*
@@ -75,13 +89,15 @@ SpinlockSemas(void)
 void
 s_init_lock_sema(volatile slock_t *lock)
 {
-   PGSemaphoreCreate((PGSemaphore) lock);
+   static int  counter = 0;
+
+   *lock = (++counter) % NUM_SPINLOCK_SEMAPHORES;
 }
 
 void
 s_unlock_sema(volatile slock_t *lock)
 {
-   PGSemaphoreUnlock((PGSemaphore) lock);
+   PGSemaphoreUnlock(&SpinlockSemaArray[*lock]);
 }
 
 bool
@@ -96,7 +112,7 @@ int
 tas_sema(volatile slock_t *lock)
 {
    /* Note that TAS macros return 0 if *success* */
-   return !PGSemaphoreTryLock((PGSemaphore) lock);
+   return !PGSemaphoreTryLock(&SpinlockSemaArray[*lock]);
 }
 
 #endif   /* !HAVE_SPINLOCKS */
index ee7dab0bd6067f0168d364a0597904c90621dff2..5e2ae6f88e7e4c5bdc2e94c1a6b7464d439b3de6 100644 (file)
  */
 #define NUM_USER_DEFINED_LWLOCKS   4
 
+/*
+ * When we don't have native spinlocks, we use semaphores to simulate them.
+ * Decreasing this value reduces consumption of OS resources; increasing it
+ * may improve performance, but supplying a real spinlock implementation is
+ * probably far better.
+ */
+#define    NUM_SPINLOCK_SEMAPHORES     1024
+
 /*
  * Define this if you want to allow the lo_import and lo_export SQL
  * functions to be executed by ordinary users.  By default these
index b7bed522da36fbe8c024887c8aa067e97529332e..6a070d4b4756bf961a37476b9d78d9ba57158b49 100644 (file)
 #ifndef S_LOCK_H
 #define S_LOCK_H
 
-#include "storage/pg_sema.h"
-
 #ifdef HAVE_SPINLOCKS  /* skip spinlocks if requested */
 
-
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
 /*************************************************************************
  * All the gcc inlines
@@ -1031,7 +1028,7 @@ spin_delay(void)
  * to fall foul of kernel limits on number of semaphores, so don't use this
  * unless you must!  The subroutines appear in spin.c.
  */
-typedef PGSemaphoreData slock_t;
+typedef int slock_t;
 
 extern bool s_lock_free_sema(volatile slock_t *lock);
 extern void s_unlock_sema(volatile slock_t *lock);
index 3f6cacfb0e5b33eada20da4fa3775778b02ca486..7d793bbcccb2616dc5c5552034886fab98d40dc7 100644 (file)
@@ -57,6 +57,9 @@
 #define SPIN_H
 
 #include "storage/s_lock.h"
+#ifndef HAVE_SPINLOCKS
+#include "storage/pg_sema.h"
+#endif
 
 
 #define SpinLockInit(lock) S_INIT_LOCK(lock)
 
 
 extern int SpinlockSemas(void);
+extern Size SpinlockSemaSize(void);
+
+#ifndef HAVE_SPINLOCKS
+extern void SpinlockSemaInit(PGSemaphore);
+extern PGSemaphore SpinlockSemaArray;
+#endif
 
 #endif   /* SPIN_H */