28
28
29
29
30
30
#ifndef HAVE_SPINLOCKS
31
+
32
+ /*
33
+ * No TAS, so spinlocks are implemented as PGSemaphores.
34
+ */
35
+
36
+ #ifndef HAVE_ATOMICS
37
+ #define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
38
+ #else
39
+ #define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
40
+ #endif /* DISABLE_ATOMICS */
41
+
31
42
PGSemaphore * SpinlockSemaArray ;
32
- #endif
43
+
44
+ #else /* !HAVE_SPINLOCKS */
45
+
46
+ #define NUM_EMULATION_SEMAPHORES 0
47
+
48
+ #endif /* HAVE_SPINLOCKS */
33
49
34
50
/*
35
51
* Report the amount of shared memory needed to store semaphores for spinlock
@@ -38,34 +54,19 @@ PGSemaphore *SpinlockSemaArray;
38
54
Size
39
55
SpinlockSemaSize (void )
40
56
{
41
- return SpinlockSemas () * sizeof (PGSemaphore );
57
+ return NUM_EMULATION_SEMAPHORES * sizeof (PGSemaphore );
42
58
}
43
59
44
- #ifdef HAVE_SPINLOCKS
45
-
46
60
/*
47
61
* Report number of semaphores needed to support spinlocks.
48
62
*/
49
63
int
50
64
SpinlockSemas (void )
51
65
{
52
- return 0 ;
66
+ return NUM_EMULATION_SEMAPHORES ;
53
67
}
54
- #else /* !HAVE_SPINLOCKS */
55
68
56
- /*
57
- * No TAS, so spinlocks are implemented as PGSemaphores.
58
- */
59
-
60
-
61
- /*
62
- * Report number of semaphores needed to support spinlocks.
63
- */
64
- int
65
- SpinlockSemas (void )
66
- {
67
- return NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES ;
68
- }
69
+ #ifndef HAVE_SPINLOCKS
69
70
70
71
/*
71
72
* Initialize spinlock emulation.
@@ -92,32 +93,68 @@ SpinlockSemaInit(void)
/*
 * s_lock.h hardware-spinlock emulation using semaphores
 *
 * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores.  It's okay
 * to map multiple spinlocks onto one semaphore because no process should
 * ever hold more than one at a time.  We just need enough semaphores so
 * that we aren't adding too much extra contention from that.
 *
 * There is one exception to the restriction of only holding one spinlock at
 * a time, which is that it's ok if emulated atomic operations are nested
 * inside spinlocks.  To avoid the danger of spinlocks and atomics using the
 * same semaphore, we make sure "normal" spinlocks and atomics backed by
 * spinlocks use distinct semaphores (see the nested argument to
 * s_init_lock_sema).
 *
 * slock_t is just an int for this implementation; it holds the spinlock
 * number from 1..NUM_EMULATION_SEMAPHORES.  We intentionally ensure that 0
 * is not a valid value, so that testing with this code can help find
 * failures to initialize spinlocks.
 */
105
112
113
+ static inline void
114
+ s_check_valid (int lockndx )
115
+ {
116
+ if (unlikely (lockndx <= 0 || lockndx > NUM_EMULATION_SEMAPHORES ))
117
+ elog (ERROR , "invalid spinlock number: %d" , lockndx );
118
+ }
119
+
106
120
void
107
121
s_init_lock_sema (volatile slock_t * lock , bool nested )
108
122
{
109
123
static uint32 counter = 0 ;
110
-
111
- * lock = ((++ counter ) % NUM_SPINLOCK_SEMAPHORES ) + 1 ;
124
+ uint32 offset ;
125
+ uint32 sema_total ;
126
+ uint32 idx ;
127
+
128
+ if (nested )
129
+ {
130
+ /*
131
+ * To allow nesting atomics inside spinlocked sections, use a
132
+ * different spinlock. See comment above.
133
+ */
134
+ offset = 1 + NUM_SPINLOCK_SEMAPHORES ;
135
+ sema_total = NUM_ATOMICS_SEMAPHORES ;
136
+ }
137
+ else
138
+ {
139
+ offset = 1 ;
140
+ sema_total = NUM_SPINLOCK_SEMAPHORES ;
141
+ }
142
+
143
+ idx = (counter ++ % sema_total ) + offset ;
144
+
145
+ /* double check we did things correctly */
146
+ s_check_valid (idx );
147
+
148
+ * lock = idx ;
112
149
}
113
150
114
151
void
115
152
s_unlock_sema (volatile slock_t * lock )
116
153
{
117
154
int lockndx = * lock ;
118
155
119
- if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES )
120
- elog ( ERROR , "invalid spinlock number: %d" , lockndx );
156
+ s_check_valid (lockndx );
157
+
121
158
PGSemaphoreUnlock (SpinlockSemaArray [lockndx - 1 ]);
122
159
}
123
160
@@ -134,8 +171,8 @@ tas_sema(volatile slock_t *lock)
134
171
{
135
172
int lockndx = * lock ;
136
173
137
- if (lockndx <= 0 || lockndx > NUM_SPINLOCK_SEMAPHORES )
138
- elog ( ERROR , "invalid spinlock number: %d" , lockndx );
174
+ s_check_valid (lockndx );
175
+
139
176
/* Note that TAS macros return 0 if *success* */
140
177
return !PGSemaphoreTryLock (SpinlockSemaArray [lockndx - 1 ]);
141
178
}
0 commit comments