
Commit bfea925

Further marginal hacking on generic atomic ops.
In the generic atomic ops that rely on a loop around a CAS primitive, there's no need to force the initial read of the "old" value to be atomic. In the typically-rare case that we get a torn value, that simply means that the first CAS attempt will fail; but it will update "old" to the atomically-read value, so the next attempt has a chance of succeeding. It was already being done that way in pg_atomic_exchange_u64_impl(), but let's duplicate the approach in the rest.

(Given the current coding of the pg_atomic_read functions, this change is a no-op anyway on popular platforms; it only makes a difference where pg_atomic_read_u64_impl() is implemented as a CAS.)

In passing, also remove unnecessary take-a-pointer-and-dereference-it coding in the pg_atomic_read functions. That seems to have been based on a misunderstanding of what the C standard requires. What actually matters is that the pointer be declared as pointing to volatile, which it is.

I don't believe this will change the assembly code at all on x86 platforms (even ignoring the likelihood that these implementations get overridden by others); but it may help on less-mainstream CPUs.

Discussion: https://postgr.es/m/13707.1504718238@sss.pgh.pa.us
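The retry pattern the message describes can be sketched in isolation. The following is a minimal C sketch, not PostgreSQL's code: cas_u64 and fetch_add_u64 are hypothetical names, and the GCC/Clang builtin __atomic_compare_exchange_n stands in for pg_atomic_compare_exchange_u64_impl, which likewise writes the value actually observed back into "old" when the compare fails.

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Stand-in for pg_atomic_compare_exchange_u64_impl: if *ptr != *expected,
     * the CAS fails and *expected is refreshed with an atomic read of *ptr.
     */
    static inline bool
    cas_u64(volatile uint64_t *ptr, uint64_t *expected, uint64_t newval)
    {
        return __atomic_compare_exchange_n(ptr, expected, newval, false,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    }

    static inline uint64_t
    fetch_add_u64(volatile uint64_t *ptr, uint64_t add_)
    {
        uint64_t old;

        old = *ptr;         /* ok if read is not atomic: a torn value only
                             * makes the first CAS fail, which refreshes
                             * "old" atomically for the next attempt */
        while (!cas_u64(ptr, &old, old + add_))
            /* skip */;
        return old;
    }

Even if the initial plain read of *ptr is torn, the loop converges: the failed CAS replaces "old" with an atomically-read value, so the next iteration operates on consistent data.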
1 parent f06588a commit bfea925

1 file changed: +14 −16 lines

src/include/port/atomics/generic.h

+14-16
@@ -45,7 +45,7 @@ typedef pg_atomic_uint32 pg_atomic_flag;
 static inline uint32
 pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
 {
-	return *(&ptr->value);
+	return ptr->value;
 }
 #endif
 
@@ -170,7 +170,7 @@ static inline uint32
 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 {
 	uint32 old;
-	old = pg_atomic_read_u32_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
 		/* skip */;
 	return old;
@@ -183,7 +183,7 @@ static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
 	uint32 old;
-	old = pg_atomic_read_u32_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
 		/* skip */;
 	return old;
@@ -205,7 +205,7 @@ static inline uint32
 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 {
 	uint32 old;
-	old = pg_atomic_read_u32_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
 		/* skip */;
 	return old;
@@ -218,7 +218,7 @@ static inline uint32
 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
 {
 	uint32 old;
-	old = pg_atomic_read_u32_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
 		/* skip */;
 	return old;
@@ -249,7 +249,7 @@ static inline uint64
 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
 {
 	uint64 old;
-	old = ptr->value;
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
 		/* skip */;
 	return old;
@@ -299,12 +299,10 @@ static inline uint64
 pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 {
 	/*
-	 * On this platform aligned 64bit reads are guaranteed to be atomic,
-	 * except if using the fallback implementation, where can't guarantee the
-	 * required alignment.
+	 * On this platform aligned 64-bit reads are guaranteed to be atomic.
 	 */
 	AssertPointerAlignment(ptr, 8);
-	return *(&ptr->value);
+	return ptr->value;
 }
 
 #else
@@ -315,10 +313,10 @@ pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
 	uint64 old = 0;
 
 	/*
-	 * 64 bit reads aren't safe on all platforms. In the generic
+	 * 64-bit reads aren't atomic on all platforms. In the generic
 	 * implementation implement them as a compare/exchange with 0. That'll
-	 * fail or succeed, but always return the old value. Possible might store
-	 * a 0, but only if the prev. value also was a 0 - i.e. harmless.
+	 * fail or succeed, but always return the old value. Possibly might store
+	 * a 0, but only if the previous value also was a 0 - i.e. harmless.
 	 */
 	pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
 
@@ -342,7 +340,7 @@ static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
 	uint64 old;
-	old = pg_atomic_read_u64_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
 		/* skip */;
 	return old;
@@ -364,7 +362,7 @@ static inline uint64
 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
 	uint64 old;
-	old = pg_atomic_read_u64_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
 		/* skip */;
 	return old;
@@ -377,7 +375,7 @@ static inline uint64
 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
 	uint64 old;
-	old = pg_atomic_read_u64_impl(ptr);
+	old = ptr->value;			/* ok if read is not atomic */
 	while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
 		/* skip */;
 	return old;
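As for the *(&ptr->value) removals above: both spellings designate the same volatile-qualified lvalue, because the qualifier comes from the pointer's declared type, so a conforming compiler must emit a real load either way. A minimal sketch with hypothetical names, not project code:

    #include <stdint.h>

    /* Mirrors the pg_atomic_uint64 layout: the member itself is not
     * volatile; the qualifier is supplied by the pointer parameter. */
    typedef struct { uint64_t value; } demo_atomic_u64;

    /* Both accesses go through a volatile-qualified lvalue, so neither
     * read can be optimized away; the detour through an explicit
     * pointer-and-dereference adds nothing. */
    uint64_t demo_read_deref(volatile demo_atomic_u64 *ptr) { return *(&ptr->value); }
    uint64_t demo_read_plain(volatile demo_atomic_u64 *ptr) { return ptr->value; }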
