* fence.
*/
#if defined(__INTEL_COMPILER)
-#define pg_memory_barrier_impl() __mf()
+# define pg_memory_barrier_impl() __mf()
#elif defined(__GNUC__)
-#define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
+# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
#elif defined(__hpux)
-#define pg_memory_barrier_impl() _Asm_mf()
+# define pg_memory_barrier_impl() _Asm_mf()
#endif
} pg_atomic_uint64;
#endif
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(__GNUC__) &&
- * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
* PAUSE in the inner loop of a spin lock is necessary for good
* performance:
*
- * The PAUSE instruction improves the performance of IA-32
- * processors supporting Hyper-Threading Technology when
- * executing spin-wait loops and other routines where one
- * thread is accessing a shared lock or semaphore in a tight
- * polling loop. When executing a spin-wait loop, the
- * processor can suffer a severe performance penalty when
- * exiting the loop because it detects a possible memory order
- * violation and flushes the core processor's pipeline. The
- * PAUSE instruction provides a hint to the processor that the
- * code sequence is a spin-wait loop. The processor uses this
- * hint to avoid the memory order violation and prevent the
- * pipeline flush. In addition, the PAUSE instruction
- * de-pipelines the spin-wait loop to prevent it from
- * consuming execution resources excessively.
+ * The PAUSE instruction improves the performance of IA-32
+ * processors supporting Hyper-Threading Technology when
+ * executing spin-wait loops and other routines where one
+ * thread is accessing a shared lock or semaphore in a tight
+ * polling loop. When executing a spin-wait loop, the
+ * processor can suffer a severe performance penalty when
+ * exiting the loop because it detects a possible memory order
+ * violation and flushes the core processor's pipeline. The
+ * PAUSE instruction provides a hint to the processor that the
+ * code sequence is a spin-wait loop. The processor uses this
+ * hint to avoid the memory order violation and prevent the
+ * pipeline flush. In addition, the PAUSE instruction
+ * de-pipelines the spin-wait loop to prevent it from
+ * consuming execution resources excessively.
*/
#if defined(__INTEL_COMPILER)
#define PG_HAVE_SPIN_DELAY
static __inline__ void
pg_spin_delay_impl(void)
{
- __asm__ __volatile__(
- " rep; nop \n");
+ __asm__ __volatile__(
+ " rep; nop \n");
}
#elif defined(WIN32_ONLY_COMPILER) && defined(__x86_64__)
#define PG_HAVE_SPIN_DELAY
pg_spin_delay_impl(void)
{
/* See comment for gcc code. Same code, MASM syntax */
- __asm rep nop;
+ __asm rep nop;
}
#endif
-#endif /* !defined(PG_HAVE_SPIN_DELAY) */
+#endif /* !defined(PG_HAVE_SPIN_DELAY) */
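As a concrete illustration of the spin-wait pattern the quoted Intel text describes, here is a minimal test-and-test-and-set sketch that pauses while the lock is observed to be held. It is not part of this patch; spin_lock_t, spin_acquire and spin_release are hypothetical names, and the GCC __sync builtins are used only to keep the sketch self-contained.

typedef volatile int spin_lock_t;

static inline void
spin_acquire(spin_lock_t *lock)
{
	/* __sync_lock_test_and_set returns the previous value (acquiring TAS) */
	while (__sync_lock_test_and_set(lock, 1))
	{
		/* lock is held: spin read-only, hinting PAUSE on every iteration */
		while (*lock)
			__asm__ __volatile__(" rep; nop \n");
	}
}

static inline void
spin_release(spin_lock_t *lock)
{
	__sync_lock_release(lock);	/* releasing store of 0 */
}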
#if defined(HAVE_ATOMICS)
{
register char _res = 1;
- __asm__ __volatile__(
- " lock \n"
- " xchgb %0,%1 \n"
- : "+q"(_res), "+m"(ptr->value)
- :
- : "memory");
-
+ __asm__ __volatile__(
+ " lock \n"
+ " xchgb %0,%1 \n"
+: "+q"(_res), "+m"(ptr->value)
+:
+: "memory");
return _res == 0;
}
* On a TSO architecture like x86 it's sufficient to use a compiler
* barrier to achieve release semantics.
*/
- __asm__ __volatile__("":::"memory");
-
+ __asm__ __volatile__("" ::: "memory");
ptr->value = 0;
}
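For readers who think in C11 terms, the sequence above (compiler barrier, then a plain store) is the x86/TSO spelling of a release store. A rough analogy, not code from this patch, with my_flag as a hypothetical stand-in for pg_atomic_flag:

#include <stdatomic.h>

static atomic_uint my_flag;

static inline void
my_flag_clear(void)
{
	/*
	 * On a TSO machine this compiles to an ordinary store; the
	 * memory_order_release constraint only restrains the compiler,
	 * much like the "" ::: "memory" asm above.
	 */
	atomic_store_explicit(&my_flag, 0, memory_order_release);
}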
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- char ret;
+ char ret;
/*
	 * Perform cmpxchg and use the zero flag, which it implicitly sets when
	 * the values are equal, to detect success.
*/
- __asm__ __volatile__(
- " lock \n"
- " cmpxchgl %4,%5 \n"
- " setz %2 \n"
- : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
- : "a"(*expected), "r"(newval), "m"(ptr->value)
- : "memory", "cc");
-
+ __asm__ __volatile__(
+ " lock \n"
+ " cmpxchgl %4,%5 \n"
+ " setz %2 \n"
+: "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+: "a" (*expected), "r" (newval), "m"(ptr->value)
+: "memory", "cc");
return (bool) ret;
}
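Callers typically use the value written back through *expected to retry. As a usage sketch (set_max_u32 is a hypothetical helper, not part of the patch):

static inline void
set_max_u32(volatile pg_atomic_uint32 *ptr, uint32 candidate)
{
	uint32		cur = ptr->value;

	/* on failure cur is refreshed with the observed value, so just retry */
	while (cur < candidate &&
		   !pg_atomic_compare_exchange_u32_impl(ptr, &cur, candidate))
		;
}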
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
- uint32 res;
- __asm__ __volatile__(
- " lock \n"
- " xaddl %0,%1 \n"
- : "=q"(res), "=m"(ptr->value)
- : "0"(add_), "m"(ptr->value)
- : "memory", "cc");
-
+ uint32 res;
+ __asm__ __volatile__(
+ " lock \n"
+ " xaddl %0,%1 \n"
+: "=q"(res), "=m"(ptr->value)
+: "0" (add_), "m"(ptr->value)
+: "memory", "cc");
return res;
}
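Since xadd returns the value the counter held before the addition, fetch-add is the natural primitive for handing out unique ids. A small usage sketch (next_id is hypothetical):

static inline uint32
next_id(volatile pg_atomic_uint32 *counter)
{
	/* every caller observes a distinct pre-increment value */
	return pg_atomic_fetch_add_u32_impl(counter, 1);
}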
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- char ret;
+ char ret;
/*
	 * Perform cmpxchg and use the zero flag, which it implicitly sets when
	 * the values are equal, to detect success.
*/
- __asm__ __volatile__(
- " lock \n"
- " cmpxchgq %4,%5 \n"
- " setz %2 \n"
- : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
- : "a"(*expected), "r"(newval), "m"(ptr->value)
- : "memory", "cc");
-
+ __asm__ __volatile__(
+ " lock \n"
+ " cmpxchgq %4,%5 \n"
+ " setz %2 \n"
+: "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+: "a" (*expected), "r" (newval), "m"(ptr->value)
+: "memory", "cc");
return (bool) ret;
}
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
- uint64 res;
- __asm__ __volatile__(
- " lock \n"
- " xaddq %0,%1 \n"
- : "=q"(res), "=m"(ptr->value)
- : "0"(add_), "m"(ptr->value)
- : "memory", "cc");
-
+ uint64 res;
+ __asm__ __volatile__(
+ " lock \n"
+ " xaddq %0,%1 \n"
+: "=q"(res), "=m"(ptr->value)
+: "0" (add_), "m"(ptr->value)
+: "memory", "cc");
return res;
}
-#endif /* __x86_64__ */
+#endif /* __x86_64__ */
-#endif /* defined(__GNUC__) &&
- * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
-#endif /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
/*-------------------------------------------------------------------------
*
* fallback.h
- * Fallback for platforms without spinlock and/or atomics support. Slower
- * than native atomics support, but not unusably slow.
+ * Fallback for platforms without spinlock and/or atomics support. Slower
+ * than native atomics support, but not unusably slow.
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
#endif
#ifndef pg_memory_barrier_impl
* be content with just one byte instead of 4, but that's not too much
* waste.
*/
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
- * compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
int sema[4];
#else
int sema;
#endif
} pg_atomic_flag;
-#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
typedef struct pg_atomic_uint32
{
/* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
- * compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
int sema[4];
#else
int sema;
volatile uint32 value;
} pg_atomic_uint32;
-#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
return true;
}
-#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
- uint32 *expected, uint32 newval);
+ uint32 *expected, uint32 newval);
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
-#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
+#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
*
* Documentation:
* * inline assembly for Itanium-based HP-UX:
- * http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
+ * http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
* * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
- * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
+ * http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
*
 * Itanium only supports a small set of numbers (-16, -8, -4, -1, 1, 4, 8, 16)
* for atomic add/sub, so we just implement everything but compare_exchange
volatile uint64 value;
} pg_atomic_uint64;
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
+ bool ret;
+ uint32 current;
_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
-
/*
* We want a barrier, not just release/acquire semantics.
*/
_Asm_mf();
-
/*
- * Notes: DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the
- * compiler
+ * Notes:
+	 * _DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
*/
- current = _Asm_cmpxchg(_SZ_W, /* word */
- _SEM_REL,
- &ptr->value,
- newval, _LDHINT_NONE,
- _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+ current = _Asm_cmpxchg(_SZ_W, /* word */
+ _SEM_REL,
+ &ptr->value,
+ newval, _LDHINT_NONE,
+ _DOWN_MEM_FENCE | _UP_MEM_FENCE);
ret = current == *expected;
*expected = current;
return ret;
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
_Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
_Asm_mf();
- current = _Asm_cmpxchg(_SZ_D, /* doubleword */
- _SEM_REL,
- &ptr->value,
- newval, _LDHINT_NONE,
- _DOWN_MEM_FENCE | _UP_MEM_FENCE);
+ current = _Asm_cmpxchg(_SZ_D, /* doubleword */
+ _SEM_REL,
+ &ptr->value,
+ newval, _LDHINT_NONE,
+ _DOWN_MEM_FENCE | _UP_MEM_FENCE);
ret = current == *expected;
*expected = current;
return ret;
#undef MINOR_FENCE
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
*
* Documentation:
* * Legacy __sync Built-in Functions for Atomic Memory Access
- * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
+ * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
* * Built-in functions for memory model aware atomic operations
- * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
+ * http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
*
* src/include/port/atomics/generic-gcc.h
*
* definitions where possible, and use this only as a fallback.
*/
#if !defined(pg_memory_barrier_impl)
-#if defined(HAVE_GCC__ATOMIC_INT32_CAS)
-#define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
-#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-#define pg_memory_barrier_impl() __sync_synchronize()
-#endif
-#endif /* !defined(pg_memory_barrier_impl) */
+# if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+# define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+# define pg_memory_barrier_impl() __sync_synchronize()
+# endif
+#endif /* !defined(pg_memory_barrier_impl) */
#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
-#define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
+# define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif
#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
-#define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
+# define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
#endif
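Expressed with the C11 fence API, the three barriers defined above correspond roughly to the following; this is an illustration under the assumption that <stdatomic.h> is available, not part of the patch:

#include <stdatomic.h>

#define my_memory_barrier()	atomic_thread_fence(memory_order_seq_cst)
#define my_read_barrier()	atomic_thread_fence(memory_order_acquire)
#define my_write_barrier()	atomic_thread_fence(memory_order_release)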
#ifdef HAVE_ATOMICS
#endif
} pg_atomic_flag;
-#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
+#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
volatile uint32 value;
} pg_atomic_uint32;
-#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) ||
- * defined(HAVE_GCC__SYNC_INT32_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) ||
- * defined(HAVE_GCC__SYNC_INT64_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
/*
* Implementation follows. Inlined or directly included from atomics.c
}
#endif
-#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
+#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
}
#endif
-#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
+#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
-
+ bool ret;
+ uint32 current;
current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
ret = current == *expected;
*expected = current;
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
-
+ bool ret;
+ uint64 current;
current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
ret = current == *expected;
*expected = current;
}
#endif
-#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
+#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
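The "prefer __atomic, it has a better API" remark above refers to the fact that __atomic_compare_exchange_n updates the expected value in place and returns a success flag, so no temporary is needed. A sketch of that form (cas_u32_atomic_sketch is a hypothetical name, not code from this patch):

static inline bool
cas_u32_atomic_sketch(volatile uint32 *value, uint32 *expected, uint32 newval)
{
	/* on failure *expected is overwritten with the observed value */
	return __atomic_compare_exchange_n(value, expected, newval, false,
									   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}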
*
* Documentation:
* * Interlocked Variable Access
- * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
+ * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
*
* src/include/port/atomics/generic-msvc.h
*
} pg_atomic_uint32;
#define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct __declspec (
- align(8))
-pg_atomic_uint64
+typedef struct __declspec(align(8)) pg_atomic_uint64
{
volatile uint64 value;
} pg_atomic_uint64;
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
-
+ bool ret;
+ uint32 current;
current = InterlockedCompareExchange(&ptr->value, newval, *expected);
ret = current == *expected;
*expected = current;
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
-
+ bool ret;
+ uint64 current;
current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
ret = current == *expected;
*expected = current;
{
return _InterlockedExchangeAdd64(&ptr->value, add_);
}
-#endif /* _WIN64 */
+#endif /* _WIN64 */
-#endif /* HAVE_ATOMICS */
+#endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
*
* Documentation:
* * manpage for atomic_cas(3C)
- * http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
- * http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
+ * http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
+ * http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
*
* src/include/port/atomics/generic-sunpro.h
*
* membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on x86/sparc
* respectively.
*/
-#define pg_memory_barrier_impl() __machine_rw_barrier()
+# define pg_memory_barrier_impl() __machine_rw_barrier()
#endif
#ifndef pg_read_barrier_impl
-#define pg_read_barrier_impl() __machine_r_barrier()
+# define pg_read_barrier_impl() __machine_r_barrier()
#endif
#ifndef pg_write_barrier_impl
-#define pg_write_barrier_impl() __machine_w_barrier()
+# define pg_write_barrier_impl() __machine_w_barrier()
#endif
-#endif /* HAVE_MBARRIER_H */
+#endif /* HAVE_MBARRIER_H */
/* Older versions of the compiler don't have atomic.h... */
#ifdef HAVE_ATOMIC_H
volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* HAVE_ATOMIC_H */
+#endif /* HAVE_ATOMIC_H */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint32 current;
+ bool ret;
+ uint32 current;
current = atomic_cas_32(&ptr->value, *expected, newval);
ret = current == *expected;
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
current = atomic_cas_64(&ptr->value, *expected, newval);
ret = current == *expected;
return ret;
}
-#endif /* HAVE_ATOMIC_H */
+#endif /* HAVE_ATOMIC_H */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
*
* Documentation:
* * Synchronization and atomic built-in functions
- * http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
+ * http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
*
* src/include/port/atomics/generic-xlc.h
*
volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* __64BIT__ */
+#endif /* __64BIT__ */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
uint32 *expected, uint32 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+	uint32	current;
/*
- * xlc's documentation tells us: "If __compare_and_swap is used as a
- * locking primitive, insert a call to the __isync built-in function at
- * the start of any critical sections."
+ * xlc's documentation tells us:
+ * "If __compare_and_swap is used as a locking primitive, insert a call to
+ * the __isync built-in function at the start of any critical sections."
*/
__isync();
* XXX: __compare_and_swap is defined to take signed parameters, but that
* shouldn't matter since we don't perform any arithmetic operations.
*/
- current = (uint32) __compare_and_swap((volatile int *) ptr->value,
- (int) *expected, (int) newval);
+	current = (uint32)__compare_and_swap((volatile int*)&ptr->value,
+										 (int)*expected, (int)newval);
ret = current == *expected;
*expected = current;
return ret;
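My reading of the IBM documentation linked above, which should be treated as an assumption here, is that __compare_and_swap takes the address of the target word plus a pointer to the expected value, writes the observed value back through that pointer, and returns a success flag. Under that contract the wrapper needs no intermediate variable; a sketch (cas_u32_xlc_sketch is hypothetical, __compare_and_swaplp would be used analogously for 64 bit):

/*
 * Sketch only; assumes the prototype
 *   int __compare_and_swap(volatile int *addr, int *old_val_addr, int new_val);
 */
static inline bool
cas_u32_xlc_sketch(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval)
{
	__isync();					/* per the locking-primitive note above */
	return __compare_and_swap((volatile int *) &ptr->value,
							  (int *) expected, (int) newval);
}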
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
uint64 *expected, uint64 newval)
{
- bool ret;
- uint64 current;
+ bool ret;
+ uint64 current;
__isync();
- current = (uint64) __compare_and_swaplp((volatile long *) ptr->value,
- (long) *expected, (long) newval);
+	current = (uint64)__compare_and_swaplp((volatile long*)&ptr->value,
+										   (long)*expected, (long)newval);
ret = current == *expected;
*expected = current;
return ret;
return __fetch_and_addlp(&ptr->value, add_);
}
-#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
+#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
-#endif /* defined(HAVE_ATOMICS) */
+#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
#endif
/*
* barriers.
*/
#if !defined(pg_read_barrier_impl)
-#define pg_read_barrier_impl pg_memory_barrier_impl
+# define pg_read_barrier_impl pg_memory_barrier_impl
#endif
#if !defined(pg_write_barrier_impl)
-#define pg_write_barrier_impl pg_memory_barrier_impl
+# define pg_write_barrier_impl pg_memory_barrier_impl
#endif
#ifndef PG_HAVE_SPIN_DELAY
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
- uint32 value = 0;
-
+ uint32 value = 0;
return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
}
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
/*
- * Use a memory barrier + plain write if we have a native memory barrier.
- * But don't do so if memory barriers use spinlocks - that'd lead to
- * circularity if flags are used to implement spinlocks.
+ * Use a memory barrier + plain write if we have a native memory
+ * barrier. But don't do so if memory barriers use spinlocks - that'd lead
+ * to circularity if flags are used to implement spinlocks.
*/
#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
/* XXX: release semantics suffice? */
pg_memory_barrier_impl();
pg_atomic_write_u32_impl(ptr, 0);
#else
- uint32 value = 1;
+ uint32 value = 1;
pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
#endif
}
#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
-#error "No pg_atomic_test_and_set provided"
-#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
+# error "No pg_atomic_test_and_set provided"
+#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
#ifndef PG_HAVE_ATOMIC_INIT_U32
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{
- uint32 old;
-
+ uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
- uint32 old;
-
+ uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
- uint32 old;
-
+ uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
- uint32 old;
-
+ uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr);
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{
- uint64 old;
-
+ uint64 old;
while (true)
{
old = ptr->value;
static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
- uint64 old = 0;
+ uint64 old = 0;
/*
* 64 bit reads aren't safe on all platforms. In the generic
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
- uint64 old;
-
+ uint64 old;
while (true)
{
old = pg_atomic_read_u64_impl(ptr);
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
- uint64 old;
-
+ uint64 old;
while (true)
{
old = pg_atomic_read_u64_impl(ptr);
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
- uint64 old;
-
+ uint64 old;
while (true)
{
old = pg_atomic_read_u64_impl(ptr);
}
#endif
-#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
+#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
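The loop bodies of the generic fallbacks are elided in this excerpt; they all share the same read-then-compare-exchange retry shape. A sketch of that pattern for the u32 fetch-add case, under the assumption that the elided bodies follow it:

static inline uint32
fetch_add_u32_via_cas(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32		old;

	while (true)
	{
		old = pg_atomic_read_u32_impl(ptr);
		/* succeeds only if nobody changed the value since the read */
		if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
			break;
	}
	return old;					/* the value observed before the addition */
}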
# List of file patterns to exclude from pgindent runs
/s_lock\.h$
+/atomics/
/ecpg/test/expected/
/snowball/libstemmer/
/pl/plperl/ppport\.h$