
Commit 71e4cc6

Optimize WAL insertion lock acquisition and release with some atomics
The WAL insertion lock variable insertingAt is currently being read and written with the help of the LWLock wait list lock to avoid any reads of torn values. This wait list lock can become a point of contention on highly concurrent write workloads.

This commit switches insertingAt to a 64-bit atomic variable that provides torn-free reads/writes. On platforms without 64-bit atomic support, the fallback implementation uses spinlocks to provide the same guarantees for the values read. LWLockWaitForVar(), through LWLockConflictsWithVar(), reads the variable's current value with a u64 atomic operation to check whether it still needs to wait. LWLockUpdateVar() updates the variable with an exchange_u64 (a full memory barrier) before waking up the waiters. LWLockReleaseClearVar() now also uses an exchange_u64 to reset the variable. Before this commit, all these steps relied on LWLockWaitListLock() and LWLockWaitListUnlock().

This reduces contention on the LWLock wait list lock and improves the performance of highly concurrent write workloads. Here are some numbers using pg_logical_emit_message() (HEAD at d6677b9) with various arbitrary record lengths and clients up to 4k on a rather large machine (64 vCPUs, 512GB of RAM, 16 cores per socket, 2 sockets), in terms of TPS numbers coming from pgbench:

 message_size_b     |   16   |   64   |  256   |  1024
--------------------+--------+--------+--------+--------
 patch_4_clients    |  83830 |  82929 |  80478 |  73131
 patch_16_clients   | 267655 | 264973 | 250566 | 213985
 patch_64_clients   | 380423 | 378318 | 356907 | 294248
 patch_256_clients  | 360915 | 354436 | 326209 | 263664
 patch_512_clients  | 332654 | 321199 | 287521 | 240128
 patch_1024_clients | 288263 | 276614 | 258220 | 217063
 patch_2048_clients | 252280 | 243558 | 230062 | 192429
 patch_4096_clients | 212566 | 213654 | 205951 | 166955
 head_4_clients     |  83686 |  83766 |  81233 |  73749
 head_16_clients    | 266503 | 265546 | 249261 | 213645
 head_64_clients    | 366122 | 363462 | 341078 | 261707
 head_256_clients   | 132600 | 132573 | 134392 | 165799
 head_512_clients   | 118937 | 114332 | 116860 | 150672
 head_1024_clients  | 133546 | 115256 | 125236 | 151390
 head_2048_clients  | 137877 | 117802 | 120909 | 138165
 head_4096_clients  | 113440 | 115611 | 120635 | 114361

Bharath has been measuring similar improvements, where the limit of the WAL insertion lock begins to be felt when more than 256 concurrent clients are involved in this specific workload.

An extra patch has been discussed to introduce a fast-exit path in LWLockUpdateVar() when there are no waiters, but this does not influence the write-heavy workload cases discussed here, as there are always waiters. It will be considered separately.

Author: Bharath Rupireddy
Reviewed-by: Nathan Bossart, Andres Freund, Michael Paquier
Discussion: https://postgr.es/m/CALj2ACVF+6jLvqKe6xhDzCCkr=rfd6upaGc3477Pji1Ke9G7Bg@mail.gmail.com
1 parent: d38ad8e

File tree

3 files changed: +26 −30 lines changed
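In essence, the change to the read path looks as follows (a condensed illustration of the diffs below, not verbatim tree code; 'lock' and 'valptr' stand for a WAL insertion lock and its insertingAt field):

    /* Before: a torn-free read of the 64-bit variable required taking the
     * LWLock's wait list lock. */
    LWLockWaitListLock(lock);
    value = *valptr;
    LWLockWaitListUnlock(lock);

    /* After: a single atomic load; on platforms without native 64-bit
     * atomics, pg_atomic_read_u64() itself falls back to a spinlock. */
    value = pg_atomic_read_u64(valptr);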

src/backend/access/transam/xlog.c (+2 −2)

@@ -376,7 +376,7 @@ typedef struct XLogwrtResult
 typedef struct
 {
     LWLock      lock;
-    XLogRecPtr  insertingAt;
+    pg_atomic_uint64 insertingAt;
     XLogRecPtr  lastImportantAt;
 } WALInsertLock;
 
@@ -4611,7 +4611,7 @@ XLOGShmemInit(void)
     for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
     {
         LWLockInitialize(&WALInsertLocks[i].l.lock, LWTRANCHE_WAL_INSERT);
-        WALInsertLocks[i].l.insertingAt = InvalidXLogRecPtr;
+        pg_atomic_init_u64(&WALInsertLocks[i].l.insertingAt, InvalidXLogRecPtr);
         WALInsertLocks[i].l.lastImportantAt = InvalidXLogRecPtr;
     }
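With the struct field converted, the holder side in xlog.c publishes its progress through LWLockUpdateVar(). A minimal sketch modeled on xlog.c's WALInsertLockUpdateInsertingAt() (the body here is a paraphrase, not part of this diff; MyLockNo is assumed to identify the insertion lock currently held):

    static void
    WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
    {
        /* Publish how far this backend's WAL insertion has advanced and
         * wake up anyone blocked in LWLockWaitForVar() on this lock. */
        LWLockUpdateVar(&WALInsertLocks[MyLockNo].l.lock,
                        &WALInsertLocks[MyLockNo].l.insertingAt,
                        insertingAt);
    }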

src/backend/storage/lmgr/lwlock.c (+21 −25)

@@ -1547,9 +1547,8 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
  * *result is set to true if the lock was free, and false otherwise.
  */
 static bool
-LWLockConflictsWithVar(LWLock *lock,
-                       uint64 *valptr, uint64 oldval, uint64 *newval,
-                       bool *result)
+LWLockConflictsWithVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
+                       uint64 *newval, bool *result)
 {
     bool        mustwait;
     uint64      value;
@@ -1572,13 +1571,10 @@ LWLockConflictsWithVar(LWLock *lock,
     *result = false;
 
     /*
-     * Read value using the lwlock's wait list lock, as we can't generally
-     * rely on atomic 64 bit reads/stores. TODO: On platforms with a way to
-     * do atomic 64 bit reads/writes the spinlock should be optimized away.
+     * Reading this value atomically is safe even on platforms where uint64
+     * cannot be read without observing a torn value.
      */
-    LWLockWaitListLock(lock);
-    value = *valptr;
-    LWLockWaitListUnlock(lock);
+    value = pg_atomic_read_u64(valptr);
 
     if (value != oldval)
     {
@@ -1607,7 +1603,8 @@ LWLockConflictsWithVar(LWLock *lock,
  * in shared mode, returns 'true'.
  */
 bool
-LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
+LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval,
+                 uint64 *newval)
 {
     PGPROC     *proc = MyProc;
     int         extraWaits = 0;
@@ -1735,29 +1732,32 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval)
  * LWLockUpdateVar - Update a variable and wake up waiters atomically
  *
  * Sets *valptr to 'val', and wakes up all processes waiting for us with
- * LWLockWaitForVar(). Setting the value and waking up the processes happen
- * atomically so that any process calling LWLockWaitForVar() on the same lock
- * is guaranteed to see the new value, and act accordingly.
+ * LWLockWaitForVar(). It first sets the value atomically and then wakes up
+ * waiting processes so that any process calling LWLockWaitForVar() on the same
+ * lock is guaranteed to see the new value, and act accordingly.
  *
  * The caller must be holding the lock in exclusive mode.
  */
 void
-LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val)
+LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 {
     proclist_head wakeup;
     proclist_mutable_iter iter;
 
     PRINT_LWDEBUG("LWLockUpdateVar", lock, LW_EXCLUSIVE);
 
+    /*
+     * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
+     * that the variable is updated before waking up waiters.
+     */
+    pg_atomic_exchange_u64(valptr, val);
+
     proclist_init(&wakeup);
 
     LWLockWaitListLock(lock);
 
     Assert(pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE);
 
-    /* Update the lock's value */
-    *valptr = val;
-
     /*
      * See if there are any LW_WAIT_UNTIL_FREE waiters that need to be woken
      * up. They are always in the front of the queue.
@@ -1873,17 +1873,13 @@ LWLockRelease(LWLock *lock)
  * LWLockReleaseClearVar - release a previously acquired lock, reset variable
  */
 void
-LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val)
+LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
 {
-    LWLockWaitListLock(lock);
-
     /*
-     * Set the variable's value before releasing the lock, that prevents race
-     * a race condition wherein a new locker acquires the lock, but hasn't yet
-     * set the variables value.
+     * Note that pg_atomic_exchange_u64 is a full barrier, so we're guaranteed
+     * that the variable is updated before releasing the lock.
      */
-    *valptr = val;
-    LWLockWaitListUnlock(lock);
+    pg_atomic_exchange_u64(valptr, val);
 
     LWLockRelease(lock);
 }
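The use of pg_atomic_exchange_u64() rather than pg_atomic_write_u64() in the hunks above is deliberate: in PostgreSQL's atomics API the plain atomic write carries no barrier semantics, while the exchange acts as a full memory barrier. A minimal sketch of the distinction (illustrative only; the old value returned by the exchange is discarded):

    /* Torn-free store, but no ordering guarantee: it could in principle be
     * reordered relative to the wait-queue handling that follows. */
    pg_atomic_write_u64(valptr, val);

    /* Full memory barrier: the new value is visible to every backend before
     * any waiter is woken up or the lock is released. */
    (void) pg_atomic_exchange_u64(valptr, val);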

src/include/storage/lwlock.h (+3 −3)

@@ -129,14 +129,14 @@ extern bool LWLockAcquire(LWLock *lock, LWLockMode mode);
 extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode);
 extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode);
 extern void LWLockRelease(LWLock *lock);
-extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val);
+extern void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val);
 extern void LWLockReleaseAll(void);
 extern bool LWLockHeldByMe(LWLock *lock);
 extern bool LWLockAnyHeldByMe(LWLock *lock, int nlocks, size_t stride);
 extern bool LWLockHeldByMeInMode(LWLock *lock, LWLockMode mode);
 
-extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval);
-extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 val);
+extern bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval);
+extern void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val);
 
 extern Size LWLockShmemSize(void);
 extern void CreateLWLocks(void);
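For reference, the waiter side of this API keeps the same shape as before; a condensed sketch modeled on WaitXLogInsertionsToFinish() in xlog.c ('i' and 'upto' are illustrative, standing for the lock slot being polled and the target WAL position):

    /* Wait until the insertion lock is free, or until its insertingAt
     * variable has advanced past the value we last observed. */
    XLogRecPtr  insertingat = InvalidXLogRecPtr;

    do
    {
        if (LWLockWaitForVar(&WALInsertLocks[i].l.lock,
                             &WALInsertLocks[i].l.insertingAt,
                             insertingat, &insertingat))
        {
            /* The lock was free, so no insertion is in progress. */
            insertingat = InvalidXLogRecPtr;
            break;
        }
    } while (insertingat < upto);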
