@@ -501,7 +501,7 @@ typedef struct XLogCtlData
 	 * WALBufMappingLock.
 	 */
 	char	   *pages;			/* buffers for unwritten XLOG pages */
-	XLogRecPtr *xlblocks;		/* 1st byte ptr-s + XLOG_BLCKSZ */
+	pg_atomic_uint64 *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */
 	int			XLogCacheBlck;	/* highest allocated xlog buffer index */
 
 	/*
@@ -1636,20 +1636,16 @@ GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
 	 * out to disk and evicted, and the caller is responsible for making sure
 	 * that doesn't happen.
 	 *
-	 * However, we don't hold a lock while we read the value. If someone has
-	 * just initialized the page, it's possible that we get a "torn read" of
-	 * the XLogRecPtr if 64-bit fetches are not atomic on this platform. In
-	 * that case we will see a bogus value. That's ok, we'll grab the mapping
-	 * lock (in AdvanceXLInsertBuffer) and retry if we see anything else than
-	 * the page we're looking for. But it means that when we do this unlocked
-	 * read, we might see a value that appears to be ahead of the page we're
-	 * looking for. Don't PANIC on that, until we've verified the value while
-	 * holding the lock.
+	 * We don't hold a lock while we read the value. If someone is just about
+	 * to initialize or has just initialized the page, it's possible that we
+	 * get InvalidXLogRecPtr. That's ok, we'll grab the mapping lock (in
+	 * AdvanceXLInsertBuffer) and retry if we see anything other than the page
+	 * we're looking for.
 	 */
 	expectedEndPtr = ptr;
 	expectedEndPtr += XLOG_BLCKSZ - ptr % XLOG_BLCKSZ;
 
-	endptr = XLogCtl->xlblocks[idx];
+	endptr = pg_atomic_read_u64(&XLogCtl->xlblocks[idx]);
 	if (expectedEndPtr != endptr)
 	{
 		XLogRecPtr	initializedUpto;
@@ -1680,7 +1676,7 @@ GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
 		WALInsertLockUpdateInsertingAt(initializedUpto);
 
 		AdvanceXLInsertBuffer(ptr, tli, false);
-		endptr = XLogCtl->xlblocks[idx];
+		endptr = pg_atomic_read_u64(&XLogCtl->xlblocks[idx]);
 
 		if (expectedEndPtr != endptr)
 			elog(PANIC, "could not find WAL buffer for %X/%X",
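The two hunks above form a lock-free fast path: compute the end pointer the buffer should hold, read the slot atomically without any lock, and fall back to the locked slow path (AdvanceXLInsertBuffer) only on a mismatch, PANICking if the slot still doesn't match afterwards. Below is a minimal standalone sketch of that read-then-retry pattern, using C11 atomics in place of PostgreSQL's pg_atomic_* wrappers; the names (BLCKSZ, NBUFFERS, xlblocks, slow_path_initialize, get_buffer) are illustrative stand-ins, not the real symbols.

/*
 * Sketch of the unlocked read + locked retry in GetXLogBuffer, assuming
 * C11 atomics.  A zero end pointer plays the role of InvalidXLogRecPtr.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define BLCKSZ   8192
#define NBUFFERS 4

/* end+1 position of the page each buffer currently holds (0 = invalid) */
static _Atomic uint64_t xlblocks[NBUFFERS];

/* Stand-in for the locked slow path: initialize the page for this slot. */
static void slow_path_initialize(int idx, uint64_t expected_end)
{
    atomic_store(&xlblocks[idx], expected_end);
}

/* Map a WAL position to a buffer index, retrying via the slow path. */
static int get_buffer(uint64_t ptr)
{
    int      idx = (int) ((ptr / BLCKSZ) % NBUFFERS);
    uint64_t expected_end = ptr + BLCKSZ - ptr % BLCKSZ;

    /* Unlocked read: may see 0 or another page's end pointer. */
    uint64_t end = atomic_load(&xlblocks[idx]);

    if (end != expected_end)
    {
        slow_path_initialize(idx, expected_end);
        end = atomic_load(&xlblocks[idx]);
        if (end != expected_end)
            return -1;          /* corresponds to the PANIC above */
    }
    return idx;
}

int main(void)
{
    printf("buffer index: %d\n", get_buffer(123456));
    return 0;
}

The key point the sketch illustrates: because the read is a single 64-bit atomic, a miss can only mean "wrong page" or "not initialized", never a torn value, which is why the old comment's caveat about torn reads could be dropped.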
@@ -1867,7 +1863,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
 		 * be zero if the buffer hasn't been used yet). Fall through if it's
 		 * already written out.
 		 */
-		OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
+		OldPageRqstPtr = pg_atomic_read_u64(&XLogCtl->xlblocks[nextidx]);
 		if (LogwrtResult.Write < OldPageRqstPtr)
 		{
 			/*
@@ -1989,8 +1985,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
 		 */
 		pg_write_barrier();
 
-		*((volatile XLogRecPtr *) &XLogCtl->xlblocks[nextidx]) = NewPageEndPtr;
-
+		pg_atomic_write_u64(&XLogCtl->xlblocks[nextidx], NewPageEndPtr);
 		XLogCtl->InitializedUpTo = NewPageEndPtr;
 
 		npages++;
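This hunk is the publishing side of the protocol: the page contents are initialized first, pg_write_barrier() orders those stores, and only then is the slot's end pointer stored, so any reader that observes the new value is guaranteed to see a fully initialized page. The volatile cast becomes unnecessary once the slot is a real atomic. A standalone C11 sketch of the same ordering, with a release store expressing the barrier-then-store pair in one call (page_buf, page_end, and publish_page are illustrative names, not PostgreSQL symbols):

/*
 * Sketch of the "initialize, then publish" ordering, assuming C11
 * atomics.  PostgreSQL uses an explicit pg_write_barrier() followed by
 * pg_atomic_write_u64(); a release store is the closest portable analogue.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define BLCKSZ 8192

static char             page_buf[BLCKSZ];
static _Atomic uint64_t page_end;       /* 0 means "not initialized yet" */

static void publish_page(uint64_t new_end)
{
    /* Step 1: initialize the page contents with ordinary stores. */
    memset(page_buf, 0, BLCKSZ);

    /*
     * Step 2: publish.  Release ordering forbids the memset above from
     * being reordered past this store, so a reader that sees new_end in
     * page_end also sees the initialized page.
     */
    atomic_store_explicit(&page_end, new_end, memory_order_release);
}

int main(void)
{
    publish_page(BLCKSZ);
    return 0;
}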
@@ -2208,7 +2203,7 @@ XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible)
 		 * if we're passed a bogus WriteRqst.Write that is past the end of the
 		 * last page that's been initialized by AdvanceXLInsertBuffer.
 		 */
-		XLogRecPtr	EndPtr = XLogCtl->xlblocks[curridx];
+		XLogRecPtr	EndPtr = pg_atomic_read_u64(&XLogCtl->xlblocks[curridx]);
 
 		if (LogwrtResult.Write >= EndPtr)
 			elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
@@ -4632,7 +4627,7 @@ XLOGShmemSize(void)
 	/* WAL insertion locks, plus alignment */
 	size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
 	/* xlblocks array */
-	size = add_size(size, mul_size(sizeof(XLogRecPtr), XLOGbuffers));
+	size = add_size(size, mul_size(sizeof(pg_atomic_uint64), XLOGbuffers));
 	/* extra alignment padding for XLOG I/O buffers */
 	size = add_size(size, Max(XLOG_BLCKSZ, PG_IO_ALIGN_SIZE));
 	/* and the buffers themselves */
@@ -4710,10 +4705,13 @@ XLOGShmemInit(void)
 	 * needed here.
 	 */
 	allocptr = ((char *) XLogCtl) + sizeof(XLogCtlData);
-	XLogCtl->xlblocks = (XLogRecPtr *) allocptr;
-	memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
-	allocptr += sizeof(XLogRecPtr) * XLOGbuffers;
+	XLogCtl->xlblocks = (pg_atomic_uint64 *) allocptr;
+	allocptr += sizeof(pg_atomic_uint64) * XLOGbuffers;
 
+	for (i = 0; i < XLOGbuffers; i++)
+	{
+		pg_atomic_init_u64(&XLogCtl->xlblocks[i], InvalidXLogRecPtr);
+	}
 
 	/* WAL insertion locks. Ensure they're aligned to the full padded size */
 	allocptr += sizeof(WALInsertLockPadded) -
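With the slots now pg_atomic_uint64, the array can no longer simply be memset to zero: each element has to go through pg_atomic_init_u64, and InvalidXLogRecPtr (which is 0 in PostgreSQL) becomes the explicit "not yet initialized" sentinel that the unlocked readers above rely on. A standalone C11 sketch of the same carve-from-one-arena-then-initialize pattern; the arena layout and the names (Ctl, nbuffers, INVALID_REC_PTR) are illustrative, not the real shared-memory machinery:

/*
 * Sketch of XLOGShmemInit's layout, assuming C11 atomics: place an
 * atomic array after a control struct in one allocation, then
 * initialize each element explicitly instead of memset()ing it.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdlib.h>

#define INVALID_REC_PTR ((uint64_t) 0)

typedef struct
{
    _Atomic uint64_t *xlblocks;
} Ctl;

int main(void)
{
    int   nbuffers = 16;
    /* One arena: the control struct followed by the atomic array. */
    char *arena = malloc(sizeof(Ctl) + nbuffers * sizeof(_Atomic uint64_t));
    Ctl  *ctl = (Ctl *) arena;
    char *allocptr = arena + sizeof(Ctl);

    ctl->xlblocks = (_Atomic uint64_t *) allocptr;
    allocptr += nbuffers * sizeof(_Atomic uint64_t);

    /* Atomics must be initialized one by one, never memset. */
    for (int i = 0; i < nbuffers; i++)
        atomic_init(&ctl->xlblocks[i], INVALID_REC_PTR);

    free(arena);
    return 0;
}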
@@ -5750,7 +5748,7 @@ StartupXLOG(void)
 		memcpy(page, endOfRecoveryInfo->lastPage, len);
 		memset(page + len, 0, XLOG_BLCKSZ - len);
 
-		XLogCtl->xlblocks[firstIdx] = endOfRecoveryInfo->lastPageBeginPtr + XLOG_BLCKSZ;
+		pg_atomic_write_u64(&XLogCtl->xlblocks[firstIdx], endOfRecoveryInfo->lastPageBeginPtr + XLOG_BLCKSZ);
 		XLogCtl->InitializedUpTo = endOfRecoveryInfo->lastPageBeginPtr + XLOG_BLCKSZ;
 	}
 	else