@@ -1657,8 +1657,8 @@ ReleaseAndReadBuffer(Buffer buffer,
 		{
 			bufHdr = GetLocalBufferDescriptor(-buffer - 1);
 			if (bufHdr->tag.blockNum == blockNum &&
-				RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
-				bufHdr->tag.forkNum == forkNum)
+				BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+				BufTagGetForkNum(&bufHdr->tag) == forkNum)
 				return buffer;
 			ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
 			LocalRefCount[-buffer - 1]--;
@@ -1668,8 +1668,8 @@ ReleaseAndReadBuffer(Buffer buffer,
 			bufHdr = GetBufferDescriptor(buffer - 1);
 			/* we have pin, so it's ok to examine tag without spinlock */
 			if (bufHdr->tag.blockNum == blockNum &&
-				RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
-				bufHdr->tag.forkNum == forkNum)
+				BufTagMatchesRelFileLocator(&bufHdr->tag, &relation->rd_locator) &&
+				BufTagGetForkNum(&bufHdr->tag) == forkNum)
 				return buffer;
 			UnpinBuffer(bufHdr, true);
 		}
@@ -2010,9 +2010,9 @@ BufferSync(int flags)

 			item = &CkptBufferIds[num_to_scan++];
 			item->buf_id = buf_id;
-			item->tsId = bufHdr->tag.rlocator.spcOid;
-			item->relNumber = bufHdr->tag.rlocator.relNumber;
-			item->forkNum = bufHdr->tag.forkNum;
+			item->tsId = bufHdr->tag.spcOid;
+			item->relNumber = BufTagGetRelNumber(&bufHdr->tag);
+			item->forkNum = BufTagGetForkNum(&bufHdr->tag);
 			item->blockNum = bufHdr->tag.blockNum;
 		}

@@ -2718,7 +2718,8 @@ PrintBufferLeakWarning(Buffer buffer)
 	}

 	/* theoretically we should lock the bufhdr here */
-	path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
+	path = relpathbackend(BufTagGetRelFileLocator(&buf->tag), backend,
+						  BufTagGetForkNum(&buf->tag));
 	buf_state = pg_atomic_read_u32(&buf->state);
 	elog(WARNING,
 		 "buffer refcount leak: [%03d] "
@@ -2797,8 +2798,8 @@ BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
 	bufHdr = GetBufferDescriptor(buffer - 1);

 	/* pinned, so OK to read tag without spinlock */
-	*rlocator = bufHdr->tag.rlocator;
-	*forknum = bufHdr->tag.forkNum;
+	*rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+	*forknum = BufTagGetForkNum(&bufHdr->tag);
 	*blknum = bufHdr->tag.blockNum;
 }

@@ -2848,9 +2849,9 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)

 	/* Find smgr relation for buffer */
 	if (reln == NULL)
-		reln = smgropen(buf->tag.rlocator, InvalidBackendId);
+		reln = smgropen(BufTagGetRelFileLocator(&buf->tag), InvalidBackendId);

-	TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
+	TRACE_POSTGRESQL_BUFFER_FLUSH_START(BufTagGetForkNum(&buf->tag),
 										buf->tag.blockNum,
 										reln->smgr_rlocator.locator.spcOid,
 										reln->smgr_rlocator.locator.dbOid,
@@ -2909,7 +2910,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
 	 * bufToWrite is either the shared buffer or a copy, as appropriate.
 	 */
 	smgrwrite(reln,
-			  buf->tag.forkNum,
+			  BufTagGetForkNum(&buf->tag),
 			  buf->tag.blockNum,
 			  bufToWrite,
 			  false);
@@ -2930,7 +2931,7 @@ FlushBuffer(BufferDesc *buf, SMgrRelation reln)
 	 */
 	TerminateBufferIO(buf, true, 0);

-	TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
+	TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(BufTagGetForkNum(&buf->tag),
 									   buf->tag.blockNum,
 									   reln->smgr_rlocator.locator.spcOid,
 									   reln->smgr_rlocator.locator.dbOid,
@@ -3151,15 +3152,15 @@ DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
 		 * We could check forkNum and blockNum as well as the rlocator, but
 		 * the incremental win from doing so seems small.
 		 */
-		if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
+		if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator))
 			continue;

 		buf_state = LockBufHdr(bufHdr);

 		for (j = 0; j < nforks; j++)
 		{
-			if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
-				bufHdr->tag.forkNum == forkNum[j] &&
+			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator.locator) &&
+				BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
 				bufHdr->tag.blockNum >= firstDelBlock[j])
 			{
 				InvalidateBuffer(bufHdr);	/* releases spinlock */
@@ -3309,7 +3310,7 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)

 			for (j = 0; j < n; j++)
 			{
-				if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
+				if (BufTagMatchesRelFileLocator(&bufHdr->tag, &locators[j]))
 				{
 					rlocator = &locators[j];
 					break;
@@ -3318,7 +3319,10 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
 		}
 		else
 		{
-			rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
+			RelFileLocator locator;
+
+			locator = BufTagGetRelFileLocator(&bufHdr->tag);
+			rlocator = bsearch((const void *) &(locator),
 							   locators, n, sizeof(RelFileLocator),
 							   rlocator_comparator);
 		}
@@ -3328,7 +3332,7 @@ DropRelationsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
 			continue;

 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, rlocator))
 			InvalidateBuffer(bufHdr);	/* releases spinlock */
 		else
 			UnlockBufHdr(bufHdr, buf_state);
@@ -3388,8 +3392,8 @@ FindAndDropRelationBuffers(RelFileLocator rlocator, ForkNumber forkNum,
 		 */
 		buf_state = LockBufHdr(bufHdr);

-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
-			bufHdr->tag.forkNum == forkNum &&
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
+			BufTagGetForkNum(&bufHdr->tag) == forkNum &&
 			bufHdr->tag.blockNum >= firstDelBlock)
 			InvalidateBuffer(bufHdr);	/* releases spinlock */
 		else
@@ -3427,11 +3431,11 @@ DropDatabaseBuffers(Oid dbid)
 		 * As in DropRelationBuffers, an unlocked precheck should be
 		 * safe and saves some cycles.
 		 */
-		if (bufHdr->tag.rlocator.dbOid != dbid)
+		if (bufHdr->tag.dbOid != dbid)
 			continue;

 		buf_state = LockBufHdr(bufHdr);
-		if (bufHdr->tag.rlocator.dbOid == dbid)
+		if (bufHdr->tag.dbOid == dbid)
 			InvalidateBuffer(bufHdr);	/* releases spinlock */
 		else
 			UnlockBufHdr(bufHdr, buf_state);
@@ -3461,7 +3465,8 @@ PrintBufferDescs(void)
 			 "[%02d] (freeNext=%d, rel=%s, "
 			 "blockNum=%u, flags=0x%x, refcount=%u %d)",
 			 i, buf->freeNext,
-			 relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
+			 relpathbackend(BufTagGetRelFileLocator(&buf->tag),
+							InvalidBackendId, BufTagGetForkNum(&buf->tag)),
 			 buf->tag.blockNum, buf->flags,
 			 buf->refcount, GetPrivateRefCount(b));
 	}
@@ -3486,7 +3491,8 @@ PrintPinnedBufs(void)
 				 "[%02d] (freeNext=%d, rel=%s, "
 				 "blockNum=%u, flags=0x%x, refcount=%u %d)",
 				 i, buf->freeNext,
-				 relpathperm(buf->tag.rlocator, buf->tag.forkNum),
+				 relpathperm(BufTagGetRelFileLocator(&buf->tag),
+							 BufTagGetForkNum(&buf->tag)),
 				 buf->tag.blockNum, buf->flags,
 				 buf->refcount, GetPrivateRefCount(b));
 		}
@@ -3525,7 +3531,7 @@ FlushRelationBuffers(Relation rel)
 			uint32		buf_state;

 			bufHdr = GetLocalBufferDescriptor(i);
-			if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+			if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
 				((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
 				 (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 			{
@@ -3543,7 +3549,7 @@ FlushRelationBuffers(Relation rel)
 				PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);

 				smgrwrite(RelationGetSmgr(rel),
-						  bufHdr->tag.forkNum,
+						  BufTagGetForkNum(&bufHdr->tag),
 						  bufHdr->tag.blockNum,
 						  localpage,
 						  false);
@@ -3572,13 +3578,13 @@ FlushRelationBuffers(Relation rel)
 		 * As in DropRelationBuffers, an unlocked precheck should be
 		 * safe and saves some cycles.
 		 */
-		if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
+		if (!BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator))
 			continue;

 		ReservePrivateRefCountEntry();

 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &rel->rd_locator) &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
@@ -3652,7 +3658,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)

 			for (j = 0; j < nrels; j++)
 			{
-				if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
+				if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srels[j].rlocator))
 				{
 					srelent = &srels[j];
 					break;
@@ -3661,7 +3667,10 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
 		}
 		else
 		{
-			srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
+			RelFileLocator rlocator;
+
+			rlocator = BufTagGetRelFileLocator(&bufHdr->tag);
+			srelent = bsearch((const void *) &(rlocator),
 							  srels, nrels, sizeof(SMgrSortArray),
 							  rlocator_comparator);
 		}
@@ -3673,7 +3682,7 @@ FlushRelationsAllBuffers(SMgrRelation *smgrs, int nrels)
 		ReservePrivateRefCountEntry();

 		buf_state = LockBufHdr(bufHdr);
-		if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
+		if (BufTagMatchesRelFileLocator(&bufHdr->tag, &srelent->rlocator) &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
@@ -3877,13 +3886,13 @@ FlushDatabaseBuffers(Oid dbid)
 		 * As in DropRelationBuffers, an unlocked precheck should be
 		 * safe and saves some cycles.
 		 */
-		if (bufHdr->tag.rlocator.dbOid != dbid)
+		if (bufHdr->tag.dbOid != dbid)
 			continue;

 		ReservePrivateRefCountEntry();

 		buf_state = LockBufHdr(bufHdr);
-		if (bufHdr->tag.rlocator.dbOid == dbid &&
+		if (bufHdr->tag.dbOid == dbid &&
 			(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
 		{
 			PinBuffer_Locked(bufHdr);
@@ -4052,7 +4061,7 @@ MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
 		 * See src/backend/storage/page/README for longer discussion.
 		 */
 		if (RecoveryInProgress() ||
-			RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
+			RelFileLocatorSkippingWAL(BufTagGetRelFileLocator(&bufHdr->tag)))
 			return;

 		/*
@@ -4661,7 +4670,8 @@ AbortBufferIO(void)
 			/* Buffer is pinned, so we can read tag without spinlock */
 			char	   *path;

-			path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
+			path = relpathperm(BufTagGetRelFileLocator(&buf->tag),
+							   BufTagGetForkNum(&buf->tag));
 			ereport(WARNING,
 					(errcode(ERRCODE_IO_ERROR),
 					 errmsg("could not write block %u of %s",
@@ -4685,7 +4695,8 @@ shared_buffer_write_error_callback(void *arg)
 	/* Buffer is pinned, so we can read the tag without locking the spinlock */
 	if (bufHdr != NULL)
 	{
-		char	   *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
+		char	   *path = relpathperm(BufTagGetRelFileLocator(&bufHdr->tag),
+									   BufTagGetForkNum(&bufHdr->tag));

 		errcontext("writing block %u of relation %s",
 				   bufHdr->tag.blockNum, path);
@@ -4703,8 +4714,9 @@ local_buffer_write_error_callback(void *arg)

 	if (bufHdr != NULL)
 	{
-		char	   *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
-										  bufHdr->tag.forkNum);
+		char	   *path = relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
+										  MyBackendId,
+										  BufTagGetForkNum(&bufHdr->tag));

 		errcontext("writing block %u of relation %s",
 				   bufHdr->tag.blockNum, path);
@@ -4798,15 +4810,20 @@ static inline int
 buffertag_comparator(const BufferTag *ba, const BufferTag *bb)
 {
 	int			ret;
+	RelFileLocator rlocatora;
+	RelFileLocator rlocatorb;

-	ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
+	rlocatora = BufTagGetRelFileLocator(ba);
+	rlocatorb = BufTagGetRelFileLocator(bb);
+
+	ret = rlocator_comparator(&rlocatora, &rlocatorb);

 	if (ret != 0)
 		return ret;

-	if (ba->forkNum < bb->forkNum)
+	if (BufTagGetForkNum(ba) < BufTagGetForkNum(bb))
 		return -1;
-	if (ba->forkNum > bb->forkNum)
+	if (BufTagGetForkNum(ba) > BufTagGetForkNum(bb))
 		return 1;

 	if (ba->blockNum < bb->blockNum)
@@ -4956,22 +4973,26 @@ IssuePendingWritebacks(WritebackContext *context)
 		SMgrRelation reln;
 		int			ahead;
 		BufferTag	tag;
+		RelFileLocator currlocator;
 		Size		nblocks = 1;

 		cur = &context->pending_writebacks[i];
 		tag = cur->tag;
+		currlocator = BufTagGetRelFileLocator(&tag);

 		/*
 		 * Peek ahead, into following writeback requests, to see if they can
 		 * be combined with the current one.
 		 */
 		for (ahead = 0; i + ahead + 1 < context->nr_pending; ahead++)
 		{
+
 			next = &context->pending_writebacks[i + ahead + 1];

 			/* different file, stop */
-			if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
-				cur->tag.forkNum != next->tag.forkNum)
+			if (!RelFileLocatorEquals(currlocator,
+									  BufTagGetRelFileLocator(&next->tag)) ||
+				BufTagGetForkNum(&cur->tag) != BufTagGetForkNum(&next->tag))
 				break;

 			/* ok, block queued twice, skip */
@@ -4989,8 +5010,8 @@ IssuePendingWritebacks(WritebackContext *context)
 		i += ahead;

 		/* and finally tell the kernel to write the data to storage */
-		reln = smgropen(tag.rlocator, InvalidBackendId);
-		smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
+		reln = smgropen(currlocator, InvalidBackendId);
+		smgrwriteback(reln, BufTagGetForkNum(&tag), tag.blockNum, nblocks);
 	}

 	context->nr_pending = 0;
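For reference, the call sites in this diff imply BufferTag accessors along the following lines. This is a minimal sketch reconstructed from the hunks above rather than copied from src/include/storage/buf_internals.h, so the exact struct layout and function bodies are assumptions; only the accessor names and the direct tag.spcOid/tag.dbOid/tag.blockNum accesses are confirmed by the diff itself.

/*
 * Illustrative sketch only -- assumes PostgreSQL's internal typedefs
 * (Oid, RelFileNumber, ForkNumber, BlockNumber, RelFileLocator) from
 * postgres.h, storage/block.h, and storage/relfilelocator.h.
 */
typedef struct buftag
{
	Oid			spcOid;			/* tablespace oid */
	Oid			dbOid;			/* database oid */
	RelFileNumber relNumber;	/* relation file number */
	ForkNumber	forkNum;		/* fork number */
	BlockNumber blockNum;		/* block number relative to start of reln */
} BufferTag;

static inline RelFileNumber
BufTagGetRelNumber(const BufferTag *tag)
{
	return tag->relNumber;
}

static inline ForkNumber
BufTagGetForkNum(const BufferTag *tag)
{
	return tag->forkNum;
}

/* Rebuild a RelFileLocator from the tag; note the return is by value. */
static inline RelFileLocator
BufTagGetRelFileLocator(const BufferTag *tag)
{
	RelFileLocator rlocator;

	rlocator.spcOid = tag->spcOid;
	rlocator.dbOid = tag->dbOid;
	rlocator.relNumber = BufTagGetRelNumber(tag);

	return rlocator;
}

/* Field-wise comparison; avoids materializing a RelFileLocator. */
static inline bool
BufTagMatchesRelFileLocator(const BufferTag *tag,
							const RelFileLocator *rlocator)
{
	return (tag->spcOid == rlocator->spcOid) &&
		(tag->dbOid == rlocator->dbOid) &&
		(BufTagGetRelNumber(tag) == rlocator->relNumber);
}

Because BufTagGetRelFileLocator() returns the locator by value, call sites can no longer pass &bufHdr->tag.rlocator to bsearch() directly; they first copy the result into a local RelFileLocator and pass its address, which is why the hunks in DropRelationsAllBuffers() and FlushRelationsAllBuffers() introduce local variables, and why IssuePendingWritebacks() caches currlocator once per loop iteration.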