@@ -83,6 +83,11 @@ static Bitmapset *HeapDetermineColumnsInfo(Relation relation,
 static bool heap_acquire_tuplock(Relation relation, ItemPointer tid,
                                  LockTupleMode mode, LockWaitPolicy wait_policy,
                                  bool *have_tuple_lock);
+static inline BlockNumber heapgettup_advance_block(HeapScanDesc scan,
+                                                   BlockNumber block,
+                                                   ScanDirection dir);
+static pg_noinline BlockNumber heapgettup_initial_block(HeapScanDesc scan,
+                                                        ScanDirection dir);
 static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
                                       uint16 old_infomask2, TransactionId add_to_xmax,
                                       LockTupleMode mode, bool is_update,
@@ -455,16 +460,14 @@ heap_prepare_pagescan(TableScanDesc sscan)
 }
 
 /*
- * heapfetchbuf - read and pin the given MAIN_FORKNUM block number.
+ * heap_fetch_next_buffer - read and pin the next block from MAIN_FORKNUM.
  *
- * Read the specified block of the scan relation into a buffer and pin that
- * buffer before saving it in the scan descriptor.
+ * Read the next block of the scan relation into a buffer and pin that buffer
+ * before saving it in the scan descriptor.
  */
 static inline void
-heapfetchbuf(HeapScanDesc scan, BlockNumber block)
+heap_fetch_next_buffer(HeapScanDesc scan, ScanDirection dir)
 {
-    Assert(block < scan->rs_nblocks);
-
     /* release previous scan buffer, if any */
     if (BufferIsValid(scan->rs_cbuf))
     {
@@ -479,10 +482,25 @@ heapfetchbuf(HeapScanDesc scan, BlockNumber block)
      */
     CHECK_FOR_INTERRUPTS();
 
-    /* read page using selected strategy */
-    scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM, block,
-                                       RBM_NORMAL, scan->rs_strategy);
-    scan->rs_cblock = block;
+    if (unlikely(!scan->rs_inited))
+    {
+        scan->rs_cblock = heapgettup_initial_block(scan, dir);
+
+        /* ensure rs_cbuf is invalid when we get InvalidBlockNumber */
+        Assert(scan->rs_cblock != InvalidBlockNumber ||
+               !BufferIsValid(scan->rs_cbuf));
+
+        scan->rs_inited = true;
+    }
+    else
+        scan->rs_cblock = heapgettup_advance_block(scan, scan->rs_cblock,
+                                                   dir);
+
+    /* read block if valid */
+    if (BlockNumberIsValid(scan->rs_cblock))
+        scan->rs_cbuf = ReadBufferExtended(scan->rs_base.rs_rd, MAIN_FORKNUM,
+                                           scan->rs_cblock, RBM_NORMAL,
+                                           scan->rs_strategy);
 }
 
 /*
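With this change the callers no longer track block numbers themselves: rs_inited, rs_cblock and rs_cbuf in the scan descriptor carry all of the state. The sketch below is not new API, just a condensed view of the calling pattern that the heapgettup() and heapgettup_pagemode() hunks later in this diff adopt.

/* Condensed sketch of the new calling pattern (see the hunks below). */
while (true)
{
    /* pick the initial or next block for this direction and pin its buffer */
    heap_fetch_next_buffer(scan, dir);

    /* an invalid rs_cbuf means the scan has run out of blocks */
    if (!BufferIsValid(scan->rs_cbuf))
        break;

    /* rs_cblock names the block that is now pinned in rs_cbuf */
    Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);

    /* ... examine the tuples on this page ... */
}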
@@ -492,7 +510,7 @@ heapfetchbuf(HeapScanDesc scan, BlockNumber block)
  * occur with empty tables and in parallel scans when parallel workers get all
  * of the pages before we can get a chance to get our first page.
  */
-static BlockNumber
+static pg_noinline BlockNumber
 heapgettup_initial_block(HeapScanDesc scan, ScanDirection dir)
 {
     Assert(!scan->rs_inited);
@@ -619,7 +637,7 @@ heapgettup_continue_page(HeapScanDesc scan, ScanDirection dir, int *linesleft,
 }
 
 /*
- * heapgettup_advance_block - helper for heapgettup() and heapgettup_pagemode()
+ * heapgettup_advance_block - helper for heap_fetch_next_buffer()
  *
  * Given the current block number, the scan direction, and various information
  * contained in the scan descriptor, calculate the BlockNumber to scan next
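The body of heapgettup_advance_block() is not touched by this commit and is not shown here. As a rough illustration only of what "calculate the BlockNumber to scan next" means, the serial forward-scan case behaves roughly like the sketch below; the real helper also handles backward scans, parallel scans, rs_numblocks limits and syncscan position reporting.

/* Simplified illustration only; not the actual function body. */
block++;
if (block >= scan->rs_nblocks)
    block = 0;                  /* wrap around: the scan may start mid-relation */
if (block == scan->rs_startblock)
    return InvalidBlockNumber;  /* back at the starting block: scan is done */
return block;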
@@ -730,23 +748,13 @@ heapgettup(HeapScanDesc scan,
            ScanKey key)
 {
     HeapTuple   tuple = &(scan->rs_ctup);
-    BlockNumber block;
     Page        page;
     OffsetNumber lineoff;
     int         linesleft;
 
-    if (unlikely(!scan->rs_inited))
-    {
-        block = heapgettup_initial_block(scan, dir);
-        /* ensure rs_cbuf is invalid when we get InvalidBlockNumber */
-        Assert(block != InvalidBlockNumber || !BufferIsValid(scan->rs_cbuf));
-        scan->rs_inited = true;
-    }
-    else
+    if (likely(scan->rs_inited))
     {
         /* continue from previously returned page/tuple */
-        block = scan->rs_cblock;
-
         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
         page = heapgettup_continue_page(scan, dir, &linesleft, &lineoff);
         goto continue_page;
@@ -756,9 +764,16 @@ heapgettup(HeapScanDesc scan,
      * advance the scan until we find a qualifying tuple or run out of stuff
      * to scan
      */
-    while (block != InvalidBlockNumber)
+    while (true)
     {
-        heapfetchbuf(scan, block);
+        heap_fetch_next_buffer(scan, dir);
+
+        /* did we run out of blocks to scan? */
+        if (!BufferIsValid(scan->rs_cbuf))
+            break;
+
+        Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
+
         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
         page = heapgettup_start_page(scan, dir, &linesleft, &lineoff);
 continue_page:
@@ -780,7 +795,7 @@ heapgettup(HeapScanDesc scan,
 
             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
             tuple->t_len = ItemIdGetLength(lpp);
-            ItemPointerSet(&(tuple->t_self), block, lineoff);
+            ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
 
             visible = HeapTupleSatisfiesVisibility(tuple,
                                                    scan->rs_base.rs_snapshot,
@@ -810,9 +825,6 @@ heapgettup(HeapScanDesc scan,
          * it's time to move to the next.
          */
         LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
-
-        /* get the BlockNumber to scan next */
-        block = heapgettup_advance_block(scan, block, dir);
     }
 
     /* end of scan */
@@ -832,9 +844,9 @@ heapgettup(HeapScanDesc scan,
  *
  * The internal logic is much the same as heapgettup's too, but there are some
  * differences: we do not take the buffer content lock (that only needs to
- * happen inside heapgetpage), and we iterate through just the tuples listed
- * in rs_vistuples[] rather than all tuples on the page.  Notice that
- * lineindex is 0-based, where the corresponding loop variable lineoff in
+ * happen inside heap_prepare_pagescan), and we iterate through just the
+ * tuples listed in rs_vistuples[] rather than all tuples on the page.  Notice
+ * that lineindex is 0-based, where the corresponding loop variable lineoff in
  * heapgettup is 1-based.
  * ----------------
  */
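The 0-based versus 1-based distinction the comment describes is easy to lose in the surrounding hunks, since both loops are split across helper functions. A minimal side-by-side sketch, simplified to the forward case and ignoring scan keys; rs_vistuples[] and rs_ntuples are the fields filled in by heap_prepare_pagescan():

/* pagemode: 0-based lineindex selects offsets from the visible-tuple array */
for (int lineindex = 0; lineindex < scan->rs_ntuples; lineindex++)
{
    OffsetNumber lineoff = scan->rs_vistuples[lineindex];
    ItemId       lpp = PageGetItemId(page, lineoff);

    /* ... item is already known visible; form the tuple ... */
}

/* non-pagemode: 1-based lineoff walks every item on the page */
for (OffsetNumber lineoff = FirstOffsetNumber;
     lineoff <= PageGetMaxOffsetNumber(page);
     lineoff++)
{
    ItemId      lpp = PageGetItemId(page, lineoff);

    if (!ItemIdIsNormal(lpp))
        continue;               /* skip unused, dead and redirect items */

    /* ... visibility must still be checked per tuple ... */
}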
@@ -845,22 +857,13 @@ heapgettup_pagemode(HeapScanDesc scan,
                     ScanKey key)
 {
     HeapTuple   tuple = &(scan->rs_ctup);
-    BlockNumber block;
     Page        page;
     int         lineindex;
     int         linesleft;
 
-    if (unlikely(!scan->rs_inited))
-    {
-        block = heapgettup_initial_block(scan, dir);
-        /* ensure rs_cbuf is invalid when we get InvalidBlockNumber */
-        Assert(block != InvalidBlockNumber || !BufferIsValid(scan->rs_cbuf));
-        scan->rs_inited = true;
-    }
-    else
+    if (likely(scan->rs_inited))
     {
         /* continue from previously returned page/tuple */
-        block = scan->rs_cblock;    /* current page */
         page = BufferGetPage(scan->rs_cbuf);
 
         lineindex = scan->rs_cindex + dir;
@@ -877,10 +880,15 @@ heapgettup_pagemode(HeapScanDesc scan,
      * advance the scan until we find a qualifying tuple or run out of stuff
      * to scan
      */
-    while (block != InvalidBlockNumber)
+    while (true)
     {
-        /* read the page */
-        heapfetchbuf(scan, block);
+        heap_fetch_next_buffer(scan, dir);
+
+        /* did we run out of blocks to scan? */
+        if (!BufferIsValid(scan->rs_cbuf))
+            break;
+
+        Assert(BufferGetBlockNumber(scan->rs_cbuf) == scan->rs_cblock);
 
         /* prune the page and determine visible tuple offsets */
         heap_prepare_pagescan((TableScanDesc) scan);
@@ -902,7 +910,7 @@ heapgettup_pagemode(HeapScanDesc scan,
 
             tuple->t_data = (HeapTupleHeader) PageGetItem(page, lpp);
             tuple->t_len = ItemIdGetLength(lpp);
-            ItemPointerSet(&(tuple->t_self), block, lineoff);
+            ItemPointerSet(&(tuple->t_self), scan->rs_cblock, lineoff);
 
             /* skip any tuples that don't match the scan key */
             if (key != NULL &&
@@ -913,9 +921,6 @@ heapgettup_pagemode(HeapScanDesc scan,
             scan->rs_cindex = lineindex;
             return;
         }
-
-        /* get the BlockNumber to scan next */
-        block = heapgettup_advance_block(scan, block, dir);
     }
 
     /* end of scan */