@@ -137,8 +137,9 @@ static BufferAccessStrategy vac_strategy;
 
 
 /* non-export function prototypes */
-static void lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
-			   Relation *Irel, int nindexes, bool aggressive);
+static void lazy_scan_heap(Relation onerel, int options,
+			   LVRelStats *vacrelstats, Relation *Irel, int nindexes,
+			   bool aggressive);
 static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
 static bool lazy_check_needs_freeze(Buffer buf, bool *hastup);
 static void lazy_vacuum_index(Relation indrel,
@@ -223,15 +224,17 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
 						  &MultiXactCutoff, &mxactFullScanLimit);
 
 	/*
-	 * We request an aggressive scan if either the table's frozen Xid is now
-	 * older than or equal to the requested Xid full-table scan limit; or if
-	 * the table's minimum MultiXactId is older than or equal to the requested
-	 * mxid full-table scan limit.
+	 * We request an aggressive scan if the table's frozen Xid is now older
+	 * than or equal to the requested Xid full-table scan limit; or if the
+	 * table's minimum MultiXactId is older than or equal to the requested
+	 * mxid full-table scan limit; or if DISABLE_PAGE_SKIPPING was specified.
 	 */
 	aggressive = TransactionIdPrecedesOrEquals(onerel->rd_rel->relfrozenxid,
 											   xidFullScanLimit);
 	aggressive |= MultiXactIdPrecedesOrEquals(onerel->rd_rel->relminmxid,
 											  mxactFullScanLimit);
+	if (options & VACOPT_DISABLE_PAGE_SKIPPING)
+		aggressive = true;
 
 	vacrelstats = (LVRelStats *) palloc0(sizeof(LVRelStats));
 
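This hunk carries the main behavioral change: if the caller passed VACOPT_DISABLE_PAGE_SKIPPING, the scan is unconditionally promoted to an aggressive one, independent of how old relfrozenxid and relminmxid are. Below is a minimal, self-contained sketch of that decision; the flag value and the two boolean inputs are stand-ins for the real VacuumOption bit and the TransactionIdPrecedesOrEquals / MultiXactIdPrecedesOrEquals comparisons, which are not reproduced here.

    #include <stdbool.h>

    /* Stand-in for the option bit defined among PostgreSQL's VacuumOption flags. */
    #define VACOPT_DISABLE_PAGE_SKIPPING (1 << 7)

    /*
     * Decide whether this VACUUM must be "aggressive", i.e. visit every page
     * that is not already all-frozen.  xid_limit_reached and mxid_limit_reached
     * stand in for the relfrozenxid / relminmxid age checks.
     */
    static bool
    scan_is_aggressive(int options, bool xid_limit_reached, bool mxid_limit_reached)
    {
        bool    aggressive;

        aggressive = xid_limit_reached;
        aggressive |= mxid_limit_reached;

        /* DISABLE_PAGE_SKIPPING always forces an aggressive scan. */
        if (options & VACOPT_DISABLE_PAGE_SKIPPING)
            aggressive = true;

        return aggressive;
    }

Forcing aggressive = true is only half of the option's effect; the hunks further down also bypass the visibility-map skipping loops, so even all-frozen pages are read and rechecked.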
@@ -246,7 +249,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
 	vacrelstats->hasindex = (nindexes > 0);
 
 	/* Do the vacuuming */
-	lazy_scan_heap(onerel, vacrelstats, Irel, nindexes, aggressive);
+	lazy_scan_heap(onerel, options, vacrelstats, Irel, nindexes, aggressive);
 
 	/* Done with indexes */
 	vac_close_indexes(nindexes, Irel, NoLock);
@@ -441,7 +444,7 @@ vacuum_log_cleanup_info(Relation rel, LVRelStats *vacrelstats)
 *		reference them have been killed.
 */
static void
-lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
+lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
			   Relation *Irel, int nindexes, bool aggressive)
{
	BlockNumber nblocks,
@@ -542,25 +545,28 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 	 * the last page.  This is worth avoiding mainly because such a lock must
 	 * be replayed on any hot standby, where it can be disruptive.
 	 */
-	for (next_unskippable_block = 0;
-		 next_unskippable_block < nblocks;
-		 next_unskippable_block++)
+	next_unskippable_block = 0;
+	if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
 	{
-		uint8		vmstatus;
-
-		vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
-											&vmbuffer);
-		if (aggressive)
+		while (next_unskippable_block < nblocks)
 		{
-			if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
-				break;
-		}
-		else
-		{
-			if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
-				break;
+			uint8		vmstatus;
+
+			vmstatus = visibilitymap_get_status(onerel, next_unskippable_block,
+												&vmbuffer);
+			if (aggressive)
+			{
+				if ((vmstatus & VISIBILITYMAP_ALL_FROZEN) == 0)
+					break;
+			}
+			else
+			{
+				if ((vmstatus & VISIBILITYMAP_ALL_VISIBLE) == 0)
+					break;
+			}
+			vacuum_delay_point();
+			next_unskippable_block++;
 		}
-		vacuum_delay_point();
 	}
 
 	if (next_unskippable_block >= SKIP_PAGES_THRESHOLD)
@@ -594,26 +600,29 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 		if (blkno == next_unskippable_block)
 		{
 			/* Time to advance next_unskippable_block */
-			for (next_unskippable_block++;
-				 next_unskippable_block < nblocks;
-				 next_unskippable_block++)
+			next_unskippable_block++;
+			if ((options & VACOPT_DISABLE_PAGE_SKIPPING) == 0)
 			{
-				uint8		vmskipflags;
-
-				vmskipflags = visibilitymap_get_status(onerel,
-													   next_unskippable_block,
-													   &vmbuffer);
-				if (aggressive)
+				while (next_unskippable_block < nblocks)
 				{
-					if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
-						break;
-				}
-				else
-				{
-					if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
-						break;
+					uint8		vmskipflags;
+
+					vmskipflags = visibilitymap_get_status(onerel,
+														   next_unskippable_block,
+														   &vmbuffer);
+					if (aggressive)
+					{
+						if ((vmskipflags & VISIBILITYMAP_ALL_FROZEN) == 0)
+							break;
+					}
+					else
+					{
+						if ((vmskipflags & VISIBILITYMAP_ALL_VISIBLE) == 0)
+							break;
+					}
+					vacuum_delay_point();
+					next_unskippable_block++;
 				}
-				vacuum_delay_point();
 			}
 
 			/*
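Both restructured loops enforce the same skipping policy: a normal scan may skip a block the visibility map reports as all-visible, an aggressive scan only one reported as all-frozen, and with DISABLE_PAGE_SKIPPING neither loop runs, so nothing is skipped. The sketch below restates that per-block decision as a standalone function; vm_status_fn is a hypothetical callback standing in for visibilitymap_get_status(), and the bit values mirror VISIBILITYMAP_ALL_VISIBLE / VISIBILITYMAP_ALL_FROZEN rather than quoting them.

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch-local visibility-map bits. */
    #define VM_ALL_VISIBLE 0x01
    #define VM_ALL_FROZEN  0x02

    typedef uint8_t (*vm_status_fn) (uint32_t blkno);

    /*
     * Return the first block at or after 'start' that must actually be scanned.
     * When disable_page_skipping is set, no blocks are ever skipped.
     */
    static uint32_t
    next_unskippable(uint32_t start, uint32_t nblocks, bool aggressive,
                     bool disable_page_skipping, vm_status_fn vm_status)
    {
        uint32_t    blkno = start;

        if (disable_page_skipping)
            return blkno;

        while (blkno < nblocks)
        {
            uint8_t     status = vm_status(blkno);

            if (aggressive)
            {
                /* aggressive scans may only skip all-frozen pages */
                if ((status & VM_ALL_FROZEN) == 0)
                    break;
            }
            else
            {
                /* normal scans may skip any all-visible page */
                if ((status & VM_ALL_VISIBLE) == 0)
                    break;
            }
            blkno++;
        }
        return blkno;
    }

(The real loops also call vacuum_delay_point() for each block they skip, to respect the cost-based vacuum delay; that is omitted from this sketch.)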
@@ -1054,7 +1063,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 			}
 			else
 			{
-				bool	tuple_totally_frozen;
+				bool		tuple_totally_frozen;
 
 				num_tuples += 1;
 				hastup = true;
@@ -1064,8 +1073,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 				 * freezing.  Note we already have exclusive buffer lock.
 				 */
 				if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-										 MultiXactCutoff, &frozen[nfrozen],
-											  &tuple_totally_frozen))
+											  MultiXactCutoff, &frozen[nfrozen],
+											  &tuple_totally_frozen))
 					frozen[nfrozen++].offset = offnum;
 
 				if (!tuple_totally_frozen)