@@ -189,6 +189,21 @@ typedef struct LVRelState
 	BlockNumber scanned_pages;	/* # pages examined (not skipped via VM) */
 	BlockNumber removed_pages;	/* # pages removed by relation truncation */
 	BlockNumber new_frozen_tuple_pages; /* # pages with newly frozen tuples */
+
+	/* # pages newly set all-visible in the VM */
+	BlockNumber vm_new_visible_pages;
+
+	/*
+	 * # pages newly set all-visible and all-frozen in the VM. This is a
+	 * subset of vm_new_visible_pages. That is, vm_new_visible_pages includes
+	 * all pages set all-visible, but vm_new_visible_frozen_pages includes
+	 * only those which were also set all-frozen.
+	 */
+	BlockNumber vm_new_visible_frozen_pages;
+
+	/* # all-visible pages newly set all-frozen in the VM */
+	BlockNumber vm_new_frozen_pages;
+
 	BlockNumber lpdead_item_pages;	/* # pages with LP_DEAD items */
 	BlockNumber missed_dead_pages;	/* # pages with missed dead tuples */
 	BlockNumber nonempty_pages; /* actually, last nonempty page + 1 */
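The three new counters partition the VM changes vacuum can make. A minimal standalone sketch of the invariant and the derived total — only the three field names are taken from the patch; everything else here (the `vm_counters` type, the sample counts) is hypothetical:

```c
#include <assert.h>
#include <stdint.h>

typedef uint32_t BlockNumber;

typedef struct
{
	BlockNumber vm_new_visible_pages;	/* pages newly set all-visible */
	BlockNumber vm_new_visible_frozen_pages;	/* subset also set all-frozen */
	BlockNumber vm_new_frozen_pages;	/* already-visible pages newly frozen */
} vm_counters;

int
main(void)
{
	/* hypothetical counts from one vacuum of a table */
	vm_counters	c = {.vm_new_visible_pages = 10,
					 .vm_new_visible_frozen_pages = 7,
					 .vm_new_frozen_pages = 3};

	/* vm_new_visible_frozen_pages is a subset of vm_new_visible_pages */
	assert(c.vm_new_visible_frozen_pages <= c.vm_new_visible_pages);

	/* total pages newly set all-frozen, as summed in the log message below */
	assert(c.vm_new_visible_frozen_pages + c.vm_new_frozen_pages == 10);
	return 0;
}
```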
@@ -428,6 +443,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 	vacrel->recently_dead_tuples = 0;
 	vacrel->missed_dead_tuples = 0;
 
+	vacrel->vm_new_visible_pages = 0;
+	vacrel->vm_new_visible_frozen_pages = 0;
+	vacrel->vm_new_frozen_pages = 0;
+
 	/*
 	 * Get cutoffs that determine which deleted tuples are considered DEAD,
 	 * not just RECENTLY_DEAD, and which XIDs/MXIDs to freeze. Then determine
@@ -701,6 +720,13 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
 							 100.0 * vacrel->new_frozen_tuple_pages /
 							 orig_rel_pages,
 							 (long long) vacrel->tuples_frozen);
+
+			appendStringInfo(&buf,
+							 _("visibility map: %u pages set all-visible, %u pages set all-frozen (%u were all-visible)\n"),
+							 vacrel->vm_new_visible_pages,
+							 vacrel->vm_new_visible_frozen_pages +
+							 vacrel->vm_new_frozen_pages,
+							 vacrel->vm_new_frozen_pages);
 			if (vacrel->do_index_vacuuming)
 			{
 				if (vacrel->nindexes == 0 || vacrel->num_index_scans == 0)
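For concreteness: with the hypothetical counts from the sketch above (vm_new_visible_pages = 10, vm_new_visible_frozen_pages = 7, vm_new_frozen_pages = 3), the second %u is the sum of the two frozen counters, so the new log line would read:

```
visibility map: 10 pages set all-visible, 10 pages set all-frozen (3 were all-visible)
```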
@@ -1354,6 +1380,8 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
 		 */
 		if (!PageIsAllVisible(page))
 		{
+			uint8		old_vmbits;
+
 			START_CRIT_SECTION();
 
 			/* mark buffer dirty before writing a WAL record */
@@ -1373,10 +1401,24 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
 			log_newpage_buffer(buf, true);
 
 			PageSetAllVisible(page);
-			visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-							  vmbuffer, InvalidTransactionId,
-							  VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
+			old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
+										   InvalidXLogRecPtr,
+										   vmbuffer, InvalidTransactionId,
+										   VISIBILITYMAP_ALL_VISIBLE |
+										   VISIBILITYMAP_ALL_FROZEN);
 			END_CRIT_SECTION();
+
+			/*
+			 * If the page wasn't already set all-visible and/or all-frozen in
+			 * the VM, count it as newly set for logging.
+			 */
+			if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
+			{
+				vacrel->vm_new_visible_pages++;
+				vacrel->vm_new_visible_frozen_pages++;
+			}
+			else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
+				vacrel->vm_new_frozen_pages++;
 		}
 
 		freespace = PageGetHeapFreeSpace(page);
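Each of the four call sites touched by this patch repeats the same classification of visibilitymap_set()'s return value. A hypothetical helper distilling that pattern — count_vm_set_for_logging is an invented name, not in the patch, and it assumes the surrounding vacuumlazy.c context for LVRelState and the VISIBILITYMAP_* flags:

```c
/*
 * old_vmbits is visibilitymap_set()'s return value (the VM bits that were
 * previously set for the heap block); set_all_frozen says whether
 * VISIBILITYMAP_ALL_FROZEN was among the bits just set.
 */
static void
count_vm_set_for_logging(LVRelState *vacrel, uint8 old_vmbits,
						 bool set_all_frozen)
{
	if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
	{
		/* page is newly all-visible; possibly newly all-frozen too */
		vacrel->vm_new_visible_pages++;
		if (set_all_frozen)
			vacrel->vm_new_visible_frozen_pages++;
	}
	else if (set_all_frozen &&
			 (old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
	{
		/* page was already all-visible; only the all-frozen bit is new */
		vacrel->vm_new_frozen_pages++;
	}
}
```

The patch inlines this logic at each site instead; presumably because at some sites set_all_frozen is unconditionally true, and the second lazy_scan_prune() path below can drop the ALL_FROZEN re-test entirely, since its guard already checked !VM_ALL_FROZEN().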
@@ -1531,6 +1573,7 @@ lazy_scan_prune(LVRelState *vacrel,
 	 */
 	if (!all_visible_according_to_vm && presult.all_visible)
 	{
+		uint8		old_vmbits;
 		uint8		flags = VISIBILITYMAP_ALL_VISIBLE;
 
 		if (presult.all_frozen)
@@ -1554,9 +1597,24 @@ lazy_scan_prune(LVRelState *vacrel,
 		 */
 		PageSetAllVisible(page);
 		MarkBufferDirty(buf);
-		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vmbuffer, presult.vm_conflict_horizon,
-						  flags);
+		old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
+									   InvalidXLogRecPtr,
+									   vmbuffer, presult.vm_conflict_horizon,
+									   flags);
+
+		/*
+		 * If the page wasn't already set all-visible and/or all-frozen in the
+		 * VM, count it as newly set for logging.
+		 */
+		if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
+		{
+			vacrel->vm_new_visible_pages++;
+			if (presult.all_frozen)
+				vacrel->vm_new_visible_frozen_pages++;
+		}
+		else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
+				 presult.all_frozen)
+			vacrel->vm_new_frozen_pages++;
 	}
 
 	/*
@@ -1606,6 +1664,8 @@ lazy_scan_prune(LVRelState *vacrel,
 	else if (all_visible_according_to_vm && presult.all_visible &&
 			 presult.all_frozen && !VM_ALL_FROZEN(vacrel->rel, blkno, &vmbuffer))
 	{
+		uint8		old_vmbits;
+
 		/*
 		 * Avoid relying on all_visible_according_to_vm as a proxy for the
 		 * page-level PD_ALL_VISIBLE bit being set, since it might have become
@@ -1625,10 +1685,31 @@ lazy_scan_prune(LVRelState *vacrel,
 		 * was logged when the page's tuples were frozen.
 		 */
 		Assert(!TransactionIdIsValid(presult.vm_conflict_horizon));
-		visibilitymap_set(vacrel->rel, blkno, buf, InvalidXLogRecPtr,
-						  vmbuffer, InvalidTransactionId,
-						  VISIBILITYMAP_ALL_VISIBLE |
-						  VISIBILITYMAP_ALL_FROZEN);
+		old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
+									   InvalidXLogRecPtr,
+									   vmbuffer, InvalidTransactionId,
+									   VISIBILITYMAP_ALL_VISIBLE |
+									   VISIBILITYMAP_ALL_FROZEN);
+
+		/*
+		 * The page was likely already set all-visible in the VM. However,
+		 * there is a small chance that it was modified sometime between
+		 * setting all_visible_according_to_vm and checking the visibility
+		 * during pruning. Check the return value of old_vmbits anyway to
+		 * ensure the visibility map counters used for logging are accurate.
+		 */
+		if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
+		{
+			vacrel->vm_new_visible_pages++;
+			vacrel->vm_new_visible_frozen_pages++;
+		}
+
+		/*
+		 * We already checked that the page was not set all-frozen in the VM
+		 * above, so we don't need to test the value of old_vmbits.
+		 */
+		else
+			vacrel->vm_new_frozen_pages++;
 	}
 }
 
@@ -2274,6 +2355,7 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 	if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
 								 &all_frozen))
 	{
+		uint8		old_vmbits;
 		uint8		flags = VISIBILITYMAP_ALL_VISIBLE;
 
 		if (all_frozen)
@@ -2283,8 +2365,25 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
 		}
 
 		PageSetAllVisible(page);
-		visibilitymap_set(vacrel->rel, blkno, buffer, InvalidXLogRecPtr,
-						  vmbuffer, visibility_cutoff_xid, flags);
+		old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
+									   InvalidXLogRecPtr,
+									   vmbuffer, visibility_cutoff_xid,
+									   flags);
+
+		/*
+		 * If the page wasn't already set all-visible and/or all-frozen in the
+		 * VM, count it as newly set for logging.
+		 */
+		if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
+		{
+			vacrel->vm_new_visible_pages++;
+			if (all_frozen)
+				vacrel->vm_new_visible_frozen_pages++;
+		}
+
+		else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
+				 all_frozen)
+			vacrel->vm_new_frozen_pages++;
 	}
 
 	/* Revert to the previous phase information for error traceback */
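Taken together, all four call sites rely on visibilitymap_set() returning the flags that were previously set for the heap block, and classify the transition as follows (an illustrative summary, not text from the patch):

```
old ALL_VISIBLE  old ALL_FROZEN  setting ALL_FROZEN  counter(s) incremented
---------------------------------------------------------------------------
clear            clear           no                  vm_new_visible_pages
clear            clear           yes                 vm_new_visible_pages,
                                                     vm_new_visible_frozen_pages
set              clear           yes                 vm_new_frozen_pages
set              set             (either)            none
```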