@@ -382,7 +382,6 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 	BTMetaPageData *metad;
 	uint32		previouslevel;
 	BtreeLevel	current;
-	Snapshot	snapshot = SnapshotAny;
 
 	if (!readonly)
 		elog(DEBUG1, "verifying consistency of tree structure for index \"%s\"",
@@ -433,38 +432,35 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 		state->heaptuplespresent = 0;
 
 		/*
-		 * Register our own snapshot in !readonly case, rather than asking
+		 * Register our own snapshot for heapallindexed, rather than asking
 		 * table_index_build_scan() to do this for us later.  This needs to
 		 * happen before index fingerprinting begins, so we can later be
 		 * certain that index fingerprinting should have reached all tuples
 		 * returned by table_index_build_scan().
 		 */
-		if (!state->readonly)
-		{
-			snapshot = RegisterSnapshot(GetTransactionSnapshot());
+		state->snapshot = RegisterSnapshot(GetTransactionSnapshot());
 
-			/*
-			 * GetTransactionSnapshot() always acquires a new MVCC snapshot in
-			 * READ COMMITTED mode.  A new snapshot is guaranteed to have all
-			 * the entries it requires in the index.
-			 *
-			 * We must defend against the possibility that an old xact
-			 * snapshot was returned at higher isolation levels when that
-			 * snapshot is not safe for index scans of the target index.  This
-			 * is possible when the snapshot sees tuples that are before the
-			 * index's indcheckxmin horizon.  Throwing an error here should be
-			 * very rare.  It doesn't seem worth using a secondary snapshot to
-			 * avoid this.
-			 */
-			if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
-				!TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
-									   snapshot->xmin))
-				ereport(ERROR,
-						(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-						 errmsg("index \"%s\" cannot be verified using transaction snapshot",
-								RelationGetRelationName(rel))));
-		}
-	}
+		/*
+		 * GetTransactionSnapshot() always acquires a new MVCC snapshot in
+		 * READ COMMITTED mode.  A new snapshot is guaranteed to have all
+		 * the entries it requires in the index.
+		 *
+		 * We must defend against the possibility that an old xact
+		 * snapshot was returned at higher isolation levels when that
+		 * snapshot is not safe for index scans of the target index.  This
+		 * is possible when the snapshot sees tuples that are before the
+		 * index's indcheckxmin horizon.  Throwing an error here should be
+		 * very rare.  It doesn't seem worth using a secondary snapshot to
+		 * avoid this.
+		 */
+		if (IsolationUsesXactSnapshot() && rel->rd_index->indcheckxmin &&
+			!TransactionIdPrecedes(HeapTupleHeaderGetXmin(rel->rd_indextuple->t_data),
+								   state->snapshot->xmin))
+			ereport(ERROR,
+					(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
+					 errmsg("index \"%s\" cannot be verified using transaction snapshot",
+							RelationGetRelationName(rel))));
+	}
 
 	/*
 	 * We need a snapshot to check the uniqueness of the index. For better
@@ -476,9 +472,7 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 		state->indexinfo = BuildIndexInfo(state->rel);
 		if (state->indexinfo->ii_Unique)
 		{
-			if (snapshot != SnapshotAny)
-				state->snapshot = snapshot;
-			else
+			if (state->snapshot == InvalidSnapshot)
 				state->snapshot = RegisterSnapshot(GetTransactionSnapshot());
 		}
 	}
@@ -555,21 +549,20 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 		/*
 		 * Create our own scan for table_index_build_scan(), rather than
 		 * getting it to do so for us.  This is required so that we can
-		 * actually use the MVCC snapshot registered earlier in !readonly
-		 * case.
+		 * actually use the MVCC snapshot registered earlier.
 		 *
 		 * Note that table_index_build_scan() calls heap_endscan() for us.
 		 */
 		scan = table_beginscan_strat(state->heaprel,	/* relation */
-									 snapshot,	/* snapshot */
+									 state->snapshot,	/* snapshot */
 									 0, /* number of keys */
 									 NULL,	/* scan key */
 									 true,	/* buffer access strategy OK */
 									 true); /* syncscan OK? */
 
 		/*
 		 * Scan will behave as the first scan of a CREATE INDEX CONCURRENTLY
-		 * behaves in !readonly case.
+		 * behaves.
 		 *
 		 * It's okay that we don't actually use the same lock strength for the
 		 * heap relation as any other ii_Concurrent caller would in !readonly
@@ -578,7 +571,7 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 		 * that needs to be sure that there was no concurrent recycling of
 		 * TIDs.
 		 */
-		indexinfo->ii_Concurrent = !state->readonly;
+		indexinfo->ii_Concurrent = true;
 
 		/*
 		 * Don't wait for uncommitted tuple xact commit/abort when index is a
@@ -602,14 +595,11 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 						state->heaptuplespresent, RelationGetRelationName(heaprel),
 						100.0 * bloom_prop_bits_set(state->filter))));
 
-		if (snapshot != SnapshotAny)
-			UnregisterSnapshot(snapshot);
-
 		bloom_free(state->filter);
 	}
 
 	/* Be tidy: */
-	if (snapshot == SnapshotAny && state->snapshot != InvalidSnapshot)
+	if (state->snapshot != InvalidSnapshot)
 		UnregisterSnapshot(state->snapshot);
 	MemoryContextDelete(state->targetcontext);
 }
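
For context, the heapallindexed code path changed above is reached through amcheck's SQL-callable entry points. A minimal usage sketch, assuming the amcheck extension is installed and using a placeholder index name idx_foo:

    CREATE EXTENSION IF NOT EXISTS amcheck;
    -- bt_index_check() takes only an AccessShareLock (the !readonly path)
    SELECT bt_index_check('idx_foo'::regclass, heapallindexed => true);
    -- bt_index_parent_check() takes a ShareLock and also verifies parent/child invariants
    SELECT bt_index_parent_check('idx_foo'::regclass, heapallindexed => true);

With this change, both calls register their own MVCC snapshot in state->snapshot before index fingerprinting, rather than only the !readonly bt_index_check() case doing so.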