@@ -4585,9 +4585,10 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 	block = ItemPointerGetBlockNumber(tid);
 
 	/*
-	 * Before locking the buffer, pin the visibility map page if it may be
-	 * necessary.  XXX: It might be possible for this to change after acquiring
-	 * the lock below.  We don't yet deal with that case.
+	 * Before locking the buffer, pin the visibility map page if it appears to
+	 * be necessary.  Since we haven't got the lock yet, someone else might be
+	 * in the middle of changing this, so we'll need to recheck after we have
+	 * the lock.
 	 */
 	if (PageIsAllVisible(BufferGetPage(*buffer)))
 		visibilitymap_pin(relation, block, &vmbuffer);
@@ -5075,6 +5076,23 @@ heap_lock_tuple(Relation relation, HeapTuple tuple,
 			goto out_locked;
 		}
 
+	/*
+	 * If we didn't pin the visibility map page and the page has become all
+	 * visible while we were busy locking the buffer, or during some
+	 * subsequent window during which we had it unlocked, we'll have to unlock
+	 * and re-lock, to avoid holding the buffer lock across I/O.  That's a bit
+	 * unfortunate, especially since we'll now have to recheck whether the
+	 * tuple has been locked or updated under us, but hopefully it won't
+	 * happen very often.
+	 */
+	if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
+	{
+		LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
+		visibilitymap_pin(relation, block, &vmbuffer);
+		LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
+		goto l3;
+	}
+
 	xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
 	old_infomask = tuple->t_data->t_infomask;
 
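The heap_lock_tuple() hunk above boils down to a small, self-contained pattern: take the pin optimistically before locking, recheck under the lock, and if the guess turned out stale, drop the lock to perform the (possibly I/O-bound) pin and retry the checks from the top. The following standalone C sketch models that pattern with a plain pthread mutex; it is not PostgreSQL code, and every name in it (pin_vm_page, page_all_visible, and so on) is a hypothetical stand-in for the real buffer-manager calls.

/*
 * Standalone sketch of the pin-recheck-relock pattern above.
 * Assumption: "pinning" may perform I/O, so it must never happen
 * while the buffer lock is held.  All names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static bool page_all_visible = true;	/* stand-in for PageIsAllVisible() */
static bool vm_pinned = false;	/* stand-in for vmbuffer != InvalidBuffer */

/*
 * Stand-in for visibilitymap_pin(): may block on I/O, so it is only
 * ever called while buffer_lock is NOT held.
 */
static void
pin_vm_page(void)
{
	vm_pinned = true;
}

static void
lock_tuple_sketch(void)
{
	/* Opportunistic pin before locking; this check can be stale. */
	if (page_all_visible)
		pin_vm_page();

l3:	/* mirrors the l3 retry label in heap_lock_tuple() */
	pthread_mutex_lock(&buffer_lock);

	/* ... the tuple lock/update checks happen here, under the lock ... */

	/*
	 * Recheck under the lock: the page may have become all-visible while
	 * we were waiting.  We must not pin while holding the lock, so drop
	 * it, pin, and go back to redo the checks from the top.
	 */
	if (!vm_pinned && page_all_visible)
	{
		pthread_mutex_unlock(&buffer_lock);
		pin_vm_page();
		goto l3;
	}

	/* The pin is now guaranteed whenever the page is all-visible. */
	pthread_mutex_unlock(&buffer_lock);
}

int
main(void)
{
	lock_tuple_sketch();
	printf("vm page pinned: %d\n", vm_pinned);
	return 0;
}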
@@ -5665,9 +5683,10 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
 		CHECK_FOR_INTERRUPTS();
 
 		/*
-		 * Before locking the buffer, pin the visibility map page if it may be
-		 * necessary.  XXX: It might be possible for this to change after
-		 * acquiring the lock below.  We don't yet deal with that case.
+		 * Before locking the buffer, pin the visibility map page if it
+		 * appears to be necessary.  Since we haven't got the lock yet,
+		 * someone else might be in the middle of changing this, so we'll need
+		 * to recheck after we have the lock.
 		 */
 		if (PageIsAllVisible(BufferGetPage(buf)))
 			visibilitymap_pin(rel, block, &vmbuffer);
@@ -5676,6 +5695,19 @@ heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
 
 		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
 
+		/*
+		 * If we didn't pin the visibility map page and the page has become
+		 * all visible while we were busy locking the buffer, we'll have to
+		 * unlock and re-lock, to avoid holding the buffer lock across I/O.
+		 * That's a bit unfortunate, but hopefully shouldn't happen often.
+		 */
+		if (vmbuffer == InvalidBuffer && PageIsAllVisible(BufferGetPage(buf)))
+		{
+			LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+			visibilitymap_pin(rel, block, &vmbuffer);
+			LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
+		}
+
 		/*
 		 * Check the tuple XMIN against prior XMAX, if any.  If we reached the
 		 * end of the chain, we're done, so return success.
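Both hunks enforce the same underlying rule: visibilitymap_pin() may need to read the visibility map page in from disk, and a buffer content lock must never be held across that kind of I/O, so the pin is attempted optimistically before the lock and rechecked under it. The two sites differ only in recovery: heap_lock_tuple() jumps back to l3 because, as its comment notes, the tuple may have been locked or updated while the buffer was briefly unlocked, whereas heap_lock_updated_tuple_rec() simply re-takes the lock and falls through, evidently relying on the XMIN-against-prior-XMAX chain check that immediately follows.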