@@ -96,6 +96,64 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
96
96
return buffer ;
97
97
}
98
98
99
/*
 * For each heap page which is all-visible, acquire a pin on the appropriate
 * visibility map page, if we haven't already got one.
 *
 * buffer2 may be InvalidBuffer, if only one buffer is involved.  buffer1
 * must not be InvalidBuffer.  If both buffers are specified, buffer1 must
 * be less than buffer2.
 *
 * On entry the caller must hold exclusive content locks on buffer1 (and
 * buffer2, if valid); the locks are temporarily released here whenever a
 * visibility map page has to be read in, then re-acquired before returning.
 * NOTE(review): lock re-acquisition order (buffer1 before buffer2) relies on
 * the buffer1 <= buffer2 precondition asserted below to avoid deadlock —
 * presumably matching the buffer-ordering convention used by callers; the
 * enclosing file's callers are not fully visible here, so confirm there.
 *
 * *vmbuffer1 / *vmbuffer2 are in-out: they hold whatever vm page pins the
 * caller already has (possibly InvalidBuffer) and are updated by
 * visibilitymap_pin() as needed.
 */
static void
GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
					 BlockNumber block1, BlockNumber block2,
					 Buffer *vmbuffer1, Buffer *vmbuffer2)
{
	bool		need_to_pin_buffer1;
	bool		need_to_pin_buffer2;

	Assert(BufferIsValid(buffer1));
	Assert(buffer2 == InvalidBuffer || buffer1 <= buffer2);

	while (1)
	{
		/*
		 * Figure out which pins we need but don't have.  A pin is needed
		 * only when the heap page is marked all-visible and the vm buffer
		 * we currently hold does not cover that heap block.
		 */
		need_to_pin_buffer1 = PageIsAllVisible(BufferGetPage(buffer1))
			&& !visibilitymap_pin_ok(block1, *vmbuffer1);
		need_to_pin_buffer2 = buffer2 != InvalidBuffer
			&& PageIsAllVisible(BufferGetPage(buffer2))
			&& !visibilitymap_pin_ok(block2, *vmbuffer2);
		if (!need_to_pin_buffer1 && !need_to_pin_buffer2)
			return;

		/*
		 * We must unlock both buffers before doing any I/O, since
		 * visibilitymap_pin() may have to read in a visibility map page.
		 * Guard against buffer2 aliasing buffer1 so we don't unlock twice.
		 */
		LockBuffer(buffer1, BUFFER_LOCK_UNLOCK);
		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
			LockBuffer(buffer2, BUFFER_LOCK_UNLOCK);

		/* Get pins. */
		if (need_to_pin_buffer1)
			visibilitymap_pin(relation, block1, vmbuffer1);
		if (need_to_pin_buffer2)
			visibilitymap_pin(relation, block2, vmbuffer2);

		/* Relock buffers, in ascending order per the precondition. */
		LockBuffer(buffer1, BUFFER_LOCK_EXCLUSIVE);
		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
			LockBuffer(buffer2, BUFFER_LOCK_EXCLUSIVE);

		/*
		 * If there are two buffers involved and we pinned just one of them,
		 * it's possible that the second one became all-visible while we were
		 * busy pinning the first one.  If it looks like that's a possible
		 * scenario, we'll need to make a second pass through this loop.
		 * (If we pinned both, or there is only one buffer, nothing could
		 * have changed that we haven't already accounted for, so stop.)
		 */
		if (buffer2 == InvalidBuffer || buffer1 == buffer2
			|| (need_to_pin_buffer1 && need_to_pin_buffer2))
			break;
	}
}
156
+
99
157
/*
100
158
* RelationGetBufferForTuple
101
159
*
@@ -152,7 +210,7 @@ Buffer
152
210
RelationGetBufferForTuple (Relation relation , Size len ,
153
211
Buffer otherBuffer , int options ,
154
212
struct BulkInsertStateData * bistate ,
155
- Buffer * vmbuffer )
213
+ Buffer * vmbuffer , Buffer * vmbuffer_other )
156
214
{
157
215
bool use_fsm = !(options & HEAP_INSERT_SKIP_FSM );
158
216
Buffer buffer = InvalidBuffer ;
@@ -284,11 +342,17 @@ RelationGetBufferForTuple(Relation relation, Size len,
284
342
}
285
343
286
344
/*
287
- * If the page is all visible but we don't have the right visibility
288
- * map page pinned, then give up our locks, go get the pin, and
289
- * re-lock. This is pretty painful, but hopefully shouldn't happen
290
- * often. Note that there's a small possibility that we didn't pin
291
- * the page above but still have the correct page pinned anyway, either
345
+ * We now have the target page (and the other buffer, if any) pinned
346
+ * and locked. However, since our initial PageIsAllVisible checks
347
+ * were performed before acquiring the lock, the results might now
348
+ * be out of date, either for the selected victim buffer, or for the
349
+ * other buffer passed by the caller. In that case, we'll need to give
350
+ * up our locks, go get the pin(s) we failed to get earlier, and
351
+ * re-lock. That's pretty painful, but hopefully shouldn't happen
352
+ * often.
353
+ *
354
+ * Note that there's a small possibility that we didn't pin the
355
+ * page above but still have the correct page pinned anyway, either
292
356
* because we've already made a previous pass through this loop, or
293
357
* because caller passed us the right page anyway.
294
358
*
@@ -297,19 +361,14 @@ RelationGetBufferForTuple(Relation relation, Size len,
297
361
* cleared by some other backend anyway. In that case, we'll have done
298
362
* a bit of extra work for no gain, but there's no real harm done.
299
363
*/
300
- if (PageIsAllVisible (BufferGetPage (buffer ))
301
- && !visibilitymap_pin_ok (targetBlock , * vmbuffer ))
302
- {
303
- LockBuffer (buffer , BUFFER_LOCK_UNLOCK );
304
- if (otherBlock != targetBlock )
305
- LockBuffer (otherBuffer , BUFFER_LOCK_UNLOCK );
306
- visibilitymap_pin (relation , targetBlock , vmbuffer );
307
- if (otherBuffer != InvalidBuffer && otherBlock < targetBlock )
308
- LockBuffer (otherBuffer , BUFFER_LOCK_EXCLUSIVE );
309
- LockBuffer (buffer , BUFFER_LOCK_EXCLUSIVE );
310
- if (otherBuffer != InvalidBuffer && otherBlock > targetBlock )
311
- LockBuffer (otherBuffer , BUFFER_LOCK_EXCLUSIVE );
312
- }
364
+ if (otherBuffer == InvalidBuffer || buffer <= otherBuffer )
365
+ GetVisibilityMapPins (relation , buffer , otherBuffer ,
366
+ targetBlock , otherBlock , vmbuffer ,
367
+ vmbuffer_other );
368
+ else
369
+ GetVisibilityMapPins (relation , otherBuffer , buffer ,
370
+ otherBlock , targetBlock , vmbuffer_other ,
371
+ vmbuffer );
313
372
314
373
/*
315
374
* Now we can check to see if there's enough free space here. If so,
0 commit comments