@@ -45,12 +45,6 @@
 #include "utils/builtins.h"
 #include "utils/rel.h"
 
-static TM_Result heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
-                                            Snapshot snapshot, TupleTableSlot *slot,
-                                            CommandId cid, LockTupleMode mode,
-                                            LockWaitPolicy wait_policy, uint8 flags,
-                                            TM_FailureData *tmfd, bool updated);
-
 static void reform_and_rewrite_tuple(HeapTuple tuple,
                                      Relation OldHeap, Relation NewHeap,
                                      Datum *values, bool *isnull, RewriteState rwstate);
@@ -305,55 +299,22 @@ heapam_tuple_complete_speculative(Relation relation, TupleTableSlot *slot,
 
 static TM_Result
 heapam_tuple_delete(Relation relation, ItemPointer tid, CommandId cid,
                     Snapshot snapshot, Snapshot crosscheck, bool wait,
-                    TM_FailureData *tmfd, bool changingPart,
-                    LazyTupleTableSlot *lockedSlot)
+                    TM_FailureData *tmfd, bool changingPart)
 {
-    TM_Result   result;
-
     /*
      * Currently Deleting of index tuples are handled at vacuum, in case if
      * the storage itself is cleaning the dead tuples by itself, it is the
      * time to call the index tuple deletion also.
      */
-    result = heap_delete(relation, tid, cid, crosscheck, wait,
-                         tmfd, changingPart);
-
-    /*
-     * If the tuple has been concurrently updated, then get the lock on it.
-     * (Do this if the caller asked for that by providing a 'lockedSlot'.)
-     * With the lock held, a retry of the delete should succeed even if there
-     * are more concurrent update attempts.
-     */
-    if (result == TM_Updated && lockedSlot)
-    {
-        TupleTableSlot *evalSlot;
-
-        Assert(wait);
-
-        evalSlot = LAZY_TTS_EVAL(lockedSlot);
-        result = heapam_tuple_lock_internal(relation, tid, snapshot,
-                                            evalSlot, cid, LockTupleExclusive,
-                                            LockWaitBlock,
-                                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
-                                            tmfd, true);
-
-        if (result == TM_Ok)
-        {
-            tmfd->traversed = true;
-            return TM_Updated;
-        }
-    }
-
-    return result;
+    return heap_delete(relation, tid, cid, crosscheck, wait, tmfd, changingPart);
 }
 
 
 static TM_Result
 heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
                     CommandId cid, Snapshot snapshot, Snapshot crosscheck,
                     bool wait, TM_FailureData *tmfd,
-                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes,
-                    LazyTupleTableSlot *lockedSlot)
+                    LockTupleMode *lockmode, TU_UpdateIndexes *update_indexes)
 {
     bool        shouldFree = true;
     HeapTuple   tuple = ExecFetchSlotHeapTuple(slot, true, &shouldFree);
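The hunk above strips the in-AM retry from heapam_tuple_delete(): after this revert, locking the newest tuple version when a concurrent update is detected is again the caller's job. A minimal caller-side sketch of that pattern, using only the public tableam entry points table_tuple_delete() and table_tuple_lock() from access/tableam.h; the helper name is hypothetical and EvalPlanQual rechecking is omitted, so treat this as an outline rather than the executor's actual code:

#include "postgres.h"
#include "access/tableam.h"

/* Hypothetical helper: delete, and on a concurrent update lock the
 * latest row version so a retry of the delete can succeed. */
static TM_Result
delete_locking_last_version(Relation rel, ItemPointer tid, CommandId cid,
                            Snapshot snapshot, Snapshot crosscheck,
                            TupleTableSlot *lockslot, TM_FailureData *tmfd)
{
    TM_Result   result;

    result = table_tuple_delete(rel, tid, cid, snapshot, crosscheck,
                                true /* wait */ , tmfd,
                                false /* changingPart */ );

    if (result == TM_Updated)
    {
        /* Lost the race: TUPLE_LOCK_FLAG_FIND_LAST_VERSION makes the AM
         * follow the update chain from tid and lock the newest version.
         * On TM_Ok the caller rechecks its quals against lockslot and
         * retries the delete. */
        result = table_tuple_lock(rel, tid, snapshot, lockslot, cid,
                                  LockTupleExclusive, LockWaitBlock,
                                  TUPLE_LOCK_FLAG_FIND_LAST_VERSION, tmfd);
    }

    return result;
}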
@@ -391,32 +352,6 @@ heapam_tuple_update(Relation relation, ItemPointer otid, TupleTableSlot *slot,
     if (shouldFree)
         pfree(tuple);
 
-    /*
-     * If the tuple has been concurrently updated, then get the lock on it.
-     * (Do this if the caller asked for that by providing a 'lockedSlot'.)
-     * With the lock held, a retry of the update should succeed even if there
-     * are more concurrent update attempts.
-     */
-    if (result == TM_Updated && lockedSlot)
-    {
-        TupleTableSlot *evalSlot;
-
-        Assert(wait);
-
-        evalSlot = LAZY_TTS_EVAL(lockedSlot);
-        result = heapam_tuple_lock_internal(relation, otid, snapshot,
-                                            evalSlot, cid, *lockmode,
-                                            LockWaitBlock,
-                                            TUPLE_LOCK_FLAG_FIND_LAST_VERSION,
-                                            tmfd, true);
-
-        if (result == TM_Ok)
-        {
-            tmfd->traversed = true;
-            return TM_Updated;
-        }
-    }
-
     return result;
 }
 
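heapam_tuple_update() loses the same shortcut. A caller-side sketch of the update flavour, under the same assumptions as the delete sketch above (hypothetical helper; the lock mode used on conflict is whatever table_tuple_update() reported back through *lockmode):

#include "postgres.h"
#include "access/tableam.h"

/* Hypothetical helper: update, and on a concurrent update lock the
 * latest row version before the caller retries. */
static TM_Result
update_locking_last_version(Relation rel, ItemPointer otid,
                            TupleTableSlot *newslot, TupleTableSlot *lockslot,
                            CommandId cid, Snapshot snapshot,
                            Snapshot crosscheck, TM_FailureData *tmfd,
                            LockTupleMode *lockmode,
                            TU_UpdateIndexes *update_indexes)
{
    TM_Result   result;

    result = table_tuple_update(rel, otid, newslot, cid, snapshot,
                                crosscheck, true /* wait */ , tmfd,
                                lockmode, update_indexes);

    if (result == TM_Updated)
        result = table_tuple_lock(rel, otid, snapshot, lockslot, cid,
                                  *lockmode, LockWaitBlock,
                                  TUPLE_LOCK_FLAG_FIND_LAST_VERSION, tmfd);

    return result;
}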
@@ -425,26 +360,10 @@ heapam_tuple_lock(Relation relation, ItemPointer tid, Snapshot snapshot,
                     TupleTableSlot *slot, CommandId cid, LockTupleMode mode,
                     LockWaitPolicy wait_policy, uint8 flags,
                     TM_FailureData *tmfd)
-{
-    return heapam_tuple_lock_internal(relation, tid, snapshot, slot, cid,
-                                      mode, wait_policy, flags, tmfd, false);
-}
-
-/*
- * This routine does the work for heapam_tuple_lock(), but also supports an
- * `updated` argument to re-use the work done by heapam_tuple_update() or
- * heapam_tuple_delete() on figuring out that the tuple was concurrently
- * updated.
- */
-static TM_Result
-heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
-                           Snapshot snapshot, TupleTableSlot *slot,
-                           CommandId cid, LockTupleMode mode,
-                           LockWaitPolicy wait_policy, uint8 flags,
-                           TM_FailureData *tmfd, bool updated)
 {
     BufferHeapTupleTableSlot *bslot = (BufferHeapTupleTableSlot *) slot;
     TM_Result   result;
-    Buffer      buffer = InvalidBuffer;
+    Buffer      buffer;
     HeapTuple   tuple = &bslot->base.tupdata;
     bool        follow_updates;
 
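A detail visible in the hunk above: `Buffer buffer = InvalidBuffer;` becomes plain `Buffer buffer;`. The initializer was only needed while the `updated` short-circuit could skip heap_lock_tuple(), the one call that assigns the buffer. A condensed illustration of that hazard, paraphrasing the removed code rather than quoting it (not a complete function):

/* While the short-circuit existed, one path left `buffer` unassigned,
 * so it had to start out invalid for later cleanup to stay safe. */
Buffer      buffer = InvalidBuffer;

if (!updated)
    result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
                             follow_updates, &buffer, tmfd);  /* sets buffer */
else
    result = TM_Updated;    /* buffer still InvalidBuffer on this path */

/* later cleanup, which must tolerate the unassigned path: */
if (BufferIsValid(buffer))
    ReleaseBuffer(buffer);

With the `updated` argument gone, heap_lock_tuple() runs unconditionally and always sets the buffer, so the defensive initializer can be dropped.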
@@ -455,26 +374,16 @@ heapam_tuple_lock_internal(Relation relation, ItemPointer tid,
 
 tuple_lock_retry:
     tuple->t_self = *tid;
-    if (!updated)
-        result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
-                                 follow_updates, &buffer, tmfd);
-    else
-        result = TM_Updated;
+    result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
+                             follow_updates, &buffer, tmfd);
 
     if (result == TM_Updated &&
         (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION))
     {
-        if (!updated)
-        {
-            /* Should not encounter speculative tuple on recheck */
-            Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
+        /* Should not encounter speculative tuple on recheck */
+        Assert(!HeapTupleHeaderIsSpeculative(tuple->t_data));
 
-            ReleaseBuffer(buffer);
-        }
-        else
-        {
-            updated = false;
-        }
+        ReleaseBuffer(buffer);
 
         if (!ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
         {
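For orientation, this last hunk sits at the top of the function's retry loop. In outline, and simplified (the real code fetches each successor version under a dirty snapshot and rechecks visibility before looping; that part is elided here):

tuple_lock_retry:
    tuple->t_self = *tid;
    result = heap_lock_tuple(relation, tuple, cid, mode, wait_policy,
                             follow_updates, &buffer, tmfd);

    if (result == TM_Updated &&
        (flags & TUPLE_LOCK_FLAG_FIND_LAST_VERSION) &&
        !ItemPointerEquals(&tmfd->ctid, &tuple->t_self))
    {
        /* The row was updated away: chase the newer version and try to
         * lock that one instead, reporting the traversal to the caller. */
        tmfd->traversed = true;
        *tid = tmfd->ctid;      /* simplified; see elision note above */
        goto tuple_lock_retry;
    }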