diff options
author | Alvaro Herrera | 2019-06-17 02:24:21 +0000 |
---|---|---|
committer | Alvaro Herrera | 2019-06-17 02:24:21 +0000 |
commit | 9d20b0ec8f2af43041b1a65e5fcd91acc47e9ace (patch) | |
tree | 19713e1f3369ac4f1b4d7f0097805fba404950c3 /src/backend | |
parent | 16c4e76f1b0a0145572551b7e919b967e406a0b1 (diff) |
Revert "Avoid spurious deadlocks when upgrading a tuple lock"
This reverts commits 3da73d6839dc and de87a084c0a5.
This code has some tricky corner cases that I'm not sure are correct and
not properly tested anyway, so I'm reverting the whole thing for next
week's releases (reintroducing the deadlock bug that we set out to fix).
I'll try again afterwards.
Discussion: https://postgr.es/m/E1hbXKQ-0003g1-0C@gemulon.postgresql.org
Diffstat (limited to 'src/backend')
-rw-r--r-- | src/backend/access/heap/README.tuplock | 10 | ||||
-rw-r--r-- | src/backend/access/heap/heapam.c | 83 |
2 files changed, 21 insertions, 72 deletions
diff --git a/src/backend/access/heap/README.tuplock b/src/backend/access/heap/README.tuplock index d03ddf6cdcc..b2f3a4ce909 100644 --- a/src/backend/access/heap/README.tuplock +++ b/src/backend/access/heap/README.tuplock @@ -36,16 +36,6 @@ do LockTuple as well, if there is any conflict, to ensure that they don't starve out waiting exclusive-lockers. However, if there is not any active conflict for a tuple, we don't incur any extra overhead. -We make an exception to the above rule for those lockers that already hold -some lock on a tuple and attempt to acquire a stronger one on it. In that -case, we skip the LockTuple() call even when there are conflicts, provided -that the target tuple is being locked, updated or deleted by multiple sessions -concurrently. Failing to skip the lock would risk a deadlock, e.g., between a -session that was first to record its weaker lock in the tuple header and would -be waiting on the LockTuple() call to upgrade to the stronger lock level, and -another session that has already done LockTuple() and is waiting for the first -session transaction to release its tuple header-level lock. - We provide four levels of tuple locking strength: SELECT FOR UPDATE obtains an exclusive lock which prevents any kind of modification of the tuple. 
This is the lock level that is implicitly taken by DELETE operations, and also by diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index fa86efed2e8..8ac0f8a5134 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -95,7 +95,7 @@ static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask); static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, - LockTupleMode lockmode, bool *current_is_member); + LockTupleMode lockmode); static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining); @@ -2547,20 +2547,15 @@ l1: */ if (infomask & HEAP_XMAX_IS_MULTI) { - bool current_is_member = false; - + /* wait for multixact */ if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask, - LockTupleExclusive, &current_is_member)) + LockTupleExclusive)) { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - /* - * Acquire the lock, if necessary (but skip it when we're - * requesting a lock and already have one; avoids deadlock). - */ - if (!current_is_member) - heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, - LockWaitBlock, &have_tuple_lock); + /* acquire tuple lock, if necessary */ + heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, + LockWaitBlock, &have_tuple_lock); /* wait for multixact */ MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask, @@ -3131,20 +3126,15 @@ l2: { TransactionId update_xact; int remain; - bool current_is_member = false; if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask, - *lockmode, &current_is_member)) + *lockmode)) { LockBuffer(buffer, BUFFER_LOCK_UNLOCK); - /* - * Acquire the lock, if necessary (but skip it when we're - * requesting a lock and already have one; avoids deadlock). 
- */ - if (!current_is_member) - heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode, - LockWaitBlock, &have_tuple_lock); + /* acquire tuple lock, if necessary */ + heap_acquire_tuplock(relation, &(oldtup.t_self), *lockmode, + LockWaitBlock, &have_tuple_lock); /* wait for multixact */ MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask, @@ -4038,7 +4028,6 @@ l3: uint16 infomask; uint16 infomask2; bool require_sleep; - bool skip_tuple_lock = false; ItemPointerData t_ctid; /* must copy state data before unlocking buffer */ @@ -4092,21 +4081,6 @@ l3: result = TM_Ok; goto out_unlocked; } - else - { - /* - * Disable acquisition of the heavyweight tuple lock. - * Otherwise, when promoting a weaker lock, we might - * deadlock with another locker that has acquired the - * heavyweight tuple lock and is waiting for our - * transaction to finish. - * - * Note that in this case we still need to wait for - * the multixact if required, to avoid acquiring - * conflicting locks. - */ - skip_tuple_lock = true; - } } if (members) @@ -4261,7 +4235,7 @@ l3: if (infomask & HEAP_XMAX_IS_MULTI) { if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask, - mode, NULL)) + mode)) { /* * No conflict, but if the xmax changed under us in the @@ -4338,15 +4312,13 @@ l3: /* * Acquire tuple lock to establish our priority for the tuple, or * die trying. LockTuple will release us when we are next-in-line - * for the tuple. We must do this even if we are share-locking, - * but not if we already have a weaker lock on the tuple. + * for the tuple. We must do this even if we are share-locking. * * If we are forced to "start over" below, we keep the tuple lock; * this arranges that we stay at the head of the line while * rechecking tuple state. 
*/ - if (!skip_tuple_lock && - !heap_acquire_tuplock(relation, tid, mode, wait_policy, + if (!heap_acquire_tuplock(relation, tid, mode, wait_policy, &have_tuple_lock)) { /* @@ -6544,13 +6516,10 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple) * tuple lock of the given strength? * * The passed infomask pairs up with the given multixact in the tuple header. - * - * If current_is_member is not NULL, it is set to 'true' if the current - * transaction is a member of the given multixact. */ static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, - LockTupleMode lockmode, bool *current_is_member) + LockTupleMode lockmode) { int nmembers; MultiXactMember *members; @@ -6571,26 +6540,17 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, TransactionId memxid; LOCKMODE memlockmode; - if (result && (current_is_member == NULL || *current_is_member)) - break; - memlockmode = LOCKMODE_from_mxstatus(members[i].status); - /* ignore members from current xact (but track their presence) */ - memxid = members[i].xid; - if (TransactionIdIsCurrentTransactionId(memxid)) - { - if (current_is_member != NULL) - *current_is_member = true; - continue; - } - else if (result) - continue; - /* ignore members that don't conflict with the lock we want */ if (!DoLockModesConflict(memlockmode, wanted)) continue; + /* ignore members from current xact */ + memxid = members[i].xid; + if (TransactionIdIsCurrentTransactionId(memxid)) + continue; + if (ISUPDATE_from_mxstatus(members[i].status)) { /* ignore aborted updaters */ @@ -6607,11 +6567,10 @@ DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, /* * Whatever remains are either live lockers that conflict with our * wanted lock, and updaters that are not aborted. Those conflict - * with what we want. Set up to return true, but keep going to - * look for the current transaction among the multixact members, - * if needed. + * with what we want, so return true. */ result = true; + break; } pfree(members); } |