Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/nbtree/nbtree.c     |  2
-rw-r--r--  src/backend/access/nbtree/nbtutils.c   | 47
-rw-r--r--  src/backend/commands/copyfromparse.c   |  2
-rw-r--r--  src/backend/libpq/be-secure-gssapi.c   |  6
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c  |  3
5 files changed, 33 insertions, 27 deletions
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 03a1d7b027a..fdff960c130 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -417,6 +417,8 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
* way, so we might as well avoid wasting cycles on acquiring page LSNs.
*
* See nbtree/README section on making concurrent TID recycling safe.
+ *
+ * Note: so->dropPin should never change across rescans.
*/
so->dropPin = (!scan->xs_want_itup &&
IsMVCCSnapshot(scan->xs_snapshot) &&
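Because so->dropPin is computed once here and, per the new note, never changes across rescans, code that runs later in the scan can turn it into hard assertions about pin state. A condensed, non-verbatim view of how the _bt_killitems hunks below lean on that invariant:

    if (!so->dropPin)
        Assert(BTScanPosIsPinned(so->currPos));     /* pin was never dropped */
    else
        Assert(!BTScanPosIsPinned(so->currPos));    /* pin is dropped eagerly */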
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 29f0dca1b08..86293cba5ab 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -3323,24 +3323,26 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
* current page and killed tuples thereon (generally, this should only be
* called if so->numKilled > 0).
*
- * The caller does not have a lock on the page and may or may not have the
- * page pinned in a buffer. Note that read-lock is sufficient for setting
- * LP_DEAD status (which is only a hint).
+ * Caller should not have a lock on the so->currPos page, but must hold a
+ * buffer pin when !so->dropPin. When we return, it still won't be locked.
+ * It'll continue to hold whatever pins were held before calling here.
*
- * We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
- * If an item has moved off the current page due to a split, we'll fail to
- * find it and do nothing (this is not an error case --- we assume the item
- * will eventually get marked in a future indexscan).
- *
- * Note that if we hold a pin on the target page continuously from initially
- * reading the items until applying this function, VACUUM cannot have deleted
- * any items on the page, so the page's TIDs can't have been recycled by now.
- * There's no risk that we'll confuse a new index tuple that happens to use a
- * recycled TID with a now-removed tuple with the same TID (that used to be on
- * this same page). We can't rely on that during scans that drop pins eagerly
+ * We match items by heap TID before assuming they are the right ones to set
+ * LP_DEAD. If the scan is one that holds a buffer pin on the target page
+ * continuously from initially reading the items until applying this function
+ * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
+ * page, so the page's TIDs can't have been recycled by now. There's no risk
+ * that we'll confuse a new index tuple that happens to use a recycled TID
+ * with a now-removed tuple with the same TID (that used to be on this same
+ * page). We can't rely on that during scans that drop buffer pins eagerly
* (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
* the page LSN having not changed since back when _bt_readpage saw the page.
+ * We totally give up on setting LP_DEAD bits when the page LSN changed.
+ *
+ * We give up much less often during !so->dropPin scans, but it still happens.
+ * We cope with cases where items have moved right due to insertions. If an
+ * item has moved off the current page due to a split, we'll fail to find it
+ * and just give up on it.
*/
void
_bt_killitems(IndexScanDesc scan)
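The guard described in the rewritten comment reduces to a re-pin-and-compare pattern for so->dropPin scans. A reconstructed sketch, not verbatim from the patch (_bt_getbuf and the currPos field names are assumptions drawn from surrounding nbtree code; so->currPos.lsn is taken to be the LSN recorded when _bt_readpage read the page):

    buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);  /* re-pin, read lock */
    latestlsn = BufferGetLSNAtomic(buf);
    if (latestlsn != so->currPos.lsn)
    {
        /* Page was WAL-logged while unpinned; its TIDs may have been recycled */
        _bt_relbuf(rel, buf);
        return;                 /* totally give up on setting LP_DEAD bits */
    }
    /* Unmodified, hinting is safe */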
@@ -3353,6 +3355,7 @@ _bt_killitems(IndexScanDesc scan)
OffsetNumber maxoff;
int numKilled = so->numKilled;
bool killedsomething = false;
+ Buffer buf;
Assert(numKilled > 0);
Assert(BTScanPosIsValid(so->currPos));
@@ -3369,11 +3372,11 @@ _bt_killitems(IndexScanDesc scan)
* concurrent VACUUMs from recycling any of the TIDs on the page.
*/
Assert(BTScanPosIsPinned(so->currPos));
- _bt_lockbuf(rel, so->currPos.buf, BT_READ);
+ buf = so->currPos.buf;
+ _bt_lockbuf(rel, buf, BT_READ);
}
else
{
- Buffer buf;
XLogRecPtr latestlsn;
Assert(!BTScanPosIsPinned(so->currPos));
@@ -3391,10 +3394,9 @@ _bt_killitems(IndexScanDesc scan)
}
/* Unmodified, hinting is safe */
- so->currPos.buf = buf;
}
- page = BufferGetPage(so->currPos.buf);
+ page = BufferGetPage(buf);
opaque = BTPageGetOpaque(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
@@ -3511,10 +3513,13 @@ _bt_killitems(IndexScanDesc scan)
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
- MarkBufferDirtyHint(so->currPos.buf, true);
+ MarkBufferDirtyHint(buf, true);
}
- _bt_unlockbuf(rel, so->currPos.buf);
+ if (!so->dropPin)
+ _bt_unlockbuf(rel, buf);
+ else
+ _bt_relbuf(rel, buf);
}
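Note how the new cleanup keeps acquisition and release symmetric: a !so->dropPin scan arrived holding the caller's pin and only took a read lock here, so it gives back only the lock; a so->dropPin scan re-acquired the buffer inside this function, so _bt_relbuf drops both the lock and the pin it took. In outline (a gloss on the hunks above, not patch text):

    /*
     * !so->dropPin: caller's pin held throughout  ->  _bt_unlockbuf(rel, buf)
     *  so->dropPin: buffer re-acquired here       ->  _bt_relbuf(rel, buf),
     *               i.e. unlock and unpin
     */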
diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c
index f5fc346e201..f52f2477df1 100644
--- a/src/backend/commands/copyfromparse.c
+++ b/src/backend/commands/copyfromparse.c
@@ -1538,7 +1538,7 @@ GetDecimalFromHex(char hex)
if (isdigit((unsigned char) hex))
return hex - '0';
else
- return tolower((unsigned char) hex) - 'a' + 10;
+ return pg_ascii_tolower((unsigned char) hex) - 'a' + 10;
}
/*
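Swapping tolower() for pg_ascii_tolower() makes the conversion locale-independent, which is what this parser wants: the hex escapes being decoded are defined in terms of ASCII, and a locale-aware tolower() can surprise (the classic example being Turkish locales, where the mapping of 'I' is not the ASCII one). A standalone sketch of the idea, with ascii_tolower and decimal_from_hex as illustrative stand-ins rather than the real helpers:

    #include <assert.h>

    /* Locale-independent lowercasing, ASCII letters only */
    static unsigned char
    ascii_tolower(unsigned char ch)
    {
        if (ch >= 'A' && ch <= 'Z')
            return ch - 'A' + 'a';
        return ch;
    }

    /* GetDecimalFromHex's pattern, restated with the toy helper */
    static int
    decimal_from_hex(unsigned char hex)
    {
        if (hex >= '0' && hex <= '9')
            return hex - '0';
        assert(ascii_tolower(hex) >= 'a' && ascii_tolower(hex) <= 'f');
        return ascii_tolower(hex) - 'a' + 10;
    }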
diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c
index 3534f0b8111..5d98c58ffa8 100644
--- a/src/backend/libpq/be-secure-gssapi.c
+++ b/src/backend/libpq/be-secure-gssapi.c
@@ -121,9 +121,9 @@ be_gssapi_write(Port *port, const void *ptr, size_t len)
* again, so if it offers a len less than that, something is wrong.
*
* Note: it may seem attractive to report partial write completion once
- * we've successfully sent any encrypted packets. However, that can cause
- * problems for callers; notably, pqPutMsgEnd's heuristic to send only
- * full 8K blocks interacts badly with such a hack. We won't save much,
+ * we've successfully sent any encrypted packets. However, doing that
+ * expands the state space of this processing and has been responsible for
+ * bugs in the past (cf. commit d053a879b). We won't save much,
* typically, by letting callers discard data early, so don't risk it.
*/
if (len < PqGSSSendConsumed)
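For context, PqGSSSendConsumed tracks caller bytes that have already been folded into encrypted packets but never reported as written; since partial completion is never reported, a retry must offer at least that many bytes again. A toy model of just this check (simplified types and names, not the real state machine):

    #include <stddef.h>

    /* Returns 0 if the retry honors the all-or-nothing contract, -1 if the
     * caller shrank its buffer below what was already encrypted. */
    static int
    check_retry_len(size_t len, size_t consumed)
    {
        if (len < consumed)
            return -1;          /* caller discarded bytes we already encrypted */
        return 0;
    }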
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index ef2236d9f04..3b0db2a3799 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -115,8 +115,7 @@ inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size)
src++; /* skip x or X. */
while ((ch = *src++) != '\0' && isxdigit((unsigned char) ch))
{
- if (isupper((unsigned char) ch))
- ch = tolower((unsigned char) ch);
+ ch = pg_ascii_tolower((unsigned char) ch);
n = strchr(xdigits, ch) - xdigits;
assert(n >= 0 && n <= 15);
if (dirty == 0)
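The surrounding loop's lookup idiom works because each hex digit's value equals its offset in the xdigits string, so lowercasing plus strchr() yields the nibble directly. A standalone restatement (hexval is an illustrative name, not a helper in this file):

    #include <assert.h>
    #include <string.h>

    static const char xdigits[] = "0123456789abcdef";

    /* Map one lowercased hex digit to its value via its index in xdigits */
    static int
    hexval(char ch)
    {
        const char *p = strchr(xdigits, ch);

        assert(p != NULL);      /* caller already checked isxdigit() */
        return (int) (p - xdigits);
    }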