Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.shlib                        |  4
-rw-r--r--  src/backend/access/nbtree/nbtree.c        |  2
-rw-r--r--  src/backend/access/nbtree/nbtutils.c      | 47
-rw-r--r--  src/backend/commands/copyfromparse.c      |  2
-rw-r--r--  src/backend/libpq/be-secure-gssapi.c      |  6
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c     |  3
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c     | 11
-rw-r--r--  src/bin/psql/tab-complete.in.c            |  5
-rw-r--r--  src/interfaces/libpq/fe-misc.c            | 28
-rw-r--r--  src/interfaces/libpq/fe-secure-gssapi.c   |  6
10 files changed, 76 insertions, 38 deletions
diff --git a/src/Makefile.shlib b/src/Makefile.shlib
index fa81f6ffdd6..3825af5b228 100644
--- a/src/Makefile.shlib
+++ b/src/Makefile.shlib
@@ -112,7 +112,7 @@ ifeq ($(PORTNAME), darwin)
ifneq ($(SO_MAJOR_VERSION), 0)
version_link = -compatibility_version $(SO_MAJOR_VERSION) -current_version $(SO_MAJOR_VERSION).$(SO_MINOR_VERSION)
endif
- LINK.shared = $(COMPILER) -dynamiclib -install_name '$(libdir)/lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)' $(version_link) $(exported_symbols_list)
+ LINK.shared = $(COMPILER) -dynamiclib -install_name '$(libdir)/lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)' $(version_link)
shlib = lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)
shlib_major = lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)
else
@@ -122,7 +122,7 @@ ifeq ($(PORTNAME), darwin)
BUILD.exports = $(AWK) '/^[^\#]/ {printf "_%s\n",$$1}' $< >$@
exports_file = $(SHLIB_EXPORTS:%.txt=%.list)
ifneq (,$(exports_file))
- exported_symbols_list = -exported_symbols_list $(exports_file)
+ LINK.shared += -exported_symbols_list $(exports_file)
endif
endif
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 03a1d7b027a..fdff960c130 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -417,6 +417,8 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
* way, so we might as well avoid wasting cycles on acquiring page LSNs.
*
* See nbtree/README section on making concurrent TID recycling safe.
+ *
+ * Note: so->dropPin should never change across rescans.
*/
so->dropPin = (!scan->xs_want_itup &&
IsMVCCSnapshot(scan->xs_snapshot) &&
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 29f0dca1b08..86293cba5ab 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -3323,24 +3323,26 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
* current page and killed tuples thereon (generally, this should only be
* called if so->numKilled > 0).
*
- * The caller does not have a lock on the page and may or may not have the
- * page pinned in a buffer. Note that read-lock is sufficient for setting
- * LP_DEAD status (which is only a hint).
+ * Caller should not have a lock on the so->currPos page, but must hold a
+ * buffer pin when !so->dropPin. When we return, it still won't be locked.
+ * It'll continue to hold whatever pins were held before calling here.
*
- * We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
- * If an item has moved off the current page due to a split, we'll fail to
- * find it and do nothing (this is not an error case --- we assume the item
- * will eventually get marked in a future indexscan).
- *
- * Note that if we hold a pin on the target page continuously from initially
- * reading the items until applying this function, VACUUM cannot have deleted
- * any items on the page, so the page's TIDs can't have been recycled by now.
- * There's no risk that we'll confuse a new index tuple that happens to use a
- * recycled TID with a now-removed tuple with the same TID (that used to be on
- * this same page). We can't rely on that during scans that drop pins eagerly
+ * We match items by heap TID before assuming they are the right ones to set
+ * LP_DEAD. If the scan is one that holds a buffer pin on the target page
+ * continuously from initially reading the items until applying this function
+ * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
+ * page, so the page's TIDs can't have been recycled by now. There's no risk
+ * that we'll confuse a new index tuple that happens to use a recycled TID
+ * with a now-removed tuple with the same TID (that used to be on this same
+ * page). We can't rely on that during scans that drop buffer pins eagerly
* (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
* the page LSN having not changed since back when _bt_readpage saw the page.
+ * We totally give up on setting LP_DEAD bits when the page LSN changed.
+ *
+ * We give up much less often during !so->dropPin scans, but it still happens.
+ * We cope with cases where items have moved right due to insertions. If an
+ * item has moved off the current page due to a split, we'll fail to find it
+ * and just give up on it.
*/
void
_bt_killitems(IndexScanDesc scan)
@@ -3353,6 +3355,7 @@ _bt_killitems(IndexScanDesc scan)
OffsetNumber maxoff;
int numKilled = so->numKilled;
bool killedsomething = false;
+ Buffer buf;
Assert(numKilled > 0);
Assert(BTScanPosIsValid(so->currPos));
@@ -3369,11 +3372,11 @@ _bt_killitems(IndexScanDesc scan)
* concurrent VACUUMs from recycling any of the TIDs on the page.
*/
Assert(BTScanPosIsPinned(so->currPos));
- _bt_lockbuf(rel, so->currPos.buf, BT_READ);
+ buf = so->currPos.buf;
+ _bt_lockbuf(rel, buf, BT_READ);
}
else
{
- Buffer buf;
XLogRecPtr latestlsn;
Assert(!BTScanPosIsPinned(so->currPos));
@@ -3391,10 +3394,9 @@ _bt_killitems(IndexScanDesc scan)
}
/* Unmodified, hinting is safe */
- so->currPos.buf = buf;
}
- page = BufferGetPage(so->currPos.buf);
+ page = BufferGetPage(buf);
opaque = BTPageGetOpaque(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
@@ -3511,10 +3513,13 @@ _bt_killitems(IndexScanDesc scan)
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
- MarkBufferDirtyHint(so->currPos.buf, true);
+ MarkBufferDirtyHint(buf, true);
}
- _bt_unlockbuf(rel, so->currPos.buf);
+ if (!so->dropPin)
+ _bt_unlockbuf(rel, buf);
+ else
+ _bt_relbuf(rel, buf);
}
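
[Editor's note: the LSN recheck described in the rewritten comment is easiest to see in isolation. Below is an illustrative sketch only, simplified from the so->dropPin path of _bt_killitems above; the helper name is invented and not part of nbtree. The page is re-pinned and read-locked, and its current LSN is compared with the LSN that _bt_readpage recorded in so->currPos.lsn; any difference means the page may have been modified while unpinned, so LP_DEAD hinting is abandoned wholesale.]

/*
 * Illustrative sketch, not verbatim nbtree code.  Returns true and hands
 * back a pinned, read-locked buffer only when the page is provably
 * unmodified since _bt_readpage recorded so->currPos.lsn.
 */
static bool
page_safe_for_hinting(Relation rel, BTScanOpaque so, Buffer *bufp)
{
	Buffer		buf = ReadBuffer(rel, so->currPos.currPage);

	_bt_lockbuf(rel, buf, BT_READ);
	if (BufferGetLSNAtomic(buf) != so->currPos.lsn)
	{
		/* Page changed while we held no pin: TIDs may have been recycled */
		_bt_relbuf(rel, buf);
		return false;
	}
	*bufp = buf;				/* unmodified, hinting is safe */
	return true;
}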
diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c
index f5fc346e201..f52f2477df1 100644
--- a/src/backend/commands/copyfromparse.c
+++ b/src/backend/commands/copyfromparse.c
@@ -1538,7 +1538,7 @@ GetDecimalFromHex(char hex)
if (isdigit((unsigned char) hex))
return hex - '0';
else
- return tolower((unsigned char) hex) - 'a' + 10;
+ return pg_ascii_tolower((unsigned char) hex) - 'a' + 10;
}
/*
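
[Editor's note: the switch from tolower(3) to pg_ascii_tolower() makes the hex-digit conversion independent of the process locale. As a minimal sketch, illustrative only and assuming the semantics of pg_ascii_tolower rather than quoting its implementation, an ASCII-only lowercase mapping looks like this.]

#include <stdio.h>

/*
 * Sketch of an ASCII-only tolower: unlike tolower(3), the result cannot
 * vary with the current locale (e.g. tr_TR, where 'I' does not case-fold
 * to 'i').
 */
static unsigned char
ascii_tolower_sketch(unsigned char ch)
{
	return (ch >= 'A' && ch <= 'Z') ? ch - 'A' + 'a' : ch;
}

int
main(void)
{
	/* 'A' -> 'a', so the digit value is 10, matching GetDecimalFromHex */
	printf("%d\n", ascii_tolower_sketch('A') - 'a' + 10);
	return 0;
}

[The same substitution appears in inet_net_pton.c below, where it additionally removes the need for the isupper() guard.]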
diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c
index 3534f0b8111..5d98c58ffa8 100644
--- a/src/backend/libpq/be-secure-gssapi.c
+++ b/src/backend/libpq/be-secure-gssapi.c
@@ -121,9 +121,9 @@ be_gssapi_write(Port *port, const void *ptr, size_t len)
* again, so if it offers a len less than that, something is wrong.
*
* Note: it may seem attractive to report partial write completion once
- * we've successfully sent any encrypted packets. However, that can cause
- * problems for callers; notably, pqPutMsgEnd's heuristic to send only
- * full 8K blocks interacts badly with such a hack. We won't save much,
+ * we've successfully sent any encrypted packets. However, doing that
+ * expands the state space of this processing and has been responsible for
+ * bugs in the past (cf. commit d053a879b). We won't save much,
* typically, by letting callers discard data early, so don't risk it.
*/
if (len < PqGSSSendConsumed)
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index ef2236d9f04..3b0db2a3799 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -115,8 +115,7 @@ inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size)
src++; /* skip x or X. */
while ((ch = *src++) != '\0' && isxdigit((unsigned char) ch))
{
- if (isupper((unsigned char) ch))
- ch = tolower((unsigned char) ch);
+ ch = pg_ascii_tolower((unsigned char) ch);
n = strchr(xdigits, ch) - xdigits;
assert(n >= 0 && n <= 15);
if (dirty == 0)
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 21b00792a8a..bc2a2fb4797 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -412,10 +412,15 @@ _LoadLOs(ArchiveHandle *AH, TocEntry *te)
/*
* Note: before archive v16, there was always only one BLOBS TOC entry,
- * now there can be multiple. We don't need to worry what version we are
- * reading though, because tctx->filename should be correct either way.
+ * now there can be multiple. Furthermore, although the actual filename
+ * was always "blobs.toc" before v16, the value of tctx->filename did not
+ * match that before commit 548e50976 fixed it. For simplicity we assume
+ * it must be "blobs.toc" in all archives before v16.
*/
- setFilePath(AH, tocfname, tctx->filename);
+ if (AH->version < K_VERS_1_16)
+ setFilePath(AH, tocfname, "blobs.toc");
+ else
+ setFilePath(AH, tocfname, tctx->filename);
CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R);
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index ec65ab79fec..620830feb9d 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -3664,9 +3664,10 @@ match_previous_words(int pattern_id,
TailMatches("CREATE", "TEMP|TEMPORARY|UNLOGGED", "TABLE", MatchAny, "(*)", "AS"))
COMPLETE_WITH("EXECUTE", "SELECT", "TABLE", "VALUES", "WITH");
/* Complete CREATE TABLE name (...) with supported options */
- else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)") ||
- TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "PARTITION BY", "USING", "TABLESPACE", "WITH (");
+ else if (TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ COMPLETE_WITH("AS", "INHERITS (", "USING", "TABLESPACE", "WITH (");
else if (TailMatches("CREATE", "TEMP|TEMPORARY", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "ON COMMIT", "PARTITION BY", "USING",
"TABLESPACE", "WITH (");
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index c14e3c95250..dca44fdc5d2 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -553,9 +553,35 @@ pqPutMsgEnd(PGconn *conn)
/* Make message eligible to send */
conn->outCount = conn->outMsgEnd;
+ /* If appropriate, try to push out some data */
if (conn->outCount >= 8192)
{
- int toSend = conn->outCount - (conn->outCount % 8192);
+ int toSend = conn->outCount;
+
+ /*
+ * On Unix-pipe connections, it seems profitable to prefer sending
+ * pipe-buffer-sized packets not randomly-sized ones, so retain the
+ * last partial-8K chunk in our buffer for now. On TCP connections,
+ * the advantage of that is far less clear. Moreover, it flat out
+ * isn't safe when using SSL or GSSAPI, because those code paths have
+ * API stipulations that if they fail to send all the data that was
+ * offered in the previous write attempt, we mustn't offer less data
+ * in this write attempt. The previous write attempt might've been
+ * pqFlush attempting to send everything in the buffer, so we mustn't
+ * offer less now. (Presently, we won't try to use SSL or GSSAPI on
+ * Unix connections, so those checks are just Asserts. They'll have
+ * to become part of the regular if-test if we ever change that.)
+ */
+ if (conn->raddr.addr.ss_family == AF_UNIX)
+ {
+#ifdef USE_SSL
+ Assert(!conn->ssl_in_use);
+#endif
+#ifdef ENABLE_GSS
+ Assert(!conn->gssenc);
+#endif
+ toSend -= toSend % 8192;
+ }
if (pqSendSome(conn, toSend) < 0)
return EOF;
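
[Editor's note: the arithmetic in the new Unix-socket branch just rounds the send size down to a whole number of 8K blocks; on other connection types the full buffer is offered. A self-contained sketch of that decision follows; the helper name is hypothetical and not part of libpq.]

#include <stdbool.h>

#define PQ_BLOCK_SIZE 8192		/* the 8K unit used in pqPutMsgEnd above */

/*
 * Hypothetical helper illustrating the decision above: on Unix-socket
 * connections send only full 8K blocks, retaining any trailing partial
 * block in the output buffer; on TCP (and always under SSL/GSSAPI, which
 * must never be offered less data than a previous attempt) offer everything.
 */
static int
bytes_to_offer(int outCount, bool is_unix_socket)
{
	if (is_unix_socket)
		return outCount - (outCount % PQ_BLOCK_SIZE);
	return outCount;
}

[For example, with outCount = 20000 on a Unix socket this yields 16384, leaving 3616 bytes buffered until the next message or an explicit pqFlush.]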
diff --git a/src/interfaces/libpq/fe-secure-gssapi.c b/src/interfaces/libpq/fe-secure-gssapi.c
index 62d05f68496..bc9e1ce06fa 100644
--- a/src/interfaces/libpq/fe-secure-gssapi.c
+++ b/src/interfaces/libpq/fe-secure-gssapi.c
@@ -112,9 +112,9 @@ pg_GSS_write(PGconn *conn, const void *ptr, size_t len)
* again, so if it offers a len less than that, something is wrong.
*
* Note: it may seem attractive to report partial write completion once
- * we've successfully sent any encrypted packets. However, that can cause
- * problems for callers; notably, pqPutMsgEnd's heuristic to send only
- * full 8K blocks interacts badly with such a hack. We won't save much,
+ * we've successfully sent any encrypted packets. However, doing that
+ * expands the state space of this processing and has been responsible for
+ * bugs in the past (cf. commit d053a879b). We won't save much,
* typically, by letting callers discard data early, so don't risk it.
*/
if (len < PqGSSSendConsumed)