author    Michael Paquier  2019-08-13 04:53:41 +0000
committer Michael Paquier  2019-08-13 04:53:41 +0000
commit    66bde49d96a9ddacc49dcbdf1b47b5bd6e31ead5 (patch)
tree      638a0aacba92dab819aa9add708297202211c4aa /src/backend
parent    2d7d67cc74d0f59e76464bd5009bc74f1591018e (diff)
Fix inconsistencies and typos in the tree, take 10
This addresses some issues with unnecessary code comments, fixes various
typos in docs and comments, and removes some orphaned structures and
definitions.

Author: Alexander Lakhin
Discussion: https://postgr.es/m/9aabc775-5494-b372-8bcb-4dfc0bd37c68@gmail.com
Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/brin/brin_revmap.c          | 7
-rw-r--r--  src/backend/access/gist/gist.c                 | 4
-rw-r--r--  src/backend/access/hash/hashpage.c             | 2
-rw-r--r--  src/backend/access/heap/heapam.c               | 2
-rw-r--r--  src/backend/access/nbtree/nbtutils.c           | 2
-rw-r--r--  src/backend/access/transam/slru.c              | 2
-rw-r--r--  src/backend/access/transam/xact.c              | 2
-rw-r--r--  src/backend/access/transam/xlogreader.c        | 2
-rw-r--r--  src/backend/catalog/catalog.c                  | 7
-rw-r--r--  src/backend/catalog/pg_constraint.c            | 2
-rw-r--r--  src/backend/commands/cluster.c                 | 2
-rw-r--r--  src/backend/commands/sequence.c                | 2
-rw-r--r--  src/backend/executor/spi.c                     | 4
-rw-r--r--  src/backend/libpq/auth.c                       | 2
-rw-r--r--  src/backend/libpq/pqcomm.c                     | 8
-rw-r--r--  src/backend/optimizer/path/costsize.c          | 2
-rw-r--r--  src/backend/partitioning/partbounds.c          | 2
-rw-r--r--  src/backend/partitioning/partprune.c           | 2
-rw-r--r--  src/backend/port/posix_sema.c                  | 2
-rw-r--r--  src/backend/port/win32/signal.c                | 2
-rw-r--r--  src/backend/postmaster/checkpointer.c          | 2
-rw-r--r--  src/backend/replication/basebackup.c           | 2
-rw-r--r--  src/backend/replication/logical/snapbuild.c    | 2
-rw-r--r--  src/backend/replication/logical/worker.c       | 2
-rw-r--r--  src/backend/replication/slot.c                 | 4
-rw-r--r--  src/backend/storage/file/fd.c                  | 4
-rw-r--r--  src/backend/storage/ipc/procarray.c            | 8
-rw-r--r--  src/backend/storage/lmgr/lwlock.c              | 4
-rw-r--r--  src/backend/storage/lmgr/predicate.c           | 4
-rw-r--r--  src/backend/utils/adt/pg_locale.c              | 2
-rw-r--r--  src/backend/utils/adt/rangetypes_typanalyze.c  | 2
-rw-r--r--  src/backend/utils/adt/txid.c                   | 2
-rw-r--r--  src/backend/utils/adt/varlena.c                | 2
-rw-r--r--  src/backend/utils/adt/xml.c                    | 2
-rwxr-xr-x  src/backend/utils/misc/check_guc               | 2
-rw-r--r--  src/backend/utils/mmgr/slab.c                  | 2
-rw-r--r--  src/backend/utils/sort/tuplesort.c             | 2
37 files changed, 48 insertions, 60 deletions
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index e2bfbf8a680..647350c1018 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -395,9 +395,10 @@ brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
*/
/*
- * Because of SUE lock, this function shouldn't run concurrently with
- * summarization. Placeholder tuples can only exist as leftovers from
- * crashed summarization, so if we detect any, we complain but proceed.
+ * Because of ShareUpdateExclusive lock, this function shouldn't run
+ * concurrently with summarization. Placeholder tuples can only exist as
+ * leftovers from crashed summarization, so if we detect any, we complain
+ * but proceed.
*/
if (BrinTupleIsPlaceholder(tup))
ereport(WARNING,
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index e9ca4b82527..0cc87911d6b 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -37,7 +37,7 @@ static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
Buffer leftchild, Buffer rightchild,
bool unlockbuf, bool unlockleftchild);
static void gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
- GISTSTATE *giststate, List *splitinfo, bool releasebuf);
+ GISTSTATE *giststate, List *splitinfo, bool unlockbuf);
static void gistprunepage(Relation rel, Page page, Buffer buffer,
Relation heapRel);
@@ -1047,7 +1047,7 @@ gistFindCorrectParent(Relation r, GISTInsertStack *child)
{
/*
* End of chain and still didn't find parent. It's a very-very
- * rare situation when root splited.
+ * rare situation when root splitted.
*/
break;
}
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index defdc9b4085..838ee68c867 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1510,7 +1510,7 @@ _hash_getcachedmetap(Relation rel, Buffer *metabuf, bool force_refresh)
* It's important that we don't set rd_amcache to an invalid value.
* Either MemoryContextAlloc or _hash_getbuf could fail, so don't
* install a pointer to the newly-allocated storage in the actual
- * relcache entry until both have succeeeded.
+ * relcache entry until both have succeeded.
*/
if (rel->rd_amcache == NULL)
cache = MemoryContextAlloc(rel->rd_indexcxt,
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 6b42bdc77b2..718f07f4f65 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -3029,7 +3029,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
* the value ends up being the same, this test will fail and we will use
* the stronger lock. This is acceptable; the important case to optimize
* is updates that don't manipulate key columns, not those that
- * serendipitiously arrive at the same key values.
+ * serendipitously arrive at the same key values.
*/
if (!bms_overlap(modified_attrs, key_attrs))
{
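The comment above hinges on an overlap test between the set of modified attributes and the set of key attributes. A minimal standalone sketch of that test, using a plain C bitmask in place of PostgreSQL's Bitmapset (the type and names here are illustrative, not the real bms_overlap()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical single-word stand-in for a Bitmapset: bit i set
     * means "attribute i is in the set". */
    typedef uint64_t attr_mask;

    /* Overlap test analogous to bms_overlap(): true if any attribute
     * appears in both sets. */
    static bool
    mask_overlap(attr_mask a, attr_mask b)
    {
        return (a & b) != 0;
    }

    int
    main(void)
    {
        attr_mask key_attrs = ((attr_mask) 1 << 1) | ((attr_mask) 1 << 2);
        attr_mask modified_attrs = (attr_mask) 1 << 3;

        /* No overlap: the update leaves the key columns alone, so the
         * weaker lock suffices, as the comment describes. */
        printf("need stronger lock: %s\n",
               mask_overlap(modified_attrs, key_attrs) ? "yes" : "no");
        return 0;
    }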
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 93fab264ae4..9b172c1a191 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -664,7 +664,7 @@ _bt_restore_array_keys(IndexScanDesc scan)
* scan->numberOfKeys is the number of input keys, so->numberOfKeys gets
* the number of output keys (possibly less, never greater).
*
- * The output keys are marked with additional sk_flag bits beyond the
+ * The output keys are marked with additional sk_flags bits beyond the
* system-standard bits supplied by the caller. The DESC and NULLS_FIRST
* indoption bits for the relevant index attribute are copied into the flags.
* Also, for a DESC column, we commute (flip) all the sk_strategy numbers
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 13aa8e1b95a..0fbcb4e6fec 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -1364,7 +1364,7 @@ SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
}
/*
- * Scan the SimpleLRU directory and apply a callback to each file found in it.
+ * Scan the SimpleLru directory and apply a callback to each file found in it.
*
* If the callback returns true, the scan is stopped. The last return value
* from the callback is returned.
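The fixed comment states SlruScanDirectory's contract: apply a callback per file, stop as soon as the callback returns true, and return the last callback result. A self-contained sketch of that callback-scan pattern in generic POSIX C (not the actual slru.c implementation):

    #include <dirent.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef bool (*scan_cb) (const char *filename, void *data);

    /* Apply cb to each directory entry; a true return stops the scan,
     * and the last callback result is what we return. */
    static bool
    scan_directory(const char *path, scan_cb cb, void *data)
    {
        DIR        *dir = opendir(path);
        struct dirent *de;
        bool        retval = false;

        if (dir == NULL)
            return false;
        while ((de = readdir(dir)) != NULL)
        {
            retval = cb(de->d_name, data);
            if (retval)
                break;          /* callback asked to stop */
        }
        closedir(dir);
        return retval;
    }

    static bool
    print_and_continue(const char *filename, void *data)
    {
        (void) data;
        printf("%s\n", filename);
        return false;           /* keep scanning */
    }

    int
    main(void)
    {
        scan_directory(".", print_and_continue, NULL);
        return 0;
    }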
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 1bbaeeebf4d..52e96433059 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1232,7 +1232,7 @@ RecordTransactionCommit(void)
if (!markXidCommitted)
{
/*
- * We expect that every smgrscheduleunlink is followed by a catalog
+ * We expect that every RelationDropStorage is followed by a catalog
* update, and hence XID assignment, so we shouldn't get here with any
* pending deletes. Use a real test not just an Assert to check this,
* since it's a bit fragile.
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 0d6e968ee62..c6faf48d24f 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -702,7 +702,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
* We assume all of the record (that is, xl_tot_len bytes) has been read
* into memory at *record. Also, ValidXLogRecordHeader() has accepted the
* record's header, which means in particular that xl_tot_len is at least
- * SizeOfXlogRecord.
+ * SizeOfXLogRecord.
*/
static bool
ValidXLogRecord(XLogReaderState *state, XLogRecord *record, XLogRecPtr recptr)
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index a065419cdb2..1af31c2b9f9 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -317,13 +317,6 @@ IsSharedRelation(Oid relationId)
* consecutive existing OIDs. This is a mostly reasonable assumption for
* system catalogs.
*
- * This is exported separately because there are cases where we want to use
- * an index that will not be recognized by RelationGetOidIndex: TOAST tables
- * have indexes that are usable, but have multiple columns and are on
- * ordinary columns rather than a true OID column. This code will work
- * anyway, so long as the OID is the index's first column. The caller must
- * pass in the actual heap attnum of the OID column, however.
- *
* Caller must have a suitable lock on the relation.
*/
Oid
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index b6145593a38..56568b0105f 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -580,7 +580,7 @@ RemoveConstraintById(Oid conId)
rel = table_open(con->conrelid, AccessExclusiveLock);
/*
- * We need to update the relcheck count if it is a check constraint
+ * We need to update the relchecks count if it is a check constraint
* being dropped. This update will force backends to rebuild relcache
* entries when we commit.
*/
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index cedb4ee844d..28985a07ec1 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -1522,7 +1522,7 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap,
/*
* Get a list of tables that the current user owns and
- * have indisclustered set. Return the list in a List * of rvsToCluster
+ * have indisclustered set. Return the list in a List * of RelToCluster
* with the tableOid and the indexOid on which the table is already
* clustered.
*/
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 0960b339cab..a13322b6938 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -1115,7 +1115,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
/*
* Initialize the new hash table entry if it did not exist already.
*
- * NOTE: seqtable entries are stored for the life of a backend (unless
+ * NOTE: seqhashtab entries are stored for the life of a backend (unless
* explicitly discarded with DISCARD). If the sequence itself is deleted
* then the entry becomes wasted memory, but it's small enough that this
* should not matter.
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index b8eb83b2e0a..2c0ae395ba6 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -2603,7 +2603,7 @@ _SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
/*
* Think not to combine this store with the preceding function call. If
- * the portal contains calls to functions that use SPI, then SPI_stack is
+ * the portal contains calls to functions that use SPI, then _SPI_stack is
* likely to move around while the portal runs. When control returns,
* _SPI_current will point to the correct stack entry... but the pointer
* may be different than it was beforehand. So we must be sure to re-fetch
@@ -2733,7 +2733,7 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan)
ALLOCSET_SMALL_SIZES);
oldcxt = MemoryContextSwitchTo(plancxt);
- /* Copy the SPI_plan struct and subsidiary data into the new context */
+ /* Copy the _SPI_plan struct and subsidiary data into the new context */
newplan = (SPIPlanPtr) palloc0(sizeof(_SPI_plan));
newplan->magic = _SPI_PLAN_MAGIC;
newplan->plancxt = plancxt;
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 5fb1f1b7d7a..0e0a6d87528 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -1797,7 +1797,7 @@ interpret_ident_response(const char *ident_response,
/*
* Talk to the ident server on "remote_addr" and find out who
* owns the tcp connection to "local_addr"
- * It the username successfully retrieved, check the usermap.
+ * If the username is successfully retrieved, check the usermap.
*
* XXX: Using WaitLatchOrSocket() and doing a CHECK_FOR_INTERRUPTS() if the
* latch was set would improve the responsiveness to timeouts/cancellations.
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index d8d863dda20..cd517e8bb41 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -485,10 +485,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
* error on TCP ports.
*
* On win32, however, this behavior only happens if the
- * SO_EXLUSIVEADDRUSE is set. With SO_REUSEADDR, win32 allows multiple
- * servers to listen on the same address, resulting in unpredictable
- * behavior. With no flags at all, win32 behaves as Unix with
- * SO_REUSEADDR.
+ * SO_EXCLUSIVEADDRUSE is set. With SO_REUSEADDR, win32 allows
+ * multiple servers to listen on the same address, resulting in
+ * unpredictable behavior. With no flags at all, win32 behaves as Unix
+ * with SO_REUSEADDR.
*/
if (!IS_AF_UNIX(addr->ai_family))
{
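For the socket-option behavior the corrected comment contrasts, here is a minimal Unix-side sketch of SO_REUSEADDR on a listen socket (error handling omitted; port 5432 is just an example value, and this is not StreamServerPort itself):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        int one = 1;
        struct sockaddr_in addr;

        /* On Unix, SO_REUSEADDR lets a restarted server bind its old
         * port while stale TIME_WAIT connections linger; it does not
         * allow two live listeners on the same address. */
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(5432);

        bind(fd, (struct sockaddr *) &addr, sizeof(addr));
        listen(fd, 8);
        close(fd);
        return 0;
    }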
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index bc6bc999573..c5f65934859 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1811,7 +1811,7 @@ append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
return 0;
/*
- * Array length is number of workers or number of relevants paths,
+ * Array length is number of workers or number of relevant paths,
* whichever is less.
*/
arrlen = Min(parallel_workers, numpaths);
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index 5a29b9673b8..46d03f3b9b3 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -1514,7 +1514,7 @@ partition_rbound_cmp(int partnatts, FmgrInfo *partsupfunc,
/*
* partition_rbound_datum_cmp
*
- * Return whether range bound (specified in rb_datums, rb_kind, and rb_lower)
+ * Return whether range bound (specified in rb_datums and rb_kind)
* is <, =, or > partition key of tuple (tuple_datums)
*
* n_tuple_datums, partsupfunc and partcollation give number of attributes in
diff --git a/src/backend/partitioning/partprune.c b/src/backend/partitioning/partprune.c
index a1bd4efd0b0..735311eac45 100644
--- a/src/backend/partitioning/partprune.c
+++ b/src/backend/partitioning/partprune.c
@@ -2127,7 +2127,7 @@ match_clause_to_partition_key(GeneratePruningStepsContext *context,
/*
* Now generate a list of clauses, one for each array element, of the
- * form saop_leftop saop_op elem_expr
+ * form leftop saop_op elem_expr
*/
elem_clauses = NIL;
foreach(lc1, elem_exprs)
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index 5e77cb0c12e..3370adf35e2 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -189,7 +189,7 @@ PGSemaphoreShmemSize(int maxSemas)
* maxSemas parameter is just used to size the arrays. For unnamed
* semaphores, there is an array of PGSemaphoreData structs in shared memory.
* For named semaphores, we keep a postmaster-local array of sem_t pointers,
- * which we use for releasing the semphores when done.
+ * which we use for releasing the semaphores when done.
* (This design minimizes the dependency of postmaster shutdown on the
* contents of shared memory, which a failed backend might have clobbered.
* We can't do much about the possibility of sem_destroy() crashing, but
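The corrected comment concerns named POSIX semaphores and releasing them when done. A minimal sketch of that lifecycle with sem_open/sem_close/sem_unlink (the semaphore name is made up for the example):

    #include <fcntl.h>
    #include <semaphore.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* Create (or attach to) a named semaphore with initial count 1.
         * "/demo_sema" is a hypothetical name, not one PostgreSQL uses. */
        sem_t *sem = sem_open("/demo_sema", O_CREAT, 0600, 1);

        if (sem == SEM_FAILED)
        {
            perror("sem_open");
            return 1;
        }

        sem_wait(sem);          /* acquire */
        sem_post(sem);          /* release */

        /* Releasing "when done", as the comment says: close our handle,
         * then remove the name so the kernel object can go away. */
        sem_close(sem);
        sem_unlink("/demo_sema");
        return 0;
    }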
diff --git a/src/backend/port/win32/signal.c b/src/backend/port/win32/signal.c
index 7d8961f4a44..9b5e544febb 100644
--- a/src/backend/port/win32/signal.c
+++ b/src/backend/port/win32/signal.c
@@ -102,7 +102,7 @@ pgwin32_signal_initialize(void)
/*
* Dispatch all signals currently queued and not blocked
* Blocked signals are ignored, and will be fired at the time of
- * the sigsetmask() call.
+ * the pqsigsetmask() call.
*/
void
pgwin32_dispatch_queued_signals(void)
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 11bbe2c397f..61544f65ada 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -750,7 +750,7 @@ IsCheckpointOnSchedule(double progress)
* We compare the current WAL insert location against the location
* computed before calling CreateCheckPoint. The code in XLogInsert that
* actually triggers a checkpoint when CheckPointSegments is exceeded
- * compares against RedoRecptr, so this is not completely accurate.
+ * compares against RedoRecPtr, so this is not completely accurate.
* However, it's good enough for our purposes, we're only calculating an
* estimate anyway.
*
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index d5f9b617c84..c91f66dcbeb 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -792,7 +792,7 @@ SendBackupHeader(List *tablespaces)
pq_sendint32(&buf, 0); /* typmod */
pq_sendint16(&buf, 0); /* format code */
- /* Second field - spcpath */
+ /* Second field - spclocation */
pq_sendstring(&buf, "spclocation");
pq_sendint32(&buf, 0);
pq_sendint16(&buf, 0);
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index dc64b1e0c2f..0bd1d0f9545 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -269,7 +269,7 @@ static void SnapBuildSnapIncRefcount(Snapshot snap);
static void SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn);
-/* xlog reading helper functions for SnapBuildProcessRecord */
+/* xlog reading helper functions for SnapBuildProcessRunningXacts */
static bool SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *running);
static void SnapBuildWaitSnapshot(xl_running_xacts *running, TransactionId cutoff);
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 43edfef0895..11e6331f494 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -1658,7 +1658,7 @@ ApplyWorkerMain(Datum main_arg)
{
char *syncslotname;
- /* This is table synchroniation worker, call initial sync. */
+ /* This is table synchronization worker, call initial sync. */
syncslotname = LogicalRepSyncTableStart(&origin_startpos);
/* The slot name needs to be allocated in permanent memory context. */
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 62342a69cbf..b1bcd93345d 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -108,7 +108,7 @@ static void CreateSlotOnDisk(ReplicationSlot *slot);
static void SaveSlotToPath(ReplicationSlot *slot, const char *path, int elevel);
/*
- * Report shared-memory space needed by ReplicationSlotShmemInit.
+ * Report shared-memory space needed by ReplicationSlotsShmemInit.
*/
Size
ReplicationSlotsShmemSize(void)
@@ -298,7 +298,7 @@ ReplicationSlotCreate(const char *name, bool db_specific,
* We need to briefly prevent any other backend from iterating over the
* slots while we flip the in_use flag. We also need to set the active
* flag while holding the ControlLock as otherwise a concurrent
- * SlotAcquire() could acquire the slot as well.
+ * ReplicationSlotAcquire() could acquire the slot as well.
*/
LWLockAcquire(ReplicationSlotControlLock, LW_EXCLUSIVE);
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 60f39031fb5..a76112d6cde 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -1971,10 +1971,6 @@ retry:
{
/*
* Maintain fileSize and temporary_files_size if it's a temp file.
- *
- * If seekPos is -1 (unknown), this will do nothing; but we could only
- * get here in that state if we're not enforcing temporary_files_size,
- * so we don't care.
*/
if (vfdP->fdstate & FD_TEMP_FILE_LIMIT)
{
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 469aac38c40..8abcfdf841f 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -734,8 +734,6 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
Assert(standbyState == STANDBY_INITIALIZED);
/*
- * OK, we need to initialise from the RunningTransactionsData record.
- *
* NB: this can be reached at least twice, so make sure new code can deal
* with that.
*/
@@ -750,11 +748,11 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* sort them first.
*
* Some of the new xids are top-level xids and some are subtransactions.
- * We don't call SubtransSetParent because it doesn't matter yet. If we
+ * We don't call SubTransSetParent because it doesn't matter yet. If we
* aren't overflowed then all xids will fit in snapshot and so we don't
* need subtrans. If we later overflow, an xid assignment record will add
- * xids to subtrans. If RunningXacts is overflowed then we don't have
- * enough information to correctly update subtrans anyway.
+ * xids to subtrans. If RunningTransactionsData is overflowed then we
+ * don't have enough information to correctly update subtrans anyway.
*/
/*
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index c77d47c01c6..fb0bf442640 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -1075,8 +1075,8 @@ LWLockDequeueSelf(LWLock *lock)
*/
/*
- * Reset releaseOk if somebody woke us before we removed ourselves -
- * they'll have set it to false.
+ * Reset RELEASE_OK flag if somebody woke us before we removed
+ * ourselves - they'll have set it to false.
*/
pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK);
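The reworded comment refers to re-setting a flag bit with an atomic fetch-or. The same pattern in portable C11, standing in for pg_atomic_fetch_or_u32 (the flag's bit position is illustrative, not LW_FLAG_RELEASE_OK's actual value):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative flag bit, not PostgreSQL's definition. */
    #define FLAG_RELEASE_OK ((uint32_t) 1 << 30)

    int
    main(void)
    {
        _Atomic uint32_t state = 0;
        uint32_t    old;

        /* Atomically OR the flag back in, regardless of what other bits
         * concurrent updaters may be changing, which is the same idea as
         * pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK). */
        old = atomic_fetch_or(&state, FLAG_RELEASE_OK);

        printf("old=0x%x new=0x%x\n", old, (unsigned) atomic_load(&state));
        return 0;
    }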
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 85a629f4fce..78fb90fb1bd 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -849,7 +849,7 @@ OldSerXidInit(void)
/*
* Record a committed read write serializable xid and the minimum
* commitSeqNo of any transactions to which this xid had a rw-conflict out.
- * An invalid seqNo means that there were no conflicts out from xid.
+ * An invalid commitSeqNo means that there were no conflicts out from xid.
*/
static void
OldSerXidAdd(TransactionId xid, SerCommitSeqNo minConflictCommitSeqNo)
@@ -1685,7 +1685,7 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
/*
* Guts of GetSerializableTransactionSnapshot
*
- * If sourcexid is valid, this is actually an import operation and we should
+ * If sourcevxid is valid, this is actually an import operation and we should
* skip calling GetSnapshotData, because the snapshot contents are already
* loaded up. HOWEVER: to avoid race conditions, we must check that the
* source xact is still running after we acquire SerializableXactHashLock.
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 2376bda497b..15fda7f1225 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -79,7 +79,7 @@
#undef StrNCpy
#include <shlwapi.h>
#ifdef StrNCpy
-#undef STrNCpy
+#undef StrNCpy
#endif
#endif
diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c
index 631e4210626..d01d3032cca 100644
--- a/src/backend/utils/adt/rangetypes_typanalyze.c
+++ b/src/backend/utils/adt/rangetypes_typanalyze.c
@@ -1,6 +1,6 @@
/*-------------------------------------------------------------------------
*
- * ragetypes_typanalyze.c
+ * rangetypes_typanalyze.c
* Functions for gathering statistics from range columns
*
* For a range type column, histograms of lower and upper bounds, and
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index 4483db573f3..90b2c9b6948 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -705,7 +705,7 @@ txid_snapshot_xip(PG_FUNCTION_ARGS)
TxidSnapshot *snap;
txid value;
- /* on first call initialize snap_state and get copy of snapshot */
+ /* on first call initialize fctx and get copy of snapshot */
if (SRF_IS_FIRSTCALL())
{
TxidSnapshot *arg = (TxidSnapshot *) PG_GETARG_VARLENA_P(0);
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 0864838867f..fa08b55eb62 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -2052,7 +2052,7 @@ varstr_sortsupport(SortSupport ssup, Oid typid, Oid collid)
/*
* If we're using abbreviated keys, or if we're using a locale-aware
- * comparison, we need to initialize a StringSortSupport object. Both
+ * comparison, we need to initialize a VarStringSortSupport object. Both
* cases will make use of the temporary buffers we initialize here for
* scratch space (and to detect requirement for BpChar semantics from
* caller), and the abbreviation case requires additional state.
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 5e629d29ea8..0280c2625c6 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -3987,7 +3987,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
/*
* Common code for xpath() and xmlexists()
*
- * Evaluate XPath expression and return number of nodes in res_items
+ * Evaluate XPath expression and return number of nodes in res_nitems
* and array of XML values in astate. Either of those pointers can be
* NULL if the corresponding result isn't wanted.
*
diff --git a/src/backend/utils/misc/check_guc b/src/backend/utils/misc/check_guc
index d228bbed685..416a0875b6c 100755
--- a/src/backend/utils/misc/check_guc
+++ b/src/backend/utils/misc/check_guc
@@ -18,7 +18,7 @@
## can be ignored
INTENTIONALLY_NOT_INCLUDED="debug_deadlocks \
is_superuser lc_collate lc_ctype lc_messages lc_monetary lc_numeric lc_time \
-pre_auth_delay role seed server_encoding server_version server_version_int \
+pre_auth_delay role seed server_encoding server_version server_version_num \
session_authorization trace_lock_oidmin trace_lock_table trace_locks trace_lwlocks \
trace_notify trace_userlocks transaction_isolation transaction_read_only \
zero_damaged_pages"
diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c
index bd8d2009829..700a91a2a37 100644
--- a/src/backend/utils/mmgr/slab.c
+++ b/src/backend/utils/mmgr/slab.c
@@ -182,7 +182,7 @@ static const MemoryContextMethods SlabMethods = {
* chunkSize: allocation chunk size
*
* The chunkSize may not exceed:
- * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - SLAB_CHUNKHDRSZ
+ * MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock)) - sizeof(SlabChunk)
*/
MemoryContext
SlabContextCreate(MemoryContext parent,
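The corrected limit is pure size arithmetic. A sketch of the computation under the assumption of 8-byte maximum alignment, with made-up header sizes standing in for sizeof(SlabBlock) and sizeof(SlabChunk):

    #include <stdint.h>
    #include <stdio.h>

    /* Assume 8-byte maximum alignment, as on most 64-bit platforms. */
    #define MAXIMUM_ALIGNOF 8
    #define MAXALIGN(x) \
        (((uintmax_t) (x) + MAXIMUM_ALIGNOF - 1) & ~((uintmax_t) MAXIMUM_ALIGNOF - 1))
    #define MAXALIGN_DOWN(x) \
        ((uintmax_t) (x) & ~((uintmax_t) MAXIMUM_ALIGNOF - 1))

    /* Hypothetical header sizes; the real structs live in slab.c. */
    #define SLAB_BLOCK_HDRSZ 48
    #define SLAB_CHUNK_HDRSZ 16

    int
    main(void)
    {
        /* MAXALIGN_DOWN(SIZE_MAX) - MAXALIGN(sizeof(SlabBlock))
         *                         - sizeof(SlabChunk), per the comment. */
        uintmax_t limit = MAXALIGN_DOWN(SIZE_MAX)
            - MAXALIGN(SLAB_BLOCK_HDRSZ)
            - SLAB_CHUNK_HDRSZ;

        printf("max chunkSize: %ju\n", limit);
        return 0;
    }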
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index bda802b447f..d2bd2aee9f0 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -1150,7 +1150,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
* to compare. In a tuple sort, we could support that, because we can
* always extract the original datum from the tuple as needed. Here, we
* can't, because a datum sort only stores a single copy of the datum; the
- * "tuple" field of each sortTuple is NULL.
+ * "tuple" field of each SortTuple is NULL.
*/
state->sortKeys->abbreviate = !typbyval;