Location via proxy:   [ UP ]  
[Report a bug]   [Manage cookies]                
Skip to content

Commit b0e5f02

Browse files
committed
Fix various typos and spelling mistakes in code comments
Author: Justin Pryzby. Discussion: https://postgr.es/m/20220411020336.GB26620@telsasoft.com
1 parent bba3c35 commit b0e5f02

33 files changed

+46
-45
lines changed

contrib/ltree/ltree.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
* modified to look for -D compile flags in Makefiles, so here, in order to
2525
* get the historic behavior of LOWER_NODE not being defined on MSVC, we only
2626
* define it when not building in that environment. This is important as we
27-
* want to maintain the same LOWER_NODE behavior after a pg_update.
27+
* want to maintain the same LOWER_NODE behavior after a pg_upgrade.
2828
*/
2929
#ifndef _MSC_VER
3030
#define LOWER_NODE

src/backend/access/brin/brin_minmax_multi.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -310,7 +310,7 @@ AssertCheckRanges(Ranges *ranges, FmgrInfo *cmpFn, Oid colloid)
310310
*/
311311
AssertArrayOrder(cmpFn, colloid, ranges->values, 2 * ranges->nranges);
312312

313-
/* then the single-point ranges (with nvalues boundar values ) */
313+
/* then the single-point ranges (with nvalues boundary values ) */
314314
AssertArrayOrder(cmpFn, colloid, &ranges->values[2 * ranges->nranges],
315315
ranges->nsorted);
316316

src/backend/access/heap/heapam.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -1470,7 +1470,7 @@ heap_getnextslot_tidrange(TableScanDesc sscan, ScanDirection direction,
14701470
* heap_set_tidrange will have used heap_setscanlimits to limit the
14711471
* range of pages we scan to only ones that can contain the TID range
14721472
* we're scanning for. Here we must filter out any tuples from these
1473-
* pages that are outwith that range.
1473+
* pages that are outside of that range.
14741474
*/
14751475
if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
14761476
{

src/backend/access/transam/xlogreader.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -300,7 +300,7 @@ XLogReleasePreviousRecord(XLogReaderState *state)
300300
/* Release the space. */
301301
if (unlikely(record->oversized))
302302
{
303-
/* It's not in the the decode buffer, so free it to release space. */
303+
/* It's not in the decode buffer, so free it to release space. */
304304
pfree(record);
305305
}
306306
else

src/backend/access/transam/xlogrecovery.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -2975,7 +2975,7 @@ ReadRecord(XLogPrefetcher *xlogprefetcher, int emode,
29752975
/*
29762976
* When not in standby mode we find that WAL ends in an incomplete
29772977
* record, keep track of that record. After recovery is done,
2978-
* we'll write a record to indicate downstream WAL readers that
2978+
* we'll write a record to indicate to downstream WAL readers that
29792979
* that portion is to be ignored.
29802980
*/
29812981
if (!StandbyMode &&

src/backend/commands/dbcommands.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -386,7 +386,7 @@ ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
386386
* needs to be copied from the source database to the destination database,
387387
* and if so, construct a CreateDBRelInfo for it.
388388
*
389-
* Visbility checks are handled by the caller, so our job here is just
389+
* Visibility checks are handled by the caller, so our job here is just
390390
* to assess the data stored in the tuple.
391391
*/
392392
CreateDBRelInfo *

src/backend/commands/vacuumparallel.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@
1212
* the memory space for storing dead items allocated in the DSM segment. We
1313
* launch parallel worker processes at the start of parallel index
1414
* bulk-deletion and index cleanup and once all indexes are processed, the
15-
* parallel worker processes exit. Each time we process indexes parallelly,
15+
* parallel worker processes exit. Each time we process indexes in parallel,
1616
* the parallel context is re-initialized so that the same DSM can be used for
1717
* multiple passes of index bulk-deletion and index cleanup.
1818
*

src/backend/executor/nodeMergeAppend.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -100,7 +100,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
100100

101101
/*
102102
* When no run-time pruning is required and there's at least one
103-
* subplan, we can fill as_valid_subplans immediately, preventing
103+
* subplan, we can fill ms_valid_subplans immediately, preventing
104104
* later calls to ExecFindMatchingSubPlans.
105105
*/
106106
if (!prunestate->do_exec_prune && nplans > 0)

src/backend/optimizer/path/costsize.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -1976,8 +1976,8 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys,
19761976
* by calling estimate_num_groups_incremental(), which estimates the
19771977
* group size for "new" pathkeys.
19781978
*
1979-
* Note: estimate_num_groups_incremntal does not handle fake Vars, so use
1980-
* a default estimate otherwise.
1979+
* Note: estimate_num_groups_incremental does not handle fake Vars, so
1980+
* use a default estimate otherwise.
19811981
*/
19821982
if (!has_fake_var)
19831983
nGroups = estimate_num_groups_incremental(root, pathkeyExprs,
@@ -6471,7 +6471,7 @@ compute_bitmap_pages(PlannerInfo *root, RelOptInfo *baserel, Path *bitmapqual,
64716471
exact_pages = heap_pages - lossy_pages;
64726472

64736473
/*
6474-
* If there are lossy pages then recompute the number of tuples
6474+
* If there are lossy pages then recompute the number of tuples
64756475
* processed by the bitmap heap node. We assume here that the chance
64766476
* of a given tuple coming from an exact page is the same as the
64776477
* chance that a given page is exact. This might not be true, but

src/backend/optimizer/path/pathkeys.c

+5-5
Original file line numberDiff line numberDiff line change
@@ -2383,16 +2383,16 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys)
23832383
* Count the number of pathkeys that are useful for grouping (instead of
23842384
* explicit sort)
23852385
*
2386-
* Group pathkeys could be reordered to benefit from the odering. The ordering
2387-
* may not be "complete" and may require incremental sort, but that's fine. So
2388-
* we simply count prefix pathkeys with a matching group key, and stop once we
2389-
* find the first pathkey without a match.
2386+
* Group pathkeys could be reordered to benefit from the ordering. The
2387+
* ordering may not be "complete" and may require incremental sort, but that's
2388+
* fine. So we simply count prefix pathkeys with a matching group key, and
2389+
* stop once we find the first pathkey without a match.
23902390
*
23912391
* So e.g. with pathkeys (a,b,c) and group keys (a,b,e) this determines (a,b)
23922392
* pathkeys are useful for grouping, and we might do incremental sort to get
23932393
* path ordered by (a,b,e).
23942394
*
2395-
* This logic is necessary to retain paths with ordeding not matching grouping
2395+
* This logic is necessary to retain paths with ordering not matching grouping
23962396
* keys directly, without the reordering.
23972397
*
23982398
* Returns the length of pathkey prefix with matching group keys.

src/backend/parser/parse_expr.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -3507,7 +3507,7 @@ transformJsonOutput(ParseState *pstate, const JsonOutput *output,
35073507
}
35083508

35093509
/*
3510-
* Transform JSON output clause of JSON contructor functions.
3510+
* Transform JSON output clause of JSON constructor functions.
35113511
*
35123512
* Derive RETURNING type, if not specified, from argument types.
35133513
*/

src/backend/replication/basebackup_server.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -195,9 +195,9 @@ bbsink_server_end_archive(bbsink *sink)
195195

196196
/*
197197
* We intentionally don't use data_sync_elevel here, because the server
198-
* shouldn't PANIC just because we can't guarantee the the backup has been
199-
* written down to disk. Running recovery won't fix anything in this case
200-
* anyway.
198+
* shouldn't PANIC just because we can't guarantee that the backup has
199+
* been written down to disk. Running recovery won't fix anything in this
200+
* case anyway.
201201
*/
202202
if (FileSync(mysink->file, WAIT_EVENT_BASEBACKUP_SYNC) < 0)
203203
ereport(ERROR,

src/backend/replication/logical/reorderbuffer.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -1876,7 +1876,7 @@ ReorderBufferStreamCommit(ReorderBuffer *rb, ReorderBufferTXN *txn)
18761876
* xid 502 which is not visible to our snapshot. And when we will try to
18771877
* decode with that catalog tuple, it can lead to a wrong result or a crash.
18781878
* So, it is necessary to detect concurrent aborts to allow streaming of
1879-
* in-progress transactions or decoding of prepared transactions.
1879+
* in-progress transactions or decoding of prepared transactions.
18801880
*
18811881
* For detecting the concurrent abort we set CheckXidAlive to the current
18821882
* (sub)transaction's xid for which this change belongs to. And, during

src/backend/replication/slot.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -247,7 +247,7 @@ ReplicationSlotValidateName(const char *name, int elevel)
247247
* to be enabled only at the slot creation time. If we allow this option
248248
* to be changed during decoding then it is quite possible that we skip
249249
* prepare first time because this option was not enabled. Now next time
250-
* during getting changes, if the two_phase option is enabled it can skip
250+
* during getting changes, if the two_phase option is enabled it can skip
251251
* prepare because by that time start decoding point has been moved. So the
252252
* user will only get commit prepared.
253253
*/

src/backend/storage/ipc/procarray.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -2338,7 +2338,7 @@ GetSnapshotData(Snapshot snapshot)
23382338

23392339
/*
23402340
* We don't include our own XIDs (if any) in the snapshot. It
2341-
* needs to be includeded in the xmin computation, but we did so
2341+
* needs to be included in the xmin computation, but we did so
23422342
* outside the loop.
23432343
*/
23442344
if (pgxactoff == mypgxactoff)

src/backend/tsearch/ts_parse.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -248,7 +248,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
248248
dict = lookup_ts_dictionary_cache(ld->curDictId);
249249

250250
/*
251-
* Dictionary ld->curDictId asks us about following words
251+
* Dictionary ld->curDictId asks us about following words
252252
*/
253253

254254
while (ld->curSub)

src/backend/utils/adt/genfile.c

+3-3
Original file line numberDiff line numberDiff line change
@@ -59,9 +59,9 @@ convert_and_check_filename(text *arg)
5959
canonicalize_path(filename); /* filename can change length here */
6060

6161
/*
62-
* Roles with privleges of the 'pg_read_server_files' role are allowed to access
63-
* any files on the server as the PG user, so no need to do any further checks
64-
* here.
62+
* Roles with privileges of the 'pg_read_server_files' role are allowed to
63+
* access any files on the server as the PG user, so no need to do any
64+
* further checks here.
6565
*/
6666
if (has_privs_of_role(GetUserId(), ROLE_PG_READ_SERVER_FILES))
6767
return filename;

src/backend/utils/adt/geo_ops.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -3878,7 +3878,7 @@ lseg_inside_poly(Point *a, Point *b, POLYGON *poly, int start)
38783878
Point p;
38793879

38803880
/*
3881-
* if X-intersection wasn't found then check central point of tested
3881+
* if X-intersection wasn't found, then check central point of tested
38823882
* segment. In opposite case we already check all subsegments
38833883
*/
38843884
p.x = float8_div(float8_pl(t.p[0].x, t.p[1].x), 2.0);

src/backend/utils/adt/pg_locale.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -991,7 +991,7 @@ search_locale_enum(LPWSTR pStr, DWORD dwFlags, LPARAM lparam)
991991
test_locale, LOCALE_NAME_MAX_LENGTH))
992992
{
993993
/*
994-
* If the enumerated locale does not have a hyphen ("en") OR the
994+
* If the enumerated locale does not have a hyphen ("en") OR the
995995
* lc_message input does not have an underscore ("English"), we only
996996
* need to compare the <Language> tags.
997997
*/

src/backend/utils/adt/tsquery.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -261,7 +261,7 @@ parse_or_operator(TSQueryParserState pstate)
261261
/*
262262
* Suppose, we found an operand, but could be a not correct operand.
263263
* So we still treat OR literal as operation with possibly incorrect
264-
* operand and will not search it as lexeme
264+
* operand and will not search it as lexeme
265265
*/
266266
if (!t_isspace(ptr))
267267
break;

src/backend/utils/cache/relmapper.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -1044,7 +1044,7 @@ perform_relmap_update(bool shared, const RelMapFile *updates)
10441044
(shared ? "global" : DatabasePath));
10451045

10461046
/*
1047-
* We succesfully wrote the updated file, so it's now safe to rely on the
1047+
* We successfully wrote the updated file, so it's now safe to rely on the
10481048
* new values in this process, too.
10491049
*/
10501050
if (shared)
@@ -1093,7 +1093,7 @@ relmap_redo(XLogReaderState *record)
10931093
* an existing database as we do for creating a new database. In
10941094
* the latter case, taking the relmap log and sending sinval messages
10951095
* is unnecessary, but harmless. If we wanted to avoid it, we could
1096-
* add a flag to the WAL record to indicate which opration is being
1096+
* add a flag to the WAL record to indicate which operation is being
10971097
* performed.
10981098
*/
10991099
LWLockAcquire(RelationMappingLock, LW_EXCLUSIVE);

src/backend/utils/error/csvlog.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
* CSV logging
55
*
66
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7-
* Portions Copyright (c) 1994, Regents of the University of Californi
7+
* Portions Copyright (c) 1994, Regents of the University of California
88
*
99
*
1010
* IDENTIFICATION

src/backend/utils/error/elog.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -2269,7 +2269,7 @@ write_console(const char *line, int len)
22692269
/*
22702270
* Conversion on non-win32 platforms is not implemented yet. It requires
22712271
* non-throw version of pg_do_encoding_conversion(), that converts
2272-
* unconvertable characters to '?' without errors.
2272+
* unconvertible characters to '?' without errors.
22732273
*
22742274
* XXX: We have a no-throw version now. It doesn't convert to '?' though.
22752275
*/

src/backend/utils/error/jsonlog.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
* JSON logging
55
*
66
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
7-
* Portions Copyright (c) 1994, Regents of the University of Californi
7+
* Portions Copyright (c) 1994, Regents of the University of California
88
*
99
*
1010
* IDENTIFICATION

src/backend/utils/fmgr/funcapi.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@ static TypeFuncClass get_type_func_class(Oid typid, Oid *base_typeid);
6969
* descriptor coming from expectedDesc, which is the tuple descriptor
7070
* expected by the caller. SRF_SINGLE_BLESS can be set to complete the
7171
* information associated to the tuple descriptor, which is necessary
72-
* in some cases where the tuple descriptor comes from a transient
72+
* in some cases where the tuple descriptor comes from a transient
7373
* RECORD datatype.
7474
*/
7575
void

src/backend/utils/mmgr/generation.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -27,7 +27,7 @@
2727
* context's 'freeblock' field. If the freeblock field is already occupied
2828
* by another free block we simply return the newly empty block to malloc.
2929
*
30-
* This approach to free blocks requires fewer malloc/free calls for truely
30+
* This approach to free blocks requires fewer malloc/free calls for truly
3131
* first allocated, first free'd allocation patterns.
3232
*
3333
*-------------------------------------------------------------------------

src/bin/pg_basebackup/pg_basebackup.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -1464,7 +1464,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
14641464
GetCopyDataEnd(r, copybuf, cursor);
14651465

14661466
/*
1467-
* The server shouldn't send progres report messages too
1467+
* The server shouldn't send progress report messages too
14681468
* often, so we force an update each time we receive one.
14691469
*/
14701470
progress_report(state->tablespacenum, true, false);

src/bin/pgbench/pgbench.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -3450,7 +3450,7 @@ discardUntilSync(CState *st)
34503450
PQclear(res);
34513451
}
34523452

3453-
/* exit pipline */
3453+
/* exit pipeline */
34543454
if (PQexitPipelineMode(st->con) != 1)
34553455
{
34563456
pg_log_error("client %d aborted: failed to exit pipeline mode for rolling back the failed transaction",
@@ -7261,7 +7261,7 @@ main(int argc, char **argv)
72617261

72627262
/*
72637263
* All connections should be already closed in threadRun(), so this
7264-
* disconnect_all() will be a no-op, but clean up the connecions just to
7264+
* disconnect_all() will be a no-op, but clean up the connections just to
72657265
* be sure. We don't need to measure the disconnection delays here.
72667266
*/
72677267
disconnect_all(state, nclients);

src/bin/psql/copy.c

+2-1
Original file line numberDiff line numberDiff line change
@@ -653,7 +653,8 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary, PGresult **res)
653653
*
654654
* Make sure there's always space for four more bytes in the
655655
* buffer, plus a NUL terminator. That way, an EOF marker is
656-
* never split across two fgets() calls, which simplies the logic.
656+
* never split across two fgets() calls, which simplifies the
657+
* logic.
657658
*/
658659
if (buflen >= COPYBUFSIZ - 5 || (copydone && buflen > 0))
659660
{

src/bin/psql/describe.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -3080,7 +3080,7 @@ describeOneTableDetails(const char *schemaname,
30803080
* servers between v11 and v14, though these must still be shown to
30813081
* the user. So we use another property that is true for such
30823082
* inherited triggers to avoid them being hidden, which is their
3083-
* dependendence on another trigger.
3083+
* dependence on another trigger.
30843084
*/
30853085
if (pset.sversion >= 110000 && pset.sversion < 150000)
30863086
appendPQExpBufferStr(&buf, "(NOT t.tgisinternal OR (t.tgisinternal AND t.tgenabled = 'D') \n"

src/bin/psql/tab-complete.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -2257,7 +2257,7 @@ psql_completion(const char *text, int start, int end)
22572257
COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK", "UNIQUE", "PRIMARY KEY",
22582258
"EXCLUDE", "FOREIGN KEY");
22592259
}
2260-
/* ATER TABLE xxx ADD [COLUMN] yyy */
2260+
/* ALTER TABLE xxx ADD [COLUMN] yyy */
22612261
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN", MatchAny) ||
22622262
(Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAny) &&
22632263
!Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|EXCLUDE|FOREIGN")))

src/include/utils/sortsupport.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@
2424
* function will have a shim set up by sort support automatically. However,
2525
* opclasses that support the optional additional abbreviated key capability
2626
* must always provide an authoritative comparator used to tie-break
27-
* inconclusive abbreviated comparisons and also used when aborting
27+
* inconclusive abbreviated comparisons and also used when aborting
2828
* abbreviation. Furthermore, a converter and abort/costing function must be
2929
* provided.
3030
*

src/tools/mark_pgdllimport.pl

+1-1
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
# Perl script that tries to add PGDLLIMPORT markings to PostgreSQL
77
# header files.
88
#
9-
# This relies on a few idiosyncracies of the PostgreSQL cding style,
9+
# This relies on a few idiosyncrasies of the PostgreSQL coding style,
1010
# such as the fact that we always use "extern" in function
1111
# declarations, and that we don't use // comments. It's not very
1212
# smart and may not catch all cases.

0 commit comments

Comments
 (0)