Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/gin/ginbtree.c | 1
-rw-r--r--  src/backend/access/gist/gistbuild.c | 8
-rw-r--r--  src/backend/access/gist/gistget.c | 5
-rw-r--r--  src/backend/access/gist/gistutil.c | 6
-rw-r--r--  src/backend/access/gist/gistxlog.c | 4
-rw-r--r--  src/backend/access/hash/hashinsert.c | 2
-rw-r--r--  src/backend/access/hash/hashsearch.c | 2
-rw-r--r--  src/backend/access/heap/heapam.c | 461
-rw-r--r--  src/backend/access/heap/rewriteheap.c | 2
-rw-r--r--  src/backend/access/heap/visibilitymap.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 8
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 22
-rw-r--r--  src/backend/access/rmgrdesc/clogdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/dbasedesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/gindesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/gistdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/hashdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/heapdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/mxactdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/nbtdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/relmapdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/seqdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/smgrdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/spgdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/standbydesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/tblspcdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c | 4
-rw-r--r--  src/backend/access/rmgrdesc/xlogdesc.c | 9
-rw-r--r--  src/backend/access/spgist/spgtextproc.c | 2
-rw-r--r--  src/backend/access/transam/multixact.c | 184
-rw-r--r--  src/backend/access/transam/timeline.c | 21
-rw-r--r--  src/backend/access/transam/xact.c | 15
-rw-r--r--  src/backend/access/transam/xlog.c | 641
-rw-r--r--  src/backend/access/transam/xlogarchive.c | 41
-rw-r--r--  src/backend/access/transam/xlogfuncs.c | 10
-rw-r--r--  src/backend/access/transam/xlogreader.c | 6
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 6
-rw-r--r--  src/backend/catalog/aclchk.c | 2
-rw-r--r--  src/backend/catalog/catalog.c | 2
-rw-r--r--  src/backend/catalog/dependency.c | 2
-rw-r--r--  src/backend/catalog/heap.c | 13
-rw-r--r--  src/backend/catalog/namespace.c | 7
-rw-r--r--  src/backend/catalog/objectaccess.c | 38
-rw-r--r--  src/backend/catalog/objectaddress.c | 79
-rw-r--r--  src/backend/catalog/pg_constraint.c | 4
-rw-r--r--  src/backend/catalog/pg_enum.c | 2
-rw-r--r--  src/backend/catalog/pg_operator.c | 10
-rw-r--r--  src/backend/catalog/pg_proc.c | 10
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 2
-rw-r--r--  src/backend/catalog/storage.c | 13
-rw-r--r--  src/backend/commands/aggregatecmds.c | 6
-rw-r--r--  src/backend/commands/alter.c | 36
-rw-r--r--  src/backend/commands/async.c | 4
-rw-r--r--  src/backend/commands/cluster.c | 4
-rw-r--r--  src/backend/commands/copy.c | 57
-rw-r--r--  src/backend/commands/createas.c | 4
-rw-r--r--  src/backend/commands/dbcommands.c | 15
-rw-r--r--  src/backend/commands/event_trigger.c | 275
-rw-r--r--  src/backend/commands/explain.c | 4
-rw-r--r--  src/backend/commands/functioncmds.c | 4
-rw-r--r--  src/backend/commands/indexcmds.c | 4
-rw-r--r--  src/backend/commands/matview.c | 12
-rw-r--r--  src/backend/commands/opclasscmds.c | 4
-rw-r--r--  src/backend/commands/operatorcmds.c | 16
-rw-r--r--  src/backend/commands/proclang.c | 4
-rw-r--r--  src/backend/commands/sequence.c | 8
-rw-r--r--  src/backend/commands/tablecmds.c | 50
-rw-r--r--  src/backend/commands/trigger.c | 10
-rw-r--r--  src/backend/commands/typecmds.c | 64
-rw-r--r--  src/backend/commands/user.c | 2
-rw-r--r--  src/backend/commands/vacuum.c | 20
-rw-r--r--  src/backend/commands/vacuumlazy.c | 61
-rw-r--r--  src/backend/executor/execMain.c | 39
-rw-r--r--  src/backend/executor/execQual.c | 2
-rw-r--r--  src/backend/executor/functions.c | 4
-rw-r--r--  src/backend/executor/nodeLockRows.c | 3
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 36
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 2
-rw-r--r--  src/backend/executor/spi.c | 8
-rw-r--r--  src/backend/lib/binaryheap.c | 4
-rw-r--r--  src/backend/libpq/auth.c | 30
-rw-r--r--  src/backend/libpq/hba.c | 38
-rw-r--r--  src/backend/libpq/pqcomm.c | 2
-rw-r--r--  src/backend/main/main.c | 8
-rw-r--r--  src/backend/optimizer/geqo/geqo_cx.c | 1
-rw-r--r--  src/backend/optimizer/geqo/geqo_px.c | 1
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 2
-rw-r--r--  src/backend/optimizer/path/costsize.c | 4
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 8
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 24
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 4
-rw-r--r--  src/backend/optimizer/plan/analyzejoins.c | 2
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 28
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 4
-rw-r--r--  src/backend/optimizer/plan/planner.c | 55
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 26
-rw-r--r--  src/backend/optimizer/util/clauses.c | 2
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 13
-rw-r--r--  src/backend/parser/analyze.c | 28
-rw-r--r--  src/backend/parser/check_keywords.pl | 7
-rw-r--r--  src/backend/parser/parse_agg.c | 4
-rw-r--r--  src/backend/parser/parse_clause.c | 20
-rw-r--r--  src/backend/parser/parse_expr.c | 10
-rw-r--r--  src/backend/parser/parse_relation.c | 4
-rw-r--r--  src/backend/parser/parse_target.c | 4
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 17
-rw-r--r--  src/backend/port/sysv_shmem.c | 50
-rw-r--r--  src/backend/postmaster/autovacuum.c | 22
-rw-r--r--  src/backend/postmaster/checkpointer.c | 6
-rw-r--r--  src/backend/postmaster/fork_process.c | 8
-rw-r--r--  src/backend/postmaster/pgarch.c | 2
-rw-r--r--  src/backend/postmaster/pgstat.c | 8
-rw-r--r--  src/backend/postmaster/postmaster.c | 48
-rw-r--r--  src/backend/postmaster/syslogger.c | 6
-rw-r--r--  src/backend/regex/regc_nfa.c | 4
-rw-r--r--  src/backend/regex/regprefix.c | 18
-rw-r--r--  src/backend/replication/basebackup.c | 64
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c | 11
-rw-r--r--  src/backend/replication/syncrep.c | 2
-rw-r--r--  src/backend/replication/walreceiver.c | 89
-rw-r--r--  src/backend/replication/walsender.c | 182
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 14
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 94
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 15
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 124
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 8
-rw-r--r--  src/backend/storage/ipc/procarray.c | 30
-rw-r--r--  src/backend/storage/ipc/standby.c | 8
-rw-r--r--  src/backend/storage/lmgr/lock.c | 34
-rw-r--r--  src/backend/storage/lmgr/predicate.c | 4
-rw-r--r--  src/backend/storage/lmgr/proc.c | 26
-rw-r--r--  src/backend/storage/lmgr/spin.c | 2
-rw-r--r--  src/backend/storage/page/bufpage.c | 36
-rw-r--r--  src/backend/storage/page/checksum.c | 17
-rw-r--r--  src/backend/storage/smgr/smgr.c | 21
-rw-r--r--  src/backend/tcop/postgres.c | 2
-rw-r--r--  src/backend/tcop/pquery.c | 2
-rw-r--r--  src/backend/tsearch/ts_selfuncs.c | 2
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c | 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 12
-rw-r--r--  src/backend/utils/adt/date.c | 4
-rw-r--r--  src/backend/utils/adt/datetime.c | 5
-rw-r--r--  src/backend/utils/adt/formatting.c | 11
-rw-r--r--  src/backend/utils/adt/json.c | 2
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 10
-rw-r--r--  src/backend/utils/adt/misc.c | 10
-rw-r--r--  src/backend/utils/adt/numeric.c | 2
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 10
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 2
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 10
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 41
-rw-r--r--  src/backend/utils/adt/rangetypes_selfuncs.c | 165
-rw-r--r--  src/backend/utils/adt/rangetypes_spgist.c | 74
-rw-r--r--  src/backend/utils/adt/rangetypes_typanalyze.c | 50
-rw-r--r--  src/backend/utils/adt/regproc.c | 5
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 44
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 18
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 80
-rw-r--r--  src/backend/utils/adt/timestamp.c | 12
-rw-r--r--  src/backend/utils/adt/tsquery_rewrite.c | 2
-rw-r--r--  src/backend/utils/adt/varlena.c | 6
-rw-r--r--  src/backend/utils/adt/xml.c | 7
-rw-r--r--  src/backend/utils/cache/catcache.c | 16
-rw-r--r--  src/backend/utils/cache/evtcache.c | 42
-rw-r--r--  src/backend/utils/cache/plancache.c | 14
-rw-r--r--  src/backend/utils/cache/relcache.c | 193
-rw-r--r--  src/backend/utils/cache/syscache.c | 4
-rw-r--r--  src/backend/utils/error/elog.c | 4
-rw-r--r--  src/backend/utils/hash/dynahash.c | 12
-rw-r--r--  src/backend/utils/init/miscinit.c | 16
-rw-r--r--  src/backend/utils/init/postinit.c | 6
-rw-r--r--  src/backend/utils/mb/mbutils.c | 6
-rw-r--r--  src/backend/utils/mb/wchar.c | 20
-rw-r--r--  src/backend/utils/misc/guc.c | 4
-rw-r--r--  src/backend/utils/resowner/resowner.c | 10
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 4
-rw-r--r--  src/backend/utils/time/tqual.c | 59
177 files changed, 2434 insertions, 2331 deletions
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 7acc8f646ec..2a6be4b1a99 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -173,7 +173,6 @@ void
ginFindParents(GinBtree btree, GinBtreeStack *stack,
BlockNumber rootBlkno)
{
-
Page page;
Buffer buffer;
BlockNumber blkno,
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 46f7ce65635..2f2edb83626 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -610,9 +610,9 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
newtup = gistgetadjusted(indexrel, idxtuple, itup, giststate);
if (newtup)
{
- blkno = gistbufferinginserttuples(buildstate, buffer, level,
- &newtup, 1, childoffnum,
- InvalidBlockNumber, InvalidOffsetNumber);
+ blkno = gistbufferinginserttuples(buildstate, buffer, level,
+ &newtup, 1, childoffnum,
+ InvalidBlockNumber, InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
@@ -680,7 +680,7 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
GISTBuildBuffers *gfbb = buildstate->gfbb;
List *splitinfo;
bool is_split;
- BlockNumber placed_to_blk = InvalidBlockNumber;
+ BlockNumber placed_to_blk = InvalidBlockNumber;
is_split = gistplacetopage(buildstate->indexrel,
buildstate->freespace,
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index cef31ce66e9..e97ab8f3fd5 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -364,8 +364,9 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
item->blkno = ItemPointerGetBlockNumber(&it->t_tid);
/*
- * LSN of current page is lsn of parent page for child. We only
- * have a shared lock, so we need to get the LSN atomically.
+ * LSN of current page is lsn of parent page for child. We
+ * only have a shared lock, so we need to get the LSN
+ * atomically.
*/
item->data.parentlsn = BufferGetLSNAtomic(buffer);
}
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index f7d50ddb712..b9c1967ebc0 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */
* some inserts to go to other equally-good subtrees.
*
* keep_current_best is -1 if we haven't yet had to make a random choice
- * whether to keep the current best tuple. If we have done so, and
+ * whether to keep the current best tuple. If we have done so, and
* decided to keep it, keep_current_best is 1; if we've decided to
* replace, keep_current_best is 0. (This state will be reset to -1 as
* soon as we've made the replacement, but sometimes we make the choice in
@@ -810,8 +810,8 @@ gistGetFakeLSN(Relation rel)
if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
{
/*
- * Temporary relations are only accessible in our session, so a
- * simple backend-local counter will do.
+ * Temporary relations are only accessible in our session, so a simple
+ * backend-local counter will do.
*/
return counter++;
}
diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c
index 3daeea396be..17946bfec3f 100644
--- a/src/backend/access/gist/gistxlog.c
+++ b/src/backend/access/gist/gistxlog.c
@@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */
* follow-right flag, because that change is not included in the full-page
* image. To be sure that the intermediate state with the wrong flag value is
* not visible to concurrent Hot Standby queries, this function handles
- * restoring the full-page image as well as updating the flag. (Note that
+ * restoring the full-page image as well as updating the flag. (Note that
* we never need to do anything else to the child page in the current WAL
* action.)
*/
@@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
/*
* We need to acquire and hold lock on target page while updating the left
- * child page. If we have a full-page image of target page, getting the
+ * child page. If we have a full-page image of target page, getting the
* lock is a side-effect of restoring that image. Note that even if the
* target page no longer exists, we'll still attempt to replay the change
* on the child page.
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 63be2f37872..4508a36bd05 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -90,7 +90,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index ceb9ef72baa..91661ba0e03 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir)
/*
* If the previous iteration of this loop locked what is still the
- * correct target bucket, we are done. Otherwise, drop any old lock
+ * correct target bucket, we are done. Otherwise, drop any old lock
* and lock what now appears to be the correct bucket.
*/
if (retry)
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9498cbb8a51..834a566f7e0 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -120,32 +120,34 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi,
static const struct
{
LOCKMODE hwlock;
- MultiXactStatus lockstatus;
- MultiXactStatus updstatus;
+ MultiXactStatus lockstatus;
+ MultiXactStatus updstatus;
}
-tupleLockExtraInfo[MaxLockTupleMode + 1] =
+
+ tupleLockExtraInfo[MaxLockTupleMode + 1] =
{
- { /* LockTupleKeyShare */
+ { /* LockTupleKeyShare */
AccessShareLock,
MultiXactStatusForKeyShare,
- -1 /* KeyShare does not allow updating tuples */
+ -1 /* KeyShare does not allow updating tuples */
},
- { /* LockTupleShare */
+ { /* LockTupleShare */
RowShareLock,
MultiXactStatusForShare,
- -1 /* Share does not allow updating tuples */
+ -1 /* Share does not allow updating tuples */
},
- { /* LockTupleNoKeyExclusive */
+ { /* LockTupleNoKeyExclusive */
ExclusiveLock,
MultiXactStatusForNoKeyUpdate,
MultiXactStatusNoKeyUpdate
},
- { /* LockTupleExclusive */
+ { /* LockTupleExclusive */
AccessExclusiveLock,
MultiXactStatusForUpdate,
MultiXactStatusUpdate
}
};
+
/* Get the LOCKMODE for a given MultiXactStatus */
#define LOCKMODE_from_mxstatus(status) \
(tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
@@ -168,12 +170,12 @@ tupleLockExtraInfo[MaxLockTupleMode + 1] =
*/
static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
{
- LockTupleKeyShare, /* ForKeyShare */
- LockTupleShare, /* ForShare */
- LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
- LockTupleExclusive, /* ForUpdate */
- LockTupleNoKeyExclusive, /* NoKeyUpdate */
- LockTupleExclusive /* Update */
+ LockTupleKeyShare, /* ForKeyShare */
+ LockTupleShare, /* ForShare */
+ LockTupleNoKeyExclusive, /* ForNoKeyUpdate */
+ LockTupleExclusive, /* ForUpdate */
+ LockTupleNoKeyExclusive, /* NoKeyUpdate */
+ LockTupleExclusive /* Update */
};
/* Get the LockTupleMode for a given MultiXactStatus */
@@ -365,10 +367,10 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
* page. That's how index-only scans work fine in hot standby. A crucial
* difference between index-only scans and heap scans is that the
* index-only scan completely relies on the visibility map where as heap
- * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if the
- * page-level flag can be trusted in the same way, because it might get
- * propagated somehow without being explicitly WAL-logged, e.g. via a full
- * page write. Until we can prove that beyond doubt, let's check each
+ * scan looks at the page-level PD_ALL_VISIBLE flag. We are not sure if
+ * the page-level flag can be trusted in the same way, because it might
+ * get propagated somehow without being explicitly WAL-logged, e.g. via a
+ * full page write. Until we can prove that beyond doubt, let's check each
* tuple for visibility the hard way.
*/
all_visible = PageIsAllVisible(dp) && !snapshot->takenDuringRecovery;
@@ -1880,7 +1882,7 @@ heap_get_latest_tid(Relation relation,
* tuple. Check for XMIN match.
*/
if (TransactionIdIsValid(priorXmax) &&
- !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
+ !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
{
UnlockReleaseBuffer(buffer);
break;
@@ -2488,7 +2490,7 @@ compute_infobits(uint16 infomask, uint16 infomask2)
((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
- /* note we ignore HEAP_XMAX_SHR_LOCK here */
+ /* note we ignore HEAP_XMAX_SHR_LOCK here */
((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
XLHL_KEYS_UPDATED : 0);
@@ -2730,13 +2732,12 @@ l1:
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -2846,7 +2847,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
result = heap_delete(relation, tid,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd);
switch (result)
{
@@ -2936,7 +2937,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
bool checked_lockers;
bool locker_remains;
TransactionId xmax_new_tuple,
- xmax_old_tuple;
+ xmax_old_tuple;
uint16 infomask_old_tuple,
infomask2_old_tuple,
infomask_new_tuple,
@@ -3006,13 +3007,13 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* If we're not updating any "key" column, we can grab a weaker lock type.
- * This allows for more concurrency when we are running simultaneously with
- * foreign key checks.
+ * This allows for more concurrency when we are running simultaneously
+ * with foreign key checks.
*
- * Note that if a column gets detoasted while executing the update, but the
- * value ends up being the same, this test will fail and we will use the
- * stronger lock. This is acceptable; the important case to optimize is
- * updates that don't manipulate key columns, not those that
+ * Note that if a column gets detoasted while executing the update, but
+ * the value ends up being the same, this test will fail and we will use
+ * the stronger lock. This is acceptable; the important case to optimize
+ * is updates that don't manipulate key columns, not those that
* serendipitiously arrive at the same key values.
*/
HeapSatisfiesHOTandKeyUpdate(relation, hot_attrs, key_attrs,
@@ -3026,12 +3027,12 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
}
@@ -3064,7 +3065,7 @@ l2:
}
else if (result == HeapTupleBeingUpdated && wait)
{
- TransactionId xwait;
+ TransactionId xwait;
uint16 infomask;
bool can_continue = false;
@@ -3073,13 +3074,14 @@ l2:
/*
* XXX note that we don't consider the "no wait" case here. This
* isn't a problem currently because no caller uses that case, but it
- * should be fixed if such a caller is introduced. It wasn't a problem
- * previously because this code would always wait, but now that some
- * tuple locks do not conflict with one of the lock modes we use, it is
- * possible that this case is interesting to handle specially.
+ * should be fixed if such a caller is introduced. It wasn't a
+ * problem previously because this code would always wait, but now
+ * that some tuple locks do not conflict with one of the lock modes we
+ * use, it is possible that this case is interesting to handle
+ * specially.
*
- * This may cause failures with third-party code that calls heap_update
- * directly.
+ * This may cause failures with third-party code that calls
+ * heap_update directly.
*/
/* must copy state data before unlocking buffer */
@@ -3109,15 +3111,15 @@ l2:
* gone (or even not sleep at all in some cases); we need to preserve
* it as locker, unless it is gone completely.
*
- * If it's not a multi, we need to check for sleeping conditions before
- * actually going to sleep. If the update doesn't conflict with the
- * locks, we just continue without sleeping (but making sure it is
- * preserved).
+ * If it's not a multi, we need to check for sleeping conditions
+ * before actually going to sleep. If the update doesn't conflict
+ * with the locks, we just continue without sleeping (but making sure
+ * it is preserved).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId update_xact;
- int remain;
+ TransactionId update_xact;
+ int remain;
/* wait for multixact */
MultiXactIdWait((MultiXactId) xwait, mxact_status, &remain,
@@ -3135,18 +3137,18 @@ l2:
goto l2;
/*
- * Note that the multixact may not be done by now. It could have
+ * Note that the multixact may not be done by now. It could have
* surviving members; our own xact or other subxacts of this
* backend, and also any other concurrent transaction that locked
- * the tuple with KeyShare if we only got TupleLockUpdate. If this
- * is the case, we have to be careful to mark the updated tuple
- * with the surviving members in Xmax.
+ * the tuple with KeyShare if we only got TupleLockUpdate. If
+ * this is the case, we have to be careful to mark the updated
+ * tuple with the surviving members in Xmax.
*
- * Note that there could have been another update in the MultiXact.
- * In that case, we need to check whether it committed or aborted.
- * If it aborted we are safe to update it again; otherwise there is
- * an update conflict, and we have to return HeapTupleUpdated
- * below.
+ * Note that there could have been another update in the
+ * MultiXact. In that case, we need to check whether it committed
+ * or aborted. If it aborted we are safe to update it again;
+ * otherwise there is an update conflict, and we have to return
+ * HeapTupleUpdated below.
*
* In the LockTupleExclusive case, we still need to preserve the
* surviving members: those would include the tuple locks we had
@@ -3167,21 +3169,21 @@ l2:
else
{
/*
- * If it's just a key-share locker, and we're not changing the
- * key columns, we don't need to wait for it to end; but we
- * need to preserve it as locker.
+ * If it's just a key-share locker, and we're not changing the key
+ * columns, we don't need to wait for it to end; but we need to
+ * preserve it as locker.
*/
if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) && key_intact)
{
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * recheck the locker; if someone else changed the tuple while we
- * weren't looking, start over.
+ * recheck the locker; if someone else changed the tuple while
+ * we weren't looking, start over.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
can_continue = true;
@@ -3194,13 +3196,13 @@ l2:
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * xwait is done, but if xwait had just locked the tuple then some
- * other xact could update this tuple before we get to this point.
- * Check for xmax change, and start over if so.
+ * xwait is done, but if xwait had just locked the tuple then
+ * some other xact could update this tuple before we get to
+ * this point. Check for xmax change, and start over if so.
*/
if ((oldtup.t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(oldtup.t_data),
+ xwait))
goto l2;
/* Otherwise check if it committed or aborted */
@@ -3247,8 +3249,8 @@ l2:
* visible while we were busy locking the buffer, or during some
* subsequent window during which we had it unlocked, we'll have to unlock
* and re-lock, to avoid holding the buffer lock across an I/O. That's a
- * bit unfortunate, especially since we'll now have to recheck whether
- * the tuple has been locked or updated under us, but hopefully it won't
+ * bit unfortunate, especially since we'll now have to recheck whether the
+ * tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3656,9 +3658,9 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum,
/*
* Extract the corresponding values. XXX this is pretty inefficient if
- * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do a
- * single heap_deform_tuple call on each tuple, instead? But that doesn't
- * work for system columns ...
+ * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do
+ * a single heap_deform_tuple call on each tuple, instead? But that
+ * doesn't work for system columns ...
*/
value1 = heap_getattr(tup1, attrnum, tupdesc, &isnull1);
value2 = heap_getattr(tup2, attrnum, tupdesc, &isnull2);
@@ -3720,12 +3722,12 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
bool *satisfies_hot, bool *satisfies_key,
HeapTuple oldtup, HeapTuple newtup)
{
- int next_hot_attnum;
- int next_key_attnum;
- bool hot_result = true;
- bool key_result = true;
- bool key_done = false;
- bool hot_done = false;
+ int next_hot_attnum;
+ int next_key_attnum;
+ bool hot_result = true;
+ bool key_result = true;
+ bool key_done = false;
+ bool hot_done = false;
next_hot_attnum = bms_first_member(hot_attrs);
if (next_hot_attnum == -1)
@@ -3743,8 +3745,8 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation,
for (;;)
{
- int check_now;
- bool changed;
+ int check_now;
+ bool changed;
/* both bitmapsets are now empty */
if (key_done && hot_done)
@@ -3813,7 +3815,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
result = heap_update(relation, otid, tup,
GetCurrentCommandId(true), InvalidSnapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd, &lockmode);
switch (result)
{
@@ -3843,7 +3845,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
static MultiXactStatus
get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
{
- MultiXactStatus retval;
+ MultiXactStatus retval;
if (is_update)
retval = tupleLockExtraInfo[mode].updstatus;
@@ -3933,7 +3935,7 @@ l3:
uint16 infomask;
uint16 infomask2;
bool require_sleep;
- ItemPointerData t_ctid;
+ ItemPointerData t_ctid;
/* must copy state data before unlocking buffer */
xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
@@ -3944,22 +3946,22 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
/*
- * If any subtransaction of the current top transaction already holds a
- * lock as strong or stronger than what we're requesting, we
+ * If any subtransaction of the current top transaction already holds
+ * a lock as strong or stronger than what we're requesting, we
* effectively hold the desired lock already. We *must* succeed
- * without trying to take the tuple lock, else we will deadlock against
- * anyone wanting to acquire a stronger lock.
+ * without trying to take the tuple lock, else we will deadlock
+ * against anyone wanting to acquire a stronger lock.
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int i;
- int nmembers;
+ int i;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been the
- * case, HeapTupleSatisfiesUpdate would have returned MayBeUpdated
- * and we wouldn't be here.
+ * We don't need to allow old multixacts here; if that had been
+ * the case, HeapTupleSatisfiesUpdate would have returned
+ * MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
@@ -3967,7 +3969,7 @@ l3:
{
if (TransactionIdIsCurrentTransactionId(members[i].xid))
{
- LockTupleMode membermode;
+ LockTupleMode membermode;
membermode = TUPLOCK_from_mxstatus(members[i].status);
@@ -4001,8 +4003,8 @@ l3:
if (!ConditionalLockTupleTuplock(relation, tid, mode))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on row in relation \"%s\"",
- RelationGetRelationName(relation))));
+ errmsg("could not obtain lock on row in relation \"%s\"",
+ RelationGetRelationName(relation))));
}
else
LockTupleTuplock(relation, tid, mode);
@@ -4023,34 +4025,34 @@ l3:
* continue if the key hasn't been modified.
*
* However, if there are updates, we need to walk the update chain
- * to mark future versions of the row as locked, too. That way, if
- * somebody deletes that future version, we're protected against
- * the key going away. This locking of future versions could block
- * momentarily, if a concurrent transaction is deleting a key; or
- * it could return a value to the effect that the transaction
- * deleting the key has already committed. So we do this before
- * re-locking the buffer; otherwise this would be prone to
- * deadlocks.
+ * to mark future versions of the row as locked, too. That way,
+ * if somebody deletes that future version, we're protected
+ * against the key going away. This locking of future versions
+ * could block momentarily, if a concurrent transaction is
+ * deleting a key; or it could return a value to the effect that
+ * the transaction deleting the key has already committed. So we
+ * do this before re-locking the buffer; otherwise this would be
+ * prone to deadlocks.
*
* Note that the TID we're locking was grabbed before we unlocked
- * the buffer. For it to change while we're not looking, the other
- * properties we're testing for below after re-locking the buffer
- * would also change, in which case we would restart this loop
- * above.
+ * the buffer. For it to change while we're not looking, the
+ * other properties we're testing for below after re-locking the
+ * buffer would also change, in which case we would restart this
+ * loop above.
*/
if (!(infomask2 & HEAP_KEYS_UPDATED))
{
- bool updated;
+ bool updated;
updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
/*
- * If there are updates, follow the update chain; bail out
- * if that cannot be done.
+ * If there are updates, follow the update chain; bail out if
+ * that cannot be done.
*/
if (follow_updates && updated)
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@@ -4069,8 +4071,9 @@ l3:
/*
* Make sure it's still an appropriate lock, else start over.
* Also, if it wasn't updated before we released the lock, but
- * is updated now, we start over too; the reason is that we now
- * need to follow the update chain to lock the new versions.
+ * is updated now, we start over too; the reason is that we
+ * now need to follow the update chain to lock the new
+ * versions.
*/
if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
@@ -4114,20 +4117,20 @@ l3:
{
/*
* If we're requesting NoKeyExclusive, we might also be able to
- * avoid sleeping; just ensure that there's no other lock type than
- * KeyShare. Note that this is a bit more involved than just
+ * avoid sleeping; just ensure that there's no other lock type
+ * than KeyShare. Note that this is a bit more involved than just
* checking hint bits -- we need to expand the multixact to figure
* out lock modes for each one (unless there was only one such
* locker).
*/
if (infomask & HEAP_XMAX_IS_MULTI)
{
- int nmembers;
+ int nmembers;
MultiXactMember *members;
/*
- * We don't need to allow old multixacts here; if that had been
- * the case, HeapTupleSatisfiesUpdate would have returned
+ * We don't need to allow old multixacts here; if that had
+ * been the case, HeapTupleSatisfiesUpdate would have returned
* MayBeUpdated and we wouldn't be here.
*/
nmembers = GetMultiXactIdMembers(xwait, &members, false);
@@ -4135,15 +4138,15 @@ l3:
if (nmembers <= 0)
{
/*
- * No need to keep the previous xmax here. This is unlikely
- * to happen.
+ * No need to keep the previous xmax here. This is
+ * unlikely to happen.
*/
require_sleep = false;
}
else
{
- int i;
- bool allowed = true;
+ int i;
+ bool allowed = true;
for (i = 0; i < nmembers; i++)
{
@@ -4180,8 +4183,8 @@ l3:
/* if the xmax changed in the meantime, start over */
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/* otherwise, we're good */
require_sleep = false;
@@ -4221,7 +4224,7 @@ l3:
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@@ -4243,15 +4246,15 @@ l3:
* for xmax change, and start over if so.
*/
if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
* Of course, the multixact might not be done here: if we're
* requesting a light lock mode, other transactions with light
* locks could still be alive, as well as locks owned by our
- * own xact or other subxacts of this backend. We need to
+ * own xact or other subxacts of this backend. We need to
* preserve the surviving MultiXact members. Note that it
* isn't absolutely necessary in the latter case, but doing so
* is simpler.
@@ -4275,7 +4278,7 @@ l3:
if (follow_updates &&
!HEAP_XMAX_IS_LOCKED_ONLY(infomask))
{
- HTSU_Result res;
+ HTSU_Result res;
res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
GetCurrentTransactionId(),
@@ -4294,15 +4297,15 @@ l3:
/*
* xwait is done, but if xwait had just locked the tuple then
* some other xact could update this tuple before we get to
- * this point. Check for xmax change, and start over if so.
+ * this point. Check for xmax change, and start over if so.
*/
if ((tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI) ||
- !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
- xwait))
+ !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
+ xwait))
goto l3;
/*
- * Otherwise check if it committed or aborted. Note we cannot
+ * Otherwise check if it committed or aborted. Note we cannot
* be here if the tuple was only locked by somebody who didn't
* conflict with us; that should have been handled above. So
* that transaction must necessarily be gone by now.
@@ -4355,8 +4358,8 @@ failed:
* for cases where it is a plain TransactionId.
*
* Note in particular that this covers the case where we already hold
- * exclusive lock on the tuple and the caller only wants key share or share
- * lock. It would certainly not do to give up the exclusive lock.
+ * exclusive lock on the tuple and the caller only wants key share or
+ * share lock. It would certainly not do to give up the exclusive lock.
*/
if (!(old_infomask & (HEAP_XMAX_INVALID |
HEAP_XMAX_COMMITTED |
@@ -4379,13 +4382,12 @@ failed:
}
/*
- * If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * If this is the first possibly-multixact-able operation in the current
+ * transaction, set my per-backend OldestMemberMXactId setting. We can be
+ * certain that the transaction will never become a member of any older
+ * MultiXactIds than that. (We have to do this even if we end up just
+ * using our own TransactionId below, since some other backend could
+ * incorporate our XID into a MultiXact immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -4419,11 +4421,11 @@ failed:
HeapTupleHeaderSetXmax(tuple->t_data, xid);
/*
- * Make sure there is no forward chain link in t_ctid. Note that in the
+ * Make sure there is no forward chain link in t_ctid. Note that in the
* cases where the tuple has been updated, we must not overwrite t_ctid,
* because it was set by the updater. Moreover, if the tuple has been
- * updated, we need to follow the update chain to lock the new versions
- * of the tuple as well.
+ * updated, we need to follow the update chain to lock the new versions of
+ * the tuple as well.
*/
if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
tuple->t_data->t_ctid = *tid;
@@ -4514,9 +4516,9 @@ compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
TransactionId *result_xmax, uint16 *result_infomask,
uint16 *result_infomask2)
{
- TransactionId new_xmax;
- uint16 new_infomask,
- new_infomask2;
+ TransactionId new_xmax;
+ uint16 new_infomask,
+ new_infomask2;
l5:
new_infomask = 0;
@@ -4562,11 +4564,11 @@ l5:
}
else if (old_infomask & HEAP_XMAX_IS_MULTI)
{
- MultiXactStatus new_status;
+ MultiXactStatus new_status;
/*
- * Currently we don't allow XMAX_COMMITTED to be set for multis,
- * so cross-check.
+ * Currently we don't allow XMAX_COMMITTED to be set for multis, so
+ * cross-check.
*/
Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
@@ -4587,10 +4589,11 @@ l5:
/*
* If the XMAX is already a MultiXactId, then we need to expand it to
- * include add_to_xmax; but if all the members were lockers and are all
- * gone, we can do away with the IS_MULTI bit and just set add_to_xmax
- * as the only locker/updater. If all lockers are gone and we have an
- * updater that aborted, we can also do without a multi.
+ * include add_to_xmax; but if all the members were lockers and are
+ * all gone, we can do away with the IS_MULTI bit and just set
+ * add_to_xmax as the only locker/updater. If all lockers are gone
+ * and we have an updater that aborted, we can also do without a
+ * multi.
*
* The cost of doing GetMultiXactIdMembers would be paid by
* MultiXactIdExpand if we weren't to do this, so this check is not
@@ -4624,8 +4627,8 @@ l5:
* It's a committed update, so we need to preserve him as updater of
* the tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
@@ -4633,6 +4636,7 @@ l5:
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
@@ -4648,8 +4652,8 @@ l5:
* create a new MultiXactId that includes both the old locker or
* updater and our own TransactionId.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
{
@@ -4668,8 +4672,8 @@ l5:
{
/*
* LOCK_ONLY can be present alone only when a page has been
- * upgraded by pg_upgrade. But in that case,
- * TransactionIdIsInProgress() should have returned false. We
+ * upgraded by pg_upgrade. But in that case,
+ * TransactionIdIsInProgress() should have returned false. We
* assume it's no longer locked in this case.
*/
elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
@@ -4696,8 +4700,8 @@ l5:
*/
if (xmax == add_to_xmax)
{
- LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
- bool old_isupd = ISUPDATE_from_mxstatus(status);
+ LockTupleMode old_mode = TUPLOCK_from_mxstatus(status);
+ bool old_isupd = ISUPDATE_from_mxstatus(status);
/*
* We can do this if the new LockTupleMode is higher or equal than
@@ -4728,8 +4732,8 @@ l5:
* It's a committed update, so we gotta preserve him as updater of the
* tuple.
*/
- MultiXactStatus status;
- MultiXactStatus new_status;
+ MultiXactStatus status;
+ MultiXactStatus new_status;
if (old_infomask2 & HEAP_KEYS_UPDATED)
status = MultiXactStatusUpdate;
@@ -4737,6 +4741,7 @@ l5:
status = MultiXactStatusNoKeyUpdate;
new_status = get_mxact_status_for_lock(mode, is_update);
+
/*
* since it's not running, it's obviously impossible for the old
* updater to be identical to the current one, so we need not check
@@ -4774,14 +4779,14 @@ static HTSU_Result
heap_lock_updated_tuple_rec(Relation rel, ItemPointer tid, TransactionId xid,
LockTupleMode mode)
{
- ItemPointerData tupid;
- HeapTupleData mytup;
- Buffer buf;
- uint16 new_infomask,
- new_infomask2,
- old_infomask;
- TransactionId xmax,
- new_xmax;
+ ItemPointerData tupid;
+ HeapTupleData mytup;
+ Buffer buf;
+ uint16 new_infomask,
+ new_infomask2,
+ old_infomask;
+ TransactionId xmax,
+ new_xmax;
ItemPointerCopy(tid, &tupid);
@@ -4802,16 +4807,17 @@ l4:
xmax = HeapTupleHeaderGetRawXmax(mytup.t_data);
/*
- * If this tuple is updated and the key has been modified (or deleted),
- * what we do depends on the status of the updating transaction: if
- * it's live, we sleep until it finishes; if it has committed, we have
- * to fail (i.e. return HeapTupleUpdated); if it aborted, we ignore it.
- * For updates that didn't touch the key, we can just plough ahead.
+ * If this tuple is updated and the key has been modified (or
+ * deleted), what we do depends on the status of the updating
+ * transaction: if it's live, we sleep until it finishes; if it has
+ * committed, we have to fail (i.e. return HeapTupleUpdated); if it
+ * aborted, we ignore it. For updates that didn't touch the key, we
+ * can just plough ahead.
*/
if (!(old_infomask & HEAP_XMAX_INVALID) &&
(mytup.t_data->t_infomask2 & HEAP_KEYS_UPDATED))
{
- TransactionId update_xid;
+ TransactionId update_xid;
/*
* Note: we *must* check TransactionIdIsInProgress before
@@ -4832,7 +4838,7 @@ l4:
goto l4;
}
else if (TransactionIdDidAbort(update_xid))
- ; /* okay to proceed */
+ ; /* okay to proceed */
else if (TransactionIdDidCommit(update_xid))
{
UnlockReleaseBuffer(buf);
@@ -4861,7 +4867,7 @@ l4:
{
xl_heap_lock_updated xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.target.node = rel->rd_node;
@@ -4889,7 +4895,7 @@ l4:
/* if we find the end of update chain, we're done. */
if (mytup.t_data->t_infomask & HEAP_XMAX_INVALID ||
- ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
+ ItemPointerEquals(&mytup.t_self, &mytup.t_data->t_ctid) ||
HeapTupleHeaderIsOnlyLocked(mytup.t_data))
{
UnlockReleaseBuffer(buf);
@@ -4904,13 +4910,13 @@ l4:
/*
* heap_lock_updated_tuple
- * Follow update chain when locking an updated tuple, acquiring locks (row
- * marks) on the updated versions.
+ * Follow update chain when locking an updated tuple, acquiring locks (row
+ * marks) on the updated versions.
*
* The initial tuple is assumed to be already locked.
*
* This function doesn't check visibility, it just inconditionally marks the
- * tuple(s) as locked. If any tuple in the updated chain is being deleted
+ * tuple(s) as locked. If any tuple in the updated chain is being deleted
* concurrently (or updated with the key being modified), sleep until the
* transaction doing it is finished.
*
@@ -4932,12 +4938,12 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid,
{
/*
* If this is the first possibly-multixact-able operation in the
- * current transaction, set my per-backend OldestMemberMXactId setting.
- * We can be certain that the transaction will never become a member of
- * any older MultiXactIds than that. (We have to do this even if we
- * end up just using our own TransactionId below, since some other
- * backend could incorporate our XID into a MultiXact immediately
- * afterwards.)
+ * current transaction, set my per-backend OldestMemberMXactId
+ * setting. We can be certain that the transaction will never become a
+ * member of any older MultiXactIds than that. (We have to do this
+ * even if we end up just using our own TransactionId below, since
+ * some other backend could incorporate our XID into a MultiXact
+ * immediately afterwards.)
*/
MultiXactIdSetOldestMember();
@@ -5117,9 +5123,9 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
HeapTupleHeaderSetXmax(tuple, InvalidTransactionId);
/*
- * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED
- * + LOCKED. Normalize to INVALID just to be sure no one gets
- * confused. Also get rid of the HEAP_KEYS_UPDATED bit.
+ * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED +
+ * LOCKED. Normalize to INVALID just to be sure no one gets confused.
+ * Also get rid of the HEAP_KEYS_UPDATED bit.
*/
tuple->t_infomask &= ~HEAP_XMAX_BITS;
tuple->t_infomask |= HEAP_XMAX_INVALID;
@@ -5172,13 +5178,13 @@ static void
GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
uint16 *new_infomask2)
{
- int nmembers;
- MultiXactMember *members;
- int i;
- uint16 bits = HEAP_XMAX_IS_MULTI;
- uint16 bits2 = 0;
- bool has_update = false;
- LockTupleMode strongest = LockTupleKeyShare;
+ int nmembers;
+ MultiXactMember *members;
+ int i;
+ uint16 bits = HEAP_XMAX_IS_MULTI;
+ uint16 bits2 = 0;
+ bool has_update = false;
+ LockTupleMode strongest = LockTupleKeyShare;
/*
* We only use this in multis we just created, so they cannot be values
@@ -5188,7 +5194,7 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
for (i = 0; i < nmembers; i++)
{
- LockTupleMode mode;
+ LockTupleMode mode;
/*
* Remember the strongest lock mode held by any member of the
@@ -5249,22 +5255,22 @@ GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
static TransactionId
MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
{
- TransactionId update_xact = InvalidTransactionId;
- MultiXactMember *members;
- int nmembers;
+ TransactionId update_xact = InvalidTransactionId;
+ MultiXactMember *members;
+ int nmembers;
Assert(!(t_infomask & HEAP_XMAX_LOCK_ONLY));
Assert(t_infomask & HEAP_XMAX_IS_MULTI);
/*
- * Since we know the LOCK_ONLY bit is not set, this cannot be a
- * multi from pre-pg_upgrade.
+ * Since we know the LOCK_ONLY bit is not set, this cannot be a multi from
+ * pre-pg_upgrade.
*/
nmembers = GetMultiXactIdMembers(xmax, &members, false);
if (nmembers > 0)
{
- int i;
+ int i;
for (i = 0; i < nmembers; i++)
{
@@ -5284,6 +5290,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
members[i].status == MultiXactStatusUpdate);
update_xact = members[i].xid;
#ifndef USE_ASSERT_CHECKING
+
/*
* in an assert-enabled build, walk the whole array to ensure
* there's no other updater.
@@ -5300,7 +5307,7 @@ MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
/*
* HeapTupleGetUpdateXid
- * As above, but use a HeapTupleHeader
+ * As above, but use a HeapTupleHeader
*
* See also HeapTupleHeaderGetUpdateXid, which can be used without previously
* checking the hint bits.
@@ -5314,7 +5321,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple)
/*
* Do_MultiXactIdWait
- * Actual implementation for the two functions below.
+ * Actual implementation for the two functions below.
*
* We do this by sleeping on each member using XactLockTableWait. Any
* members that belong to the current backend are *not* waited for, however;
@@ -5432,7 +5439,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
* heap_tuple_needs_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
- * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
+ * are older than the specified cutoff XID or MultiXactId. If so, return TRUE.
*
* It doesn't matter whether the tuple is alive or dead, we are checking
* to see if a tuple needs to be removed or frozen to avoid wraparound.
@@ -6091,7 +6098,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_freeze *xlrec = (xl_heap_freeze *) XLogRecGetData(record);
TransactionId cutoff_xid = xlrec->cutoff_xid;
- MultiXactId cutoff_multi = xlrec->cutoff_multi;
+ MultiXactId cutoff_multi = xlrec->cutoff_multi;
Buffer buffer;
Page page;
@@ -6361,7 +6368,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -6729,7 +6736,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
goto newt;
page = (Page) BufferGetPage(obuffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
@@ -6931,7 +6938,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -6962,7 +6969,7 @@ static void
heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
{
xl_heap_lock_updated *xlrec =
- (xl_heap_lock_updated *) XLogRecGetData(record);
+ (xl_heap_lock_updated *) XLogRecGetData(record);
Buffer buffer;
Page page;
OffsetNumber offnum;
@@ -6983,7 +6990,7 @@ heap_xlog_lock_updated(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
@@ -7033,7 +7040,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
- if (lsn <= PageGetLSN(page)) /* changes are applied */
+ if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
index a3aad3adf91..7105f0ab651 100644
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -129,7 +129,7 @@ typedef struct RewriteStateData
* determine tuple visibility */
TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff
* point */
- MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
+ MultiXactId rs_freeze_multi;/* MultiXactId that will be used as freeze
* cutoff point for multixacts */
MemoryContext rs_cxt; /* for hash tables and entries and tuples in
* them */
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index af64fe97e89..ffec6cbcc0c 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -292,7 +292,7 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
*/
if (DataChecksumsEnabled())
{
- Page heapPage = BufferGetPage(heapBuf);
+ Page heapPage = BufferGetPage(heapBuf);
/* caller is expected to set PD_ALL_VISIBLE first */
Assert(PageIsAllVisible(heapPage));
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 1d9cb7d1a06..f4077533bf5 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -532,8 +532,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
START_CRIT_SECTION();
/*
- * We don't do MarkBufferDirty here because we're about to initialise
- * the page, and nobody else can see it yet.
+ * We don't do MarkBufferDirty here because we're about to initialise the
+ * page, and nobody else can see it yet.
*/
/* XLOG stuff */
@@ -552,8 +552,8 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX
XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE, rdata);
/*
- * We don't do PageSetLSN here because we're about to initialise
- * the page, so no need.
+ * We don't do PageSetLSN here because we're about to initialise the
+ * page, so no need.
*/
}
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index 4aabdba3d9e..cb5867ee3e6 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -373,7 +373,7 @@ btree_xlog_split(bool onleft, bool isroot,
* Note that this code ensures that the items remaining on the
* left page are in the correct item number order, but it does not
* reproduce the physical order they would have had. Is this
- * worth changing? See also _bt_restore_page().
+ * worth changing? See also _bt_restore_page().
*/
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
@@ -606,18 +606,18 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* In what follows, we have to examine the previous state of the index
- * page, as well as the heap page(s) it points to. This is only valid if
+ * page, as well as the heap page(s) it points to. This is only valid if
* WAL replay has reached a consistent database state; which means that
- * the preceding check is not just an optimization, but is *necessary*.
- * We won't have let in any user sessions before we reach consistency.
+ * the preceding check is not just an optimization, but is *necessary*. We
+ * won't have let in any user sessions before we reach consistency.
*/
if (!reachedConsistency)
elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data");
/*
- * Get index page. If the DB is consistent, this should not fail, nor
+ * Get index page. If the DB is consistent, this should not fail, nor
* should any of the heap page fetches below. If one does, we return
- * InvalidTransactionId to cancel all HS transactions. That's probably
+ * InvalidTransactionId to cancel all HS transactions. That's probably
* overkill, but it's safe, and certainly better than panicking here.
*/
ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false);
@@ -701,10 +701,10 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec)
/*
* XXX If all heap tuples were LP_DEAD then we will be returning
- * InvalidTransactionId here, causing conflict for all HS
- * transactions. That should happen very rarely (reasoning please?). Also
- * note that caller can't tell the difference between this case and the
- * fast path exit above. May need to change that in future.
+ * InvalidTransactionId here, causing conflict for all HS transactions.
+ * That should happen very rarely (reasoning please?). Also note that
+ * caller can't tell the difference between this case and the fast path
+ * exit above. May need to change that in future.
*/
return latestRemovedXid;
}
@@ -721,7 +721,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
* If we have any conflict processing to do, it must happen before we
* update the page.
*
- * Btree delete records can conflict with standby queries. You might
+ * Btree delete records can conflict with standby queries. You might
* think that vacuum records would conflict as well, but we've handled
* that already. XLOG_HEAP2_CLEANUP_INFO records provide the highest xid
* cleaned by the vacuum of the heap and so we can resolve any conflicts
diff --git a/src/backend/access/rmgrdesc/clogdesc.c b/src/backend/access/rmgrdesc/clogdesc.c
index 92be7130382..2655f083bdc 100644
--- a/src/backend/access/rmgrdesc/clogdesc.c
+++ b/src/backend/access/rmgrdesc/clogdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* clogdesc.c
- * rmgr descriptor routines for access/transam/clog.c
+ * rmgr descriptor routines for access/transam/clog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/clogdesc.c
+ * src/backend/access/rmgrdesc/clogdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/dbasedesc.c b/src/backend/access/rmgrdesc/dbasedesc.c
index 55d435248f3..2354c5a5d83 100644
--- a/src/backend/access/rmgrdesc/dbasedesc.c
+++ b/src/backend/access/rmgrdesc/dbasedesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* dbasedesc.c
- * rmgr descriptor routines for commands/dbcommands.c
+ * rmgr descriptor routines for commands/dbcommands.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/dbasedesc.c
+ * src/backend/access/rmgrdesc/dbasedesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c
index 53bc482ec21..5400c8628fc 100644
--- a/src/backend/access/rmgrdesc/gindesc.c
+++ b/src/backend/access/rmgrdesc/gindesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gindesc.c
- * rmgr descriptor routines for access/transam/gin/ginxlog.c
+ * rmgr descriptor routines for access/transam/gin/ginxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gindesc.c
+ * src/backend/access/rmgrdesc/gindesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/gistdesc.c b/src/backend/access/rmgrdesc/gistdesc.c
index da81595fd41..c58c8a261ad 100644
--- a/src/backend/access/rmgrdesc/gistdesc.c
+++ b/src/backend/access/rmgrdesc/gistdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* gistdesc.c
- * rmgr descriptor routines for access/gist/gistxlog.c
+ * rmgr descriptor routines for access/gist/gistxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/gistdesc.c
+ * src/backend/access/rmgrdesc/gistdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c
index a50008478e2..6d4a278adc2 100644
--- a/src/backend/access/rmgrdesc/hashdesc.c
+++ b/src/backend/access/rmgrdesc/hashdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* hashdesc.c
- * rmgr descriptor routines for access/hash/hash.c
+ * rmgr descriptor routines for access/hash/hash.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/hashdesc.c
+ * src/backend/access/rmgrdesc/hashdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/heapdesc.c b/src/backend/access/rmgrdesc/heapdesc.c
index 272208417a3..bc8b98528d6 100644
--- a/src/backend/access/rmgrdesc/heapdesc.c
+++ b/src/backend/access/rmgrdesc/heapdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* heapdesc.c
- * rmgr descriptor routines for access/heap/heapam.c
+ * rmgr descriptor routines for access/heap/heapam.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/heapdesc.c
+ * src/backend/access/rmgrdesc/heapdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/mxactdesc.c b/src/backend/access/rmgrdesc/mxactdesc.c
index 3e6cba062d3..b2466a1e2b6 100644
--- a/src/backend/access/rmgrdesc/mxactdesc.c
+++ b/src/backend/access/rmgrdesc/mxactdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* mxactdesc.c
- * rmgr descriptor routines for access/transam/multixact.c
+ * rmgr descriptor routines for access/transam/multixact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/mxactdesc.c
+ * src/backend/access/rmgrdesc/mxactdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c
index 400e11b0506..b8f0d69df0c 100644
--- a/src/backend/access/rmgrdesc/nbtdesc.c
+++ b/src/backend/access/rmgrdesc/nbtdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* nbtdesc.c
- * rmgr descriptor routines for access/nbtree/nbtxlog.c
+ * rmgr descriptor routines for access/nbtree/nbtxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/nbtdesc.c
+ * src/backend/access/rmgrdesc/nbtdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/relmapdesc.c b/src/backend/access/rmgrdesc/relmapdesc.c
index 4c731c9b568..d3fe2674356 100644
--- a/src/backend/access/rmgrdesc/relmapdesc.c
+++ b/src/backend/access/rmgrdesc/relmapdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* relmapdesc.c
- * rmgr descriptor routines for utils/cache/relmapper.c
+ * rmgr descriptor routines for utils/cache/relmapper.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/relmapdesc.c
+ * src/backend/access/rmgrdesc/relmapdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/seqdesc.c b/src/backend/access/rmgrdesc/seqdesc.c
index 4d6a16adae1..90400e201a9 100644
--- a/src/backend/access/rmgrdesc/seqdesc.c
+++ b/src/backend/access/rmgrdesc/seqdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* seqdesc.c
- * rmgr descriptor routines for commands/sequence.c
+ * rmgr descriptor routines for commands/sequence.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/seqdesc.c
+ * src/backend/access/rmgrdesc/seqdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/smgrdesc.c b/src/backend/access/rmgrdesc/smgrdesc.c
index 176d8142a60..355153c613e 100644
--- a/src/backend/access/rmgrdesc/smgrdesc.c
+++ b/src/backend/access/rmgrdesc/smgrdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* smgrdesc.c
- * rmgr descriptor routines for catalog/storage.c
+ * rmgr descriptor routines for catalog/storage.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/smgrdesc.c
+ * src/backend/access/rmgrdesc/smgrdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c
index aca22600d42..fa71a4d637a 100644
--- a/src/backend/access/rmgrdesc/spgdesc.c
+++ b/src/backend/access/rmgrdesc/spgdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* spgdesc.c
- * rmgr descriptor routines for access/spgist/spgxlog.c
+ * rmgr descriptor routines for access/spgist/spgxlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/spgdesc.c
+ * src/backend/access/rmgrdesc/spgdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/standbydesc.c b/src/backend/access/rmgrdesc/standbydesc.c
index 5fb6f54b3b6..8e0c37d2f51 100644
--- a/src/backend/access/rmgrdesc/standbydesc.c
+++ b/src/backend/access/rmgrdesc/standbydesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* standbydesc.c
- * rmgr descriptor routines for storage/ipc/standby.c
+ * rmgr descriptor routines for storage/ipc/standby.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/standbydesc.c
+ * src/backend/access/rmgrdesc/standbydesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/tblspcdesc.c b/src/backend/access/rmgrdesc/tblspcdesc.c
index c2c88cd6937..76f7ca71f24 100644
--- a/src/backend/access/rmgrdesc/tblspcdesc.c
+++ b/src/backend/access/rmgrdesc/tblspcdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* tblspcdesc.c
- * rmgr descriptor routines for commands/tablespace.c
+ * rmgr descriptor routines for commands/tablespace.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/tblspcdesc.c
+ * src/backend/access/rmgrdesc/tblspcdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 11c6912753a..c9c7b4a2082 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* xactdesc.c
- * rmgr descriptor routines for access/transam/xact.c
+ * rmgr descriptor routines for access/transam/xact.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xactdesc.c
+ * src/backend/access/rmgrdesc/xactdesc.c
*
*-------------------------------------------------------------------------
*/
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index 4c68b6ae0a3..2bad52748a3 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -1,14 +1,14 @@
/*-------------------------------------------------------------------------
*
* xlogdesc.c
- * rmgr descriptor routines for access/transam/xlog.c
+ * rmgr descriptor routines for access/transam/xlog.c
*
* Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * src/backend/access/rmgrdesc/xlogdesc.c
+ * src/backend/access/rmgrdesc/xlogdesc.c
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
"tli %u; prev tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest multi %u in DB %u; "
"oldest running xid %u; %s",
- (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
+ (uint32) (checkpoint->redo >> 32), (uint32) checkpoint->redo,
checkpoint->ThisTimeLineID,
checkpoint->PrevTimeLineID,
checkpoint->fullPageWrites ? "true" : "false",
@@ -84,7 +84,8 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
}
else if (info == XLOG_HINT)
{
- BkpBlock *bkp = (BkpBlock *) rec;
+ BkpBlock *bkp = (BkpBlock *) rec;
+
appendStringInfo(buf, "page hint: %s block %u",
relpathperm(bkp->node, bkp->fork),
bkp->block);
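
A large share of the realignments in this patch touch the idiom for printing a 64-bit XLogRecPtr in the customary %X/%X form, as in the checkpoint description above: the value is split into its high and low 32-bit halves. A self-contained sketch, using plain C stand-ins for the PostgreSQL typedefs:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t	lsn = 0x123456789ABCDEF0ULL;	/* stand-in for XLogRecPtr */

		/* prints "12345678/9ABCDEF0" */
		printf("%X/%X\n", (unsigned int) (lsn >> 32), (unsigned int) lsn);
		return 0;
	}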
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index 8d50dcc6183..e430d9c1ace 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -30,7 +30,7 @@
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). So we can safely create prefixes up to
- * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
+ * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is
* already 4K, there is no safe prefix length when BLCKSZ is less than 8K;
* it is always possible to get "SPGiST inner tuple size exceeds maximum"
* if there are too many distinct next-byte values at a given place in the
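
Spelling out the arithmetic in the comment above: with the default 8 kB BLCKSZ the worst-case node array takes 256 * 16 = 4096 bytes, so the largest safe prefix is 8192 - 4096 - 100 = 3996 bytes; with a 4 kB BLCKSZ the bound is already negative, which is why no prefix length is safe below 8 kB. As a hypothetical macro (the name is illustrative, not the one spgtextproc.c defines):

	#define SPG_MAX_SAFE_PREFIX		(BLCKSZ - 256 * 16 - 100)	/* 3996 with 8 kB pages */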
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 69e85463996..a74678d967f 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -5,7 +5,7 @@
*
* The pg_multixact manager is a pg_clog-like manager that stores an array of
* MultiXactMember for each MultiXactId. It is a fundamental part of the
- * shared-row-lock implementation. Each MultiXactMember is comprised of a
+ * shared-row-lock implementation. Each MultiXactMember is comprised of a
* TransactionId and a set of flag bits. The name is a bit historical:
* originally, a MultiXactId consisted of more than one TransactionId (except
* in rare corner cases), hence "multi". Nowadays, however, it's perfectly
@@ -50,7 +50,7 @@
* The minimum value in each database is stored in pg_database, and the
* global minimum is part of pg_control. Any vacuum that is able to
* advance its database's minimum value also computes a new global minimum,
- * and uses this value to truncate older segments. When new multixactid
+ * and uses this value to truncate older segments. When new multixactid
* values are to be created, care is taken that the counter does not
* fall within the wraparound horizon considering the global minimum value.
*
@@ -108,7 +108,7 @@
* additional flag bits for each TransactionId. To do this without getting
* into alignment issues, we store four bytes of flags, and then the
* corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and
- * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
+ * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups
* per page. This wastes 12 bytes per page, but that's OK -- simplicity (and
* performance) trumps space efficiency here.
*
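
A compact restatement of the layout described above, as hypothetical macros (the real definitions in multixact.c use other names): four flag bytes plus four 4-byte TransactionIds make one 20-byte group, so an 8 kB page holds 8192 / 20 = 409 groups and leaves 8192 - 409 * 20 = 12 bytes unused.

	#define MXACT_FLAGBYTES_PER_GROUP	4
	#define MXACT_MEMBERS_PER_GROUP		4
	#define MXACT_GROUP_SIZE \
		(MXACT_FLAGBYTES_PER_GROUP + \
		 MXACT_MEMBERS_PER_GROUP * sizeof(TransactionId))	/* 20 bytes */
	#define MXACT_GROUPS_PER_PAGE		(BLCKSZ / MXACT_GROUP_SIZE)	/* 409 */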
@@ -177,17 +177,17 @@ typedef struct MultiXactStateData
MultiXactId lastTruncationPoint;
/*
- * oldest multixact that is still on disk. Anything older than this should
- * not be consulted.
+ * oldest multixact that is still on disk. Anything older than this
+ * should not be consulted.
*/
- MultiXactId oldestMultiXactId;
- Oid oldestMultiXactDB;
+ MultiXactId oldestMultiXactId;
+ Oid oldestMultiXactDB;
/* support for anti-wraparound measures */
- MultiXactId multiVacLimit;
- MultiXactId multiWarnLimit;
- MultiXactId multiStopLimit;
- MultiXactId multiWrapLimit;
+ MultiXactId multiVacLimit;
+ MultiXactId multiWarnLimit;
+ MultiXactId multiStopLimit;
+ MultiXactId multiWrapLimit;
/*
* Per-backend data starts here. We have two arrays stored in the area
@@ -252,7 +252,7 @@ static MultiXactId *OldestVisibleMXactId;
* so they will be uninteresting by the time our next transaction starts.
* (XXX not clear that this is correct --- other members of the MultiXact
* could hang around longer than we did. However, it's not clear what a
- * better policy for flushing old cache entries would be.) FIXME actually
+ * better policy for flushing old cache entries would be.) FIXME actually
* this is plain wrong now that multixact's may contain update Xids.
*
* We allocate the cache entries in a memory context that is deleted at
@@ -291,7 +291,7 @@ static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset,
static MultiXactId GetNewMultiXactId(int nmembers, MultiXactOffset *offset);
/* MultiXact cache management */
-static int mxactMemberComparator(const void *arg1, const void *arg2);
+static int mxactMemberComparator(const void *arg1, const void *arg2);
static MultiXactId mXactCacheGetBySet(int nmembers, MultiXactMember *members);
static int mXactCacheGetById(MultiXactId multi, MultiXactMember **members);
static void mXactCachePut(MultiXactId multi, int nmembers,
@@ -387,15 +387,15 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
multi, xid, mxstatus_to_string(status));
/*
- * Note: we don't allow for old multis here. The reason is that the
- * only caller of this function does a check that the multixact is
- * no longer running.
+ * Note: we don't allow for old multis here. The reason is that the only
+ * caller of this function does a check that the multixact is no longer
+ * running.
*/
nmembers = GetMultiXactIdMembers(multi, &members, false);
if (nmembers < 0)
{
- MultiXactMember member;
+ MultiXactMember member;
/*
* The MultiXactId is obsolete. This can only happen if all the
@@ -430,14 +430,14 @@ MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
}
/*
- * Determine which of the members of the MultiXactId are still of interest.
- * This is any running transaction, and also any transaction that grabbed
- * something stronger than just a lock and was committed. (An update that
- * aborted is of no interest here.)
+ * Determine which of the members of the MultiXactId are still of
+ * interest. This is any running transaction, and also any transaction
+ * that grabbed something stronger than just a lock and was committed.
+ * (An update that aborted is of no interest here.)
*
- * (Removing dead members is just an optimization, but a useful one.
- * Note we have the same race condition here as above: j could be 0 at the
- * end of the loop.)
+ * (Removing dead members is just an optimization, but a useful one. Note
+ * we have the same race condition here as above: j could be 0 at the end
+ * of the loop.)
*/
newMembers = (MultiXactMember *)
palloc(sizeof(MultiXactMember) * (nmembers + 1));
@@ -641,12 +641,12 @@ MultiXactIdSetOldestVisible(void)
/*
* ReadNextMultiXactId
- * Return the next MultiXactId to be assigned, but don't allocate it
+ * Return the next MultiXactId to be assigned, but don't allocate it
*/
MultiXactId
ReadNextMultiXactId(void)
{
- MultiXactId mxid;
+ MultiXactId mxid;
/* XXX we could presumably do this without a lock. */
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -722,9 +722,9 @@ CreateMultiXactId(int nmembers, MultiXactMember *members)
/*
* XXX Note: there's a lot of padding space in MultiXactMember. We could
- * find a more compact representation of this Xlog record -- perhaps all the
- * status flags in one XLogRecData, then all the xids in another one? Not
- * clear that it's worth the trouble though.
+ * find a more compact representation of this Xlog record -- perhaps all
+ * the status flags in one XLogRecData, then all the xids in another one?
+ * Not clear that it's worth the trouble though.
*/
rdata[0].data = (char *) (&xlrec);
rdata[0].len = SizeOfMultiXactCreate;
@@ -878,7 +878,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
/*----------
* Check to see if it's safe to assign another MultiXactId. This protects
- * against catastrophic data loss due to multixact wraparound. The basic
+ * against catastrophic data loss due to multixact wraparound. The basic
* rules are:
*
* If we're past multiVacLimit, start trying to force autovacuum cycles.
@@ -892,7 +892,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
{
/*
* For safety's sake, we release MultiXactGenLock while sending
- * signals, warnings, etc. This is not so much because we care about
+ * signals, warnings, etc. This is not so much because we care about
* preserving concurrency in this situation, as to avoid any
* possibility of deadlock while doing get_database_name(). First,
* copy all the shared values we'll need in this path.
@@ -923,15 +923,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database \"%s\"",
oldest_datname),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("database is not accepting commands that generate new MultiXactIds to avoid wraparound data loss in database with OID %u",
oldest_datoid),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
}
else if (!MultiXactIdPrecedes(result, multiWarnLimit))
{
@@ -943,15 +943,15 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
(errmsg("database \"%s\" must be vacuumed before %u more MultiXactIds are used",
oldest_datname,
multiWrapLimit - result),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
else
ereport(WARNING,
(errmsg("database with OID %u must be vacuumed before %u more MultiXactIds are used",
oldest_datoid,
multiWrapLimit - result),
- errhint("Execute a database-wide VACUUM in that database.\n"
- "You might also need to commit or roll back old prepared transactions.")));
+ errhint("Execute a database-wide VACUUM in that database.\n"
+ "You might also need to commit or roll back old prepared transactions.")));
}
/* Re-acquire lock and start over */
@@ -995,10 +995,10 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
*
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
- * or the first value on a segment-beginning page after this routine exits,
- * so anyone else looking at the variable must be prepared to deal with
- * either case. Similarly, nextOffset may be zero, but we won't use that
- * as the actual start offset of the next multixact.
+ * or the first value on a segment-beginning page after this routine
+ * exits, so anyone else looking at the variable must be prepared to deal
+ * with either case. Similarly, nextOffset may be zero, but we won't use
+ * that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
@@ -1066,18 +1066,18 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it should have already been frozen by vacuum. We've truncated
- * the on-disk structures anyway. Returning the wrong values could lead to
- * an incorrect visibility result. However, to support pg_upgrade we need
- * to allow an empty set to be returned regardless, if the caller is
+ * the on-disk structures anyway. Returning the wrong values could lead
+ * to an incorrect visibility result. However, to support pg_upgrade we
+ * need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
- * tuple are consistent with the pg_upgrade scenario). If the caller is
+ * tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. This raises
- * a hard error.
+ * seen, it implies undetected ID wraparound has occurred. This raises a
+ * hard error.
*
* Shared lock is enough here since we aren't modifying any global state.
* Acquire it just long enough to grab the current counter values. We may
@@ -1095,8 +1095,8 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
{
ereport(allow_old ? DEBUG1 : ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
- multi)));
+ errmsg("MultiXactId %u does no longer exist -- apparent wraparound",
+ multi)));
return -1;
}
@@ -1349,7 +1349,7 @@ mXactCacheGetById(MultiXactId multi, MultiXactMember **members)
memcpy(ptr, entry->members, size);
debug_elog3(DEBUG2, "CacheGet: found %s",
- mxid_to_string(multi, entry->nmembers, entry->members));
+ mxid_to_string(multi, entry->nmembers, entry->members));
return entry->nmembers;
}
}
@@ -1423,8 +1423,8 @@ mxstatus_to_string(MultiXactStatus status)
char *
mxid_to_string(MultiXactId multi, int nmembers, MultiXactMember *members)
{
- static char *str = NULL;
- StringInfoData buf;
+ static char *str = NULL;
+ StringInfoData buf;
int i;
if (str != NULL)
@@ -1721,7 +1721,7 @@ ZeroMultiXactMemberPage(int pageno, bool writeXlog)
*
* StartupXLOG has already established nextMXact/nextOffset by calling
* MultiXactSetNextMXact and/or MultiXactAdvanceNextMXact, and the oldestMulti
- * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
+ * info from pg_control and/or MultiXactAdvanceOldest. Note that we may
* already have replayed WAL data into the SLRU files.
*
* We don't need any locks here, really; the SLRU locks are taken
@@ -1883,17 +1883,17 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
void
SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
{
- MultiXactId multiVacLimit;
- MultiXactId multiWarnLimit;
- MultiXactId multiStopLimit;
- MultiXactId multiWrapLimit;
- MultiXactId curMulti;
+ MultiXactId multiVacLimit;
+ MultiXactId multiWarnLimit;
+ MultiXactId multiStopLimit;
+ MultiXactId multiWrapLimit;
+ MultiXactId curMulti;
Assert(MultiXactIdIsValid(oldest_datminmxid));
/*
* The place where we actually get into deep trouble is halfway around
- * from the oldest potentially-existing XID/multi. (This calculation is
+ * from the oldest potentially-existing XID/multi. (This calculation is
* probably off by one or two counts for Xids, because the special XIDs
* reduce the size of the loop a little bit. But we throw in plenty of
* slop below, so it doesn't matter.)
@@ -1911,11 +1911,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
multiStopLimit -= FirstMultiXactId;
/*
- * We'll start complaining loudly when we get within 10M multis of the stop
- * point. This is kind of arbitrary, but if you let your gas gauge get
- * down to 1% of full, would you be looking for the next gas station? We
- * need to be fairly liberal about this number because there are lots of
- * scenarios where most transactions are done by automatic clients that
+ * We'll start complaining loudly when we get within 10M multis of the
+ * stop point. This is kind of arbitrary, but if you let your gas gauge
+ * get down to 1% of full, would you be looking for the next gas station?
+ * We need to be fairly liberal about this number because there are lots
+ * of scenarios where most transactions are done by automatic clients that
* won't pay attention to warnings. (No, we're not gonna make this
* configurable. If you know enough to configure it, you know enough to
* not get in this kind of trouble in the first place.)
@@ -1925,8 +1925,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
multiWarnLimit -= FirstMultiXactId;
/*
- * We'll start trying to force autovacuums when oldest_datminmxid gets
- * to be more than 200 million transactions old.
+ * We'll start trying to force autovacuums when oldest_datminmxid gets to
+ * be more than 200 million transactions old.
*/
multiVacLimit = oldest_datminmxid + 200000000;
if (multiVacLimit < FirstMultiXactId)
@@ -1945,8 +1945,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/* Log the info */
ereport(DEBUG1,
- (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
- multiWrapLimit, oldest_datoid)));
+ (errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
+ multiWrapLimit, oldest_datoid)));
/*
* If past the autovacuum force point, immediately signal an autovac
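
Taken together, the hunks above rewrap the comments documenting how SetMultiXactIdLimit derives its thresholds from oldest_datminmxid. A hedged recap of that arithmetic (the 10M and 200M figures come from the comments themselves; the exact slop on the stop limit is an assumption here):

	multiWrapLimit = oldest_datminmxid + (MaxMultiXactId >> 1);	/* halfway around */
	multiStopLimit = multiWrapLimit - 100;			/* refuse new multis just short of wrap */
	multiWarnLimit = multiStopLimit - 10000000;		/* start complaining 10M early */
	multiVacLimit = oldest_datminmxid + 200000000;	/* force autovacuum at 200M */
	/* each limit is then nudged past FirstMultiXactId if it wrapped around */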
@@ -2127,9 +2127,9 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
MultiXactId
GetOldestMultiXactId(void)
{
- MultiXactId oldestMXact;
- MultiXactId nextMXact;
- int i;
+ MultiXactId oldestMXact;
+ MultiXactId nextMXact;
+ int i;
/*
* This is the oldest valid value among all the OldestMemberMXactId[] and
@@ -2168,17 +2168,17 @@ GetOldestMultiXactId(void)
typedef struct mxtruncinfo
{
- int earliestExistingPage;
+ int earliestExistingPage;
} mxtruncinfo;
/*
* SlruScanDirectory callback
- * This callback determines the earliest existing page number.
+ * This callback determines the earliest existing page number.
*/
static bool
SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
{
- mxtruncinfo *trunc = (mxtruncinfo *) data;
+ mxtruncinfo *trunc = (mxtruncinfo *) data;
if (trunc->earliestExistingPage == -1 ||
ctl->PagePrecedes(segpage, trunc->earliestExistingPage))
@@ -2186,7 +2186,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
trunc->earliestExistingPage = segpage;
}
- return false; /* keep going */
+ return false; /* keep going */
}
/*
@@ -2200,16 +2200,16 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
void
TruncateMultiXact(MultiXactId oldestMXact)
{
- MultiXactOffset oldestOffset;
- mxtruncinfo trunc;
- MultiXactId earliest;
+ MultiXactOffset oldestOffset;
+ mxtruncinfo trunc;
+ MultiXactId earliest;
/*
* Note we can't just plow ahead with the truncation; it's possible that
* there are no segments to truncate, which is a problem because we are
- * going to attempt to read the offsets page to determine where to truncate
- * the members SLRU. So we first scan the directory to determine the
- * earliest offsets page number that we can read without error.
+ * going to attempt to read the offsets page to determine where to
+ * truncate the members SLRU. So we first scan the directory to determine
+ * the earliest offsets page number that we can read without error.
*/
trunc.earliestExistingPage = -1;
SlruScanDirectory(MultiXactOffsetCtl, SlruScanDirCbFindEarliest, &trunc);
@@ -2220,9 +2220,9 @@ TruncateMultiXact(MultiXactId oldestMXact)
return;
/*
- * First, compute the safe truncation point for MultiXactMember.
- * This is the starting offset of the multixact we were passed
- * as MultiXactOffset cutoff.
+ * First, compute the safe truncation point for MultiXactMember. This is
+ * the starting offset of the multixact we were passed as MultiXactOffset
+ * cutoff.
*/
{
int pageno;
@@ -2380,7 +2380,7 @@ multixact_redo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
- (xl_multixact_create *) XLogRecGetData(record);
+ (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
@@ -2427,12 +2427,12 @@ pg_get_multixact_members(PG_FUNCTION_ARGS)
{
typedef struct
{
- MultiXactMember *members;
- int nmembers;
- int iter;
+ MultiXactMember *members;
+ int nmembers;
+ int iter;
} mxact;
- MultiXactId mxid = PG_GETARG_UINT32(0);
- mxact *multi;
+ MultiXactId mxid = PG_GETARG_UINT32(0);
+ mxact *multi;
FuncCallContext *funccxt;
if (mxid < FirstMultiXactId)
diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c
index 921da62c22a..7bb523a4fb4 100644
--- a/src/backend/access/transam/timeline.c
+++ b/src/backend/access/transam/timeline.c
@@ -15,7 +15,7 @@
* <parentTLI> <switchpoint> <reason>
*
* parentTLI ID of the parent timeline
- * switchpoint XLogRecPtr of the WAL position where the switch happened
+ * switchpoint XLogRecPtr of the WAL position where the switch happened
* reason human-readable explanation of why the timeline was changed
*
* The fields are separated by tabs. Lines beginning with # are comments, and
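
For illustration, a hypothetical history file for timeline 2 whose parent is timeline 1 would contain a single tab-separated entry in the format above (the switchpoint and reason shown here are made up):

	1	0/16B3F80	no recovery target specified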
@@ -49,7 +49,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
{
char path[MAXPGPATH];
char histfname[MAXFNAMELEN];
- TimeLineID tli;
+ TimeLineID tli;
for (tli = begin; tli < end; tli++)
{
@@ -179,8 +179,8 @@ readTimeLineHistory(TimeLineID targetTLI)
errhint("Timeline IDs must be less than child timeline's ID.")));
/*
- * Create one more entry for the "tip" of the timeline, which has no
- * entry in the history file.
+ * Create one more entry for the "tip" of the timeline, which has no entry
+ * in the history file.
*/
entry = (TimeLineHistoryEntry *) palloc(sizeof(TimeLineHistoryEntry));
entry->tli = targetTLI;
@@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
/*
* Prefer link() to rename() here just to be really sure that we don't
- * overwrite an existing file. However, there shouldn't be one, so
+ * overwrite an existing file. However, there shouldn't be one, so
* rename() is an acceptable substitute except for the truly paranoid.
*/
#if HAVE_WORKING_LINK
@@ -530,7 +530,7 @@ writeTimeLineHistoryFile(TimeLineID tli, char *content, int size)
bool
tliInHistory(TimeLineID tli, List *expectedTLEs)
{
- ListCell *cell;
+ ListCell *cell;
foreach(cell, expectedTLEs)
{
@@ -548,11 +548,12 @@ tliInHistory(TimeLineID tli, List *expectedTLEs)
TimeLineID
tliOfPointInHistory(XLogRecPtr ptr, List *history)
{
- ListCell *cell;
+ ListCell *cell;
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
+
if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
(XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
{
@@ -563,7 +564,7 @@ tliOfPointInHistory(XLogRecPtr ptr, List *history)
/* shouldn't happen. */
elog(ERROR, "timeline history was not contiguous");
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
}
/*
@@ -579,7 +580,7 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
if (nextTLI)
*nextTLI = 0;
- foreach (cell, history)
+ foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
@@ -592,5 +593,5 @@ tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
ereport(ERROR,
(errmsg("requested timeline %u is not in this server's history",
tli)));
- return InvalidXLogRecPtr; /* keep compiler quiet */
+ return InvalidXLogRecPtr; /* keep compiler quiet */
}
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index e62286f9f98..31e868d4bc7 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1024,8 +1024,8 @@ RecordTransactionCommit(void)
*
* It's safe to change the delayChkpt flag of our own backend without
* holding the ProcArrayLock, since we're the only one modifying it.
- * This makes checkpoint's determination of which xacts are delayChkpt a
- * bit fuzzy, but it doesn't matter.
+ * This makes checkpoint's determination of which xacts are delayChkpt
+ * a bit fuzzy, but it doesn't matter.
*/
START_CRIT_SECTION();
MyPgXact->delayChkpt = true;
@@ -4683,12 +4683,11 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
* from the template database, and then commit the transaction. If we
* crash after all the files have been copied but before the commit, you
* have files in the data directory without an entry in pg_database. To
- * minimize the window
- * for that, we use ForceSyncCommit() to rush the commit record to disk as
- * quick as possible. We have the same window during recovery, and forcing
- * an XLogFlush() (which updates minRecoveryPoint during recovery) helps
- * to reduce that problem window, for any user that requested
- * ForceSyncCommit().
+ * minimize the window for that, we use ForceSyncCommit() to rush the
+ * commit record to disk as quick as possible. We have the same window
+ * during recovery, and forcing an XLogFlush() (which updates
+ * minRecoveryPoint during recovery) helps to reduce that problem window,
+ * for any user that requested ForceSyncCommit().
*/
if (XactCompletionForceSyncCommit(xinfo))
XLogFlush(lsn);
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 76b52fb1dcb..dcd33c931c0 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -200,14 +200,14 @@ static int LocalXLogInsertAllowed = -1;
* will switch to using offline XLOG archives as soon as we reach the end of
* WAL in pg_xlog.
*/
-bool ArchiveRecoveryRequested = false;
-bool InArchiveRecovery = false;
+bool ArchiveRecoveryRequested = false;
+bool InArchiveRecovery = false;
/* Was the last xlog file restored from archive, or local? */
static bool restoredFromArchive = false;
/* options taken from recovery.conf for archive recovery */
-char *recoveryRestoreCommand = NULL;
+char *recoveryRestoreCommand = NULL;
static char *recoveryEndCommand = NULL;
static char *archiveCleanupCommand = NULL;
static RecoveryTargetType recoveryTarget = RECOVERY_TARGET_UNSET;
@@ -223,7 +223,7 @@ static char *PrimaryConnInfo = NULL;
static char *TriggerFile = NULL;
/* are we currently in standby mode? */
-bool StandbyMode = false;
+bool StandbyMode = false;
/* whether request for fast promotion has been made yet */
static bool fast_promote = false;
@@ -403,10 +403,11 @@ typedef struct XLogCtlData
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
- XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
+ XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG
+ * segment */
/* Fake LSN counter, for unlogged relations. Protected by ulsn_lck */
- XLogRecPtr unloggedLSN;
+ XLogRecPtr unloggedLSN;
slock_t ulsn_lck;
/* Protected by WALWriteLock: */
@@ -548,14 +549,14 @@ static XLogwrtResult LogwrtResult = {0, 0};
*/
typedef enum
{
- XLOG_FROM_ANY = 0, /* request to read WAL from any source */
- XLOG_FROM_ARCHIVE, /* restored using restore_command */
- XLOG_FROM_PG_XLOG, /* existing file in pg_xlog */
- XLOG_FROM_STREAM, /* streamed from master */
+ XLOG_FROM_ANY = 0, /* request to read WAL from any source */
+ XLOG_FROM_ARCHIVE, /* restored using restore_command */
+ XLOG_FROM_PG_XLOG, /* existing file in pg_xlog */
+ XLOG_FROM_STREAM, /* streamed from master */
} XLogSource;
/* human-readable names for XLogSources, for debugging output */
-static const char *xlogSourceNames[] = { "any", "archive", "pg_xlog", "stream" };
+static const char *xlogSourceNames[] = {"any", "archive", "pg_xlog", "stream"};
/*
* openLogFile is -1 or a kernel FD for an open log file segment.
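
The xlogSourceNames array above pairs one debug label with each XLogSource value, so a source change can be reported as, for example (illustrative):

	elog(DEBUG2, "switched WAL source to %s", xlogSourceNames[currentSource]);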
@@ -589,7 +590,7 @@ static XLogSource readSource = 0; /* XLOG_FROM_* code */
* next.
*/
static XLogSource currentSource = 0; /* XLOG_FROM_* code */
-static bool lastSourceFailed = false;
+static bool lastSourceFailed = false;
typedef struct XLogPageReadPrivate
{
@@ -607,7 +608,7 @@ typedef struct XLogPageReadPrivate
* XLogReceiptSource tracks where we last successfully read some WAL.)
*/
static TimestampTz XLogReceiptTime = 0;
-static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
+static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr; /* start of last record read */
@@ -649,7 +650,7 @@ static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo);
static bool XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
XLogRecPtr *lsn, BkpBlock *bkpb);
static Buffer RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb,
- char *blk, bool get_cleanup_lock, bool keep_buffer);
+ char *blk, bool get_cleanup_lock, bool keep_buffer);
static bool AdvanceXLInsertBuffer(bool new_segment);
static bool XLogCheckpointNeeded(XLogSegNo new_segno);
static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch);
@@ -658,7 +659,7 @@ static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
bool use_lock);
static int XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli,
int source, bool notexistOk);
-static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
+static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
static int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
TimeLineID *readTLI);
@@ -823,7 +824,7 @@ begin:;
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
if (doPageWrites && XLogCheckBuffer(rdt, true,
- &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
+ &(dtbuf_lsn[i]), &(dtbuf_xlg[i])))
{
dtbuf_bkp[i] = true;
rdt->data = NULL;
@@ -1251,10 +1252,10 @@ XLogCheckBuffer(XLogRecData *rdata, bool holdsExclusiveLock,
page = BufferGetPage(rdata->buffer);
/*
- * We assume page LSN is first data on *every* page that can be passed
- * to XLogInsert, whether it has the standard page layout or not. We
- * don't need to take the buffer header lock for PageGetLSN if we hold
- * an exclusive lock on the page and/or the relation.
+ * We assume page LSN is first data on *every* page that can be passed to
+ * XLogInsert, whether it has the standard page layout or not. We don't
+ * need to take the buffer header lock for PageGetLSN if we hold an
+ * exclusive lock on the page and/or the relation.
*/
if (holdsExclusiveLock)
*lsn = PageGetLSN(page);
@@ -1545,7 +1546,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
*/
if (LogwrtResult.Write >= XLogCtl->xlblocks[curridx])
elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
- (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
+ (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
(uint32) (XLogCtl->xlblocks[curridx] >> 32),
(uint32) XLogCtl->xlblocks[curridx]);
@@ -1611,9 +1612,9 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
if (lseek(openLogFile, (off_t) startoffset, SEEK_SET) < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not seek in log file %s to offset %u: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo),
- startoffset)));
+ errmsg("could not seek in log file %s to offset %u: %m",
+ XLogFileNameP(ThisTimeLineID, openLogSegNo),
+ startoffset)));
openLogOff = startoffset;
}
@@ -1858,7 +1859,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
if (!force && newMinRecoveryPoint < lsn)
elog(WARNING,
"xlog min recovery request %X/%X is past current point %X/%X",
- (uint32) (lsn >> 32) , (uint32) lsn,
+ (uint32) (lsn >> 32), (uint32) lsn,
(uint32) (newMinRecoveryPoint >> 32),
(uint32) newMinRecoveryPoint);
@@ -1872,10 +1873,10 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
minRecoveryPointTLI = newMinRecoveryPointTLI;
ereport(DEBUG2,
- (errmsg("updated min recovery point to %X/%X on timeline %u",
- (uint32) (minRecoveryPoint >> 32),
- (uint32) minRecoveryPoint,
- newMinRecoveryPointTLI)));
+ (errmsg("updated min recovery point to %X/%X on timeline %u",
+ (uint32) (minRecoveryPoint >> 32),
+ (uint32) minRecoveryPoint,
+ newMinRecoveryPointTLI)));
}
}
LWLockRelease(ControlFileLock);
@@ -1915,7 +1916,7 @@ XLogFlush(XLogRecPtr record)
elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
(uint32) (record >> 32), (uint32) record,
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
#endif
START_CRIT_SECTION();
@@ -1979,8 +1980,8 @@ XLogFlush(XLogRecPtr record)
/*
* Sleep before flush! By adding a delay here, we may give further
* backends the opportunity to join the backlog of group commit
- * followers; this can significantly improve transaction throughput, at
- * the risk of increasing transaction latency.
+ * followers; this can significantly improve transaction throughput,
+ * at the risk of increasing transaction latency.
*
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active transactions.
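
In essence, the delay described above amounts to the following (a sketch consistent with the surrounding comments; CommitDelay and CommitSiblings correspond to the commit_delay and commit_siblings settings):

	if (CommitDelay > 0 && enableFsync &&
		MinimumActiveBackends(CommitSiblings))
		pg_usleep(CommitDelay);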
@@ -1995,7 +1996,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace == 0) /* buffer is full */
+ if (freespace == 0) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@@ -2048,7 +2049,7 @@ XLogFlush(XLogRecPtr record)
elog(ERROR,
"xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
(uint32) (record >> 32), (uint32) record,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
}
/*
@@ -2127,7 +2128,7 @@ XLogBackgroundFlush(void)
elog(LOG, "xlog bg flush request %X/%X; write %X/%X; flush %X/%X",
(uint32) (WriteRqstPtr >> 32), (uint32) WriteRqstPtr,
(uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
- (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
+ (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
#endif
START_CRIT_SECTION();
@@ -2379,7 +2380,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
if (fd < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", path)));
+ errmsg("could not open file \"%s\": %m", path)));
elog(DEBUG2, "done creating and filling new WAL file");
@@ -2719,7 +2720,7 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
* want to read.
*
* If we haven't read the timeline history file yet, read it now, so that
- * we know which TLIs to scan. We don't save the list in expectedTLEs,
+ * we know which TLIs to scan. We don't save the list in expectedTLEs,
* however, unless we actually find a valid segment. That way if there is
* neither a timeline history file nor a WAL segment in the archive, and
* streaming replication is set up, we'll read the timeline history file
@@ -3215,8 +3216,8 @@ RestoreBackupBlockContents(XLogRecPtr lsn, BkpBlock bkpb, char *blk,
}
/*
- * The checksum value on this page is currently invalid. We don't
- * need to reset it here since it will be set before being written.
+ * The checksum value on this page is currently invalid. We don't need to
+ * reset it here since it will be set before being written.
*/
PageSetLSN(page, lsn);
@@ -3258,7 +3259,7 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
for (;;)
{
- char *errormsg;
+ char *errormsg;
record = XLogReadRecord(xlogreader, RecPtr, &errormsg);
ReadRecPtr = xlogreader->ReadRecPtr;
@@ -3272,34 +3273,35 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
}
/*
- * We only end up here without a message when XLogPageRead() failed
- * - in that case we already logged something.
- * In StandbyMode that only happens if we have been triggered, so
- * we shouldn't loop anymore in that case.
+ * We only end up here without a message when XLogPageRead()
+ * failed - in that case we already logged something. In
+ * StandbyMode that only happens if we have been triggered, so we
+ * shouldn't loop anymore in that case.
*/
if (errormsg)
ereport(emode_for_corrupt_record(emode,
RecPtr ? RecPtr : EndRecPtr),
- (errmsg_internal("%s", errormsg) /* already translated */));
+ (errmsg_internal("%s", errormsg) /* already translated */ ));
}
+
/*
* Check page TLI is one of the expected values.
*/
else if (!tliInHistory(xlogreader->latestPageTLI, expectedTLEs))
{
char fname[MAXFNAMELEN];
- XLogSegNo segno;
- int32 offset;
+ XLogSegNo segno;
+ int32 offset;
XLByteToSeg(xlogreader->latestPagePtr, segno);
offset = xlogreader->latestPagePtr % XLogSegSize;
XLogFileName(fname, xlogreader->readPageTLI, segno);
ereport(emode_for_corrupt_record(emode,
RecPtr ? RecPtr : EndRecPtr),
- (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
- xlogreader->latestPageTLI,
- fname,
- offset)));
+ (errmsg("unexpected timeline ID %u in log segment %s, offset %u",
+ xlogreader->latestPageTLI,
+ fname,
+ offset)));
record = NULL;
}
@@ -3314,10 +3316,10 @@ ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
lastSourceFailed = true;
/*
- * If archive recovery was requested, but we were still doing crash
- * recovery, switch to archive recovery and retry using the offline
- * archive. We have now replayed all the valid WAL in pg_xlog, so
- * we are presumably now consistent.
+ * If archive recovery was requested, but we were still doing
+ * crash recovery, switch to archive recovery and retry using the
+ * offline archive. We have now replayed all the valid WAL in
+ * pg_xlog, so we are presumably now consistent.
*
* We require that there's at least some valid WAL present in
* pg_xlog, however (!fetch_ckpt). We could recover using the WAL
@@ -3401,11 +3403,11 @@ rescanLatestTimeLine(void)
newExpectedTLEs = readTimeLineHistory(newtarget);
/*
- * If the current timeline is not part of the history of the new
- * timeline, we cannot proceed to it.
+ * If the current timeline is not part of the history of the new timeline,
+ * we cannot proceed to it.
*/
found = false;
- foreach (cell, newExpectedTLEs)
+ foreach(cell, newExpectedTLEs)
{
currentTle = (TimeLineHistoryEntry *) lfirst(cell);
@@ -3812,7 +3814,7 @@ DataChecksumsEnabled(void)
XLogRecPtr
GetFakeLSNForUnloggedRel(void)
{
- XLogRecPtr nextUnloggedLSN;
+ XLogRecPtr nextUnloggedLSN;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
@@ -4991,15 +4993,15 @@ StartupXLOG(void)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating an XLog reading processor")));
+ errdetail("Failed while allocating an XLog reading processor")));
xlogreader->system_identifier = ControlFile->system_identifier;
if (read_backup_label(&checkPointLoc, &backupEndRequired,
&backupFromStandby))
{
/*
- * Archive recovery was requested, and thanks to the backup label file,
- * we know how far we need to replay to reach consistency. Enter
+ * Archive recovery was requested, and thanks to the backup label
+ * file, we know how far we need to replay to reach consistency. Enter
* archive recovery directly.
*/
InArchiveRecovery = true;
@@ -5017,7 +5019,7 @@ StartupXLOG(void)
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
@@ -5049,8 +5051,8 @@ StartupXLOG(void)
/*
* It's possible that archive recovery was requested, but we don't
* know how far we need to replay the WAL before we reach consistency.
- * This can happen for example if a base backup is taken from a running
- * server using an atomic filesystem snapshot, without calling
+ * This can happen for example if a base backup is taken from a
+ * running server using an atomic filesystem snapshot, without calling
* pg_start/stop_backup. Or if you just kill a running master server
* and put it into archive recovery by creating a recovery.conf file.
*
@@ -5058,8 +5060,8 @@ StartupXLOG(void)
* replaying all the WAL present in pg_xlog, and only enter archive
* recovery after that.
*
- * But usually we already know how far we need to replay the WAL (up to
- * minRecoveryPoint, up to backupEndPoint, or until we see an
+ * But usually we already know how far we need to replay the WAL (up
+ * to minRecoveryPoint, up to backupEndPoint, or until we see an
* end-of-backup record), and we can enter archive recovery directly.
*/
if (ArchiveRecoveryRequested &&
@@ -5084,7 +5086,7 @@ StartupXLOG(void)
{
ereport(DEBUG1,
(errmsg("checkpoint record is at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
}
else if (StandbyMode)
{
@@ -5103,7 +5105,7 @@ StartupXLOG(void)
{
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
+ (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
@@ -5119,15 +5121,16 @@ StartupXLOG(void)
* timeline in the history of the requested timeline, we cannot proceed:
* the backup is not part of the history of the requested timeline.
*/
- Assert(expectedTLEs); /* was initialized by reading checkpoint record */
+ Assert(expectedTLEs); /* was initialized by reading checkpoint
+ * record */
if (tliOfPointInHistory(checkPointLoc, expectedTLEs) !=
- checkPoint.ThisTimeLineID)
+ checkPoint.ThisTimeLineID)
{
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
/*
- * tliSwitchPoint will throw an error if the checkpoint's timeline
- * is not in expectedTLEs at all.
+ * tliSwitchPoint will throw an error if the checkpoint's timeline is
+ * not in expectedTLEs at all.
*/
switchpoint = tliSwitchPoint(ControlFile->checkPointCopy.ThisTimeLineID, expectedTLEs, NULL);
ereport(FATAL,
@@ -5146,8 +5149,8 @@ StartupXLOG(void)
* history, too.
*/
if (!XLogRecPtrIsInvalid(ControlFile->minRecoveryPoint) &&
- tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
- ControlFile->minRecoveryPointTLI)
+ tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
+ ControlFile->minRecoveryPointTLI)
ereport(FATAL,
(errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
recoveryTargetTLI,
@@ -5159,7 +5162,7 @@ StartupXLOG(void)
ereport(DEBUG1,
(errmsg("redo record is at %X/%X; shutdown %s",
- (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
+ (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg("next transaction ID: %u/%u; next OID: %u",
@@ -5206,16 +5209,16 @@ StartupXLOG(void)
ThisTimeLineID = checkPoint.ThisTimeLineID;
/*
- * Copy any missing timeline history files between 'now' and the
- * recovery target timeline from archive to pg_xlog. While we don't need
- * those files ourselves - the history file of the recovery target
- * timeline covers all the previous timelines in the history too - a
- * cascading standby server might be interested in them. Or, if you
- * archive the WAL from this server to a different archive than the
- * master, it'd be good for all the history files to get archived there
- * after failover, so that you can use one of the old timelines as a
- * PITR target. Timeline history files are small, so it's better to copy
- * them unnecessarily than not copy them and regret later.
+ * Copy any missing timeline history files between 'now' and the recovery
+ * target timeline from archive to pg_xlog. While we don't need those
+ * files ourselves - the history file of the recovery target timeline
+ * covers all the previous timelines in the history too - a cascading
+ * standby server might be interested in them. Or, if you archive the WAL
+ * from this server to a different archive than the master, it'd be good
+ * for all the history files to get archived there after failover, so that
+ * you can use one of the old timelines as a PITR target. Timeline history
+ * files are small, so it's better to copy them unnecessarily than not
+ * copy them and regret later.
*/
restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
@@ -5271,10 +5274,10 @@ StartupXLOG(void)
"automatic recovery in progress")));
if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID)
ereport(LOG,
- (errmsg("crash recovery starts in timeline %u "
- "and has target timeline %u",
- ControlFile->checkPointCopy.ThisTimeLineID,
- recoveryTargetTLI)));
+ (errmsg("crash recovery starts in timeline %u "
+ "and has target timeline %u",
+ ControlFile->checkPointCopy.ThisTimeLineID,
+ recoveryTargetTLI)));
ControlFile->state = DB_IN_CRASH_RECOVERY;
}
ControlFile->prevCheckPoint = ControlFile->checkPoint;
@@ -5509,14 +5512,15 @@ StartupXLOG(void)
ereport(LOG,
(errmsg("redo starts at %X/%X",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
/*
* main redo apply loop
*/
do
{
- bool switchedTLI = false;
+ bool switchedTLI = false;
+
#ifdef WAL_DEBUG
if (XLOG_DEBUG ||
(rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
@@ -5526,8 +5530,8 @@ StartupXLOG(void)
initStringInfo(&buf);
appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
- (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
+ (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
xlog_outrec(&buf, record);
appendStringInfo(&buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(&buf,
@@ -5598,13 +5602,13 @@ StartupXLOG(void)
}
/*
- * Before replaying this record, check if this record
- * causes the current timeline to change. The record is
- * already considered to be part of the new timeline,
- * so we update ThisTimeLineID before replaying it.
- * That's important so that replayEndTLI, which is
- * recorded as the minimum recovery point's TLI if
- * recovery stops after this record, is set correctly.
+ * Before replaying this record, check if this record causes
+ * the current timeline to change. The record is already
+ * considered to be part of the new timeline, so we update
+ * ThisTimeLineID before replaying it. That's important so
+ * that replayEndTLI, which is recorded as the minimum
+ * recovery point's TLI if recovery stops after this record,
+ * is set correctly.
*/
if (record->xl_rmid == RM_XLOG_ID)
{
@@ -5622,7 +5626,7 @@ StartupXLOG(void)
}
else if (info == XLOG_END_OF_RECOVERY)
{
- xl_end_of_recovery xlrec;
+ xl_end_of_recovery xlrec;
memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
newTLI = xlrec.ThisTimeLineID;
@@ -5699,7 +5703,7 @@ StartupXLOG(void)
ereport(LOG,
(errmsg("redo done at %X/%X",
- (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
+ (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
xtime = GetLatestXTime();
if (xtime)
ereport(LOG,
@@ -5804,7 +5808,7 @@ StartupXLOG(void)
PrevTimeLineID = ThisTimeLineID;
if (ArchiveRecoveryRequested)
{
- char reason[200];
+ char reason[200];
Assert(InArchiveRecovery);
@@ -5952,8 +5956,9 @@ StartupXLOG(void)
* allows some extra error checking in xlog_redo.
*
* In fast promotion, only create a lightweight end-of-recovery record
- * instead of a full checkpoint. A checkpoint is requested later, after
- * we're fully out of recovery mode and already accepting queries.
+ * instead of a full checkpoint. A checkpoint is requested later,
+ * after we're fully out of recovery mode and already accepting
+ * queries.
*/
if (bgwriterLaunched)
{
@@ -5972,14 +5977,15 @@ StartupXLOG(void)
fast_promoted = true;
/*
- * Insert a special WAL record to mark the end of recovery,
- * since we aren't doing a checkpoint. That means that the
- * checkpointer process may likely be in the middle of a
- * time-smoothed restartpoint and could continue to be for
- * minutes after this. That sounds strange, but the effect
- * is roughly the same and it would be stranger to try to
- * come out of the restartpoint and then checkpoint.
- * We request a checkpoint later anyway, just for safety.
+ * Insert a special WAL record to mark the end of
+ * recovery, since we aren't doing a checkpoint. That
+ * means that the checkpointer process may likely be in
+ * the middle of a time-smoothed restartpoint and could
+ * continue to be for minutes after this. That sounds
+ * strange, but the effect is roughly the same and it
+ * would be stranger to try to come out of the
+ * restartpoint and then checkpoint. We request a
+ * checkpoint later anyway, just for safety.
*/
CreateEndOfRecoveryRecord();
}
@@ -5987,8 +5993,8 @@ StartupXLOG(void)
if (!fast_promoted)
RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
- CHECKPOINT_IMMEDIATE |
- CHECKPOINT_WAIT);
+ CHECKPOINT_IMMEDIATE |
+ CHECKPOINT_WAIT);
}
else
CreateCheckPoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
@@ -6092,8 +6098,8 @@ StartupXLOG(void)
}
/*
- * If there were cascading standby servers connected to us, nudge any
- * wal sender processes to notice that we've been promoted.
+ * If there were cascading standby servers connected to us, nudge any wal
+ * sender processes to notice that we've been promoted.
*/
WalSndWakeup();
@@ -6151,9 +6157,9 @@ CheckRecoveryConsistency(void)
}
/*
- * Have we passed our safe starting point? Note that minRecoveryPoint
- * is known to be incorrectly set if ControlFile->backupEndRequired,
- * until the XLOG_BACKUP_RECORD arrives to advise us of the correct
+ * Have we passed our safe starting point? Note that minRecoveryPoint is
+ * known to be incorrectly set if ControlFile->backupEndRequired, until
+ * the XLOG_BACKUP_RECORD arrives to advise us of the correct
* minRecoveryPoint. All we know prior to that is that we're not
* consistent yet.
*/
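The test this comment introduces has roughly the following shape (a simplified reading of CheckRecoveryConsistency, not the verbatim code; the backup-start-point check is elided):

	if (!reachedConsistency &&
		!ControlFile->backupEndRequired &&
		minRecoveryPoint <= XLogCtl->lastReplayedEndRecPtr)
	{
		reachedConsistency = true;	/* hot standby may now accept connections */
	}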
@@ -6770,7 +6776,7 @@ CreateCheckPoint(int flags)
uint32 freespace;
XLogSegNo _logSegNo;
VirtualTransactionId *vxids;
- int nvxids;
+ int nvxids;
/*
* An end-of-recovery checkpoint is really a shutdown checkpoint, just
@@ -6946,13 +6952,13 @@ CreateCheckPoint(int flags)
TRACE_POSTGRESQL_CHECKPOINT_START(flags);
/*
- * In some cases there are groups of actions that must all occur on
- * one side or the other of a checkpoint record. Before flushing the
+ * In some cases there are groups of actions that must all occur on one
+ * side or the other of a checkpoint record. Before flushing the
* checkpoint record we must explicitly wait for any backend currently
* performing those groups of actions.
*
* One example is end of transaction, so we must wait for any transactions
- * that are currently in commit critical sections. If an xact inserted
+ * that are currently in commit critical sections. If an xact inserted
* its commit record into XLOG just before the REDO point, then a crash
* restart from the REDO point would not replay that record, which means
* that our flushing had better include the xact's update of pg_clog. So
@@ -6977,7 +6983,7 @@ CreateCheckPoint(int flags)
vxids = GetVirtualXIDsDelayingChkpt(&nvxids);
if (nvxids > 0)
{
- uint32 nwaits = 0;
+ uint32 nwaits = 0;
do
{
@@ -7182,9 +7188,9 @@ CreateCheckPoint(int flags)
void
CreateEndOfRecoveryRecord(void)
{
- xl_end_of_recovery xlrec;
- XLogRecData rdata;
- XLogRecPtr recptr;
+ xl_end_of_recovery xlrec;
+ XLogRecData rdata;
+ XLogRecPtr recptr;
/* sanity check */
if (!RecoveryInProgress())
@@ -7211,8 +7217,8 @@ CreateEndOfRecoveryRecord(void)
XLogFlush(recptr);
/*
- * Update the control file so that crash recovery can follow
- * the timeline changes to this point.
+ * Update the control file so that crash recovery can follow the timeline
+ * changes to this point.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
ControlFile->time = (pg_time_t) xlrec.end_time;
@@ -7223,7 +7229,7 @@ CreateEndOfRecoveryRecord(void)
END_CRIT_SECTION();
- LocalXLogInsertAllowed = -1; /* return to "check" state */
+ LocalXLogInsertAllowed = -1; /* return to "check" state */
}
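CreateEndOfRecoveryRecord() above is the 9.3-era WAL-insertion idiom in miniature. Condensed, and omitting the critical section and control-file update, its core is roughly:

	xl_end_of_recovery xlrec;
	XLogRecData rdata;
	XLogRecPtr	recptr;

	xlrec.end_time = GetCurrentTimestamp();
	xlrec.ThisTimeLineID = ThisTimeLineID;
	xlrec.PrevTimeLineID = PrevTimeLineID;

	rdata.data = (char *) &xlrec;		/* record payload */
	rdata.len = sizeof(xl_end_of_recovery);
	rdata.buffer = InvalidBuffer;		/* no page image attached */
	rdata.next = NULL;					/* single-element chain */

	recptr = XLogInsert(RM_XLOG_ID, XLOG_END_OF_RECOVERY, &rdata);
	XLogFlush(recptr);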
/*
@@ -7375,7 +7381,7 @@ CreateRestartPoint(int flags)
{
ereport(DEBUG2,
(errmsg("skipping restartpoint, already performed at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
+ (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo)));
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
if (flags & CHECKPOINT_IS_SHUTDOWN)
@@ -7458,7 +7464,8 @@ CreateRestartPoint(int flags)
XLogRecPtr endptr;
/*
- * Get the current end of xlog replayed or received, whichever is later.
+ * Get the current end of xlog replayed or received, whichever is
+ * later.
*/
receivePtr = GetWalRcvWriteRecPtr(NULL, NULL);
replayPtr = GetXLogReplayRecPtr(NULL);
@@ -7468,8 +7475,8 @@ CreateRestartPoint(int flags)
_logSegNo--;
/*
- * Update ThisTimeLineID to the timeline we're currently replaying,
- * so that we install any recycled segments on that timeline.
+ * Update ThisTimeLineID to the timeline we're currently replaying, so
+ * that we install any recycled segments on that timeline.
*
* There is no guarantee that the WAL segments will be useful on the
* current timeline; if recovery proceeds to a new timeline right
@@ -7480,13 +7487,13 @@ CreateRestartPoint(int flags)
* It's possible or perhaps even likely that we finish recovery while
* a restartpoint is in progress. That means we may get to this point
* some minutes afterwards. Setting ThisTimeLineID at that time would
- * actually set it backwards, so we don't want that to persist; if
- * we do reset it here, make sure to reset it back afterwards. This
+ * actually set it backwards, so we don't want that to persist; if we
+ * do reset it here, make sure to reset it back afterwards. This
* doesn't look very clean or principled, but it's the best of about
* five different ways of handling this edge case.
*/
if (RecoveryInProgress())
- (void) GetXLogReplayRecPtr(&ThisTimeLineID);
+ (void) GetXLogReplayRecPtr(&ThisTimeLineID);
RemoveOldXlogFiles(_logSegNo, endptr);
@@ -7519,7 +7526,7 @@ CreateRestartPoint(int flags)
xtime = GetLatestXTime();
ereport((log_checkpoints ? LOG : DEBUG2),
(errmsg("recovery restart point at %X/%X",
- (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
+ (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
xtime ? errdetail("last completed transaction was at log time %s",
timestamptz_to_str(xtime)) : 0));
@@ -7677,10 +7684,10 @@ XLogRestorePoint(const char *rpName)
XLogRecPtr
XLogSaveBufferForHint(Buffer buffer)
{
- XLogRecPtr recptr = InvalidXLogRecPtr;
- XLogRecPtr lsn;
+ XLogRecPtr recptr = InvalidXLogRecPtr;
+ XLogRecPtr lsn;
XLogRecData rdata[2];
- BkpBlock bkpb;
+ BkpBlock bkpb;
/*
* Ensure no checkpoint can change our view of RedoRecPtr.
@@ -7693,8 +7700,8 @@ XLogSaveBufferForHint(Buffer buffer)
GetRedoRecPtr();
/*
- * Setup phony rdata element for use within XLogCheckBuffer only.
- * We reuse and reset rdata for any actual WAL record insert.
+ * Setup phony rdata element for use within XLogCheckBuffer only. We reuse
+ * and reset rdata for any actual WAL record insert.
*/
rdata[0].buffer = buffer;
rdata[0].buffer_std = true;
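The memcpy pair re-wrapped a few hunks below stores the page image minus its unused middle, the "hole" between pd_lower and pd_upper. A toy standalone version of that arithmetic, with invented offsets:

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define BLCKSZ 8192

	int
	main(void)
	{
		char		page[BLCKSZ];
		char		copied[BLCKSZ];
		uint32_t	hole_offset = 64;	/* hypothetical pd_lower */
		uint32_t	hole_length = 8000;	/* hypothetical hole size */

		memset(page, 'x', sizeof(page));

		/* copy the two ends of the page, skipping the hole */
		memcpy(copied, page, hole_offset);
		memcpy(copied + hole_offset,
			   page + hole_offset + hole_length,
			   BLCKSZ - hole_offset - hole_length);

		printf("stored %u of %u bytes\n", BLCKSZ - hole_length, BLCKSZ);
		return 0;
	}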
@@ -7704,8 +7711,8 @@ XLogSaveBufferForHint(Buffer buffer)
*/
if (XLogCheckBuffer(rdata, false, &lsn, &bkpb))
{
- char copied_buffer[BLCKSZ];
- char *origdata = (char *) BufferGetBlock(buffer);
+ char copied_buffer[BLCKSZ];
+ char *origdata = (char *) BufferGetBlock(buffer);
/*
* Copy buffer so we don't have to worry about concurrent hint bit or
@@ -7714,8 +7721,8 @@ XLogSaveBufferForHint(Buffer buffer)
*/
memcpy(copied_buffer, origdata, bkpb.hole_offset);
memcpy(copied_buffer + bkpb.hole_offset,
- origdata + bkpb.hole_offset + bkpb.hole_length,
- BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
+ origdata + bkpb.hole_offset + bkpb.hole_length,
+ BLCKSZ - bkpb.hole_offset - bkpb.hole_length);
/*
* Header for backup block.
@@ -7861,25 +7868,24 @@ checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI, TimeLineID prevTLI)
ereport(PANIC,
(errmsg("unexpected prev timeline ID %u (current timeline ID %u) in checkpoint record",
prevTLI, ThisTimeLineID)));
+
/*
- * The new timeline better be in the list of timelines we expect
- * to see, according to the timeline history. It should also not
- * decrease.
+ * The new timeline better be in the list of timelines we expect to see,
+ * according to the timeline history. It should also not decrease.
*/
if (newTLI < ThisTimeLineID || !tliInHistory(newTLI, expectedTLEs))
ereport(PANIC,
- (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
- newTLI, ThisTimeLineID)));
+ (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
+ newTLI, ThisTimeLineID)));
/*
- * If we have not yet reached min recovery point, and we're about
- * to switch to a timeline greater than the timeline of the min
- * recovery point: trouble. After switching to the new timeline,
- * we could not possibly visit the min recovery point on the
- * correct timeline anymore. This can happen if there is a newer
- * timeline in the archive that branched before the timeline the
- * min recovery point is on, and you attempt to do PITR to the
- * new timeline.
+ * If we have not yet reached min recovery point, and we're about to
+ * switch to a timeline greater than the timeline of the min recovery
+ * point: trouble. After switching to the new timeline, we could not
+ * possibly visit the min recovery point on the correct timeline anymore.
+ * This can happen if there is a newer timeline in the archive that
+ * branched before the timeline the min recovery point is on, and you
+ * attempt to do PITR to the new timeline.
*/
if (!XLogRecPtrIsInvalid(minRecoveryPoint) &&
lsn < minRecoveryPoint &&
@@ -8101,21 +8107,21 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
}
else if (info == XLOG_HINT)
{
- char *data;
- BkpBlock bkpb;
+ char *data;
+ BkpBlock bkpb;
/*
- * Hint bit records contain a backup block stored "inline" in the normal
- * data since the locking when writing hint records isn't sufficient to
- * use the normal backup block mechanism, which assumes exclusive lock
- * on the buffer supplied.
+ * Hint bit records contain a backup block stored "inline" in the
+ * normal data since the locking when writing hint records isn't
+ * sufficient to use the normal backup block mechanism, which assumes
+ * exclusive lock on the buffer supplied.
*
- * Since the only change in these backup block are hint bits, there are
- * no recovery conflicts generated.
+ * Since the only changes in these backup blocks are hint bits, there
+ * are no recovery conflicts generated.
*
- * This also means there is no corresponding API call for this,
- * so an smgr implementation has no need to implement anything.
- * Which means nothing is needed in md.c etc
+ * This also means there is no corresponding API call for this, so an
+ * smgr implementation has no need to implement anything. Which means
+ * nothing is needed in md.c etc
*/
data = XLogRecGetData(record);
memcpy(&bkpb, data, sizeof(BkpBlock));
@@ -8318,7 +8324,7 @@ assign_xlog_sync_method(int new_sync_method, void *extra)
ereport(PANIC,
(errcode_for_file_access(),
errmsg("could not fsync log segment %s: %m",
- XLogFileNameP(ThisTimeLineID, openLogSegNo))));
+ XLogFileNameP(ThisTimeLineID, openLogSegNo))));
if (get_sync_bit(sync_method) != get_sync_bit(new_sync_method))
XLogFileClose();
}
@@ -8349,8 +8355,8 @@ issue_xlog_fsync(int fd, XLogSegNo segno)
if (pg_fsync_writethrough(fd) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync write-through log file %s: %m",
- XLogFileNameP(ThisTimeLineID, segno))));
+ errmsg("could not fsync write-through log file %s: %m",
+ XLogFileNameP(ThisTimeLineID, segno))));
break;
#endif
#ifdef HAVE_FDATASYNC
@@ -8379,6 +8385,7 @@ char *
XLogFileNameP(TimeLineID tli, XLogSegNo segno)
{
char *result = palloc(MAXFNAMELEN);
+
XLogFileName(result, tli, segno);
return result;
}
@@ -8630,9 +8637,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&stamp_time, log_timezone));
appendStringInfo(&labelfbuf, "START WAL LOCATION: %X/%X (file %s)\n",
- (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
+ (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
appendStringInfo(&labelfbuf, "CHECKPOINT LOCATION: %X/%X\n",
- (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
+ (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
appendStringInfo(&labelfbuf, "BACKUP METHOD: %s\n",
exclusive ? "pg_start_backup" : "streamed");
appendStringInfo(&labelfbuf, "BACKUP FROM: %s\n",
@@ -8936,10 +8943,10 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL generated with full_page_writes=off was replayed "
"during online backup"),
- errhint("This means that the backup being taken on the standby "
- "is corrupt and should not be used. "
+ errhint("This means that the backup being taken on the standby "
+ "is corrupt and should not be used. "
"Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ "and then try an online backup again.")));
LWLockAcquire(ControlFileLock, LW_SHARED);
@@ -8990,7 +8997,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
errmsg("could not create file \"%s\": %m",
histfilepath)));
fprintf(fp, "START WAL LOCATION: %X/%X (file %s)\n",
- (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
+ (uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);
/* transfer remaining lines from label to history file */
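For reference, the fprintf calls above produce backup-history lines of this shape (the LSNs and segment name here are invented):

	START WAL LOCATION: 0/9000028 (file 000000010000000000000009)
	STOP WAL LOCATION: 0/90000F0 (file 000000010000000000000009)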
@@ -9366,10 +9373,10 @@ XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI)
{
XLogPageReadPrivate *private =
- (XLogPageReadPrivate *) xlogreader->private_data;
+ (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode;
uint32 targetPageOff;
- XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
+ XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
XLByteToSeg(targetPagePtr, targetSegNo);
targetPageOff = targetPagePtr % XLogSegSize;
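XLByteToSeg and the modulo on the line above are plain integer arithmetic on the 64-bit WAL position. Standalone, with a made-up pointer and the default 16MB segment size:

	#include <stdio.h>
	#include <stdint.h>

	#define XLogSegSize (16 * 1024 * 1024)	/* default WAL segment size */

	int
	main(void)
	{
		uint64_t	targetPagePtr = UINT64_C(0x12A004000);
		uint64_t	segNo = targetPagePtr / XLogSegSize;	/* XLByteToSeg */
		uint32_t	pageOff = (uint32_t) (targetPagePtr % XLogSegSize);

		/* prints "segment 298, offset 16384" */
		printf("segment %llu, offset %u\n",
			   (unsigned long long) segNo, pageOff);
		return 0;
	}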
@@ -9448,24 +9455,24 @@ retry:
readOff = targetPageOff;
if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
{
- char fname[MAXFNAMELEN];
+ char fname[MAXFNAMELEN];
XLogFileName(fname, curFileTLI, readSegNo);
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not seek in log segment %s to offset %u: %m",
+ errmsg("could not seek in log segment %s to offset %u: %m",
fname, readOff)));
goto next_record_is_invalid;
}
if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
{
- char fname[MAXFNAMELEN];
+ char fname[MAXFNAMELEN];
XLogFileName(fname, curFileTLI, readSegNo);
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not read from log segment %s, offset %u: %m",
+ errmsg("could not read from log segment %s, offset %u: %m",
fname, readOff)));
goto next_record_is_invalid;
}
@@ -9524,12 +9531,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
bool fetching_ckpt, XLogRecPtr tliRecPtr)
{
static pg_time_t last_fail_time = 0;
- pg_time_t now;
+ pg_time_t now;
/*-------
* Standby mode is implemented by a state machine:
*
- * 1. Read from archive (XLOG_FROM_ARCHIVE)
+ * 1. Read from archive (XLOG_FROM_ARCHIVE)
* 2. Read from pg_xlog (XLOG_FROM_PG_XLOG)
* 3. Check trigger file
* 4. Read from primary server via walreceiver (XLOG_FROM_STREAM)
@@ -9554,7 +9561,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
for (;;)
{
- int oldSource = currentSource;
+ int oldSource = currentSource;
/*
* First check if we failed to read from the current source, and
@@ -9571,11 +9578,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
break;
case XLOG_FROM_PG_XLOG:
+
/*
- * Check to see if the trigger file exists. Note that we do
- * this only after failure, so when you create the trigger
- * file, we still finish replaying as much as we can from
- * archive and pg_xlog before failover.
+ * Check to see if the trigger file exists. Note that we
+ * do this only after failure, so when you create the
+ * trigger file, we still finish replaying as much as we
+ * can from archive and pg_xlog before failover.
*/
if (StandbyMode && CheckForStandbyTrigger())
{
@@ -9584,15 +9592,15 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
}
/*
- * Not in standby mode, and we've now tried the archive and
- * pg_xlog.
+ * Not in standby mode, and we've now tried the archive
+ * and pg_xlog.
*/
if (!StandbyMode)
return false;
/*
- * If primary_conninfo is set, launch walreceiver to try to
- * stream the missing WAL.
+ * If primary_conninfo is set, launch walreceiver to try
+ * to stream the missing WAL.
*
* If fetching_ckpt is TRUE, RecPtr points to the initial
* checkpoint location. In that case, we use RedoStartLSN
@@ -9602,8 +9610,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
*/
if (PrimaryConnInfo)
{
- XLogRecPtr ptr;
- TimeLineID tli;
+ XLogRecPtr ptr;
+ TimeLineID tli;
if (fetching_ckpt)
{
@@ -9624,28 +9632,32 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
RequestXLogStreaming(tli, ptr, PrimaryConnInfo);
receivedUpto = 0;
}
+
/*
- * Move to XLOG_FROM_STREAM state in either case. We'll get
- * immediate failure if we didn't launch walreceiver, and
- * move on to the next state.
+ * Move to XLOG_FROM_STREAM state in either case. We'll
+ * get immediate failure if we didn't launch walreceiver,
+ * and move on to the next state.
*/
currentSource = XLOG_FROM_STREAM;
break;
case XLOG_FROM_STREAM:
+
/*
- * Failure while streaming. Most likely, we got here because
- * streaming replication was terminated, or promotion was
- * triggered. But we also get here if we find an invalid
- * record in the WAL streamed from master, in which case
- * something is seriously wrong. There's little chance that
- * the problem will just go away, but PANIC is not good for
- * availability either, especially in hot standby mode. So,
- * we treat that the same as disconnection, and retry from
- * archive/pg_xlog again. The WAL in the archive should be
- * identical to what was streamed, so it's unlikely that it
- * helps, but one can hope...
+ * Failure while streaming. Most likely, we got here
+ * because streaming replication was terminated, or
+ * promotion was triggered. But we also get here if we
+ * find an invalid record in the WAL streamed from master,
+ * in which case something is seriously wrong. There's
+ * little chance that the problem will just go away, but
+ * PANIC is not good for availability either, especially
+ * in hot standby mode. So, we treat that the same as
+ * disconnection, and retry from archive/pg_xlog again.
+ * The WAL in the archive should be identical to what was
+ * streamed, so it's unlikely that it helps, but one can
+ * hope...
*/
+
/*
* Before we leave XLOG_FROM_STREAM state, make sure that
* walreceiver is not active, so that it won't overwrite
@@ -9668,11 +9680,12 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
}
/*
- * XLOG_FROM_STREAM is the last state in our state machine,
- * so we've exhausted all the options for obtaining the
- * requested WAL. We're going to loop back and retry from
- * the archive, but if it hasn't been long since last
- * attempt, sleep 5 seconds to avoid busy-waiting.
+ * XLOG_FROM_STREAM is the last state in our state
+ * machine, so we've exhausted all the options for
+ * obtaining the requested WAL. We're going to loop back
+ * and retry from the archive, but if it hasn't been long
+ * since last attempt, sleep 5 seconds to avoid
+ * busy-waiting.
*/
now = (pg_time_t) time(NULL);
if ((now - last_fail_time) < 5)
@@ -9691,9 +9704,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
else if (currentSource == XLOG_FROM_PG_XLOG)
{
/*
- * We just successfully read a file in pg_xlog. We prefer files
- * in the archive over ones in pg_xlog, so try the next file
- * again from the archive first.
+ * We just successfully read a file in pg_xlog. We prefer files in
+ * the archive over ones in pg_xlog, so try the next file again
+ * from the archive first.
*/
if (InArchiveRecovery)
currentSource = XLOG_FROM_ARCHIVE;
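The state variable cycles through a small set of source codes; as a reminder (names and comments as in xlog.c of this era, values from memory):

	#define XLOG_FROM_ANY		0	/* request to read WAL from any source */
	#define XLOG_FROM_ARCHIVE	1	/* restored using restore_command */
	#define XLOG_FROM_PG_XLOG	2	/* existing file in pg_xlog */
	#define XLOG_FROM_STREAM	3	/* streamed from master */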
@@ -9739,107 +9752,110 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
break;
case XLOG_FROM_STREAM:
- {
- bool havedata;
-
- /*
- * Check if WAL receiver is still active.
- */
- if (!WalRcvStreaming())
- {
- lastSourceFailed = true;
- break;
- }
-
- /*
- * Walreceiver is active, so see if new data has arrived.
- *
- * We only advance XLogReceiptTime when we obtain fresh WAL
- * from walreceiver and observe that we had already processed
- * everything before the most recent "chunk" that it flushed to
- * disk. In steady state where we are keeping up with the
- * incoming data, XLogReceiptTime will be updated on each cycle.
- * When we are behind, XLogReceiptTime will not advance, so the
- * grace time allotted to conflicting queries will decrease.
- */
- if (RecPtr < receivedUpto)
- havedata = true;
- else
{
- XLogRecPtr latestChunkStart;
+ bool havedata;
- receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
- if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
+ /*
+ * Check if WAL receiver is still active.
+ */
+ if (!WalRcvStreaming())
{
+ lastSourceFailed = true;
+ break;
+ }
+
+ /*
+ * Walreceiver is active, so see if new data has arrived.
+ *
+ * We only advance XLogReceiptTime when we obtain fresh
+ * WAL from walreceiver and observe that we had already
+ * processed everything before the most recent "chunk"
+ * that it flushed to disk. In steady state where we are
+ * keeping up with the incoming data, XLogReceiptTime will
+ * be updated on each cycle. When we are behind,
+ * XLogReceiptTime will not advance, so the grace time
+ * allotted to conflicting queries will decrease.
+ */
+ if (RecPtr < receivedUpto)
havedata = true;
- if (latestChunkStart <= RecPtr)
+ else
+ {
+ XLogRecPtr latestChunkStart;
+
+ receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
+ if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
{
- XLogReceiptTime = GetCurrentTimestamp();
- SetCurrentChunkStartTime(XLogReceiptTime);
+ havedata = true;
+ if (latestChunkStart <= RecPtr)
+ {
+ XLogReceiptTime = GetCurrentTimestamp();
+ SetCurrentChunkStartTime(XLogReceiptTime);
+ }
}
+ else
+ havedata = false;
}
- else
- havedata = false;
- }
- if (havedata)
- {
- /*
- * Great, streamed far enough. Open the file if it's not
- * open already. Also read the timeline history file if
- * we haven't initialized timeline history yet; it should
- * be streamed over and present in pg_xlog by now. Use
- * XLOG_FROM_STREAM so that source info is set correctly
- * and XLogReceiptTime isn't changed.
- */
- if (readFile < 0)
+ if (havedata)
{
- if (!expectedTLEs)
- expectedTLEs = readTimeLineHistory(receiveTLI);
- readFile = XLogFileRead(readSegNo, PANIC,
- receiveTLI,
- XLOG_FROM_STREAM, false);
- Assert(readFile >= 0);
+ /*
+ * Great, streamed far enough. Open the file if it's
+ * not open already. Also read the timeline history
+ * file if we haven't initialized timeline history
+ * yet; it should be streamed over and present in
+ * pg_xlog by now. Use XLOG_FROM_STREAM so that
+ * source info is set correctly and XLogReceiptTime
+ * isn't changed.
+ */
+ if (readFile < 0)
+ {
+ if (!expectedTLEs)
+ expectedTLEs = readTimeLineHistory(receiveTLI);
+ readFile = XLogFileRead(readSegNo, PANIC,
+ receiveTLI,
+ XLOG_FROM_STREAM, false);
+ Assert(readFile >= 0);
+ }
+ else
+ {
+ /* just make sure source info is correct... */
+ readSource = XLOG_FROM_STREAM;
+ XLogReceiptSource = XLOG_FROM_STREAM;
+ return true;
+ }
+ break;
}
- else
+
+ /*
+ * Data not here yet. Check for trigger, then wait for
+ * walreceiver to wake us up when new WAL arrives.
+ */
+ if (CheckForStandbyTrigger())
{
- /* just make sure source info is correct... */
- readSource = XLOG_FROM_STREAM;
- XLogReceiptSource = XLOG_FROM_STREAM;
- return true;
+ /*
+ * Note that we don't "return false" immediately here.
+ * After being triggered, we still want to replay all
+ * the WAL that was already streamed. It's in pg_xlog
+ * now, so we just treat this as a failure, and the
+ * state machine will move on to replay the streamed
+ * WAL from pg_xlog, and then recheck the trigger and
+ * exit replay.
+ */
+ lastSourceFailed = true;
+ break;
}
- break;
- }
- /*
- * Data not here yet. Check for trigger, then wait for
- * walreceiver to wake us up when new WAL arrives.
- */
- if (CheckForStandbyTrigger())
- {
/*
- * Note that we don't "return false" immediately here.
- * After being triggered, we still want to replay all the
- * WAL that was already streamed. It's in pg_xlog now, so
- * we just treat this as a failure, and the state machine
- * will move on to replay the streamed WAL from pg_xlog,
- * and then recheck the trigger and exit replay.
+ * Wait for more WAL to arrive. Time out after 5 seconds,
+ * like when polling the archive, to react to a trigger
+ * file promptly.
*/
- lastSourceFailed = true;
+ WaitLatch(&XLogCtl->recoveryWakeupLatch,
+ WL_LATCH_SET | WL_TIMEOUT,
+ 5000L);
+ ResetLatch(&XLogCtl->recoveryWakeupLatch);
break;
}
- /*
- * Wait for more WAL to arrive. Time out after 5 seconds, like
- * when polling the archive, to react to a trigger file
- * promptly.
- */
- WaitLatch(&XLogCtl->recoveryWakeupLatch,
- WL_LATCH_SET | WL_TIMEOUT,
- 5000L);
- ResetLatch(&XLogCtl->recoveryWakeupLatch);
- break;
- }
-
default:
elog(ERROR, "unexpected WAL source %d", currentSource);
}
@@ -9903,11 +9919,10 @@ CheckForStandbyTrigger(void)
if (IsPromoteTriggered())
{
/*
- * In 9.1 and 9.2 the postmaster unlinked the promote file
- * inside the signal handler. We now leave the file in place
- * and let the Startup process do the unlink. This allows
- * Startup to know whether we're doing fast or normal
- * promotion. Fast promotion takes precedence.
+ * In 9.1 and 9.2 the postmaster unlinked the promote file inside the
+ * signal handler. We now leave the file in place and let the Startup
+ * process do the unlink. This allows Startup to know whether we're
+ * doing fast or normal promotion. Fast promotion takes precedence.
*/
if (stat(FAST_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
{
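In outline, the promote handling this hunk belongs to behaves like the sketch below (simplified; declarations elided, and both trigger files are removed once noticed):

	if (stat(FAST_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
		fast_promote = true;	/* skip the end-of-recovery checkpoint */
	else if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
		fast_promote = false;	/* normal promotion */

	unlink(FAST_PROMOTE_SIGNAL_FILE);
	unlink(PROMOTE_SIGNAL_FILE);
	return true;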
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index 0c178c55c87..342975c7b64 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -87,9 +87,9 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* of log segments that weren't yet transferred to the archive.
*
* Notice that we don't actually overwrite any files when we copy back
- * from archive because the restore_command may inadvertently
- * restore inappropriate xlogs, or they may be corrupt, so we may wish to
- * fallback to the segments remaining in current XLOGDIR later. The
+ * from archive because the restore_command may inadvertently restore
+ * inappropriate xlogs, or they may be corrupt, so we may wish to fall back
+ * to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
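For reference, a typical restore_command that this code ends up invoking, with %f expanded to the wanted file name and %p to the destination path (the archive directory is, of course, site-specific):

	restore_command = 'cp /mnt/server/archivedir/%f "%p"'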
@@ -433,19 +433,20 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
if (stat(xlogfpath, &statbuf) == 0)
{
- char oldpath[MAXPGPATH];
+ char oldpath[MAXPGPATH];
+
#ifdef WIN32
static unsigned int deletedcounter = 1;
+
/*
- * On Windows, if another process (e.g a walsender process) holds
- * the file open in FILE_SHARE_DELETE mode, unlink will succeed,
- * but the file will still show up in directory listing until the
- * last handle is closed, and we cannot rename the new file in its
- * place until that. To avoid that problem, rename the old file to
- * a temporary name first. Use a counter to create a unique
- * filename, because the same file might be restored from the
- * archive multiple times, and a walsender could still be holding
- * onto an old deleted version of it.
+ * On Windows, if another process (e.g. a walsender process) holds the
+ * file open in FILE_SHARE_DELETE mode, unlink will succeed, but the
+ * file will still show up in directory listing until the last handle
+ * is closed, and we cannot rename the new file in its place until
+ * that. To avoid that problem, rename the old file to a temporary
+ * name first. Use a counter to create a unique filename, because the
+ * same file might be restored from the archive multiple times, and a
+ * walsender could still be holding onto an old deleted version of it.
*/
snprintf(oldpath, MAXPGPATH, "%s.deleted%u",
xlogfpath, deletedcounter++);
@@ -474,17 +475,17 @@ KeepFileRestoredFromArchive(char *path, char *xlogfname)
path, xlogfpath)));
/*
- * Create .done file forcibly to prevent the restored segment from
- * being archived again later.
+ * Create .done file forcibly to prevent the restored segment from being
+ * archived again later.
*/
XLogArchiveForceDone(xlogfname);
/*
- * If the existing file was replaced, since walsenders might have it
- * open, request them to reload a currently-open segment. This is only
- * required for WAL segments, walsenders don't hold other files open, but
- * there's no harm in doing this too often, and we don't know what kind
- * of a file we're dealing with here.
+ * If the existing file was replaced, since walsenders might have it open,
+ * request them to reload a currently-open segment. This is only required
+ * for WAL segments, walsenders don't hold other files open, but there's
+ * no harm in doing this too often, and we don't know what kind of a file
+ * we're dealing with here.
*/
if (reload)
WalSndRqstFileReload();
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index b6bb6773d6b..b7950f77a65 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -545,8 +545,8 @@ pg_xlog_location_diff(PG_FUNCTION_ARGS)
* XXX: this won't handle values higher than 2^63 correctly.
*/
result = DatumGetNumeric(DirectFunctionCall2(numeric_sub,
- DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
- DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
+ DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes1)),
+ DirectFunctionCall1(int8_numeric, Int64GetDatum((int64) bytes2))));
PG_RETURN_NUMERIC(result);
}
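In use, the function subtracts two WAL positions as plain byte counts; for instance, pg_xlog_location_diff('0/3000168', '0/3000100') yields 104 (0x68 bytes).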
@@ -584,7 +584,7 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m",
- BACKUP_LABEL_FILE)));
+ BACKUP_LABEL_FILE)));
PG_RETURN_NULL();
}
@@ -602,13 +602,13 @@ pg_backup_start_time(PG_FUNCTION_ARGS)
if (ferror(lfp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
+ errmsg("could not read file \"%s\": %m", BACKUP_LABEL_FILE)));
/* Close the backup label file. */
if (FreeFile(lfp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
+ errmsg("could not close file \"%s\": %m", BACKUP_LABEL_FILE)));
if (strlen(backup_start_time) == 0)
ereport(ERROR,
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index a5e2b50fe6b..fc6ff806440 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -221,9 +221,9 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg)
targetRecOff = RecPtr % XLOG_BLCKSZ;
/*
- * Read the page containing the record into state->readBuf. Request
- * enough byte to cover the whole record header, or at least the part of
- * it that fits on the same page.
+ * Read the page containing the record into state->readBuf. Request enough
+ * bytes to cover the whole record header, or at least the part of it that
+ * fits on the same page.
*/
readOff = ReadPageInternal(state,
targetPagePtr,
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 9e401ef7a30..8905596c0b1 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -49,7 +49,7 @@
extern int optind;
extern char *optarg;
-uint32 bootstrap_data_checksum_version = 0; /* No checksum */
+uint32 bootstrap_data_checksum_version = 0; /* No checksum */
#define ALLOC(t, c) ((t *) calloc((unsigned)(c), sizeof(t)))
@@ -67,7 +67,7 @@ static void cleanup(void);
* ----------------
*/
-AuxProcType MyAuxProcType = NotAnAuxProcess; /* declared in miscadmin.h */
+AuxProcType MyAuxProcType = NotAnAuxProcess; /* declared in miscadmin.h */
Relation boot_reldesc; /* current relation descriptor */
@@ -389,7 +389,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
/*
* Assign the ProcSignalSlot for an auxiliary process. Since it
* doesn't have a BackendId, the slot is statically allocated based on
- * the auxiliary process type (MyAuxProcType). Backends use slots
+ * the auxiliary process type (MyAuxProcType). Backends use slots
* indexed in the range from 1 to MaxBackends (inclusive), so we use
* MaxBackends + AuxProcType + 1 as the index of the slot for an
* auxiliary process.
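Concretely, per that comment: with MaxBackends = 100, regular backends occupy ProcSignal slots 1 through 100, and an auxiliary process with MyAuxProcType = 2 lands in slot 100 + 2 + 1 = 103.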
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 976f2d204cd..cb9b75aa092 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -3419,7 +3419,7 @@ aclcheck_error_col(AclResult aclerr, AclObjectKind objectkind,
void
aclcheck_error_type(AclResult aclerr, Oid typeOid)
{
- Oid element_type = get_element_type(typeOid);
+ Oid element_type = get_element_type(typeOid);
aclcheck_error(aclerr, ACL_KIND_TYPE, format_type_be(element_type ? element_type : typeOid));
}
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 967182b541b..41a5da0bd23 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -335,7 +335,7 @@ GetNewOid(Relation relation)
* This is exported separately because there are cases where we want to use
* an index that will not be recognized by RelationGetOidIndex: TOAST tables
* have indexes that are usable, but have multiple columns and are on
- * ordinary columns rather than a true OID column. This code will work
+ * ordinary columns rather than a true OID column. This code will work
* anyway, so long as the OID is the index's first column. The caller must
* pass in the actual heap attnum of the OID column, however.
*
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 6b7a51947c9..69171f8311c 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -198,7 +198,7 @@ static void
deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
int flags)
{
- int i;
+ int i;
/*
* Keep track of objects for event triggers, if necessary.
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 24a8474cb51..7622a9655ea 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -98,7 +98,7 @@ static void StoreRelCheck(Relation rel, char *ccname, Node *expr,
bool is_validated, bool is_local, int inhcount,
bool is_no_inherit, bool is_internal);
static void StoreConstraints(Relation rel, List *cooked_constraints,
- bool is_internal);
+ bool is_internal);
static bool MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
bool allow_merge, bool is_local,
bool is_no_inherit);
@@ -870,6 +870,7 @@ AddNewRelationTuple(Relation pg_class_desc,
* that will do.
*/
new_rel_reltup->relfrozenxid = RecentXmin;
+
/*
* Similarly, initialize the minimum Multixact to the first value that
* could possibly be stored in tuples in the table. Running
@@ -1915,10 +1916,10 @@ StoreAttrDefault(Relation rel, AttrNumber attnum,
/*
* Post creation hook for attribute defaults.
*
- * XXX. ALTER TABLE ALTER COLUMN SET/DROP DEFAULT is implemented
- * with a couple of deletion/creation of the attribute's default entry,
- * so the callee should check existence of an older version of this
- * entry if it needs to distinguish.
+ * XXX. ALTER TABLE ALTER COLUMN SET/DROP DEFAULT is implemented with a
+ * couple of deletion/creation of the attribute's default entry, so the
+ * callee should check existence of an older version of this entry if it
+ * needs to distinguish.
*/
InvokeObjectPostCreateHookArg(AttrDefaultRelationId,
RelationGetRelid(rel), attnum, is_internal);
@@ -2018,7 +2019,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
is_local, /* conislocal */
inhcount, /* coninhcount */
is_no_inherit, /* connoinherit */
- is_internal); /* internally constructed? */
+ is_internal); /* internally constructed? */
pfree(ccbin);
pfree(ccsrc);
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index f48c0bcb31f..23943ff9ce2 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -293,9 +293,10 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
Oid namespaceId;
namespaceId = LookupExplicitNamespace(relation->schemaname, missing_ok);
+
/*
- * For missing_ok, allow a non-existant schema name to
- * return InvalidOid.
+ * For missing_ok, allow a non-existent schema name to
+ * return InvalidOid.
*/
if (namespaceId != myTempNamespace)
ereport(ERROR,
@@ -2701,7 +2702,7 @@ LookupExplicitNamespace(const char *nspname, bool missing_ok)
namespaceId = get_namespace_oid(nspname, missing_ok);
if (missing_ok && !OidIsValid(namespaceId))
return InvalidOid;
-
+
aclresult = pg_namespace_aclcheck(namespaceId, GetUserId(), ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
diff --git a/src/backend/catalog/objectaccess.c b/src/backend/catalog/objectaccess.c
index 87158e34e26..924b1a1520c 100644
--- a/src/backend/catalog/objectaccess.c
+++ b/src/backend/catalog/objectaccess.c
@@ -29,7 +29,7 @@ void
RunObjectPostCreateHook(Oid classId, Oid objectId, int subId,
bool is_internal)
{
- ObjectAccessPostCreate pc_arg;
+ ObjectAccessPostCreate pc_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
@@ -37,9 +37,9 @@ RunObjectPostCreateHook(Oid classId, Oid objectId, int subId,
memset(&pc_arg, 0, sizeof(ObjectAccessPostCreate));
pc_arg.is_internal = is_internal;
- (*object_access_hook)(OAT_POST_CREATE,
- classId, objectId, subId,
- (void *) &pc_arg);
+ (*object_access_hook) (OAT_POST_CREATE,
+ classId, objectId, subId,
+ (void *) &pc_arg);
}
/*
@@ -51,7 +51,7 @@ void
RunObjectDropHook(Oid classId, Oid objectId, int subId,
int dropflags)
{
- ObjectAccessDrop drop_arg;
+ ObjectAccessDrop drop_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
@@ -59,9 +59,9 @@ RunObjectDropHook(Oid classId, Oid objectId, int subId,
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
drop_arg.dropflags = dropflags;
- (*object_access_hook)(OAT_DROP,
- classId, objectId, subId,
- (void *) &drop_arg);
+ (*object_access_hook) (OAT_DROP,
+ classId, objectId, subId,
+ (void *) &drop_arg);
}
/*
@@ -73,7 +73,7 @@ void
RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
Oid auxiliaryId, bool is_internal)
{
- ObjectAccessPostAlter pa_arg;
+ ObjectAccessPostAlter pa_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
@@ -82,9 +82,9 @@ RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
pa_arg.auxiliary_id = auxiliaryId;
pa_arg.is_internal = is_internal;
- (*object_access_hook)(OAT_POST_ALTER,
- classId, objectId, subId,
- (void *) &pa_arg);
+ (*object_access_hook) (OAT_POST_ALTER,
+ classId, objectId, subId,
+ (void *) &pa_arg);
}
/*
@@ -95,7 +95,7 @@ RunObjectPostAlterHook(Oid classId, Oid objectId, int subId,
bool
RunNamespaceSearchHook(Oid objectId, bool ereport_on_violation)
{
- ObjectAccessNamespaceSearch ns_arg;
+ ObjectAccessNamespaceSearch ns_arg;
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
@@ -104,9 +104,9 @@ RunNamespaceSearchHook(Oid objectId, bool ereport_on_violation)
ns_arg.ereport_on_violation = ereport_on_violation;
ns_arg.result = true;
- (*object_access_hook)(OAT_NAMESPACE_SEARCH,
- NamespaceRelationId, objectId, 0,
- (void *) &ns_arg);
+ (*object_access_hook) (OAT_NAMESPACE_SEARCH,
+ NamespaceRelationId, objectId, 0,
+ (void *) &ns_arg);
return ns_arg.result;
}
@@ -122,7 +122,7 @@ RunFunctionExecuteHook(Oid objectId)
/* caller should check, but just in case... */
Assert(object_access_hook != NULL);
- (*object_access_hook)(OAT_FUNCTION_EXECUTE,
- ProcedureRelationId, objectId, 0,
- NULL);
+ (*object_access_hook) (OAT_FUNCTION_EXECUTE,
+ ProcedureRelationId, objectId, 0,
+ NULL);
}
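These wrappers all funnel into the same hook pointer. A minimal sketch of an extension consuming it (the module name and log message are invented; the hook signature follows objectaccess.h):

	#include "postgres.h"
	#include "fmgr.h"
	#include "catalog/objectaccess.h"

	PG_MODULE_MAGIC;

	static object_access_hook_type prev_object_access_hook = NULL;

	static void
	my_object_access(ObjectAccessType access, Oid classId, Oid objectId,
					 int subId, void *arg)
	{
		/* chain to any previously installed hook */
		if (prev_object_access_hook)
			(*prev_object_access_hook) (access, classId, objectId, subId, arg);

		if (access == OAT_POST_CREATE)
			elog(DEBUG1, "created object %u in catalog %u", objectId, classId);
	}

	void
	_PG_init(void)
	{
		prev_object_access_hook = object_access_hook;
		object_access_hook = my_object_access;
	}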
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index 48ef6bf0a49..215eaf53e69 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -94,10 +94,11 @@ typedef struct
AttrNumber attnum_owner; /* attnum of owner field */
AttrNumber attnum_acl; /* attnum of acl field */
AclObjectKind acl_kind; /* ACL_KIND_* of this object type */
- bool is_nsp_name_unique; /* can the nsp/name combination (or name
- * alone, if there's no namespace) be
- * considered an unique identifier for an
- * object of this class? */
+ bool is_nsp_name_unique; /* can the nsp/name combination (or
+ * name alone, if there's no
+ * namespace) be considered a unique
+ * identifier for an object of this
+ * class? */
} ObjectPropertyType;
static ObjectPropertyType ObjectProperty[] =
@@ -1443,7 +1444,7 @@ get_object_property_data(Oid class_id)
ereport(ERROR,
(errmsg_internal("unrecognized class id: %u", class_id)));
- return NULL; /* keep MSC compiler happy */
+ return NULL; /* keep MSC compiler happy */
}
/*
@@ -1463,14 +1464,14 @@ get_catalog_object_by_oid(Relation catalog, Oid objectId)
if (oidCacheId > 0)
{
tuple = SearchSysCacheCopy1(oidCacheId, ObjectIdGetDatum(objectId));
- if (!HeapTupleIsValid(tuple)) /* should not happen */
+ if (!HeapTupleIsValid(tuple)) /* should not happen */
return NULL;
}
else
{
Oid oidIndexId = get_object_oid_index(classId);
- SysScanDesc scan;
- ScanKeyData skey;
+ SysScanDesc scan;
+ ScanKeyData skey;
Assert(OidIsValid(oidIndexId));
@@ -2127,7 +2128,7 @@ getObjectDescription(const ObjectAddress *object)
break;
}
- case OCLASS_EVENT_TRIGGER:
+ case OCLASS_EVENT_TRIGGER:
{
HeapTuple tup;
@@ -2137,7 +2138,7 @@ getObjectDescription(const ObjectAddress *object)
elog(ERROR, "cache lookup failed for event trigger %u",
object->objectId);
appendStringInfo(&buffer, _("event trigger %s"),
- NameStr(((Form_pg_event_trigger) GETSTRUCT(tup))->evtname));
+ NameStr(((Form_pg_event_trigger) GETSTRUCT(tup))->evtname));
ReleaseSysCache(tup);
break;
}
@@ -2355,22 +2356,22 @@ pg_identify_object(PG_FUNCTION_ARGS)
RelationGetDescr(catalog), &isnull);
if (isnull)
elog(ERROR, "invalid null namespace in object %u/%u/%d",
- address.classId, address.objectId, address.objectSubId);
+ address.classId, address.objectId, address.objectSubId);
}
/*
- * We only return the object name if it can be used (together
- * with the schema name, if any) as an unique identifier.
+ * We only return the object name if it can be used (together with
+ * the schema name, if any) as a unique identifier.
*/
if (get_object_namensp_unique(address.classId))
{
nameAttnum = get_object_attnum_name(address.classId);
if (nameAttnum != InvalidAttrNumber)
{
- Datum nameDatum;
+ Datum nameDatum;
nameDatum = heap_getattr(objtup, nameAttnum,
- RelationGetDescr(catalog), &isnull);
+ RelationGetDescr(catalog), &isnull);
if (isnull)
elog(ERROR, "invalid null name in object %u/%u/%d",
address.classId, address.objectId, address.objectSubId);
@@ -2389,7 +2390,7 @@ pg_identify_object(PG_FUNCTION_ARGS)
/* schema name */
if (OidIsValid(schema_oid))
{
- const char *schema = quote_identifier(get_namespace_name(schema_oid));
+ const char *schema = quote_identifier(get_namespace_name(schema_oid));
values[1] = CStringGetTextDatum(schema);
nulls[1] = false;
@@ -2622,7 +2623,7 @@ getConstraintTypeDescription(StringInfo buffer, Oid constroid)
{
Relation constrRel;
HeapTuple constrTup;
- Form_pg_constraint constrForm;
+ Form_pg_constraint constrForm;
constrRel = heap_open(ConstraintRelationId, AccessShareLock);
constrTup = get_catalog_object_by_oid(constrRel, constroid);
@@ -2651,7 +2652,7 @@ getProcedureTypeDescription(StringInfo buffer, Oid procid)
Form_pg_proc procForm;
procTup = SearchSysCache1(PROCOID,
- ObjectIdGetDatum(procid));
+ ObjectIdGetDatum(procid));
if (!HeapTupleIsValid(procTup))
elog(ERROR, "cache lookup failed for procedure %u", procid);
procForm = (Form_pg_proc) GETSTRUCT(procTup);
@@ -2683,7 +2684,7 @@ getObjectIdentity(const ObjectAddress *object)
getRelationIdentity(&buffer, object->objectId);
if (object->objectSubId != 0)
{
- char *attr;
+ char *attr;
attr = get_relid_attribute_name(object->objectId,
object->objectSubId);
@@ -2718,8 +2719,8 @@ getObjectIdentity(const ObjectAddress *object)
castForm = (Form_pg_cast) GETSTRUCT(tup);
appendStringInfo(&buffer, "(%s AS %s)",
- format_type_be_qualified(castForm->castsource),
- format_type_be_qualified(castForm->casttarget));
+ format_type_be_qualified(castForm->castsource),
+ format_type_be_qualified(castForm->casttarget));
heap_close(castRel, AccessShareLock);
break;
@@ -2729,7 +2730,7 @@ getObjectIdentity(const ObjectAddress *object)
{
HeapTuple collTup;
Form_pg_collation coll;
- char *schema;
+ char *schema;
collTup = SearchSysCache1(COLLOID,
ObjectIdGetDatum(object->objectId));
@@ -2740,7 +2741,7 @@ getObjectIdentity(const ObjectAddress *object)
schema = get_namespace_name(coll->collnamespace);
appendStringInfoString(&buffer,
quote_qualified_identifier(schema,
- NameStr(coll->collname)));
+ NameStr(coll->collname)));
ReleaseSysCache(collTup);
break;
}
@@ -2765,7 +2766,7 @@ getObjectIdentity(const ObjectAddress *object)
}
else
{
- ObjectAddress domain;
+ ObjectAddress domain;
domain.classId = TypeRelationId;
domain.objectId = con->contypid;
@@ -2849,7 +2850,7 @@ getObjectIdentity(const ObjectAddress *object)
object->objectId);
langForm = (Form_pg_language) GETSTRUCT(langTup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(langForm->lanname)));
+ quote_identifier(NameStr(langForm->lanname)));
ReleaseSysCache(langTup);
break;
}
@@ -2889,7 +2890,7 @@ getObjectIdentity(const ObjectAddress *object)
appendStringInfo(&buffer,
"%s",
quote_qualified_identifier(schema,
- NameStr(opcForm->opcname)));
+ NameStr(opcForm->opcname)));
appendStringInfo(&buffer, " for %s",
quote_identifier(NameStr(amForm->amname)));
@@ -2935,8 +2936,8 @@ getObjectIdentity(const ObjectAddress *object)
appendStringInfo(&buffer, "operator %d (%s, %s) of %s",
amopForm->amopstrategy,
- format_type_be_qualified(amopForm->amoplefttype),
- format_type_be_qualified(amopForm->amoprighttype),
+ format_type_be_qualified(amopForm->amoplefttype),
+ format_type_be_qualified(amopForm->amoprighttype),
opfam.data);
pfree(opfam.data);
@@ -2979,8 +2980,8 @@ getObjectIdentity(const ObjectAddress *object)
appendStringInfo(&buffer, "function %d (%s, %s) of %s",
amprocForm->amprocnum,
- format_type_be_qualified(amprocForm->amproclefttype),
- format_type_be_qualified(amprocForm->amprocrighttype),
+ format_type_be_qualified(amprocForm->amproclefttype),
+ format_type_be_qualified(amprocForm->amprocrighttype),
opfam.data);
pfree(opfam.data);
@@ -3054,7 +3055,7 @@ getObjectIdentity(const ObjectAddress *object)
case OCLASS_TSPARSER:
{
HeapTuple tup;
- Form_pg_ts_parser formParser;
+ Form_pg_ts_parser formParser;
tup = SearchSysCache1(TSPARSEROID,
ObjectIdGetDatum(object->objectId));
@@ -3063,7 +3064,7 @@ getObjectIdentity(const ObjectAddress *object)
object->objectId);
formParser = (Form_pg_ts_parser) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(formParser->prsname)));
+ quote_identifier(NameStr(formParser->prsname)));
ReleaseSysCache(tup);
break;
}
@@ -3071,7 +3072,7 @@ getObjectIdentity(const ObjectAddress *object)
case OCLASS_TSDICT:
{
HeapTuple tup;
- Form_pg_ts_dict formDict;
+ Form_pg_ts_dict formDict;
tup = SearchSysCache1(TSDICTOID,
ObjectIdGetDatum(object->objectId));
@@ -3080,7 +3081,7 @@ getObjectIdentity(const ObjectAddress *object)
object->objectId);
formDict = (Form_pg_ts_dict) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(formDict->dictname)));
+ quote_identifier(NameStr(formDict->dictname)));
ReleaseSysCache(tup);
break;
}
@@ -3097,7 +3098,7 @@ getObjectIdentity(const ObjectAddress *object)
object->objectId);
formTmpl = (Form_pg_ts_template) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(formTmpl->tmplname)));
+ quote_identifier(NameStr(formTmpl->tmplname)));
ReleaseSysCache(tup);
break;
}
@@ -3121,7 +3122,7 @@ getObjectIdentity(const ObjectAddress *object)
case OCLASS_ROLE:
{
- char *username;
+ char *username;
username = GetUserNameFromId(object->objectId);
appendStringInfo(&buffer, "%s",
@@ -3229,11 +3230,11 @@ getObjectIdentity(const ObjectAddress *object)
appendStringInfo(&buffer,
"for role %s",
- quote_identifier(GetUserNameFromId(defacl->defaclrole)));
+ quote_identifier(GetUserNameFromId(defacl->defaclrole)));
if (OidIsValid(defacl->defaclnamespace))
{
- char *schema;
+ char *schema;
schema = get_namespace_name(defacl->defaclnamespace);
appendStringInfo(&buffer,
@@ -3291,7 +3292,7 @@ getObjectIdentity(const ObjectAddress *object)
object->objectId);
trigForm = (Form_pg_event_trigger) GETSTRUCT(tup);
appendStringInfo(&buffer, "%s",
- quote_identifier(NameStr(trigForm->evtname)));
+ quote_identifier(NameStr(trigForm->evtname)));
ReleaseSysCache(tup);
break;
}
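These identity strings surface through the new pg_identify_object(classid, objid, objsubid) SQL function, which returns (type, schema, name, identity); called on a table's pg_class row, for example, it yields type "table" plus a quoted, schema-qualified identity.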
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 7ddadcce4da..a8eb4cbc452 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -682,7 +682,7 @@ RenameConstraintById(Oid conId, const char *newname)
*/
void
AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
- Oid newNspId, bool isType, ObjectAddresses *objsMoved)
+ Oid newNspId, bool isType, ObjectAddresses *objsMoved)
{
Relation conRel;
ScanKeyData key[1];
@@ -715,7 +715,7 @@ AlterConstraintNamespaces(Oid ownerId, Oid oldNspId,
while (HeapTupleIsValid((tup = systable_getnext(scan))))
{
Form_pg_constraint conform = (Form_pg_constraint) GETSTRUCT(tup);
- ObjectAddress thisobj;
+ ObjectAddress thisobj;
thisobj.classId = ConstraintRelationId;
thisobj.objectId = HeapTupleGetOid(tup);
diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c
index 8136f1143f1..7e746f96676 100644
--- a/src/backend/catalog/pg_enum.c
+++ b/src/backend/catalog/pg_enum.c
@@ -180,7 +180,7 @@ AddEnumLabel(Oid enumTypeOid,
const char *newVal,
const char *neighbor,
bool newValIsAfter,
- bool skipIfExists)
+ bool skipIfExists)
{
Relation pg_enum;
Oid newOid;
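The skipIfExists flag is what backs ALTER TYPE ... ADD VALUE IF NOT EXISTS, letting a re-run DDL script tolerate an already-present label; e.g., for a hypothetical enum: ALTER TYPE mood ADD VALUE IF NOT EXISTS 'meh' BEFORE 'ok'.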
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index 802b9840e86..3c4fedbd49c 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -92,11 +92,11 @@ validOperatorName(const char *name)
return false;
/*
- * For SQL standard compatibility, '+' and '-' cannot be the last char of a
- * multi-char operator unless the operator contains chars that are not in
- * SQL operators. The idea is to lex '=-' as two operators, but not to
- * forbid operator names like '?-' that could not be sequences of standard SQL
- * operators.
+ * For SQL standard compatibility, '+' and '-' cannot be the last char of
+ * a multi-char operator unless the operator contains chars that are not
+ * in SQL operators. The idea is to lex '=-' as two operators, but not to
+ * forbid operator names like '?-' that could not be sequences of standard
+ * SQL operators.
*/
if (len > 1 &&
(name[len - 1] == '+' ||
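So, per the rule above, '=-' lexes as '=' followed by '-' (SELECT 1 =- 1 is read as 1 = -1), while '?-' remains a single operator token, since '?' never occurs in standard SQL operators.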
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 0b70adc4795..2a98ca95981 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -406,7 +406,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change return type of existing function"),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
/*
* If it returns RECORD, check for possible change of record type
@@ -430,7 +430,7 @@ ProcedureCreate(const char *procedureName,
errmsg("cannot change return type of existing function"),
errdetail("Row type defined by OUT parameters is different."),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
}
/*
@@ -473,7 +473,7 @@ ProcedureCreate(const char *procedureName,
errmsg("cannot change name of input parameter \"%s\"",
old_arg_names[j]),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
}
}
@@ -497,7 +497,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot remove parameter defaults from existing function"),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
proargdefaults = SysCacheGetAttr(PROCNAMEARGSNSP, oldtup,
Anum_pg_proc_proargdefaults,
@@ -524,7 +524,7 @@ ProcedureCreate(const char *procedureName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change data type of existing parameter default value"),
errhint("Use DROP FUNCTION %s first.",
- format_procedure(HeapTupleGetOid(oldtup)))));
+ format_procedure(HeapTupleGetOid(oldtup)))));
newlc = lnext(newlc);
}
}
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index e411372fec2..7de4420fa3d 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -1382,7 +1382,7 @@ shdepReassignOwned(List *roleids, Oid newrole)
AlterEventTriggerOwner_oid(sdepForm->objid, newrole);
break;
- /* Generic alter owner cases */
+ /* Generic alter owner cases */
case CollationRelationId:
case ConversionRelationId:
case OperatorRelationId:
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index c43bebce851..971a149d590 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -505,13 +505,12 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
smgrcreate(reln, MAIN_FORKNUM, true);
/*
- * Before we perform the truncation, update minimum recovery point
- * to cover this WAL record. Once the relation is truncated, there's
- * no going back. The buffer manager enforces the WAL-first rule
- * for normal updates to relation files, so that the minimum recovery
- * point is always updated before the corresponding change in the
- * data file is flushed to disk. We have to do the same manually
- * here.
+ * Before we perform the truncation, update minimum recovery point to
+ * cover this WAL record. Once the relation is truncated, there's no
+ * going back. The buffer manager enforces the WAL-first rule for
+ * normal updates to relation files, so that the minimum recovery
+ * point is always updated before the corresponding change in the data
+ * file is flushed to disk. We have to do the same manually here.
*
* Doing this before the truncation means that if the truncation fails
* for some reason, you cannot start up the system even after restart,
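The ordering this comment insists on is the general write-ahead discipline: the record covering a change must be durable before the change itself becomes irreversible. A minimal POSIX illustration of that ordering follows; the file names and record format are invented, and the backend enforces this through the minimum recovery point machinery in xlog.c rather than with code like this:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Sketch: never truncate the data file before the log record is on disk. */
    static int
    log_then_truncate(const char *datafile, off_t new_len)
    {
        char record[128];
        int n;
        int logfd = open("wal.log", O_WRONLY | O_APPEND | O_CREAT, 0600);

        if (logfd < 0)
            return -1;

        /* 1. describe the intended change in the log */
        n = snprintf(record, sizeof(record), "TRUNCATE %s to %lld\n",
                     datafile, (long long) new_len);
        if (n < 0 || write(logfd, record, (size_t) n) != (ssize_t) n)
        {
            close(logfd);
            return -1;
        }

        /* 2. make the record durable: the change is now covered by the log */
        if (fsync(logfd) != 0)
        {
            close(logfd);
            return -1;
        }
        close(logfd);

        /* 3. only now perform the irreversible step */
        return truncate(datafile, new_len);
    }

    int
    main(void)
    {
        return log_then_truncate("data.bin", 0) == 0 ? 0 : 1;
    }

If step 3 ran first and the machine crashed before step 2, recovery would find no record covering the truncation, which is the "no going back" hazard the comment is guarding against.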
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index d34a102ee61..4a03786210a 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -217,13 +217,13 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters)
/*
* Most of the argument-checking is done inside of AggregateCreate
*/
- return AggregateCreate(aggName, /* aggregate name */
+ return AggregateCreate(aggName, /* aggregate name */
aggNamespace, /* namespace */
- aggArgTypes, /* input data type(s) */
+ aggArgTypes, /* input data type(s) */
numArgs,
transfuncName, /* step function name */
finalfuncName, /* final function name */
sortoperatorName, /* sort operator name */
- transTypeId, /* transition data type */
+ transTypeId, /* transition data type */
initval); /* initial condition */
}
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index 665b3804d57..178c97949dc 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -62,7 +62,7 @@
#include "utils/tqual.h"
-static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
+static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
/*
* Raise an error to the effect that an object of the given name is already
@@ -71,7 +71,7 @@ static Oid AlterObjectNamespace_internal(Relation rel, Oid objid, Oid nspOid);
static void
report_name_conflict(Oid classId, const char *name)
{
- char *msgfmt;
+ char *msgfmt;
switch (classId)
{
@@ -100,7 +100,7 @@ report_name_conflict(Oid classId, const char *name)
static void
report_namespace_conflict(Oid classId, const char *name, Oid nspOid)
{
- char *msgfmt;
+ char *msgfmt;
Assert(OidIsValid(nspOid));
@@ -221,10 +221,10 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
}
/*
- * Check for duplicate name (more friendly than unique-index failure).
- * Since this is just a friendliness check, we can just skip it in cases
- * where there isn't suitable support.
- */
+ * Check for duplicate name (more friendly than unique-index failure).
+ * Since this is just a friendliness check, we can just skip it in cases
+ * where there isn't suitable support.
+ */
if (classId == ProcedureRelationId)
{
Form_pg_proc proc = (Form_pg_proc) GETSTRUCT(oldtup);
@@ -355,9 +355,9 @@ ExecRenameStmt(RenameStmt *stmt)
case OBJECT_TSPARSER:
case OBJECT_TSTEMPLATE:
{
- ObjectAddress address;
- Relation catalog;
- Relation relation;
+ ObjectAddress address;
+ Relation catalog;
+ Relation relation;
address = get_object_address(stmt->renameType,
stmt->object, stmt->objarg,
@@ -377,7 +377,7 @@ ExecRenameStmt(RenameStmt *stmt)
default:
elog(ERROR, "unrecognized rename stmt type: %d",
(int) stmt->renameType);
- return InvalidOid; /* keep compiler happy */
+ return InvalidOid; /* keep compiler happy */
}
}
@@ -699,7 +699,7 @@ ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
return AlterEventTriggerOwner(strVal(linitial(stmt->object)),
newowner);
- /* Generic cases */
+ /* Generic cases */
case OBJECT_AGGREGATE:
case OBJECT_COLLATION:
case OBJECT_CONVERSION:
@@ -716,7 +716,7 @@ ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
Relation catalog;
Relation relation;
Oid classId;
- ObjectAddress address;
+ ObjectAddress address;
address = get_object_address(stmt->objectType,
stmt->object,
@@ -804,13 +804,13 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
/* Superusers can bypass permission checks */
if (!superuser())
{
- AclObjectKind aclkind = get_object_aclkind(classId);
+ AclObjectKind aclkind = get_object_aclkind(classId);
/* must be owner */
if (!has_privs_of_role(GetUserId(), old_ownerId))
{
- char *objname;
- char namebuf[NAMEDATALEN];
+ char *objname;
+ char namebuf[NAMEDATALEN];
if (Anum_name != InvalidAttrNumber)
{
@@ -833,7 +833,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
/* New owner must have CREATE privilege on namespace */
if (OidIsValid(namespaceId))
{
- AclResult aclresult;
+ AclResult aclresult;
aclresult = pg_namespace_aclcheck(namespaceId, new_ownerId,
ACL_CREATE);
@@ -861,7 +861,7 @@ AlterObjectOwner_internal(Relation rel, Oid objectId, Oid new_ownerId)
Anum_acl, RelationGetDescr(rel), &isnull);
if (!isnull)
{
- Acl *newAcl;
+ Acl *newAcl;
newAcl = aclnewowner(DatumGetAclP(datum),
old_ownerId, new_ownerId);
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 9845cf9a4d7..f7ebd1a650d 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -1147,7 +1147,7 @@ asyncQueueUnregister(void)
Assert(listenChannels == NIL); /* else caller error */
- if (!amRegisteredListener) /* nothing to do */
+ if (!amRegisteredListener) /* nothing to do */
return;
LWLockAcquire(AsyncQueueLock, LW_SHARED);
@@ -1519,7 +1519,7 @@ AtAbort_Notify(void)
/*
* If we LISTEN but then roll back the transaction after PreCommit_Notify,
* we have registered as a listener but have not made any entry in
- * listenChannels. In that case, deregister again.
+ * listenChannels. In that case, deregister again.
*/
if (amRegisteredListener && listenChannels == NIL)
asyncQueueUnregister();
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 878b6254f54..095d5e42d94 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -570,7 +570,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid,
bool is_system_catalog;
bool swap_toast_by_content;
TransactionId frozenXid;
- MultiXactId frozenMulti;
+ MultiXactId frozenMulti;
/* Mark the correct index as clustered */
if (OidIsValid(indexOid))
@@ -746,7 +746,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
bool is_system_catalog;
TransactionId OldestXmin;
TransactionId FreezeXid;
- MultiXactId MultiXactFrzLimit;
+ MultiXactId MultiXactFrzLimit;
RewriteState rwstate;
bool use_sort;
Tuplesortstate *tuplesort;
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index ba4cf3e942a..31819cce1d8 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -126,7 +126,7 @@ typedef struct CopyStateData
List *force_notnull; /* list of column names */
bool *force_notnull_flags; /* per-column CSV FNN flags */
bool convert_selectively; /* do selective binary conversion? */
- List *convert_select; /* list of column names (can be NIL) */
+ List *convert_select; /* list of column names (can be NIL) */
bool *convert_select_flags; /* per-column CSV/TEXT CS flags */
/* these are just for error messages, see CopyFromErrorCallback */
@@ -183,7 +183,7 @@ typedef struct CopyStateData
*/
StringInfoData line_buf;
bool line_buf_converted; /* converted to server encoding? */
- bool line_buf_valid; /* contains the row being processed? */
+ bool line_buf_valid; /* contains the row being processed? */
/*
* Finally, raw_buf holds raw data read from the data source (file or
@@ -501,9 +501,9 @@ CopySendEndOfRow(CopyState cstate)
ClosePipeToProgram(cstate);
/*
- * If ClosePipeToProgram() didn't throw an error,
- * the program terminated normally, but closed the
- * pipe first. Restore errno, and throw an error.
+ * If ClosePipeToProgram() didn't throw an error, the
+ * program terminated normally, but closed the pipe
+ * first. Restore errno, and throw an error.
*/
errno = EPIPE;
}
@@ -781,7 +781,7 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
bool is_from = stmt->is_from;
bool pipe = (stmt->filename == NULL);
Relation rel;
- Oid relid;
+ Oid relid;
/* Disallow COPY to/from file or program except to superusers. */
if (!pipe && !superuser())
@@ -789,15 +789,15 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
if (stmt->is_program)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to COPY to or from an external program"),
+ errmsg("must be superuser to COPY to or from an external program"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
}
if (stmt->relation)
@@ -1022,9 +1022,9 @@ ProcessCopyOptions(CopyState cstate,
else if (strcmp(defel->defname, "convert_selectively") == 0)
{
/*
- * Undocumented, not-accessible-from-SQL option: convert only
- * the named columns to binary form, storing the rest as NULLs.
- * It's allowed for the column list to be NIL.
+ * Undocumented, not-accessible-from-SQL option: convert only the
+ * named columns to binary form, storing the rest as NULLs. It's
+ * allowed for the column list to be NIL.
*/
if (cstate->convert_selectively)
ereport(ERROR,
@@ -1403,7 +1403,7 @@ BeginCopy(bool is_from,
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg_internal("selected column \"%s\" not referenced by COPY",
- NameStr(tupDesc->attrs[attnum - 1]->attname))));
+ NameStr(tupDesc->attrs[attnum - 1]->attname))));
cstate->convert_select_flags[attnum - 1] = true;
}
}
@@ -1436,7 +1436,7 @@ BeginCopy(bool is_from,
static void
ClosePipeToProgram(CopyState cstate)
{
- int pclose_rc;
+ int pclose_rc;
Assert(cstate->is_program);
@@ -1482,7 +1482,7 @@ BeginCopyTo(Relation rel,
Node *query,
const char *queryString,
const char *filename,
- bool is_program,
+ bool is_program,
List *attnamelist,
List *options)
{
@@ -1546,7 +1546,7 @@ BeginCopyTo(Relation rel,
}
else
{
- mode_t oumask; /* Pre-existing umask value */
+ mode_t oumask; /* Pre-existing umask value */
struct stat st;
/*
@@ -1556,7 +1556,7 @@ BeginCopyTo(Relation rel,
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask(S_IWGRP | S_IWOTH);
cstate->copy_file = AllocateFile(cstate->filename, PG_BINARY_W);
@@ -1929,8 +1929,8 @@ CopyFromErrorCallback(void *arg)
* Error is relevant to a particular line.
*
* If line_buf still contains the correct line, and it's already
- * transcoded, print it. If it's still in a foreign encoding,
- * it's quite likely that the error is precisely a failure to do
+ * transcoded, print it. If it's still in a foreign encoding, it's
+ * quite likely that the error is precisely a failure to do
* encoding conversion (ie, bad data). We dare not try to convert
* it, and at present there's no way to regurgitate it without
* conversion. So we have to punt and just report the line number.
@@ -2096,23 +2096,22 @@ CopyFrom(CopyState cstate)
}
/*
- * Optimize if new relfilenode was created in this subxact or
- * one of its committed children and we won't see those rows later
- * as part of an earlier scan or command. This ensures that if this
- * subtransaction aborts then the frozen rows won't be visible
- * after xact cleanup. Note that the stronger test of exactly
- * which subtransaction created it is crucial for correctness
- * of this optimisation.
+ * Optimize if new relfilenode was created in this subxact or one of its
+ * committed children and we won't see those rows later as part of an
+ * earlier scan or command. This ensures that if this subtransaction
+ * aborts then the frozen rows won't be visible after xact cleanup. Note
+ * that the stronger test of exactly which subtransaction created it is
+ * crucial for correctness of this optimisation.
*/
if (cstate->freeze)
{
if (!ThereAreNoPriorRegisteredSnapshots() || !ThereAreNoReadyPortals())
ereport(ERROR,
(ERRCODE_INVALID_TRANSACTION_STATE,
- errmsg("cannot perform FREEZE because of prior transaction activity")));
+ errmsg("cannot perform FREEZE because of prior transaction activity")));
if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() &&
- cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
+ cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
ereport(ERROR,
(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE,
errmsg("cannot perform FREEZE because the table was not created or truncated in the current subtransaction")));
@@ -2427,7 +2426,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
CopyState
BeginCopyFrom(Relation rel,
const char *filename,
- bool is_program,
+ bool is_program,
List *attnamelist,
List *options)
{
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index 14973f8e7c4..2bfe5fba877 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -173,7 +173,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
int
GetIntoRelEFlags(IntoClause *intoClause)
{
- int flags;
+ int flags;
/*
* We need to tell the executor whether it has to produce OIDs or not,
@@ -348,7 +348,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
if (is_matview)
{
/* StoreViewQuery scribbles on tree, so make a copy */
- Query *query = (Query *) copyObject(into->viewQuery);
+ Query *query = (Query *) copyObject(into->viewQuery);
StoreViewQuery(intoRelationId, query, false);
CommandCounterIncrement();
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index b3911bff350..0e10a752180 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -788,7 +788,7 @@ dropdb(const char *dbname, bool missing_ok)
pgdbrel = heap_open(DatabaseRelationId, RowExclusiveLock);
if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
- &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
+ &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
{
if (!missing_ok)
{
@@ -1043,7 +1043,7 @@ movedb(const char *dbname, const char *tblspcname)
pgdbrel = heap_open(DatabaseRelationId, RowExclusiveLock);
if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
- NULL, NULL, NULL, NULL, NULL, &src_tblspcoid, NULL, NULL))
+ NULL, NULL, NULL, NULL, NULL, &src_tblspcoid, NULL, NULL))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist", dbname)));
@@ -1334,7 +1334,7 @@ Oid
AlterDatabase(AlterDatabaseStmt *stmt, bool isTopLevel)
{
Relation rel;
- Oid dboid;
+ Oid dboid;
HeapTuple tuple,
newtuple;
ScanKeyData scankey;
@@ -1882,8 +1882,11 @@ static int
errdetail_busy_db(int notherbackends, int npreparedxacts)
{
if (notherbackends > 0 && npreparedxacts > 0)
- /* We don't deal with singular versus plural here, since gettext
- * doesn't support multiple plurals in one string. */
+
+ /*
+ * We don't deal with singular versus plural here, since gettext
+ * doesn't support multiple plurals in one string.
+ */
errdetail("There are %d other session(s) and %d prepared transaction(s) using the database.",
notherbackends, npreparedxacts);
else if (notherbackends > 0)
@@ -1893,7 +1896,7 @@ errdetail_busy_db(int notherbackends, int npreparedxacts)
notherbackends);
else
errdetail_plural("There is %d prepared transaction using the database.",
- "There are %d prepared transactions using the database.",
+ "There are %d prepared transactions using the database.",
npreparedxacts,
npreparedxacts);
return 0; /* just to keep ereport macro happy */
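The limitation that comment alludes to is real: gettext's plural facility selects a message form based on a single count, so one translatable string cannot pluralize two counts independently. A small standalone illustration using plain ngettext(), the same facility the errdetail_plural() call above relies on when NLS is enabled (this toy is not the ereport machinery; on non-glibc systems it may need -lintl):

    #include <libintl.h>
    #include <stdio.h>

    static void
    report(int nbackends, int nprepared)
    {
        /* a single count per string is the case ngettext() handles */
        printf(ngettext("There is %d prepared transaction.\n",
                        "There are %d prepared transactions.\n",
                        nprepared),
               nprepared);

        /*
         * Two independent counts in one string have no plural form that
         * ngettext() can select, hence the "(s)" wording used when both
         * counts are nonzero.
         */
        printf("There are %d other session(s) and %d prepared transaction(s).\n",
               nbackends, nprepared);
    }

    int
    main(void)
    {
        report(2, 1);
        return 0;
    }

Without a loaded translation catalog, ngettext() simply returns the English singular or plural form depending on the count, which is enough to show why the two-count message punts on plural handling.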
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index 8af96e12b95..93d16798e24 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -47,16 +47,16 @@ typedef struct EventTriggerQueryState
{
slist_head SQLDropList;
bool in_sql_drop;
- MemoryContext cxt;
+ MemoryContext cxt;
struct EventTriggerQueryState *previous;
} EventTriggerQueryState;
-EventTriggerQueryState *currentEventTriggerState = NULL;
+EventTriggerQueryState *currentEventTriggerState = NULL;
typedef struct
{
- const char *obtypename;
- bool supported;
+ const char *obtypename;
+ bool supported;
} event_trigger_support_data;
typedef enum
@@ -67,61 +67,61 @@ typedef enum
} event_trigger_command_tag_check_result;
static event_trigger_support_data event_trigger_support[] = {
- { "AGGREGATE", true },
- { "CAST", true },
- { "CONSTRAINT", true },
- { "COLLATION", true },
- { "CONVERSION", true },
- { "DATABASE", false },
- { "DOMAIN", true },
- { "EXTENSION", true },
- { "EVENT TRIGGER", false },
- { "FOREIGN DATA WRAPPER", true },
- { "FOREIGN TABLE", true },
- { "FUNCTION", true },
- { "INDEX", true },
- { "LANGUAGE", true },
- { "MATERIALIZED VIEW", true },
- { "OPERATOR", true },
- { "OPERATOR CLASS", true },
- { "OPERATOR FAMILY", true },
- { "ROLE", false },
- { "RULE", true },
- { "SCHEMA", true },
- { "SEQUENCE", true },
- { "SERVER", true },
- { "TABLE", true },
- { "TABLESPACE", false},
- { "TRIGGER", true },
- { "TEXT SEARCH CONFIGURATION", true },
- { "TEXT SEARCH DICTIONARY", true },
- { "TEXT SEARCH PARSER", true },
- { "TEXT SEARCH TEMPLATE", true },
- { "TYPE", true },
- { "USER MAPPING", true },
- { "VIEW", true },
- { NULL, false }
+ {"AGGREGATE", true},
+ {"CAST", true},
+ {"CONSTRAINT", true},
+ {"COLLATION", true},
+ {"CONVERSION", true},
+ {"DATABASE", false},
+ {"DOMAIN", true},
+ {"EXTENSION", true},
+ {"EVENT TRIGGER", false},
+ {"FOREIGN DATA WRAPPER", true},
+ {"FOREIGN TABLE", true},
+ {"FUNCTION", true},
+ {"INDEX", true},
+ {"LANGUAGE", true},
+ {"MATERIALIZED VIEW", true},
+ {"OPERATOR", true},
+ {"OPERATOR CLASS", true},
+ {"OPERATOR FAMILY", true},
+ {"ROLE", false},
+ {"RULE", true},
+ {"SCHEMA", true},
+ {"SEQUENCE", true},
+ {"SERVER", true},
+ {"TABLE", true},
+ {"TABLESPACE", false},
+ {"TRIGGER", true},
+ {"TEXT SEARCH CONFIGURATION", true},
+ {"TEXT SEARCH DICTIONARY", true},
+ {"TEXT SEARCH PARSER", true},
+ {"TEXT SEARCH TEMPLATE", true},
+ {"TYPE", true},
+ {"USER MAPPING", true},
+ {"VIEW", true},
+ {NULL, false}
};
/* Support for dropped objects */
typedef struct SQLDropObject
{
- ObjectAddress address;
- const char *schemaname;
- const char *objname;
- const char *objidentity;
- const char *objecttype;
- slist_node next;
+ ObjectAddress address;
+ const char *schemaname;
+ const char *objname;
+ const char *objidentity;
+ const char *objecttype;
+ slist_node next;
} SQLDropObject;
static void AlterEventTriggerOwner_internal(Relation rel,
- HeapTuple tup,
- Oid newOwnerId);
+ HeapTuple tup,
+ Oid newOwnerId);
static event_trigger_command_tag_check_result check_ddl_tag(const char *tag);
static void error_duplicate_filter_variable(const char *defname);
static Datum filter_list_to_array(List *filterlist);
static Oid insert_event_trigger_tuple(char *trigname, char *eventname,
- Oid evtOwner, Oid funcoid, List *tags);
+ Oid evtOwner, Oid funcoid, List *tags);
static void validate_ddl_tags(const char *filtervar, List *taglist);
static void EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata);
@@ -145,24 +145,24 @@ CreateEventTrigger(CreateEventTrigStmt *stmt)
*/
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to create event trigger \"%s\"",
- stmt->trigname),
- errhint("Must be superuser to create an event trigger.")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied to create event trigger \"%s\"",
+ stmt->trigname),
+ errhint("Must be superuser to create an event trigger.")));
/* Validate event name. */
if (strcmp(stmt->eventname, "ddl_command_start") != 0 &&
strcmp(stmt->eventname, "ddl_command_end") != 0 &&
strcmp(stmt->eventname, "sql_drop") != 0)
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized event name \"%s\"",
- stmt->eventname)));
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized event name \"%s\"",
+ stmt->eventname)));
/* Validate filter conditions. */
- foreach (lc, stmt->whenclause)
+ foreach(lc, stmt->whenclause)
{
- DefElem *def = (DefElem *) lfirst(lc);
+ DefElem *def = (DefElem *) lfirst(lc);
if (strcmp(def->defname, "tag") == 0)
{
@@ -172,8 +172,8 @@ CreateEventTrigger(CreateEventTrigStmt *stmt)
}
else
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized filter variable \"%s\"", def->defname)));
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized filter variable \"%s\"", def->defname)));
}
/* Validate tag list, if any. */
@@ -192,7 +192,7 @@ CreateEventTrigger(CreateEventTrigStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("event trigger \"%s\" already exists",
- stmt->trigname)));
+ stmt->trigname)));
/* Find and validate the trigger function. */
funcoid = LookupFuncName(stmt->funcname, 0, NULL, false);
@@ -216,7 +216,7 @@ validate_ddl_tags(const char *filtervar, List *taglist)
{
ListCell *lc;
- foreach (lc, taglist)
+ foreach(lc, taglist)
{
const char *tag = strVal(lfirst(lc));
event_trigger_command_tag_check_result result;
@@ -226,13 +226,13 @@ validate_ddl_tags(const char *filtervar, List *taglist)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("filter value \"%s\" not recognized for filter variable \"%s\"",
- tag, filtervar)));
+ tag, filtervar)));
if (result == EVENT_TRIGGER_COMMAND_TAG_NOT_SUPPORTED)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s represents an SQL statement name */
- errmsg("event triggers are not supported for %s",
- tag)));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ /* translator: %s represents an SQL statement name */
+ errmsg("event triggers are not supported for %s",
+ tag)));
}
}
@@ -240,7 +240,7 @@ static event_trigger_command_tag_check_result
check_ddl_tag(const char *tag)
{
const char *obtypename;
- event_trigger_support_data *etsd;
+ event_trigger_support_data *etsd;
/*
* Handle some idiosyncratic special cases.
@@ -287,7 +287,7 @@ error_duplicate_filter_variable(const char *defname)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("filter variable \"%s\" specified more than once",
- defname)));
+ defname)));
}
/*
@@ -297,12 +297,13 @@ static Oid
insert_event_trigger_tuple(char *trigname, char *eventname, Oid evtOwner,
Oid funcoid, List *taglist)
{
- Relation tgrel;
- Oid trigoid;
+ Relation tgrel;
+ Oid trigoid;
HeapTuple tuple;
Datum values[Natts_pg_trigger];
bool nulls[Natts_pg_trigger];
- ObjectAddress myself, referenced;
+ ObjectAddress myself,
+ referenced;
/* Open pg_event_trigger. */
tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock);
@@ -415,9 +416,9 @@ AlterEventTrigger(AlterEventTrigStmt *stmt)
{
Relation tgrel;
HeapTuple tup;
- Oid trigoid;
+ Oid trigoid;
Form_pg_event_trigger evtForm;
- char tgenabled = stmt->tgenabled;
+ char tgenabled = stmt->tgenabled;
tgrel = heap_open(EventTriggerRelationId, RowExclusiveLock);
@@ -427,7 +428,7 @@ AlterEventTrigger(AlterEventTrigStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("event trigger \"%s\" does not exist",
- stmt->trigname)));
+ stmt->trigname)));
trigoid = HeapTupleGetOid(tup);
@@ -498,7 +499,7 @@ AlterEventTriggerOwner_oid(Oid trigOid, Oid newOwnerId)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("event trigger with OID %u does not exist", trigOid)));
+ errmsg("event trigger with OID %u does not exist", trigOid)));
AlterEventTriggerOwner_internal(rel, tup, newOwnerId);
@@ -528,9 +529,9 @@ AlterEventTriggerOwner_internal(Relation rel, HeapTuple tup, Oid newOwnerId)
if (!superuser_arg(newOwnerId))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to change owner of event trigger \"%s\"",
- NameStr(form->evtname)),
- errhint("The owner of an event trigger must be a superuser.")));
+ errmsg("permission denied to change owner of event trigger \"%s\"",
+ NameStr(form->evtname)),
+ errhint("The owner of an event trigger must be a superuser.")));
form->evtowner = newOwnerId;
simple_heap_update(rel, &tup->t_self, tup);
@@ -570,7 +571,7 @@ get_event_trigger_oid(const char *trigname, bool missing_ok)
* tags matching.
*/
static bool
-filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
+filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
{
/*
* Filter by session replication role, knowing that we never see disabled
@@ -598,7 +599,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem *item)
}
/*
- * Setup for running triggers for the given event. Return value is an OID list
+ * Setup for running triggers for the given event. Return value is an OID list
* of functions to run; if there are any, trigdata is filled with an
* appropriate EventTriggerData for them to receive.
*/
@@ -617,7 +618,7 @@ EventTriggerCommonSetup(Node *parsetree,
* invoked to match up exactly with the list that CREATE EVENT TRIGGER
* accepts. This debugging cross-check will throw an error if this
* function is invoked for a command tag that CREATE EVENT TRIGGER won't
- * accept. (Unfortunately, there doesn't seem to be any simple, automated
+ * accept. (Unfortunately, there doesn't seem to be any simple, automated
* way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that
* never reaches this control point.)
*
@@ -646,15 +647,15 @@ EventTriggerCommonSetup(Node *parsetree,
tag = CreateCommandTag(parsetree);
/*
- * Filter list of event triggers by command tag, and copy them into
- * our memory context. Once we start running the command trigers, or
- * indeed once we do anything at all that touches the catalogs, an
- * invalidation might leave cachelist pointing at garbage, so we must
- * do this before we can do much else.
+ * Filter list of event triggers by command tag, and copy them into our
+ * memory context. Once we start running the command triggers, or indeed
+ * once we do anything at all that touches the catalogs, an invalidation
+ * might leave cachelist pointing at garbage, so we must do this before we
+ * can do much else.
*/
- foreach (lc, cachelist)
+ foreach(lc, cachelist)
{
- EventTriggerCacheItem *item = lfirst(lc);
+ EventTriggerCacheItem *item = lfirst(lc);
if (filter_event_trigger(&tag, item))
{
@@ -682,7 +683,7 @@ void
EventTriggerDDLCommandStart(Node *parsetree)
{
List *runlist;
- EventTriggerData trigdata;
+ EventTriggerData trigdata;
/*
* Event Triggers are completely disabled in standalone mode. There are
@@ -704,7 +705,7 @@ EventTriggerDDLCommandStart(Node *parsetree)
return;
runlist = EventTriggerCommonSetup(parsetree,
- EVT_DDLCommandStart, "ddl_command_start",
+ EVT_DDLCommandStart, "ddl_command_start",
&trigdata);
if (runlist == NIL)
return;
@@ -716,8 +717,8 @@ EventTriggerDDLCommandStart(Node *parsetree)
list_free(runlist);
/*
- * Make sure anything the event triggers did will be visible to
- * the main command.
+ * Make sure anything the event triggers did will be visible to the main
+ * command.
*/
CommandCounterIncrement();
}
@@ -729,7 +730,7 @@ void
EventTriggerDDLCommandEnd(Node *parsetree)
{
List *runlist;
- EventTriggerData trigdata;
+ EventTriggerData trigdata;
/*
* See EventTriggerDDLCommandStart for a discussion about why event
@@ -745,8 +746,8 @@ EventTriggerDDLCommandEnd(Node *parsetree)
return;
/*
- * Make sure anything the main command did will be visible to the
- * event triggers.
+ * Make sure anything the main command did will be visible to the event
+ * triggers.
*/
CommandCounterIncrement();
@@ -764,7 +765,7 @@ void
EventTriggerSQLDrop(Node *parsetree)
{
List *runlist;
- EventTriggerData trigdata;
+ EventTriggerData trigdata;
/*
* See EventTriggerDDLCommandStart for a discussion about why event
@@ -774,10 +775,11 @@ EventTriggerSQLDrop(Node *parsetree)
return;
/*
- * Use current state to determine whether this event fires at all. If there
- * are no triggers for the sql_drop event, then we don't have anything to do
- * here. Note that dropped object collection is disabled if this is the case,
- * so even if we were to try to run, the list would be empty.
+ * Use current state to determine whether this event fires at all. If
+ * there are no triggers for the sql_drop event, then we don't have
+ * anything to do here. Note that dropped object collection is disabled
+ * if this is the case, so even if we were to try to run, the list would
+ * be empty.
*/
if (!currentEventTriggerState ||
slist_is_empty(&currentEventTriggerState->SQLDropList))
@@ -786,24 +788,25 @@ EventTriggerSQLDrop(Node *parsetree)
runlist = EventTriggerCommonSetup(parsetree,
EVT_SQLDrop, "sql_drop",
&trigdata);
+
/*
- * Nothing to do if run list is empty. Note this shouldn't happen, because
- * if there are no sql_drop events, then objects-to-drop wouldn't have been
- * collected in the first place and we would have quitted above.
+ * Nothing to do if run list is empty. Note this shouldn't happen,
+ * because if there are no sql_drop events, then objects-to-drop wouldn't
+ * have been collected in the first place and we would have quit above.
*/
if (runlist == NIL)
return;
/*
- * Make sure anything the main command did will be visible to the
- * event triggers.
+ * Make sure anything the main command did will be visible to the event
+ * triggers.
*/
CommandCounterIncrement();
/*
- * Make sure pg_event_trigger_dropped_objects only works when running these
- * triggers. Use PG_TRY to ensure in_sql_drop is reset even when one
- * trigger fails. (This is perhaps not necessary, as the currentState
+ * Make sure pg_event_trigger_dropped_objects only works when running
+ * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when
+ * one trigger fails. (This is perhaps not necessary, as the currentState
* variable will be removed shortly by our caller, but it seems better to
* play safe.)
*/
@@ -832,17 +835,17 @@ EventTriggerSQLDrop(Node *parsetree)
static void
EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata)
{
- MemoryContext context;
- MemoryContext oldcontext;
- ListCell *lc;
- bool first = true;
+ MemoryContext context;
+ MemoryContext oldcontext;
+ ListCell *lc;
+ bool first = true;
/* Guard against stack overflow due to recursive event trigger */
check_stack_depth();
/*
- * Let's evaluate event triggers in their own memory context, so
- * that any leaks get cleaned up promptly.
+ * Let's evaluate event triggers in their own memory context, so that any
+ * leaks get cleaned up promptly.
*/
context = AllocSetContextCreate(CurrentMemoryContext,
"event trigger context",
@@ -852,18 +855,18 @@ EventTriggerInvoke(List *fn_oid_list, EventTriggerData *trigdata)
oldcontext = MemoryContextSwitchTo(context);
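Running each batch of triggers inside a private context, as the AllocSetContextCreate and MemoryContextSwitchTo calls above do, means anything a trigger function leaks dies with the context instead of accumulating in the caller's context. The same pattern, reduced to a toy scratch arena in portable C (the arena and callback types here are invented for illustration; the backend uses the MemoryContext API shown in the diff):

    #include <stdlib.h>

    /* toy arena: remembers allocations so they can be freed in one shot */
    typedef struct Scratch
    {
        void *ptrs[128];
        int n;
    } Scratch;

    static void *
    scratch_alloc(Scratch *s, size_t size)
    {
        if (s->n >= 128)
            return NULL;
        return s->ptrs[s->n++] = malloc(size);
    }

    static void
    scratch_release(Scratch *s)
    {
        while (s->n > 0)
            free(s->ptrs[--s->n]);
    }

    typedef void (*trigger_fn) (Scratch *);

    static void
    demo_trigger(Scratch *ctx)
    {
        char *buf = scratch_alloc(ctx, 64);   /* "leaked" by the trigger */
        (void) buf;
    }

    static void
    invoke_all(trigger_fn *fns, int nfns)
    {
        Scratch ctx = {{0}, 0};

        for (int i = 0; i < nfns; i++)
            fns[i](&ctx);         /* leaks land in ctx, not in the caller */

        scratch_release(&ctx);    /* prompt cleanup, as the comment says */
    }

    int
    main(void)
    {
        trigger_fn fns[] = {demo_trigger, demo_trigger};

        invoke_all(fns, 2);
        return 0;
    }

The real code goes one step further: by switching CurrentMemoryContext, allocations made inside a trigger land in the private context automatically, and the context is torn down once the last trigger returns.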
/* Call each event trigger. */
- foreach (lc, fn_oid_list)
+ foreach(lc, fn_oid_list)
{
- Oid fnoid = lfirst_oid(lc);
- FmgrInfo flinfo;
+ Oid fnoid = lfirst_oid(lc);
+ FmgrInfo flinfo;
FunctionCallInfoData fcinfo;
PgStat_FunctionCallUsage fcusage;
/*
- * We want each event trigger to be able to see the results of
- * the previous event trigger's action. Caller is responsible
- * for any command-counter increment that is needed between the
- * event trigger and anything else in the transaction.
+ * We want each event trigger to be able to see the results of the
+ * previous event trigger's action. Caller is responsible for any
+ * command-counter increment that is needed between the event trigger
+ * and anything else in the transaction.
*/
if (first)
first = false;
@@ -987,6 +990,7 @@ EventTriggerSupportsObjectClass(ObjectClass objclass)
return true;
case MAX_OCLASS:
+
/*
* This shouldn't ever happen, but we keep the case to avoid a
* compiler warning without a "default" clause in the switch.
@@ -1008,7 +1012,7 @@ bool
EventTriggerBeginCompleteQuery(void)
{
EventTriggerQueryState *state;
- MemoryContext cxt;
+ MemoryContext cxt;
/*
* Currently, sql_drop events are the only reason to have event trigger
@@ -1041,7 +1045,7 @@ EventTriggerBeginCompleteQuery(void)
* returned false previously.
*
* Note: this might be called in the PG_CATCH block of a failing transaction,
- * so be wary of running anything unnecessary. (In particular, it's probably
+ * so be wary of running anything unnecessary. (In particular, it's probably
* unwise to try to allocate memory.)
*/
void
@@ -1092,8 +1096,8 @@ trackDroppedObjectsNeeded(void)
void
EventTriggerSQLDropAddObject(ObjectAddress *object)
{
- SQLDropObject *obj;
- MemoryContext oldcxt;
+ SQLDropObject *obj;
+ MemoryContext oldcxt;
if (!currentEventTriggerState)
return;
@@ -1112,8 +1116,9 @@ EventTriggerSQLDropAddObject(ObjectAddress *object)
/*
* Obtain schema names from the object's catalog tuple, if one exists;
- * this lets us skip objects in temp schemas. We trust that ObjectProperty
- * contains all object classes that can be schema-qualified.
+ * this lets us skip objects in temp schemas. We trust that
+ * ObjectProperty contains all object classes that can be
+ * schema-qualified.
*/
if (is_objectclass_supported(object->classId))
{
@@ -1136,7 +1141,7 @@ EventTriggerSQLDropAddObject(ObjectAddress *object)
RelationGetDescr(catalog), &isnull);
if (!isnull)
{
- Oid namespaceId;
+ Oid namespaceId;
namespaceId = DatumGetObjectId(datum);
/* Don't report objects in temp namespaces */
@@ -1189,12 +1194,12 @@ EventTriggerSQLDropAddObject(ObjectAddress *object)
Datum
pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- slist_iter iter;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ slist_iter iter;
/*
* Protect this function from being called out of context
@@ -1203,8 +1208,8 @@ pg_event_trigger_dropped_objects(PG_FUNCTION_ARGS)
!currentEventTriggerState->in_sql_drop)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("%s can only be called in a sql_drop event trigger function",
- "pg_event_trigger_dropped_objects()")));
+ errmsg("%s can only be called in a sql_drop event trigger function",
+ "pg_event_trigger_dropped_objects()")));
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index c6398e3c8e7..91bea517ec8 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -415,8 +415,8 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
instrument_option |= INSTRUMENT_BUFFERS;
/*
- * We always collect timing for the entire statement, even when
- * node-level timing is off, so we don't look at es->timing here.
+ * We always collect timing for the entire statement, even when node-level
+ * timing is off, so we don't look at es->timing here.
*/
INSTR_TIME_SET_CURRENT(starttime);
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 9f0ac9bd50a..38187a837c6 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -968,8 +968,8 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
GetUserId(),
languageOid,
languageValidator,
- prosrc_str, /* converted to text later */
- probin_str, /* converted to text later */
+ prosrc_str, /* converted to text later */
+ probin_str, /* converted to text later */
false, /* not an aggregate */
isWindowFunc,
security,
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 66eae92a4c1..7ea90d07d3c 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -351,7 +351,7 @@ DefineIndex(IndexStmt *stmt,
* (but not VACUUM).
*/
rel = heap_openrv(stmt->relation,
- (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
+ (stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock));
relationId = RelationGetRelid(rel);
namespaceId = RelationGetNamespace(rel);
@@ -774,7 +774,7 @@ DefineIndex(IndexStmt *stmt,
* Drop the reference snapshot. We must do this before waiting out other
* snapshot holders, else we will deadlock against other processes also
* doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one
- * they must wait for. But first, save the snapshot's xmin to use as
+ * they must wait for. But first, save the snapshot's xmin to use as
* limitXmin for GetCurrentVirtualXIDs().
*/
limitXmin = snapshot->xmin;
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 5491c84c766..2ffdca31f6b 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -49,7 +49,7 @@ static void transientrel_receive(TupleTableSlot *slot, DestReceiver *self);
static void transientrel_shutdown(DestReceiver *self);
static void transientrel_destroy(DestReceiver *self);
static void refresh_matview_datafill(DestReceiver *dest, Query *query,
- const char *queryString);
+ const char *queryString);
/*
* SetMatViewPopulatedState
@@ -115,7 +115,7 @@ SetMatViewPopulatedState(Relation relation, bool newstate)
*/
void
ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
- ParamListInfo params, char *completionTag)
+ ParamListInfo params, char *completionTag)
{
Oid matviewOid;
Relation matviewRel;
@@ -130,8 +130,8 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString,
* Get a lock until end of transaction.
*/
matviewOid = RangeVarGetRelidExtended(stmt->relation,
- AccessExclusiveLock, false, false,
- RangeVarCallbackOwnsTable, NULL);
+ AccessExclusiveLock, false, false,
+ RangeVarCallbackOwnsTable, NULL);
matviewRel = heap_open(matviewOid, NoLock);
/* Make sure it is a materialized view. */
@@ -226,7 +226,7 @@ static void
refresh_matview_datafill(DestReceiver *dest, Query *query,
const char *queryString)
{
- List *rewritten;
+ List *rewritten;
PlannedStmt *plan;
QueryDesc *queryDesc;
@@ -295,7 +295,7 @@ static void
transientrel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
DR_transientrel *myState = (DR_transientrel *) self;
- Relation transientrel;
+ Relation transientrel;
transientrel = heap_open(myState->transientoid, NoLock);
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 68ec57ac4b6..f2d78ef6632 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -1683,7 +1683,7 @@ get_am_name(Oid amOid)
* Subroutine for ALTER OPERATOR CLASS SET SCHEMA/RENAME
*
* Is there an operator class with the given name and signature already
- * in the given namespace? If so, raise an appropriate error message.
+ * in the given namespace? If so, raise an appropriate error message.
*/
void
IsThereOpClassInNamespace(const char *opcname, Oid opcmethod,
@@ -1706,7 +1706,7 @@ IsThereOpClassInNamespace(const char *opcname, Oid opcmethod,
* Subroutine for ALTER OPERATOR FAMILY SET SCHEMA/RENAME
*
* Is there an operator family with the given name and signature already
- * in the given namespace? If so, raise an appropriate error message.
+ * in the given namespace? If so, raise an appropriate error message.
*/
void
IsThereOpFamilyInNamespace(const char *opfname, Oid opfmethod,
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index e451414b149..4692b087bef 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -296,15 +296,15 @@ DefineOperator(List *names, List *parameters)
* now have OperatorCreate do all the work.
*/
return
- OperatorCreate(oprName, /* operator name */
+ OperatorCreate(oprName, /* operator name */
oprNamespace, /* namespace */
- typeId1, /* left type id */
- typeId2, /* right type id */
- functionOid, /* function for operator */
- commutatorName, /* optional commutator operator name */
- negatorName, /* optional negator operator name */
- restrictionOid, /* optional restrict. sel. procedure */
- joinOid, /* optional join sel. procedure name */
+ typeId1, /* left type id */
+ typeId2, /* right type id */
+ functionOid, /* function for operator */
+ commutatorName, /* optional commutator operator name */
+ negatorName, /* optional negator operator name */
+ restrictionOid, /* optional restrict. sel. procedure */
+ joinOid, /* optional join sel. procedure name */
canMerge, /* operator merges */
canHash); /* operator hashes */
}
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 1c98c3226d0..6e4c682072d 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -52,8 +52,8 @@ typedef struct
} PLTemplate;
static Oid create_proc_lang(const char *languageName, bool replace,
- Oid languageOwner, Oid handlerOid, Oid inlineOid,
- Oid valOid, bool trusted);
+ Oid languageOwner, Oid handlerOid, Oid inlineOid,
+ Oid valOid, bool trusted);
static PLTemplate *find_language_template(const char *languageName);
/* ---------------------------------------------------------------------
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 49e409a5eed..bffc12ed0e9 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -698,8 +698,8 @@ nextval_internal(Oid relid)
/*
* We must mark the buffer dirty before doing XLogInsert(); see notes in
* SyncOneBuffer(). However, we don't apply the desired changes just yet.
- * This looks like a violation of the buffer update protocol, but it is
- * in fact safe because we hold exclusive lock on the buffer. Any other
+ * This looks like a violation of the buffer update protocol, but it is in
+ * fact safe because we hold exclusive lock on the buffer. Any other
* process, including a checkpoint, that tries to examine the buffer
* contents will block until we release the lock, and then will see the
* final state that we install below.
@@ -1226,8 +1226,8 @@ init_params(List *options, bool isInit,
}
/*
- * We must reset log_cnt when isInit or when changing any parameters
- * that would affect future nextval allocations.
+ * We must reset log_cnt when isInit or when changing any parameters that
+ * would affect future nextval allocations.
*/
if (isInit)
new->log_cnt = 0;
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index fe328349533..a3f4ce2c52e 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -271,7 +271,7 @@ static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
int16 seqNumber, Relation inhRelation);
static int findAttrByName(const char *attributeName, List *schema);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
- Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved);
+ Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved);
static void AlterSeqNamespaces(Relation classRel, Relation rel,
Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved,
LOCKMODE lockmode);
@@ -1141,7 +1141,7 @@ ExecuteTruncate(TruncateStmt *stmt)
{
Oid heap_relid;
Oid toast_relid;
- MultiXactId minmulti;
+ MultiXactId minmulti;
/*
* This effectively deletes all rows in the table, and may be done
@@ -1675,14 +1675,14 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
&found_whole_row);
/*
- * For the moment we have to reject whole-row variables.
- * We could convert them, if we knew the new table's rowtype
- * OID, but that hasn't been assigned yet.
+ * For the moment we have to reject whole-row variables. We
+ * could convert them, if we knew the new table's rowtype OID,
+ * but that hasn't been assigned yet.
*/
if (found_whole_row)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert whole-row table reference"),
+ errmsg("cannot convert whole-row table reference"),
errdetail("Constraint \"%s\" contains a whole-row reference to table \"%s\".",
name,
RelationGetRelationName(relation))));
@@ -2122,7 +2122,7 @@ renameatt_internal(Oid myrelid,
Relation targetrelation;
Relation attrelation;
HeapTuple atttup;
- Form_pg_attribute attform;
+ Form_pg_attribute attform;
int attnum;
/*
@@ -2438,8 +2438,8 @@ RenameConstraint(RenameStmt *stmt)
rename_constraint_internal(relid, typid,
stmt->subname,
stmt->newname,
- stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
- false, /* recursing? */
+ stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
+ false, /* recursing? */
0 /* expected inhcount */ );
}
@@ -2795,7 +2795,7 @@ AlterTableGetLockLevel(List *cmds)
case AT_ColumnDefault:
case AT_ProcessedConstraint: /* becomes AT_AddConstraint */
case AT_AddConstraintRecurse: /* becomes AT_AddConstraint */
- case AT_ReAddConstraint: /* becomes AT_AddConstraint */
+ case AT_ReAddConstraint: /* becomes AT_AddConstraint */
case AT_EnableTrig:
case AT_EnableAlwaysTrig:
case AT_EnableReplicaTrig:
@@ -3294,7 +3294,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
true, false, lockmode);
break;
- case AT_ReAddConstraint: /* Re-add pre-existing check constraint */
+ case AT_ReAddConstraint: /* Re-add pre-existing check
+ * constraint */
ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
false, true, lockmode);
break;
@@ -3855,7 +3856,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
- NameStr(newTupDesc->attrs[attn]->attname)),
+ NameStr(newTupDesc->attrs[attn]->attname)),
errtablecol(oldrel, attn + 1)));
}
@@ -5566,10 +5567,10 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
stmt->deferrable,
stmt->initdeferred,
stmt->primary,
- true, /* update pg_index */
- true, /* remove old dependencies */
+ true, /* update pg_index */
+ true, /* remove old dependencies */
allowSystemTableMods,
- false); /* is_internal */
+ false); /* is_internal */
index_close(indexRel, NoLock);
}
@@ -9023,14 +9024,14 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
!parent_rel->rd_islocaltemp)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation of another session")));
+ errmsg("cannot inherit from temporary relation of another session")));
/* Ditto for the child */
if (child_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
!child_rel->rd_islocaltemp)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit to temporary relation of another session")));
+ errmsg("cannot inherit to temporary relation of another session")));
/*
* Check for duplicates in the list of parents, and determine the highest
@@ -9564,9 +9565,9 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
RelationGetRelid(parent_rel));
/*
- * Post alter hook of this inherits. Since object_access_hook doesn't
- * take multiple object identifiers, we relay oid of parent relation
- * using auxiliary_id argument.
+ * Post alter hook of this inherits. Since object_access_hook doesn't take
+ * multiple object identifiers, we relay oid of parent relation using
+ * auxiliary_id argument.
*/
InvokeObjectPostAlterHookArg(InheritsRelationId,
RelationGetRelid(rel), 0,
@@ -9984,11 +9985,11 @@ AlterTableNamespaceInternal(Relation rel, Oid oldNspOid, Oid nspOid,
void
AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
Oid oldNspOid, Oid newNspOid,
- bool hasDependEntry, ObjectAddresses *objsMoved)
+ bool hasDependEntry, ObjectAddresses *objsMoved)
{
HeapTuple classTup;
Form_pg_class classForm;
- ObjectAddress thisobj;
+ ObjectAddress thisobj;
classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(classTup))
@@ -10024,7 +10025,7 @@ AlterRelationNamespaceInternal(Relation classRel, Oid relOid,
/* Update dependency on schema if caller said so */
if (hasDependEntry &&
changeDependencyFor(RelationRelationId, relOid,
- NamespaceRelationId, oldNspOid, newNspOid) != 1)
+ NamespaceRelationId, oldNspOid, newNspOid) != 1)
elog(ERROR, "failed to change schema dependency for relation \"%s\"",
NameStr(classForm->relname));
@@ -10247,6 +10248,7 @@ PreCommit_on_commit_actions(void)
/* Do nothing (there shouldn't be such entries, actually) */
break;
case ONCOMMIT_DELETE_ROWS:
+
/*
* If this transaction hasn't accessed any temporary
* relations, we can skip truncating ON COMMIT DELETE ROWS
@@ -10379,7 +10381,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid,
* This is intended as a callback for RangeVarGetRelidExtended(). It allows
* the relation to be locked only if (1) it's a plain table, materialized
* view, or TOAST table and (2) the current user is the owner (or the
- * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
+ * superuser). This meets the permission-checking needs of CLUSTER, REINDEX
* TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be
* used by all.
*/
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index a0473498bd2..851947643c2 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -447,7 +447,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
true, /* islocal */
0, /* inhcount */
true, /* isnoinherit */
- isInternal); /* is_internal */
+ isInternal); /* is_internal */
}
/*
@@ -1266,6 +1266,7 @@ renametrig(RenameStmt *stmt)
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
{
tgoid = HeapTupleGetOid(tuple);
+
/*
* Update pg_trigger tuple with new tgname.
*/
@@ -2210,7 +2211,7 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
if (trigdesc && trigdesc->trig_delete_after_row)
{
HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, LockTupleExclusive,
+ tupleid, LockTupleExclusive,
NULL);
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_DELETE,
@@ -2449,7 +2450,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
if (trigdesc && trigdesc->trig_update_after_row)
{
HeapTuple trigtuple = GetTupleForTrigger(estate, NULL, relinfo,
- tupleid, LockTupleExclusive,
+ tupleid, LockTupleExclusive,
NULL);
AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_UPDATE,
@@ -2614,11 +2615,12 @@ ltrmark:;
tuple.t_self = *tid;
test = heap_lock_tuple(relation, &tuple,
estate->es_output_cid,
- lockmode, false /* wait */,
+ lockmode, false /* wait */ ,
false, &buffer, &hufd);
switch (test)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 9efe24417e5..6bc16f198e3 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -598,32 +598,32 @@ DefineType(List *names, List *parameters)
array_type, /* type name */
typeNamespace, /* namespace */
InvalidOid, /* relation oid (n/a here) */
- 0, /* relation kind (ditto) */
- GetUserId(), /* owner's ID */
- -1, /* internal size (always varlena) */
+ 0, /* relation kind (ditto) */
+ GetUserId(), /* owner's ID */
+ -1, /* internal size (always varlena) */
TYPTYPE_BASE, /* type-type (base type) */
TYPCATEGORY_ARRAY, /* type-category (array) */
- false, /* array types are never preferred */
+ false, /* array types are never preferred */
delimiter, /* array element delimiter */
F_ARRAY_IN, /* input procedure */
- F_ARRAY_OUT, /* output procedure */
+ F_ARRAY_OUT, /* output procedure */
F_ARRAY_RECV, /* receive procedure */
F_ARRAY_SEND, /* send procedure */
- typmodinOid, /* typmodin procedure */
+ typmodinOid, /* typmodin procedure */
typmodoutOid, /* typmodout procedure */
F_ARRAY_TYPANALYZE, /* analyze procedure */
- typoid, /* element type ID */
- true, /* yes this is an array type */
+ typoid, /* element type ID */
+ true, /* yes this is an array type */
InvalidOid, /* no further array type */
InvalidOid, /* base type ID */
- NULL, /* never a default type value */
- NULL, /* binary default isn't sent either */
- false, /* never passed by value */
+ NULL, /* never a default type value */
+ NULL, /* binary default isn't sent either */
+ false, /* never passed by value */
alignment, /* see above */
- 'x', /* ARRAY is always toastable */
- -1, /* typMod (Domains only) */
- 0, /* Array dimensions of typbasetype */
- false, /* Type NOT NULL */
+ 'x', /* ARRAY is always toastable */
+ -1, /* typMod (Domains only) */
+ 0, /* Array dimensions of typbasetype */
+ false, /* Type NOT NULL */
collation); /* type's collation */
pfree(array_type);
@@ -924,8 +924,8 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Check constraints are handled after domain creation, as
* they require the Oid of the domain; at this point we can
- * only check that they're not marked NO INHERIT, because
- * that would be bogus.
+ * only check that they're not marked NO INHERIT, because that
+ * would be bogus.
*/
if (constr->is_no_inherit)
ereport(ERROR,
@@ -1191,19 +1191,19 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel)
/*
* Ordinarily we disallow adding values within transaction blocks, because
* we can't cope with enum OID values getting into indexes and then having
- * their defining pg_enum entries go away. However, it's okay if the enum
- * type was created in the current transaction, since then there can be
- * no such indexes that wouldn't themselves go away on rollback. (We
- * support this case because pg_dump --binary-upgrade needs it.) We test
- * this by seeing if the pg_type row has xmin == current XID and is not
- * HEAP_UPDATED. If it is HEAP_UPDATED, we can't be sure whether the
- * type was created or only modified in this xact. So we are disallowing
- * some cases that could theoretically be safe; but fortunately pg_dump
- * only needs the simplest case.
+ * their defining pg_enum entries go away. However, it's okay if the enum
+ * type was created in the current transaction, since then there can be no
+ * such indexes that wouldn't themselves go away on rollback. (We support
+ * this case because pg_dump --binary-upgrade needs it.) We test this by
+ * seeing if the pg_type row has xmin == current XID and is not
+ * HEAP_UPDATED. If it is HEAP_UPDATED, we can't be sure whether the type
+ * was created or only modified in this xact. So we are disallowing some
+ * cases that could theoretically be safe; but fortunately pg_dump only
+ * needs the simplest case.
*/
if (HeapTupleHeaderGetXmin(tup->t_data) == GetCurrentTransactionId() &&
!(tup->t_data->t_infomask & HEAP_UPDATED))
- /* safe to do inside transaction block */ ;
+ /* safe to do inside transaction block */ ;
else
PreventTransactionChain(isTopLevel, "ALTER TYPE ... ADD");
@@ -2273,7 +2273,7 @@ AlterDomainNotNull(List *names, bool notNull)
/*
* In principle the auxiliary information for this
* error should be errdatatype(), but errtablecol()
- * seems considerably more useful in practice. Since
+ * seems considerably more useful in practice. Since
* this code only executes in an ALTER DOMAIN command,
* the client should already know which domain is in
* question.
@@ -2667,7 +2667,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin)
/*
* In principle the auxiliary information for this error
* should be errdomainconstraint(), but errtablecol()
- * seems considerably more useful in practice. Since this
+ * seems considerably more useful in practice. Since this
* code only executes in an ALTER DOMAIN command, the
* client should already know which domain is in question,
* and which constraint too.
@@ -3005,7 +3005,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
true, /* is local */
0, /* inhcount */
false, /* connoinherit */
- false); /* is_internal */
+ false); /* is_internal */
/*
* Return the compiled constraint expression so the calling routine can
@@ -3348,7 +3348,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype)
* hasDependEntry should be TRUE if type is expected to have a pg_shdepend
* entry (ie, it's not a table rowtype nor an array type).
* is_primary_ops should be TRUE if this function is invoked with user's
- * direct operation (e.g, shdepReassignOwned). Elsewhere,
+ * direct operation (e.g., shdepReassignOwned). Elsewhere,
*/
void
AlterTypeOwnerInternal(Oid typeOid, Oid newOwnerId,
@@ -3397,7 +3397,7 @@ AlterTypeNamespace(List *names, const char *newschema, ObjectType objecttype)
TypeName *typename;
Oid typeOid;
Oid nspOid;
- ObjectAddresses *objsMoved;
+ ObjectAddresses *objsMoved;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeTypeNameFromNameList(names);
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index c7886ed799e..844f25cfa65 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -815,7 +815,7 @@ AlterRoleSet(AlterRoleSetStmt *stmt)
{
HeapTuple roletuple;
Oid databaseid = InvalidOid;
- Oid roleid = InvalidOid;
+ Oid roleid = InvalidOid;
if (stmt->role)
{
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index c984488e034..641c740268a 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -327,10 +327,10 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
* Since we don't take a lock here, the relation might be gone, or the
* RangeVar might no longer refer to the OID we look up here. In the
* former case, VACUUM will do nothing; in the latter case, it will
- * process the OID we looked up here, rather than the new one.
- * Neither is ideal, but there's little practical alternative, since
- * we're going to commit this transaction and begin a new one between
- * now and then.
+ * process the OID we looked up here, rather than the new one. Neither
+ * is ideal, but there's little practical alternative, since we're
+ * going to commit this transaction and begin a new one between now
+ * and then.
*/
relid = RangeVarGetRelid(vacrel, NoLock, false);
@@ -471,7 +471,7 @@ vacuum_set_xid_limits(int freeze_min_age,
if (multiXactFrzLimit != NULL)
{
- MultiXactId mxLimit;
+ MultiXactId mxLimit;
/*
* simplistic multixactid freezing: use the same freezing policy as
@@ -711,7 +711,7 @@ vac_update_datfrozenxid(void)
SysScanDesc scan;
HeapTuple classTup;
TransactionId newFrozenXid;
- MultiXactId newFrozenMulti;
+ MultiXactId newFrozenMulti;
bool dirty = false;
/*
@@ -723,8 +723,8 @@ vac_update_datfrozenxid(void)
newFrozenXid = GetOldestXmin(true, true);
/*
- * Similarly, initialize the MultiXact "min" with the value that would
- * be used on pg_class for new tables. See AddNewRelationTuple().
+ * Similarly, initialize the MultiXact "min" with the value that would be
+ * used on pg_class for new tables. See AddNewRelationTuple().
*/
newFrozenMulti = GetOldestMultiXactId();
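For context, the initial values computed here are then clamped down while scanning pg_class. A minimal sketch of that clamping, with a hypothetical RelRowSketch standing in for a pg_class row; note the real code must compare with TransactionIdPrecedes()/MultiXactIdPrecedes() to cope with XID wraparound, which a plain < ignores:

    #include <stdint.h>

    typedef uint32_t TransactionId;
    typedef uint32_t MultiXactId;

    typedef struct
    {
        TransactionId relfrozenxid;
        MultiXactId   relminmxid;
    } RelRowSketch;                     /* hypothetical pg_class row stand-in */

    static void
    clamp_frozen_limits(const RelRowSketch *rows, int n,
                        TransactionId *newFrozenXid,
                        MultiXactId *newFrozenMulti)
    {
        for (int i = 0; i < n; i++)
        {
            /* plain < ignores wraparound; the real code must not */
            if (rows[i].relfrozenxid < *newFrozenXid)
                *newFrozenXid = rows[i].relfrozenxid;
            if (rows[i].relminmxid < *newFrozenMulti)
                *newFrozenMulti = rows[i].relminmxid;
        }
    }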
@@ -900,8 +900,8 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId frozenMulti)
/*
* Update the wrap limit for GetNewTransactionId and creation of new
- * MultiXactIds. Note: these functions will also signal the postmaster for
- * an(other) autovac cycle if needed. XXX should we avoid possibly
+ * MultiXactIds. Note: these functions will also signal the postmaster
+ * for an(other) autovac cycle if needed. XXX should we avoid possibly
* signalling twice?
*/
SetTransactionIdLimit(frozenXID, oldestxid_datoid);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 9d304153b8b..7e46f9e9343 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -78,9 +78,9 @@
* that the potential for improvement was great enough to merit the cost of
* supporting them.
*/
-#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
-#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
-#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
+#define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20 /* ms */
+#define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL 50 /* ms */
+#define VACUUM_TRUNCATE_LOCK_TIMEOUT 5000 /* ms */
/*
* Guesstimation of number of dead tuples per page. This is used to
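The three VACUUM_TRUNCATE_LOCK_* constants above drive the retry loop in the truncation phase: while scanning backward for a truncation point, the scan periodically checks whether another backend is queued behind VACUUM's AccessExclusiveLock, and abandons the truncation attempt once the timeout elapses. A minimal sketch of that policy, with lock_waiter_present() and the elapsed-time argument as hypothetical stand-ins for the real instrumentation:

    #include <stdbool.h>

    #define VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL 20      /* ms */
    #define VACUUM_TRUNCATE_LOCK_WAIT_INTERVAL  50      /* ms, sleep between
                                                         * lock attempts */
    #define VACUUM_TRUNCATE_LOCK_TIMEOUT        5000    /* ms */

    bool lock_waiter_present(void);     /* hypothetical: anyone queued on
                                         * our AccessExclusiveLock? */

    static bool
    keep_truncating(int elapsed_ms)
    {
        if (elapsed_ms >= VACUUM_TRUNCATE_LOCK_TIMEOUT)
            return false;               /* give up on truncation for now */
        if (elapsed_ms % VACUUM_TRUNCATE_LOCK_CHECK_INTERVAL == 0 &&
            lock_waiter_present())
            return false;               /* yield the lock to the waiter */
        return true;
    }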
@@ -184,7 +184,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
double new_rel_tuples;
BlockNumber new_rel_allvisible;
TransactionId new_frozen_xid;
- MultiXactId new_min_multi;
+ MultiXactId new_min_multi;
/* measure elapsed time iff autovacuum logging requires it */
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -287,8 +287,8 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
/* report results to the stats collector, too */
pgstat_report_vacuum(RelationGetRelid(onerel),
- onerel->rd_rel->relisshared,
- new_rel_tuples);
+ onerel->rd_rel->relisshared,
+ new_rel_tuples);
/* and log the action if appropriate */
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
@@ -315,7 +315,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
"pages: %d removed, %d remain\n"
"tuples: %.0f removed, %.0f remain\n"
"buffer usage: %d hits, %d misses, %d dirtied\n"
- "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
+ "avg read rate: %.3f MB/s, avg write rate: %.3f MB/s\n"
"system usage: %s",
get_database_name(MyDatabaseId),
get_namespace_name(RelationGetNamespace(onerel)),
@@ -899,15 +899,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* It should never be the case that the visibility map page is set
* while the page-level bit is clear, but the reverse is allowed
- * (if checksums are not enabled). Regardless, set the both bits
+ * (if checksums are not enabled). Regardless, set both bits
* so that we get back in sync.
*
* NB: If the heap page is all-visible but the VM bit is not set,
- * we don't need to dirty the heap page. However, if checksums are
- * enabled, we do need to make sure that the heap page is dirtied
- * before passing it to visibilitymap_set(), because it may be
- * logged. Given that this situation should only happen in rare
- * cases after a crash, it is not worth optimizing.
+ * we don't need to dirty the heap page. However, if checksums
+ * are enabled, we do need to make sure that the heap page is
+ * dirtied before passing it to visibilitymap_set(), because it
+ * may be logged. Given that this situation should only happen in
+ * rare cases after a crash, it is not worth optimizing.
*/
PageSetAllVisible(page);
MarkBufferDirty(buf);
@@ -1116,7 +1116,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
Page page = BufferGetPage(buffer);
OffsetNumber unused[MaxOffsetNumber];
int uncnt = 0;
- TransactionId visibility_cutoff_xid;
+ TransactionId visibility_cutoff_xid;
START_CRIT_SECTION();
@@ -1146,8 +1146,8 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
MarkBufferDirty(buffer);
/*
- * Now that we have removed the dead tuples from the page, once again check
- * if the page has become all-visible.
+ * Now that we have removed the dead tuples from the page, once again
+ * check if the page has become all-visible.
*/
if (!visibilitymap_test(onerel, blkno, vmbuffer) &&
heap_page_is_all_visible(buffer, &visibility_cutoff_xid))
@@ -1155,7 +1155,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
Assert(BufferIsValid(*vmbuffer));
PageSetAllVisible(page);
visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
- visibility_cutoff_xid);
+ visibility_cutoff_xid);
}
/* XLOG stuff */
@@ -1660,25 +1660,24 @@ vac_cmp_itemptr(const void *left, const void *right)
static bool
heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
{
- Page page = BufferGetPage(buf);
+ Page page = BufferGetPage(buf);
OffsetNumber offnum,
- maxoff;
- bool all_visible = true;
+ maxoff;
+ bool all_visible = true;
*visibility_cutoff_xid = InvalidTransactionId;
/*
* This is a stripped down version of the line pointer scan in
- * lazy_scan_heap(). So if you change anything here, also check that
- * code.
+ * lazy_scan_heap(). So if you change anything here, also check that code.
*/
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
- offnum <= maxoff && all_visible;
- offnum = OffsetNumberNext(offnum))
+ offnum <= maxoff && all_visible;
+ offnum = OffsetNumberNext(offnum))
{
- ItemId itemid;
- HeapTupleData tuple;
+ ItemId itemid;
+ HeapTupleData tuple;
itemid = PageGetItemId(page, offnum);
@@ -1689,8 +1688,8 @@ heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
ItemPointerSet(&(tuple.t_self), BufferGetBlockNumber(buf), offnum);
/*
- * Dead line pointers can have index pointers pointing to them. So they
- * can't be treated as visible
+ * Dead line pointers can have index pointers pointing to them. So
+ * they can't be treated as visible
*/
if (ItemIdIsDead(itemid))
{
@@ -1716,8 +1715,8 @@ heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
}
/*
- * The inserter definitely committed. But is it old
- * enough that everyone sees it as committed?
+ * The inserter definitely committed. But is it old enough
+ * that everyone sees it as committed?
*/
xmin = HeapTupleHeaderGetXmin(tuple.t_data);
if (!TransactionIdPrecedes(xmin, OldestXmin))
@@ -1743,7 +1742,7 @@ heap_page_is_all_visible(Buffer buf, TransactionId *visibility_cutoff_xid)
elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
break;
}
- } /* scan along page */
+ } /* scan along page */
return all_visible;
}
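Condensed, heap_page_is_all_visible() walks the line pointers, fails fast on anything not provably visible to everyone, and tracks the newest committed xmin as the cutoff. A minimal sketch under simplified assumptions (flat arrays instead of page line pointers, plain comparison instead of TransactionIdPrecedes()):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;

    typedef enum { ITEM_UNUSED, ITEM_REDIRECT, ITEM_DEAD, ITEM_LIVE } ItemStateSketch;

    static bool
    page_all_visible_sketch(const ItemStateSketch *items,
                            const TransactionId *xmins, int n,
                            TransactionId oldest_xmin,
                            TransactionId *visibility_cutoff_xid)
    {
        *visibility_cutoff_xid = 0;     /* InvalidTransactionId stand-in */
        for (int i = 0; i < n; i++)
        {
            if (items[i] == ITEM_UNUSED || items[i] == ITEM_REDIRECT)
                continue;               /* nothing to test */
            if (items[i] == ITEM_DEAD)
                return false;           /* index entries may still point here */
            if (xmins[i] >= oldest_xmin)
                return false;           /* inserter not yet old enough */
            if (xmins[i] > *visibility_cutoff_xid)
                *visibility_cutoff_xid = xmins[i];  /* newest committed xmin */
        }
        return true;
    }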
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index e1b280a065c..9b0cd8c2070 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -959,12 +959,13 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
RelationGetRelationName(resultRel))));
break;
case RELKIND_VIEW:
+
/*
* Okay only if there's a suitable INSTEAD OF trigger. Messages
* here should match rewriteHandler.c's rewriteTargetView, except
* that we omit errdetail because we haven't got the information
- * handy (and given that we really shouldn't get here anyway,
- * it's not worth great exertion to get).
+ * handy (and given that we really shouldn't get here anyway, it's
+ * not worth great exertion to get).
*/
switch (operation)
{
@@ -1012,8 +1013,8 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
if (fdwroutine->ExecForeignInsert == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot insert into foreign table \"%s\"",
- RelationGetRelationName(resultRel))));
+ errmsg("cannot insert into foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
break;
case CMD_UPDATE:
if (fdwroutine->ExecForeignUpdate == NULL)
@@ -1026,8 +1027,8 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
if (fdwroutine->ExecForeignDelete == NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot delete from foreign table \"%s\"",
- RelationGetRelationName(resultRel))));
+ errmsg("cannot delete from foreign table \"%s\"",
+ RelationGetRelationName(resultRel))));
break;
default:
elog(ERROR, "unrecognized CmdType: %d", (int) operation);
@@ -1391,7 +1392,8 @@ ExecEndPlan(PlanState *planstate, EState *estate)
}
/*
- * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping locks
+ * close any relations selected FOR [KEY] UPDATE/SHARE, again keeping
+ * locks
*/
foreach(l, estate->es_rowMarks)
{
@@ -1546,9 +1548,9 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
qual = resultRelInfo->ri_ConstraintExprs[i];
/*
- * NOTE: SQL specifies that a NULL result from a constraint
- * expression is not to be treated as a failure. Therefore, tell
- * ExecQual to return TRUE for NULL.
+ * NOTE: SQL specifies that a NULL result from a constraint expression
+ * is not to be treated as a failure. Therefore, tell ExecQual to
+ * return TRUE for NULL.
*/
if (!ExecQual(qual, econtext, true))
return check[i].ccname;
@@ -1901,13 +1903,13 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
/*
* If tuple was inserted by our own transaction, we have to check
* cmin against es_output_cid: cmin >= current CID means our
- * command cannot see the tuple, so we should ignore it.
- * Otherwise heap_lock_tuple() will throw an error, and so would
- * any later attempt to update or delete the tuple. (We need not
- * check cmax because HeapTupleSatisfiesDirty will consider a
- * tuple deleted by our transaction dead, regardless of cmax.)
- * Wee just checked that priorXmax == xmin, so we can test that
- * variable instead of doing HeapTupleHeaderGetXmin again.
+ * command cannot see the tuple, so we should ignore it. Otherwise
+ * heap_lock_tuple() will throw an error, and so would any later
+ * attempt to update or delete the tuple. (We need not check cmax
+ * because HeapTupleSatisfiesDirty will consider a tuple deleted
+ * by our transaction dead, regardless of cmax.) We just checked
+ * that priorXmax == xmin, so we can test that variable instead of
+ * doing HeapTupleHeaderGetXmin again.
*/
if (TransactionIdIsCurrentTransactionId(priorXmax) &&
HeapTupleHeaderGetCmin(tuple.t_data) >= estate->es_output_cid)
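The guard in this hunk boils down to one predicate. A minimal sketch, with xid_is_current() as a hypothetical stand-in for TransactionIdIsCurrentTransactionId():

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t TransactionId;
    typedef uint32_t CommandId;

    bool xid_is_current(TransactionId xid);     /* hypothetical stand-in */

    /*
     * A tuple our own transaction inserted at cmin >= es_output_cid was
     * created by the current or a later command, so the locking command
     * must treat it as invisible rather than lock it.
     */
    static bool
    tuple_invisible_to_locker(TransactionId xmin, CommandId cmin,
                              CommandId es_output_cid)
    {
        return xid_is_current(xmin) && cmin >= es_output_cid;
    }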
@@ -1921,7 +1923,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
*/
test = heap_lock_tuple(relation, &tuple,
estate->es_output_cid,
- lockmode, false /* wait */,
+ lockmode, false /* wait */ ,
false, &buffer, &hufd);
/* We now have two pins on the buffer, get rid of one */
ReleaseBuffer(buffer);
@@ -1929,6 +1931,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode,
switch (test)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 494208a0320..138818313b7 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -4278,7 +4278,7 @@ ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext,
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("WHERE CURRENT OF is not supported for this table type")));
+ errmsg("WHERE CURRENT OF is not supported for this table type")));
return 0; /* keep compiler quiet */
}
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index dbb4805ae2c..12e1b8ef599 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -1682,7 +1682,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
rettype,
-1,
get_typcollation(rettype),
- COERCE_IMPLICIT_CAST);
+ COERCE_IMPLICIT_CAST);
/* Relabel is dangerous if sort/group or setop column */
if (tle->ressortgroupref != 0 || parse->setOperations)
*modifyTargetList = true;
@@ -1786,7 +1786,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
atttype,
-1,
get_typcollation(atttype),
- COERCE_IMPLICIT_CAST);
+ COERCE_IMPLICIT_CAST);
/* Relabel is dangerous if sort/group or setop column */
if (tle->ressortgroupref != 0 || parse->setOperations)
*modifyTargetList = true;
diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c
index ae2d26b48b4..5b5c705a96d 100644
--- a/src/backend/executor/nodeLockRows.c
+++ b/src/backend/executor/nodeLockRows.c
@@ -127,7 +127,7 @@ lnext:
break;
default:
elog(ERROR, "unsupported rowmark type");
- lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
+ lockmode = LockTupleNoKeyExclusive; /* keep compiler quiet */
break;
}
@@ -139,6 +139,7 @@ lnext:
switch (test)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index a6f247e1bc3..e934c7b9ab9 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -392,18 +392,19 @@ ldelete:;
result = heap_delete(resultRelationDesc, tupleid,
estate->es_output_cid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd);
switch (result)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
* transaction. The former case is possible in a join DELETE
- * where multiple tuples join to the same target tuple.
- * This is somewhat questionable, but Postgres has always
- * allowed it: we just ignore additional deletion attempts.
+ * where multiple tuples join to the same target tuple. This
+ * is somewhat questionable, but Postgres has always allowed
+ * it: we just ignore additional deletion attempts.
*
* The latter case arises if the tuple is modified by a
* command in a BEFORE trigger, or perhaps by a command in a
@@ -412,14 +413,14 @@ ldelete:;
* proceed. We don't want to discard the original DELETE
* while keeping the triggered actions based on its deletion;
* and it would be no better to allow the original DELETE
- * while discarding updates that it triggered. The row update
+ * while discarding updates that it triggered. The row update
* carries some information that might be important according
* to business rules; so throwing an error is the only safe
* course.
*
- * If a trigger actually intends this type of interaction,
- * it can re-execute the DELETE and then return NULL to
- * cancel the outer delete.
+ * If a trigger actually intends this type of interaction, it
+ * can re-execute the DELETE and then return NULL to cancel
+ * the outer delete.
*/
if (hufd.cmax != estate->es_output_cid)
ereport(ERROR,
@@ -646,7 +647,7 @@ ExecUpdate(ItemPointer tupleid,
}
else
{
- LockTupleMode lockmode;
+ LockTupleMode lockmode;
/*
* Check the constraints of the tuple
@@ -673,19 +674,20 @@ lreplace:;
result = heap_update(resultRelationDesc, tupleid, tuple,
estate->es_output_cid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */,
+ true /* wait for commit */ ,
&hufd, &lockmode);
switch (result)
{
case HeapTupleSelfUpdated:
+
/*
* The target tuple was already updated or deleted by the
* current command, or by a later command in the current
* transaction. The former case is possible in a join UPDATE
- * where multiple tuples join to the same target tuple.
- * This is pretty questionable, but Postgres has always
- * allowed it: we just execute the first update action and
- * ignore additional update attempts.
+ * where multiple tuples join to the same target tuple. This
+ * is pretty questionable, but Postgres has always allowed it:
+ * we just execute the first update action and ignore
+ * additional update attempts.
*
* The latter case arises if the tuple is modified by a
* command in a BEFORE trigger, or perhaps by a command in a
@@ -697,9 +699,9 @@ lreplace:;
* previous ones. So throwing an error is the only safe
* course.
*
- * If a trigger actually intends this type of interaction,
- * it can re-execute the UPDATE (assuming it can figure out
- * how) and then return NULL to cancel the outer update.
+ * If a trigger actually intends this type of interaction, it
+ * can re-execute the UPDATE (assuming it can figure out how)
+ * and then return NULL to cancel the outer update.
*/
if (hufd.cmax != estate->es_output_cid)
ereport(ERROR,
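The same HeapTupleSelfUpdated policy recurs in ExecDelete, ExecUpdate, and nodeLockRows.c, and reduces to one comparison. A minimal sketch, with report_self_update_error() as a hypothetical stand-in for the ereport(ERROR, ...) call:

    #include <stdint.h>

    typedef uint32_t CommandId;

    void report_self_update_error(void);    /* hypothetical ereport stand-in */

    static void
    handle_self_updated(CommandId tuple_cmax, CommandId es_output_cid)
    {
        if (tuple_cmax != es_output_cid)
            report_self_update_error();     /* modified by a later command,
                                             * e.g. from a BEFORE trigger */
        /* else: hit again by this very command; silently ignore it */
    }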
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index c4edec0750b..366e784bb0d 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -132,7 +132,7 @@ InitScanRelation(SeqScanState *node, EState *estate, int eflags)
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ps.plan)->scanrelid,
+ ((SeqScan *) node->ps.plan)->scanrelid,
eflags);
/* initialize a heapscan */
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index ca0d05d2cc0..2f9a94d01e5 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1570,7 +1570,7 @@ SPI_result_code_string(int code)
* CachedPlanSources.
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
List *
@@ -1586,7 +1586,7 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan)
* return NULL. Caller is responsible for doing ReleaseCachedPlan().
*
* This is exported so that pl/pgsql can use it (this beats letting pl/pgsql
- * look directly into the SPIPlan for itself). It's not documented in
+ * look directly into the SPIPlan for itself). It's not documented in
* spi.sgml because we'd just as soon not have too many places using this.
*/
CachedPlan *
@@ -1971,7 +1971,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
stmt_list = pg_analyze_and_rewrite_params(parsetree,
src,
plan->parserSetup,
- plan->parserSetupArg);
+ plan->parserSetupArg);
}
else
{
@@ -1990,7 +1990,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
plan->parserSetup,
plan->parserSetupArg,
plan->cursor_options,
- false); /* not fixed result */
+ false); /* not fixed result */
}
/*
diff --git a/src/backend/lib/binaryheap.c b/src/backend/lib/binaryheap.c
index 2c6f85ca536..4b4fc945c32 100644
--- a/src/backend/lib/binaryheap.c
+++ b/src/backend/lib/binaryheap.c
@@ -35,7 +35,7 @@ binaryheap_allocate(int capacity, binaryheap_comparator compare, void *arg)
int sz;
binaryheap *heap;
- sz = offsetof(binaryheap, bh_nodes) + sizeof(Datum) * capacity;
+ sz = offsetof(binaryheap, bh_nodes) +sizeof(Datum) * capacity;
heap = palloc(sz);
heap->bh_size = 0;
heap->bh_space = capacity;
@@ -203,7 +203,7 @@ binaryheap_replace_first(binaryheap *heap, Datum d)
static inline void
swap_nodes(binaryheap *heap, int a, int b)
{
- Datum swap;
+ Datum swap;
swap = heap->bh_nodes[a];
heap->bh_nodes[a] = heap->bh_nodes[b];
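The "+sizeof" spacing in the hunk above is an artifact of pgindent's reformatting, not a semantic change; the expression itself is the usual flexible-array allocation. A minimal standalone sketch of that pattern, using malloc() in place of palloc() and a trimmed-down struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdlib.h>

    typedef uintptr_t Datum;            /* stand-in for the real Datum */

    typedef struct
    {
        int   bh_size;
        int   bh_space;
        Datum bh_nodes[];               /* flexible array member */
    } binaryheap_sketch;

    static binaryheap_sketch *
    heap_alloc(int capacity)
    {
        /* header up to the flexible array, plus capacity Datum slots */
        size_t sz = offsetof(binaryheap_sketch, bh_nodes)
                    + sizeof(Datum) * capacity;
        binaryheap_sketch *heap = malloc(sz);

        if (heap != NULL)
        {
            heap->bh_size = 0;
            heap->bh_space = capacity;
        }
        return heap;
    }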
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 3a041d9d58a..415b614e48b 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -827,7 +827,7 @@ pg_krb5_recvauth(Port *port)
return ret;
retval = krb5_recvauth(pg_krb5_context, &auth_context,
- (krb5_pointer) &port->sock, pg_krb_srvnam,
+ (krb5_pointer) & port->sock, pg_krb_srvnam,
pg_krb5_server, 0, pg_krb5_keytab, &ticket);
if (retval)
{
@@ -2057,7 +2057,7 @@ InitializeLDAPConnection(Port *port, LDAP **ldap)
{
ldap_unbind(*ldap);
ereport(LOG,
- (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
+ (errmsg("could not set LDAP protocol version: %s", ldap_err2string(r))));
return STATUS_ERROR;
}
@@ -2110,7 +2110,7 @@ InitializeLDAPConnection(Port *port, LDAP **ldap)
{
ldap_unbind(*ldap);
ereport(LOG,
- (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
+ (errmsg("could not start LDAP TLS session: %s", ldap_err2string(r))));
return STATUS_ERROR;
}
}
@@ -2201,7 +2201,7 @@ CheckLDAPAuth(Port *port)
{
ereport(LOG,
(errmsg("could not perform initial LDAP bind for ldapbinddn \"%s\" on server \"%s\": %s",
- port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r))));
+ port->hba->ldapbinddn, port->hba->ldapserver, ldap_err2string(r))));
return STATUS_ERROR;
}
@@ -2226,7 +2226,7 @@ CheckLDAPAuth(Port *port)
{
ereport(LOG,
(errmsg("could not search LDAP for filter \"%s\" on server \"%s\": %s",
- filter, port->hba->ldapserver, ldap_err2string(r))));
+ filter, port->hba->ldapserver, ldap_err2string(r))));
pfree(filter);
return STATUS_ERROR;
}
@@ -2236,16 +2236,16 @@ CheckLDAPAuth(Port *port)
{
if (count == 0)
ereport(LOG,
- (errmsg("LDAP user \"%s\" does not exist", port->user_name),
- errdetail("LDAP search for filter \"%s\" on server \"%s\" returned no entries.",
- filter, port->hba->ldapserver)));
+ (errmsg("LDAP user \"%s\" does not exist", port->user_name),
+ errdetail("LDAP search for filter \"%s\" on server \"%s\" returned no entries.",
+ filter, port->hba->ldapserver)));
else
ereport(LOG,
- (errmsg("LDAP user \"%s\" is not unique", port->user_name),
- errdetail_plural("LDAP search for filter \"%s\" on server \"%s\" returned %d entry.",
- "LDAP search for filter \"%s\" on server \"%s\" returned %d entries.",
- count,
- filter, port->hba->ldapserver, count)));
+ (errmsg("LDAP user \"%s\" is not unique", port->user_name),
+ errdetail_plural("LDAP search for filter \"%s\" on server \"%s\" returned %d entry.",
+ "LDAP search for filter \"%s\" on server \"%s\" returned %d entries.",
+ count,
+ filter, port->hba->ldapserver, count)));
pfree(filter);
ldap_msgfree(search_message);
@@ -2317,8 +2317,8 @@ CheckLDAPAuth(Port *port)
if (r != LDAP_SUCCESS)
{
ereport(LOG,
- (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s",
- fulluser, port->hba->ldapserver, ldap_err2string(r))));
+ (errmsg("LDAP login failed for user \"%s\" on server \"%s\": %s",
+ fulluser, port->hba->ldapserver, ldap_err2string(r))));
pfree(fulluser);
return STATUS_ERROR;
}
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 5b60d1c3c53..e946a4659f2 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -397,12 +397,12 @@ tokenize_file(const char *filename, FILE *file,
while (!feof(file) && !ferror(file))
{
- char rawline[MAX_LINE];
- char *lineptr;
+ char rawline[MAX_LINE];
+ char *lineptr;
if (!fgets(rawline, sizeof(rawline), file))
break;
- if (strlen(rawline) == MAX_LINE-1)
+ if (strlen(rawline) == MAX_LINE - 1)
/* Line too long! */
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
@@ -411,9 +411,9 @@ tokenize_file(const char *filename, FILE *file,
line_number, filename)));
/* Strip trailing linebreak from rawline */
- while (rawline[strlen(rawline)-1] == '\n' ||
- rawline[strlen(rawline)-1] == '\r')
- rawline[strlen(rawline)-1] = '\0';
+ while (rawline[strlen(rawline) - 1] == '\n' ||
+ rawline[strlen(rawline) - 1] == '\r')
+ rawline[strlen(rawline) - 1] = '\0';
lineptr = rawline;
while (strlen(lineptr) > 0)
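A minimal standalone sketch of this read-and-strip pattern, assuming MAX_LINE mirrors hba.c's buffer size (an assumption here), and adding an empty-string guard the real loop gets implicitly because fgets() never returns an empty string on success:

    #include <stdio.h>
    #include <string.h>

    #define MAX_LINE 8192               /* assumption: hba.c's buffer size */

    /* returns 1 on success, 0 on EOF/error, -1 if the line may be truncated */
    static int
    read_one_line(FILE *file, char rawline[MAX_LINE])
    {
        if (!fgets(rawline, MAX_LINE, file))
            return 0;
        /* fgets stores at most MAX_LINE-1 chars; exactly that many is
         * taken to mean the line was too long, as in hba.c */
        if (strlen(rawline) == MAX_LINE - 1)
            return -1;                  /* the real code ereport()s here */
        /* strip trailing linebreak characters */
        while (strlen(rawline) > 0 &&
               (rawline[strlen(rawline) - 1] == '\n' ||
                rawline[strlen(rawline) - 1] == '\r'))
            rawline[strlen(rawline) - 1] = '\0';
        return 1;
    }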
@@ -1476,7 +1476,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
{
#ifdef LDAP_API_FEATURE_X_OPENLDAP
LDAPURLDesc *urldata;
- int rc;
+ int rc;
#endif
REQUIRE_AUTH_OPTION(uaLDAP, "ldapurl", "ldap");
@@ -1485,8 +1485,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
if (rc != LDAP_SUCCESS)
{
ereport(LOG,
- (errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc))));
+ (errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("could not parse LDAP URL \"%s\": %s", val, ldap_err2string(rc))));
return false;
}
@@ -1494,7 +1494,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme)));
+ errmsg("unsupported LDAP URL scheme: %s", urldata->lud_scheme)));
ldap_free_urldesc(urldata);
return false;
}
@@ -1504,7 +1504,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
hbaline->ldapbasedn = pstrdup(urldata->lud_dn);
if (urldata->lud_attrs)
- hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]); /* only use first one */
+ hbaline->ldapsearchattribute = pstrdup(urldata->lud_attrs[0]); /* only use first one */
hbaline->ldapscope = urldata->lud_scope;
if (urldata->lud_filter)
{
@@ -1515,11 +1515,11 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
return false;
}
ldap_free_urldesc(urldata);
-#else /* not OpenLDAP */
+#else /* not OpenLDAP */
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("LDAP URLs not supported on this platform")));
-#endif /* not OpenLDAP */
+#endif /* not OpenLDAP */
}
else if (strcmp(name, "ldaptls") == 0)
{
@@ -2023,7 +2023,7 @@ check_ident_usermap(IdentLine *identLine, const char *usermap_name,
ereport(LOG,
(errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
errmsg("regular expression \"%s\" has no subexpressions as requested by backreference in \"%s\"",
- identLine->ident_user + 1, identLine->pg_role)));
+ identLine->ident_user + 1, identLine->pg_role)));
*error_p = true;
return;
}
@@ -2165,7 +2165,7 @@ load_ident(void)
MemoryContext linecxt;
MemoryContext oldcxt;
MemoryContext ident_context;
- IdentLine *newline;
+ IdentLine *newline;
file = AllocateFile(IdentFileName, "r");
if (file == NULL)
@@ -2183,10 +2183,10 @@ load_ident(void)
/* Now parse all the lines */
ident_context = AllocSetContextCreate(TopMemoryContext,
- "ident parser context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "ident parser context",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
oldcxt = MemoryContextSwitchTo(ident_context);
forboth(line_cell, ident_lines, num_cell, ident_line_nums)
{
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 61dde51f55c..76aac975528 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -808,7 +808,7 @@ pq_set_nonblocking(bool nonblocking)
{
if (!pg_set_noblock(MyProcPort->sock))
ereport(COMMERROR,
- (errmsg("could not set socket to nonblocking mode: %m")));
+ (errmsg("could not set socket to nonblocking mode: %m")));
}
else
{
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index a77e05da90f..8ea6c1f3874 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -170,7 +170,7 @@ main(int argc, char *argv[])
#ifdef EXEC_BACKEND
if (argc > 1 && strncmp(argv[1], "--fork", 6) == 0)
- SubPostmasterMain(argc, argv); /* does not return */
+ SubPostmasterMain(argc, argv); /* does not return */
#endif
#ifdef WIN32
@@ -191,10 +191,10 @@ main(int argc, char *argv[])
else if (argc > 1 && strcmp(argv[1], "--single") == 0)
PostgresMain(argc, argv,
NULL, /* no dbname */
- get_current_username(progname)); /* does not return */
+ get_current_username(progname)); /* does not return */
else
- PostmasterMain(argc, argv); /* does not return */
- abort(); /* should not get here */
+ PostmasterMain(argc, argv); /* does not return */
+ abort(); /* should not get here */
}
diff --git a/src/backend/optimizer/geqo/geqo_cx.c b/src/backend/optimizer/geqo/geqo_cx.c
index afae948a61f..9f6d5e478aa 100644
--- a/src/backend/optimizer/geqo/geqo_cx.c
+++ b/src/backend/optimizer/geqo/geqo_cx.c
@@ -47,7 +47,6 @@ int
cx(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring,
int num_gene, City *city_table)
{
-
int i,
start_pos,
curr_pos;
diff --git a/src/backend/optimizer/geqo/geqo_px.c b/src/backend/optimizer/geqo/geqo_px.c
index 808ff6a14c9..99289bc11f5 100644
--- a/src/backend/optimizer/geqo/geqo_px.c
+++ b/src/backend/optimizer/geqo/geqo_px.c
@@ -46,7 +46,6 @@ void
px(PlannerInfo *root, Gene *tour1, Gene *tour2, Gene *offspring, int num_gene,
City *city_table)
{
-
int num_positions;
int i,
pos,
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 105718ff371..742177f4570 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -721,7 +721,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
*/
if (childrel->cheapest_total_path->param_info == NULL)
subpaths = accumulate_append_subpath(subpaths,
- childrel->cheapest_total_path);
+ childrel->cheapest_total_path);
else
subpaths_valid = false;
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 8d2490208d4..3507f18007e 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -69,7 +69,7 @@
#include "postgres.h"
#ifdef _MSC_VER
-#include <float.h> /* for _isnan */
+#include <float.h> /* for _isnan */
#endif
#include <math.h>
@@ -3745,7 +3745,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel)
* The subquery could be an expansion of a view that's had columns
* added to it since the current query was parsed, so that there are
* non-junk tlist columns in it that don't correspond to any column
- * visible at our query level. Ignore such columns.
+ * visible at our query level. Ignore such columns.
*/
if (te->resno < rel->min_attr || te->resno > rel->max_attr)
continue;
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index cbb4f5cd956..711b161c0d1 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -294,7 +294,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
/*
* We add ec2's items to ec1, then set ec2's ec_merged link to point
- * to ec1 and remove ec2 from the eq_classes list. We cannot simply
+ * to ec1 and remove ec2 from the eq_classes list. We cannot simply
* delete ec2 because that could leave dangling pointers in existing
* PathKeys. We leave it behind with a link so that the merged EC can
* be found.
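A minimal sketch of the merge-with-forwarding-link pattern described above, using a hypothetical trimmed-down struct (the real EquivalenceClass also carries member and source-clause lists that get moved during the merge):

    #include <stddef.h>

    typedef struct ECSketch
    {
        struct ECSketch *ec_merged;     /* forwarding link once merged away */
        /* ... member lists elided ... */
    } ECSketch;

    /* follow forwarding links to the surviving class */
    static ECSketch *
    canonical_ec(ECSketch *ec)
    {
        while (ec->ec_merged != NULL)
            ec = ec->ec_merged;
        return ec;
    }

    /* merge ec2 into ec1, leaving ec2 behind as a tombstone */
    static void
    merge_ecs(ECSketch *ec1, ECSketch *ec2)
    {
        /* the real code moves ec2's members and sources into ec1 here */
        ec2->ec_merged = ec1;
    }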
@@ -2083,9 +2083,9 @@ generate_implied_equalities_for_column(PlannerInfo *root,
continue;
/*
- * Scan members, looking for a match to the target column. Note
- * that child EC members are considered, but only when they belong to
- * the target relation. (Unlike regular members, the same expression
+ * Scan members, looking for a match to the target column. Note that
+ * child EC members are considered, but only when they belong to the
+ * target relation. (Unlike regular members, the same expression
* could be a child member of more than one EC. Therefore, it's
* potentially order-dependent which EC a child relation's target
* column gets matched to. This is annoying but it only happens in
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index d74603983bc..65eb344cde4 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -250,7 +250,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
* If there are any rels that have LATERAL references to this one, we
* cannot use join quals referencing them as index quals for this one,
* since such rels would have to be on the inside not the outside of a
- * nestloop join relative to this one. Create a Relids set listing all
+ * nestloop join relative to this one. Create a Relids set listing all
* such rels, for use in checks of potential join clauses.
*/
lateral_referencers = NULL;
@@ -482,7 +482,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
*
* For simplicity in selecting relevant clauses, we represent each set of
* outer rels as a maximum set of clause_relids --- that is, the indexed
- * relation itself is also included in the relids set. considered_relids
+ * relation itself is also included in the relids set. considered_relids
* lists all relids sets we've already tried.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -557,7 +557,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
*/
foreach(lc2, *considered_relids)
{
- Relids oldrelids = (Relids) lfirst(lc2);
+ Relids oldrelids = (Relids) lfirst(lc2);
/*
* If either is a subset of the other, no new set is possible.
@@ -571,7 +571,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel,
/*
* If this clause was derived from an equivalence class, the
* clause list may contain other clauses derived from the same
- * eclass. We should not consider that combining this clause with
+ * eclass. We should not consider that combining this clause with
* one of those clauses generates a usefully different
* parameterization; so skip if any clause derived from the same
* eclass would already have been included when using oldrelids.
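A minimal sketch of the either-is-a-subset test in the loop above, assuming a bitmask stands in for a Relids set (the real code calls bms_is_subset() on Bitmapsets):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t RelidsSketch;      /* bitmask stand-in for Relids */

    /* no usefully different parameterization if either set contains the other */
    static bool
    redundant_with_considered(RelidsSketch newrelids,
                              const RelidsSketch *considered, int n)
    {
        for (int i = 0; i < n; i++)
        {
            RelidsSketch old = considered[i];

            if ((old & newrelids) == old || (old & newrelids) == newrelids)
                return true;            /* one is a subset of the other */
        }
        return false;
    }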
@@ -654,9 +654,9 @@ get_join_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * Add applicable eclass join clauses. The clauses generated for each
+ * Add applicable eclass join clauses. The clauses generated for each
* column are redundant (cf generate_implied_equalities_for_column),
- * so we need at most one. This is the only exception to the general
+ * so we need at most one. This is the only exception to the general
* rule of using all available index clauses.
*/
foreach(lc, eclauseset->indexclauses[indexcol])
@@ -2630,8 +2630,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
return;
/*
- * Construct a list of clauses that we can assume true for the purpose
- * of proving the index(es) usable. Restriction clauses for the rel are
+ * Construct a list of clauses that we can assume true for the purpose of
+ * proving the index(es) usable. Restriction clauses for the rel are
* always usable, and so are any join clauses that are "movable to" this
* rel. Also, we can consider any EC-derivable join clauses (which must
* be "movable to" this rel, by definition).
@@ -2653,8 +2653,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
/*
* Add on any equivalence-derivable join clauses. Computing the correct
* relid sets for generate_join_implied_equalities is slightly tricky
- * because the rel could be a child rel rather than a true baserel, and
- * in that case we must remove its parent's relid from all_baserels.
+ * because the rel could be a child rel rather than a true baserel, and in
+ * that case we must remove its parent's relid from all_baserels.
*/
if (rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
{
@@ -2671,8 +2671,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
clauselist =
list_concat(clauselist,
generate_join_implied_equalities(root,
- bms_union(rel->relids,
- otherrels),
+ bms_union(rel->relids,
+ otherrels),
otherrels,
rel));
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index e1d6b3e2234..d6050a616c7 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -154,7 +154,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* However, when a LATERAL subquery is involved, we have to be a bit
* laxer, because there will simply not be any paths for the joinrel that
* aren't parameterized by whatever the subquery is parameterized by,
- * unless its parameterization is resolved within the joinrel. Hence, add
+ * unless its parameterization is resolved within the joinrel. Hence, add
* to param_source_rels anything that is laterally referenced in either
* input and is not in the join already.
*/
@@ -507,7 +507,7 @@ sort_inner_and_outer(PlannerInfo *root,
* sort.
*
* This function intentionally does not consider parameterized input
- * paths, except when the cheapest-total is parameterized. If we did so,
+ * paths, except when the cheapest-total is parameterized. If we did so,
* we'd have a combinatorial explosion of mergejoin paths of dubious
* value. This interacts with decisions elsewhere that also discriminate
* against mergejoins with parameterized inputs; see comments in
diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c
index 6f64695e990..a7db69c85bf 100644
--- a/src/backend/optimizer/plan/analyzejoins.c
+++ b/src/backend/optimizer/plan/analyzejoins.c
@@ -355,7 +355,7 @@ remove_rel_from_query(PlannerInfo *root, int relid, Relids joinrelids)
* Likewise remove references from LateralJoinInfo data structures.
*
* If we are deleting a LATERAL subquery, we can forget its
- * LateralJoinInfo altogether. Otherwise, make sure the target is not
+ * LateralJoinInfo altogether. Otherwise, make sure the target is not
* included in any lateral_lhs set. (It probably can't be, since that
* should have precluded deciding to remove it; but let's cope anyway.)
*/
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 84ca67473bb..839ed9dde40 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -315,12 +315,12 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
newvars = NIL;
foreach(lc, vars)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
node = copyObject(node);
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
/* Adjustment is easy since it's just one node */
var->varlevelsup = 0;
@@ -328,7 +328,7 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex)
else if (IsA(node, PlaceHolderVar))
{
PlaceHolderVar *phv = (PlaceHolderVar *) node;
- int levelsup = phv->phlevelsup;
+ int levelsup = phv->phlevelsup;
/* Have to work harder to adjust the contained expression too */
if (levelsup != 0)
@@ -389,7 +389,7 @@ create_lateral_join_info(PlannerInfo *root)
{
RelOptInfo *brel = root->simple_rel_array[rti];
Relids lateral_relids;
- ListCell *lc;
+ ListCell *lc;
/* there may be empty slots corresponding to non-baserel RTEs */
if (brel == NULL)
@@ -406,11 +406,11 @@ create_lateral_join_info(PlannerInfo *root)
/* consider each laterally-referenced Var or PHV */
foreach(lc, brel->lateral_vars)
{
- Node *node = (Node *) lfirst(lc);
+ Node *node = (Node *) lfirst(lc);
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
add_lateral_info(root, rti, bms_make_singleton(var->varno));
lateral_relids = bms_add_member(lateral_relids,
@@ -439,7 +439,7 @@ create_lateral_join_info(PlannerInfo *root)
* If it's an appendrel parent, copy its lateral_relids to each child
* rel. We intentionally give each child rel the same minimum
* parameterization, even though it's quite possible that some don't
- * reference all the lateral rels. This is because any append path
+ * reference all the lateral rels. This is because any append path
* for the parent will have to have the same parameterization for
* every child anyway, and there's no value in forcing extra
* reparameterize_path() calls.
@@ -466,7 +466,7 @@ create_lateral_join_info(PlannerInfo *root)
* add_lateral_info
* Add a LateralJoinInfo to root->lateral_info_list, if needed
*
- * We suppress redundant list entries. The passed lhs set must be freshly
+ * We suppress redundant list entries. The passed lhs set must be freshly
* made; we free it if not used in a new list entry.
*/
static void
@@ -861,11 +861,11 @@ make_outerjoininfo(PlannerInfo *root,
Assert(jointype != JOIN_RIGHT);
/*
- * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of rels
- * appearing on the nullable side of an outer join. (It's somewhat unclear
- * what that would mean, anyway: what should we mark when a result row is
- * generated from no element of the nullable relation?) So, complain if
- * any nullable rel is FOR [KEY] UPDATE/SHARE.
+ * Presently the executor cannot support FOR [KEY] UPDATE/SHARE marking of
+ * rels appearing on the nullable side of an outer join. (It's somewhat
+ * unclear what that would mean, anyway: what should we mark when a result
+ * row is generated from no element of the nullable relation?) So,
+ * complain if any nullable rel is FOR [KEY] UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
* parser. It's because the parser hasn't got enough info --- consider
@@ -1721,7 +1721,7 @@ distribute_restrictinfo_to_rels(PlannerInfo *root,
* that provides all its variables.
*
* "nullable_relids" is the set of relids used in the expressions that are
- * potentially nullable below the expressions. (This has to be supplied by
+ * potentially nullable below the expressions. (This has to be supplied by
* caller because this function is used after deconstruct_jointree, so we
* don't have knowledge of where the clause items came from.)
*
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 5bbfd2377eb..090ae0b494c 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -260,8 +260,8 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist,
* We have to replace Aggrefs with Params in equivalence classes too, else
* ORDER BY or DISTINCT on an optimized aggregate will fail. We don't
* need to process child eclass members though, since they aren't of
- * interest anymore --- and replace_aggs_with_params_mutator isn't able
- * to handle Aggrefs containing translated child Vars, anyway.
+ * interest anymore --- and replace_aggs_with_params_mutator isn't able to
+ * handle Aggrefs containing translated child Vars, anyway.
*
* Note: at some point it might become necessary to mutate other data
* structures too, such as the query's sortClause or distinctClause. Right
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index df274fe7830..d80c26420fa 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -52,9 +52,9 @@ planner_hook_type planner_hook = NULL;
#define EXPRKIND_QUAL 0
#define EXPRKIND_TARGET 1
#define EXPRKIND_RTFUNC 2
-#define EXPRKIND_RTFUNC_LATERAL 3
+#define EXPRKIND_RTFUNC_LATERAL 3
#define EXPRKIND_VALUES 4
-#define EXPRKIND_VALUES_LATERAL 5
+#define EXPRKIND_VALUES_LATERAL 5
#define EXPRKIND_LIMIT 6
#define EXPRKIND_APPINFO 7
#define EXPRKIND_PHV 8
@@ -571,9 +571,9 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
returningLists = NIL;
/*
- * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
- * have dealt with fetching non-locked marked rows, else we need
- * to have ModifyTable do that.
+ * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node
+ * will have dealt with fetching non-locked marked rows, else we
+ * need to have ModifyTable do that.
*/
if (parse->rowMarks)
rowMarks = NIL;
@@ -964,8 +964,8 @@ inheritance_planner(PlannerInfo *root)
root->simple_rel_array = save_rel_array;
/*
- * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will have
- * dealt with fetching non-locked marked rows, else we need to have
+ * If there was a FOR [KEY] UPDATE/SHARE clause, the LockRows node will
+ * have dealt with fetching non-locked marked rows, else we need to have
* ModifyTable do that.
*/
if (parse->rowMarks)
@@ -1060,7 +1060,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*/
current_pathkeys = make_pathkeys_for_sortclauses(root,
set_sortclauses,
- result_plan->targetlist);
+ result_plan->targetlist);
/*
* We should not need to call preprocess_targetlist, since we must be
@@ -1075,8 +1075,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
tlist);
/*
- * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have checked
- * already, but let's make sure).
+ * Can't handle FOR [KEY] UPDATE/SHARE here (parser should have
+ * checked already, but let's make sure).
*/
if (parse->rowMarks)
ereport(ERROR,
@@ -1485,7 +1485,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* it's not worth trying to avoid it. In particular, think not to
* skip adding the Result if the initial window_tlist matches the
* top-level plan node's output, because we might change the tlist
- * inside the following loop.) Note that on second and subsequent
+ * inside the following loop.) Note that on second and subsequent
* passes through the following loop, the top-level node will be a
* WindowAgg which we know can project; so we only need to check
* once.
@@ -1500,14 +1500,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* The "base" targetlist for all steps of the windowing process is
- * a flat tlist of all Vars and Aggs needed in the result. (In
+ * a flat tlist of all Vars and Aggs needed in the result. (In
* some cases we wouldn't need to propagate all of these all the
* way to the top, since they might only be needed as inputs to
* WindowFuncs. It's probably not worth trying to optimize that
* though.) We also add window partitioning and sorting
* expressions to the base tlist, to ensure they're computed only
* once at the bottom of the stack (that's critical for volatile
- * functions). As we climb up the stack, we'll add outputs for
+ * functions). As we climb up the stack, we'll add outputs for
* the WindowFuncs computed at each level.
*/
window_tlist = make_windowInputTargetList(root,
@@ -1516,7 +1516,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
/*
* The copyObject steps here are needed to ensure that each plan
- * node has a separately modifiable tlist. (XXX wouldn't a
+ * node has a separately modifiable tlist. (XXX wouldn't a
* shallow list copy do for that?)
*/
result_plan->targetlist = (List *) copyObject(window_tlist);
@@ -1543,7 +1543,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* plan's tlist for any partitioning or ordering columns that
* aren't plain Vars. (In theory, make_windowInputTargetList
* should have provided all such columns, but let's not assume
- * that here.) Furthermore, this way we can use existing
+ * that here.) Furthermore, this way we can use existing
* infrastructure to identify which input columns are the
* interesting ones.
*/
@@ -1741,9 +1741,9 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
}
/*
- * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node. (Note: we
- * intentionally test parse->rowMarks not root->rowMarks here. If there
- * are only non-locking rowmarks, they should be handled by the
+ * If there is a FOR [KEY] UPDATE/SHARE clause, add the LockRows node.
+ * (Note: we intentionally test parse->rowMarks not root->rowMarks here.
+ * If there are only non-locking rowmarks, they should be handled by the
* ModifyTable node instead.)
*/
if (parse->rowMarks)
@@ -1927,9 +1927,9 @@ preprocess_rowmarks(PlannerInfo *root)
if (parse->rowMarks)
{
/*
- * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside grouping,
- * since grouping renders a reference to individual tuple CTIDs
- * invalid. This is also checked at parse time, but that's
+ * We've got trouble if FOR [KEY] UPDATE/SHARE appears inside
+ * grouping, since grouping renders a reference to individual tuple
+ * CTIDs invalid. This is also checked at parse time, but that's
* insufficient because of rule substitution, query pullup, etc.
*/
CheckSelectLocking(parse);
@@ -1937,7 +1937,8 @@ preprocess_rowmarks(PlannerInfo *root)
else
{
/*
- * We only need rowmarks for UPDATE, DELETE, or FOR [KEY] UPDATE/SHARE.
+ * We only need rowmarks for UPDATE, DELETE, or FOR [KEY]
+ * UPDATE/SHARE.
*/
if (parse->commandType != CMD_UPDATE &&
parse->commandType != CMD_DELETE)
@@ -2238,7 +2239,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction,
*
* If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding
* a Limit node. This is worth checking for because "OFFSET 0" is a common
- * locution for an optimization fence. (Because other places in the planner
+ * locution for an optimization fence. (Because other places in the planner
* merely check whether parse->limitOffset isn't NULL, it will still work as
* an optimization fence --- we're just suppressing unnecessary run-time
* overhead.)
@@ -2273,7 +2274,7 @@ limit_needed(Query *parse)
/* Treat NULL as no offset; the executor would too */
if (!((Const *) node)->constisnull)
{
- int64 offset = DatumGetInt64(((Const *) node)->constvalue);
+ int64 offset = DatumGetInt64(((Const *) node)->constvalue);
/* Executor would treat less-than-zero same as zero */
if (offset > 0)
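Condensed, limit_needed() returns false only when both clauses are provably no-ops. A minimal sketch under simplified assumptions (a hypothetical ClauseSketch instead of Const nodes):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct
    {
        bool    present;    /* clause exists */
        bool    is_const;   /* clause folded to a constant */
        bool    is_null;    /* constant is NULL */
        int64_t value;
    } ClauseSketch;         /* hypothetical stand-in for a Const node */

    static bool
    limit_needed_sketch(ClauseSketch limit, ClauseSketch offset)
    {
        if (limit.present && !(limit.is_const && limit.is_null))
            return true;    /* a real LIMIT must be enforced */
        if (offset.present && (!offset.is_const ||
                               (!offset.is_null && offset.value > 0)))
            return true;    /* a possibly-positive OFFSET must be enforced */
        return false;       /* OFFSET 0 / LIMIT NULL: keep the fence,
                             * skip the node */
    }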
@@ -3107,7 +3108,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
*
* When grouping_planner inserts one or more WindowAgg nodes into the plan,
* this function computes the initial target list to be computed by the node
- * just below the first WindowAgg. This list must contain all values needed
+ * just below the first WindowAgg. This list must contain all values needed
* to evaluate the window functions, compute the final target list, and
* perform any required final sort step. If multiple WindowAggs are needed,
* each intermediate one adds its window function results onto this tlist;
@@ -3115,7 +3116,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
*
* This function is much like make_subplanTargetList, though not quite enough
* like it to share code. As in that function, we flatten most expressions
- * into their component variables. But we do not want to flatten window
+ * into their component variables. But we do not want to flatten window
* PARTITION BY/ORDER BY clauses, since that might result in multiple
* evaluations of them, which would be bad (possibly even resulting in
* inconsistent answers, if they contain volatile functions). Also, we must
@@ -3472,7 +3473,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
rte = makeNode(RangeTblEntry);
rte->rtekind = RTE_RELATION;
rte->relid = tableOid;
- rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
+ rte->relkind = RELKIND_RELATION; /* Don't be too picky. */
rte->lateral = false;
rte->inh = false;
rte->inFromCl = true;
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index bbdd8dc2d24..52842931ec5 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -608,7 +608,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode)
*
* If this jointree node is within either side of an outer join, then
* lowest_outer_join references the lowest such JoinExpr node; otherwise
- * it is NULL. We use this to constrain the effects of LATERAL subqueries.
+ * it is NULL. We use this to constrain the effects of LATERAL subqueries.
*
* If this jointree node is within the nullable side of an outer join, then
* lowest_nulling_outer_join references the lowest such JoinExpr node;
@@ -702,11 +702,11 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
case JOIN_INNER:
j->larg = pull_up_subqueries_recurse(root, j->larg,
lowest_outer_join,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
j->rarg = pull_up_subqueries_recurse(root, j->rarg,
lowest_outer_join,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
break;
case JOIN_LEFT:
@@ -714,7 +714,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
case JOIN_ANTI:
j->larg = pull_up_subqueries_recurse(root, j->larg,
j,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
j->rarg = pull_up_subqueries_recurse(root, j->rarg,
j,
@@ -738,7 +738,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode,
NULL);
j->rarg = pull_up_subqueries_recurse(root, j->rarg,
j,
- lowest_nulling_outer_join,
+ lowest_nulling_outer_join,
NULL);
break;
default:
@@ -1080,7 +1080,7 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte)
/*
* Make a modifiable copy of the subquery's rtable, so we can adjust
- * upper-level Vars in it. There are no such Vars in the setOperations
+ * upper-level Vars in it. There are no such Vars in the setOperations
* tree proper, so fixing the rtable should be sufficient.
*/
rtable = copyObject(subquery->rtable);
@@ -1288,9 +1288,9 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
return false;
/*
- * Don't pull up if the RTE represents a security-barrier view; we couldn't
- * prevent information leakage once the RTE's Vars are scattered about in
- * the upper query.
+ * Don't pull up if the RTE represents a security-barrier view; we
+ * couldn't prevent information leakage once the RTE's Vars are scattered
+ * about in the upper query.
*/
if (rte->security_barrier)
return false;
@@ -1304,9 +1304,9 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte,
*/
if (rte->lateral && lowest_outer_join != NULL)
{
- Relids lvarnos = pull_varnos_of_level((Node *) subquery, 1);
- Relids jvarnos = get_relids_in_jointree((Node *) lowest_outer_join,
- true);
+ Relids lvarnos = pull_varnos_of_level((Node *) subquery, 1);
+ Relids jvarnos = get_relids_in_jointree((Node *) lowest_outer_join,
+ true);
if (!bms_is_subset(lvarnos, jvarnos))
return false;
@@ -1478,7 +1478,7 @@ replace_vars_in_jointree(Node *jtnode,
/*
* If the RangeTblRef refers to a LATERAL subquery (that isn't the
* same subquery we're pulling up), it might contain references to the
- * target subquery, which we must replace. We drive this from the
+ * target subquery, which we must replace. We drive this from the
* jointree scan, rather than a scan of the rtable, for a couple of
* reasons: we can avoid processing no-longer-referenced RTEs, and we
* can use the appropriate setting of need_phvs depending on whether
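
For orientation on the is_simple_subquery hunk earlier in this file's diff: pull-up of a LATERAL subquery is refused unless every upper-level relid it references (lvarnos) lies inside the lowest outer join (jvarnos). As a toy illustration only, with relid sets reduced to 64-bit masks rather than PostgreSQL's real Bitmapset API:

    #include <stdbool.h>
    #include <stdint.h>

    /* Toy subset test: may the lateral subquery be pulled up?
     * True iff every relid in lvarnos also appears in jvarnos. */
    static bool
    toy_relids_subset(uint64_t lvarnos, uint64_t jvarnos)
    {
        return (lvarnos & ~jvarnos) == 0;
    }
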
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 657a18b1be4..6d5b20406e6 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -3971,7 +3971,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod,
newexpr->funcresulttype = result_type;
newexpr->funcretset = false;
newexpr->funcvariadic = funcvariadic;
- newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
newexpr->funccollid = result_collid; /* doesn't matter */
newexpr->inputcollid = input_collid;
newexpr->args = args;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index f6ac06f4553..64b17051913 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -207,12 +207,12 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor,
*
* cheapest_total_path is normally the cheapest-total-cost unparameterized
* path; but if there are no unparameterized paths, we assign it to be the
- * best (cheapest least-parameterized) parameterized path. However, only
+ * best (cheapest least-parameterized) parameterized path. However, only
* unparameterized paths are considered candidates for cheapest_startup_path,
* so that will be NULL if there are no unparameterized paths.
*
* The cheapest_parameterized_paths list collects all parameterized paths
- * that have survived the add_path() tournament for this relation. (Since
+ * that have survived the add_path() tournament for this relation. (Since
* add_path ignores pathkeys and startup cost for a parameterized path,
* these will be paths that have best total cost or best row count for their
* parameterization.) cheapest_parameterized_paths always includes the
@@ -282,6 +282,7 @@ set_cheapest(RelOptInfo *parent_rel)
/* old path is less-parameterized, keep it */
break;
case BMS_DIFFERENT:
+
/*
* This means that neither path has the least possible
* parameterization for the rel. We'll sit on the old
@@ -328,8 +329,8 @@ set_cheapest(RelOptInfo *parent_rel)
parameterized_paths = lcons(cheapest_total_path, parameterized_paths);
/*
- * If there is no unparameterized path, use the best parameterized path
- * as cheapest_total_path (but not as cheapest_startup_path).
+ * If there is no unparameterized path, use the best parameterized path as
+ * cheapest_total_path (but not as cheapest_startup_path).
*/
if (cheapest_total_path == NULL)
cheapest_total_path = best_param_path;
@@ -501,7 +502,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
accept_new = false; /* old dominates new */
else if (compare_path_costs_fuzzily(new_path,
old_path,
- 1.0000000001,
+ 1.0000000001,
parent_rel->consider_startup) == COSTS_BETTER1)
remove_old = true; /* new dominates old */
else
@@ -1022,7 +1023,7 @@ create_result_path(List *quals)
pathnode->path.pathtype = T_Result;
pathnode->path.parent = NULL;
- pathnode->path.param_info = NULL; /* there are no other rels... */
+ pathnode->path.param_info = NULL; /* there are no other rels... */
pathnode->path.pathkeys = NIL;
pathnode->quals = quals;
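
For readers skimming the pathnode.c hunks above: the constant 1.0000000001 passed to compare_path_costs_fuzzily makes add_path treat nearly identical costs as ties, so a path dominates only when it wins by more than that factor. A minimal sketch of the fuzzy test, assuming a cut-down two-field cost struct (a hypothetical stand-in, not the real Path node):

    #include <stdbool.h>

    typedef struct
    {
        double      startup_cost;
        double      total_cost;
    } ToyPathCosts;             /* hypothetical stand-in for Path */

    /* True if 'a' is fuzzily cheaper than 'b' on total cost:
     * 'a' must beat 'b' by more than the fuzz factor to win. */
    static bool
    toy_fuzzily_cheaper(const ToyPathCosts *a, const ToyPathCosts *b,
                        double fuzz_factor)     /* e.g. 1.0000000001 */
    {
        return a->total_cost * fuzz_factor < b->total_cost;
    }
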
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 8f8da0523c5..16ff23443c5 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -678,7 +678,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
else
{
/*
- * Process INSERT ... VALUES with a single VALUES sublist. We treat
+ * Process INSERT ... VALUES with a single VALUES sublist. We treat
* this case separately for efficiency. The sublist is just computed
* directly as the Query's targetlist, with no VALUES RTE. So it
* works just like a SELECT without any FROM.
@@ -1178,7 +1178,7 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
/*
* Ordinarily there can't be any current-level Vars in the expression
* lists, because the namespace was empty ... but if we're inside CREATE
- * RULE, then NEW/OLD references might appear. In that case we have to
+ * RULE, then NEW/OLD references might appear. In that case we have to
* mark the VALUES RTE as LATERAL.
*/
if (pstate->p_rtable != NIL &&
@@ -2158,7 +2158,7 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt)
/*
* A materialized view would either need to save parameters for use in
- * maintaining/loading the data or prohibit them entirely. The latter
+ * maintaining/loading the data or prohibit them entirely. The latter
* seems safer and more sane.
*/
if (query_contains_extern_params(query))
@@ -2167,10 +2167,10 @@ transformCreateTableAsStmt(ParseState *pstate, CreateTableAsStmt *stmt)
errmsg("materialized views may not be defined using bound parameters")));
/*
- * For now, we disallow unlogged materialized views, because it
- * seems like a bad idea for them to just go to empty after a crash.
- * (If we could mark them as unpopulated, that would be better, but
- * that requires catalog changes which crash recovery can't presently
+ * For now, we disallow unlogged materialized views, because it seems
+ * like a bad idea for them to just go to empty after a crash. (If we
+ * could mark them as unpopulated, that would be better, but that
+ * requires catalog changes which crash recovery can't presently
* handle.)
*/
if (stmt->into->rel->relpersistence == RELPERSISTENCE_UNLOGGED)
@@ -2211,23 +2211,23 @@ CheckSelectLocking(Query *qry)
if (qry->distinctClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with DISTINCT clause")));
+ errmsg("row-level locks are not allowed with DISTINCT clause")));
if (qry->groupClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with GROUP BY clause")));
+ errmsg("row-level locks are not allowed with GROUP BY clause")));
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with HAVING clause")));
+ errmsg("row-level locks are not allowed with HAVING clause")));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with aggregate functions")));
+ errmsg("row-level locks are not allowed with aggregate functions")));
if (qry->hasWindowFuncs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row-level locks are not allowed with window functions")));
+ errmsg("row-level locks are not allowed with window functions")));
if (expression_returns_set((Node *) qry->targetList))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2394,8 +2394,8 @@ applyLockingClause(Query *qry, Index rtindex,
{
/*
* If the same RTE is specified for more than one locking strength,
- * treat is as the strongest. (Reasonable, since you can't take both a
- * shared and exclusive lock at the same time; it'll end up being
+ * treat it as the strongest. (Reasonable, since you can't take both
+ * a shared and exclusive lock at the same time; it'll end up being
* exclusive anyway.)
*
* We also consider that NOWAIT wins if it's specified both ways. This
diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl
index ffdf5256929..39b94bc4659 100644
--- a/src/backend/parser/check_keywords.pl
+++ b/src/backend/parser/check_keywords.pl
@@ -9,7 +9,7 @@
use warnings;
use strict;
-my $gram_filename = $ARGV[0];
+my $gram_filename = $ARGV[0];
my $kwlist_filename = $ARGV[1];
my $errors = 0;
@@ -52,6 +52,7 @@ line: while (<GRAM>)
if (!($kcat))
{
+
# Is this the beginning of a keyword list?
foreach $k (keys %keyword_categories)
{
@@ -81,6 +82,7 @@ line: while (<GRAM>)
}
elsif ($arr[$fieldIndexer] eq '/*')
{
+
# start of a multiline comment
$comment = 1;
next;
@@ -92,6 +94,7 @@ line: while (<GRAM>)
if ($arr[$fieldIndexer] eq ';')
{
+
# end of keyword list
$kcat = '';
next;
@@ -116,6 +119,7 @@ foreach $kcat (keys %keyword_categories)
foreach $kword (@{ $keywords{$kcat} })
{
+
# Some keywords have a _P suffix. Remove it for the comparison.
$bare_kword = $kword;
$bare_kword =~ s/_P$//;
@@ -206,6 +210,7 @@ kwlist_line: while (<KWLIST>)
}
else
{
+
# Remove it from the hash, so that we can
# complain at the end if there's keywords left
# that were not found in kwlist.h
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index a944a4d4a8d..7380618fae3 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -286,7 +286,7 @@ transformAggregateCall(ParseState *pstate, Aggref *agg,
if (errkind)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("aggregate functions are not allowed in %s",
ParseExprKindName(pstate->p_expr_kind)),
parser_errposition(pstate, agg->location)));
@@ -554,7 +554,7 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc,
if (errkind)
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("window functions are not allowed in %s",
ParseExprKindName(pstate->p_expr_kind)),
parser_errposition(pstate, wfunc->location)));
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 1915210bab5..cbfb43188c1 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -604,7 +604,7 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
* *top_rti: receives the rangetable index of top_rte. (Ditto.)
*
* *namespace: receives a List of ParseNamespaceItems for the RTEs exposed
- * as table/column names by this item. (The lateral_only flags in these items
+ * as table/column names by this item. (The lateral_only flags in these items
* are indeterminate and should be explicitly set by the caller before use.)
*/
static Node *
@@ -715,8 +715,8 @@ transformFromClauseItem(ParseState *pstate, Node *n,
/*
* Make the left-side RTEs available for LATERAL access within the
* right side, by temporarily adding them to the pstate's namespace
- * list. Per SQL:2008, if the join type is not INNER or LEFT then
- * the left-side names must still be exposed, but it's an error to
+ * list. Per SQL:2008, if the join type is not INNER or LEFT then the
+ * left-side names must still be exposed, but it's an error to
* reference them. (Stupid design, but that's what it says.) Hence,
* we always push them into the namespace, but mark them as not
* lateral_ok if the jointype is wrong.
@@ -980,7 +980,7 @@ transformFromClauseItem(ParseState *pstate, Node *n,
*
* Note: if there are nested alias-less JOINs, the lower-level ones
* will remain in the list although they have neither p_rel_visible
- * nor p_cols_visible set. We could delete such list items, but it's
+ * nor p_cols_visible set. We could delete such list items, but it's
* unclear that it's worth expending cycles to do so.
*/
if (j->alias != NULL)
@@ -1282,20 +1282,20 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle,
contain_aggs_of_level((Node *) tle->expr, 0))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("aggregate functions are not allowed in %s",
ParseExprKindName(exprKind)),
parser_errposition(pstate,
- locate_agg_of_level((Node *) tle->expr, 0))));
+ locate_agg_of_level((Node *) tle->expr, 0))));
if (pstate->p_hasWindowFuncs &&
contain_windowfuncs((Node *) tle->expr))
ereport(ERROR,
(errcode(ERRCODE_WINDOWING_ERROR),
- /* translator: %s is name of a SQL construct, eg GROUP BY */
+ /* translator: %s is name of a SQL construct, eg GROUP BY */
errmsg("window functions are not allowed in %s",
ParseExprKindName(exprKind)),
parser_errposition(pstate,
- locate_windowfunc((Node *) tle->expr))));
+ locate_windowfunc((Node *) tle->expr))));
break;
case EXPR_KIND_ORDER_BY:
/* no extra checks needed */
@@ -1324,7 +1324,7 @@ checkTargetlistEntrySQL92(ParseState *pstate, TargetEntry *tle,
*
* node the ORDER BY, GROUP BY, or DISTINCT ON expression to be matched
* tlist the target list (passed by reference so we can append to it)
- * exprKind identifies clause type being processed
+ * exprKind identifies clause type being processed
*/
static TargetEntry *
findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
@@ -1491,7 +1491,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist,
*
* node the ORDER BY, GROUP BY, etc expression to be matched
* tlist the target list (passed by reference so we can append to it)
- * exprKind identifies clause type being processed
+ * exprKind identifies clause type being processed
*/
static TargetEntry *
findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist,
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 327557e0a38..7f0995fae1f 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -251,7 +251,7 @@ transformExprRecurse(ParseState *pstate, Node *expr)
break;
default:
elog(ERROR, "unrecognized A_Expr kind: %d", a->kind);
- result = NULL; /* keep compiler quiet */
+ result = NULL; /* keep compiler quiet */
break;
}
break;
@@ -1411,9 +1411,9 @@ transformSubLink(ParseState *pstate, SubLink *sublink)
return result;
/*
- * Check to see if the sublink is in an invalid place within the query.
- * We allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but
- * generally not in utility statements.
+ * Check to see if the sublink is in an invalid place within the query. We
+ * allow sublinks everywhere in SELECT/INSERT/UPDATE/DELETE, but generally
+ * not in utility statements.
*/
err = NULL;
switch (pstate->p_expr_kind)
@@ -2031,7 +2031,7 @@ transformXmlSerialize(ParseState *pstate, XmlSerialize *xs)
xexpr = makeNode(XmlExpr);
xexpr->op = IS_XMLSERIALIZE;
xexpr->args = list_make1(coerce_to_specific_type(pstate,
- transformExprRecurse(pstate, xs->expr),
+ transformExprRecurse(pstate, xs->expr),
XMLOID,
"XMLSERIALIZE"));
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index a01589a1d9c..a9254c8c3a2 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -285,7 +285,7 @@ isFutureCTE(ParseState *pstate, const char *refname)
*
* This is different from refnameRangeTblEntry in that it considers every
* entry in the ParseState's rangetable(s), not only those that are currently
- * visible in the p_namespace list(s). This behavior is invalid per the SQL
+ * visible in the p_namespace list(s). This behavior is invalid per the SQL
* spec, and it may give ambiguous results (there might be multiple equally
* valid matches, but only one will be returned). This must be used ONLY
* as a heuristic in giving suitable error messages. See errorMissingRTE.
@@ -639,7 +639,7 @@ colNameToVar(ParseState *pstate, char *colname, bool localonly,
*
* This is different from colNameToVar in that it considers every entry in
* the ParseState's rangetable(s), not only those that are currently visible
- * in the p_namespace list(s). This behavior is invalid per the SQL spec,
+ * in the p_namespace list(s). This behavior is invalid per the SQL spec,
* and it may give ambiguous results (there might be multiple equally valid
* matches, but only one will be returned). This must be used ONLY as a
* heuristic in giving suitable error messages. See errorMissingColumn.
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index e3397764d61..ca20e77ce6d 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -76,7 +76,7 @@ static int FigureColnameInternal(Node *node, char **name);
*
* node the (untransformed) parse tree for the value expression.
* expr the transformed expression, or NULL if caller didn't do it yet.
- * exprKind expression kind (EXPR_KIND_SELECT_TARGET, etc)
+ * exprKind expression kind (EXPR_KIND_SELECT_TARGET, etc)
* colname the column name to be assigned, or NULL if none yet set.
* resjunk true if the target should be marked resjunk, ie, it is not
* wanted in the final projected tuple.
@@ -1130,7 +1130,7 @@ ExpandColumnRefStar(ParseState *pstate, ColumnRef *cref,
* Transforms '*' (in the target list) into a list of targetlist entries.
*
* tlist entries are generated for each relation visible for unqualified
- * column name access. We do not consider qualified-name-only entries because
+ * column name access. We do not consider qualified-name-only entries because
* that would include input tables of aliasless JOINs, NEW/OLD pseudo-entries,
* etc.
*
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 9ad832bbb28..b426a453242 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -525,7 +525,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
if (cxt->isforeign)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("constraints are not supported on foreign tables"),
+ errmsg("constraints are not supported on foreign tables"),
parser_errposition(cxt->pstate,
constraint->location)));
cxt->ckconstraints = lappend(cxt->ckconstraints, constraint);
@@ -536,7 +536,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
if (cxt->isforeign)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("constraints are not supported on foreign tables"),
+ errmsg("constraints are not supported on foreign tables"),
parser_errposition(cxt->pstate,
constraint->location)));
if (constraint->keys == NIL)
@@ -553,9 +553,10 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
if (cxt->isforeign)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("constraints are not supported on foreign tables"),
+ errmsg("constraints are not supported on foreign tables"),
parser_errposition(cxt->pstate,
constraint->location)));
+
/*
* Fill in the current attribute's name and throw it into the
* list of FK constraints to be processed later.
@@ -718,7 +719,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
constr = tupleDesc->constr;
/*
- * Initialize column number map for map_variable_attnos(). We need this
+ * Initialize column number map for map_variable_attnos(). We need this
* since dropped columns in the source table aren't copied, so the new
* table can have different column numbers.
*/
@@ -1273,8 +1274,8 @@ generateClonedIndexStmt(CreateStmtContext *cxt, Relation source_idx,
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot convert whole-row table reference"),
- errdetail("Index \"%s\" contains a whole-row table reference.",
- RelationGetRelationName(source_idx))));
+ errdetail("Index \"%s\" contains a whole-row table reference.",
+ RelationGetRelationName(source_idx))));
index->whereClause = pred_tree;
}
@@ -1405,8 +1406,8 @@ transformIndexConstraints(CreateStmtContext *cxt)
/*
* Scan the index list and remove any redundant index specifications. This
* can happen if, for instance, the user writes UNIQUE PRIMARY KEY. A
- * strict reading of SQL would suggest raising an error instead, but
- * that strikes me as too anal-retentive. - tgl 2001-02-14
+ * strict reading of SQL would suggest raising an error instead, but that
+ * strikes me as too anal-retentive. - tgl 2001-02-14
*
* XXX in ALTER TABLE case, it'd be nice to look for duplicate
* pre-existing indexes, too.
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 37774977129..1cfebed51ca 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -54,7 +54,7 @@ typedef int IpcMemoryId; /* shared memory ID returned by shmget(2) */
#define MAP_HASSEMAPHORE 0
#endif
-#define PG_MMAP_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_HASSEMAPHORE)
+#define PG_MMAP_FLAGS (MAP_SHARED|MAP_ANONYMOUS|MAP_HASSEMAPHORE)
/* Some really old systems don't define MAP_FAILED. */
#ifndef MAP_FAILED
@@ -167,14 +167,14 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == EINVAL) ?
errhint("This error usually means that PostgreSQL's request for a shared memory "
- "segment exceeded your kernel's SHMMAX parameter, or possibly that "
+ "segment exceeded your kernel's SHMMAX parameter, or possibly that "
"it is less than "
"your kernel's SHMMIN parameter.\n"
"The PostgreSQL documentation contains more information about shared "
"memory configuration.") : 0,
(errno == ENOMEM) ?
errhint("This error usually means that PostgreSQL's request for a shared "
- "memory segment exceeded your kernel's SHMALL parameter. You may need "
+ "memory segment exceeded your kernel's SHMALL parameter. You may need "
"to reconfigure the kernel with larger SHMALL.\n"
"The PostgreSQL documentation contains more information about shared "
"memory configuration.") : 0,
@@ -183,7 +183,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, Size size)
"It occurs either if all available shared memory IDs have been taken, "
"in which case you need to raise the SHMMNI parameter in your kernel, "
"or because the system's overall limit for shared memory has been "
- "reached.\n"
+ "reached.\n"
"The PostgreSQL documentation contains more information about shared "
"memory configuration.") : 0));
}
@@ -384,14 +384,14 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
* settings.
*
* However, we disable this logic in the EXEC_BACKEND case, and fall back
- * to the old method of allocating the entire segment using System V shared
- * memory, because there's no way to attach an mmap'd segment to a process
- * after exec(). Since EXEC_BACKEND is intended only for developer use,
- * this shouldn't be a big problem.
+ * to the old method of allocating the entire segment using System V
+ * shared memory, because there's no way to attach an mmap'd segment to a
+ * process after exec(). Since EXEC_BACKEND is intended only for
+ * developer use, this shouldn't be a big problem.
*/
#ifndef EXEC_BACKEND
{
- long pagesize = sysconf(_SC_PAGE_SIZE);
+ long pagesize = sysconf(_SC_PAGE_SIZE);
/*
* Ensure request size is a multiple of pagesize.
@@ -406,23 +406,23 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
/*
* We assume that no one will attempt to run PostgreSQL 9.3 or later
* on systems that are ancient enough that anonymous shared memory is
- * not supported, such as pre-2.4 versions of Linux. If that turns out
- * to be false, we might need to add a run-time test here and do this
- * only if the running kernel supports it.
+ * not supported, such as pre-2.4 versions of Linux. If that turns
+ * out to be false, we might need to add a run-time test here and do
+ * this only if the running kernel supports it.
*/
- AnonymousShmem = mmap(NULL, size, PROT_READ|PROT_WRITE, PG_MMAP_FLAGS,
+ AnonymousShmem = mmap(NULL, size, PROT_READ | PROT_WRITE, PG_MMAP_FLAGS,
-1, 0);
if (AnonymousShmem == MAP_FAILED)
ereport(FATAL,
- (errmsg("could not map anonymous shared memory: %m"),
- (errno == ENOMEM) ?
- errhint("This error usually means that PostgreSQL's request "
- "for a shared memory segment exceeded available memory "
- "or swap space. To reduce the request size (currently "
- "%lu bytes), reduce PostgreSQL's shared memory usage, "
- "perhaps by reducing shared_buffers or "
- "max_connections.",
- (unsigned long) size) : 0));
+ (errmsg("could not map anonymous shared memory: %m"),
+ (errno == ENOMEM) ?
+ errhint("This error usually means that PostgreSQL's request "
+ "for a shared memory segment exceeded available memory "
+ "or swap space. To reduce the request size (currently "
+ "%lu bytes), reduce PostgreSQL's shared memory usage, "
+ "perhaps by reducing shared_buffers or "
+ "max_connections.",
+ (unsigned long) size) : 0));
AnonymousShmemSize = size;
/* Now we need only allocate a minimal-sized SysV shmem block. */
@@ -519,9 +519,9 @@ PGSharedMemoryCreate(Size size, bool makePrivate, int port)
/*
* If AnonymousShmem is NULL here, then we're not using anonymous shared
- * memory, and should return a pointer to the System V shared memory block.
- * Otherwise, the System V shared memory block is only a shim, and we must
- * return a pointer to the real block.
+ * memory, and should return a pointer to the System V shared memory
+ * block. Otherwise, the System V shared memory block is only a shim, and
+ * we must return a pointer to the real block.
*/
if (AnonymousShmem == NULL)
return hdr;
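
The sysv_shmem.c hunks above re-wrap comments around the 9.3 strategy: allocate the main segment with anonymous mmap (rounded up to a page multiple via sysconf) and keep only a small System V segment as an interlock, except under EXEC_BACKEND. A reduced sketch of the mmap half, assuming POSIX; error handling is collapsed to a NULL return and the BSD-only MAP_HASSEMAPHORE flag is omitted:

    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MAP_ANONYMOUS
    #define MAP_ANONYMOUS MAP_ANON  /* older BSDs spell it MAP_ANON */
    #endif

    /* Round the request up to a page multiple, then map it shared+anonymous. */
    static void *
    toy_map_anonymous_shmem(size_t size)
    {
        long        pagesize = sysconf(_SC_PAGE_SIZE);
        void       *ptr;

        if (pagesize > 0 && size % (size_t) pagesize != 0)
            size += (size_t) pagesize - size % (size_t) pagesize;

        ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        return (ptr == MAP_FAILED) ? NULL : ptr;
    }
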
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index b4af6972c41..cd8806165c4 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -163,7 +163,7 @@ typedef struct avw_dbase
Oid adw_datid;
char *adw_name;
TransactionId adw_frozenxid;
- MultiXactId adw_frozenmulti;
+ MultiXactId adw_frozenmulti;
PgStat_StatDBEntry *adw_entry;
} avw_dbase;
@@ -220,7 +220,7 @@ typedef struct WorkerInfoData
int wi_cost_delay;
int wi_cost_limit;
int wi_cost_limit_base;
-} WorkerInfoData;
+} WorkerInfoData;
typedef struct WorkerInfoData *WorkerInfo;
@@ -880,7 +880,7 @@ rebuild_database_list(Oid newdb)
int score;
int nelems;
HTAB *dbhash;
- dlist_iter iter;
+ dlist_iter iter;
/* use fresh stats */
autovac_refresh_stats();
@@ -949,8 +949,8 @@ rebuild_database_list(Oid newdb)
PgStat_StatDBEntry *entry;
/*
- * skip databases with no stat entries -- in particular, this gets
- * rid of dropped databases
+ * skip databases with no stat entries -- in particular, this gets rid
+ * of dropped databases
*/
entry = pgstat_fetch_stat_dbentry(avdb->adl_datid);
if (entry == NULL)
@@ -1162,7 +1162,7 @@ do_start_worker(void)
foreach(cell, dblist)
{
avw_dbase *tmp = lfirst(cell);
- dlist_iter iter;
+ dlist_iter iter;
/* Check to see if this one is at risk of wraparound */
if (TransactionIdPrecedes(tmp->adw_frozenxid, xidForceLimit))
@@ -1296,12 +1296,12 @@ static void
launch_worker(TimestampTz now)
{
Oid dbid;
- dlist_iter iter;
+ dlist_iter iter;
dbid = do_start_worker();
if (OidIsValid(dbid))
{
- bool found = false;
+ bool found = false;
/*
* Walk the database list and update the corresponding entry. If the
@@ -1776,7 +1776,7 @@ autovac_balance_cost(void)
cost_total = 0.0;
dlist_foreach(iter, &AutoVacuumShmem->av_runningWorkers)
{
- WorkerInfo worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
+ WorkerInfo worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
if (worker->wi_proc != NULL &&
worker->wi_cost_limit_base > 0 && worker->wi_cost_delay > 0)
@@ -1794,7 +1794,7 @@ autovac_balance_cost(void)
cost_avail = (double) vac_cost_limit / vac_cost_delay;
dlist_foreach(iter, &AutoVacuumShmem->av_runningWorkers)
{
- WorkerInfo worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
+ WorkerInfo worker = dlist_container(WorkerInfoData, wi_links, iter.cur);
if (worker->wi_proc != NULL &&
worker->wi_cost_limit_base > 0 && worker->wi_cost_delay > 0)
@@ -2631,7 +2631,7 @@ relation_needs_vacanalyze(Oid relid,
/* freeze parameters */
int freeze_max_age;
TransactionId xidForceLimit;
- MultiXactId multiForceLimit;
+ MultiXactId multiForceLimit;
AssertArg(classForm != NULL);
AssertArg(OidIsValid(relid));
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 5fb2d81118e..fdf6625c58b 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -107,7 +107,7 @@
*/
typedef struct
{
- RelFileNode rnode;
+ RelFileNode rnode;
ForkNumber forknum;
BlockNumber segno; /* see md.c for special values */
/* might add a real request-type field later; not needed yet */
@@ -930,8 +930,8 @@ CheckpointerShmemInit(void)
{
/*
* First time through, so initialize. Note that we zero the whole
- * requests array; this is so that CompactCheckpointerRequestQueue
- * can assume that any pad bytes in the request structs are zeroes.
+ * requests array; this is so that CompactCheckpointerRequestQueue can
+ * assume that any pad bytes in the request structs are zeroes.
*/
MemSet(CheckpointerShmem, 0, size);
SpinLockInit(&CheckpointerShmem->ckpt_lck);
diff --git a/src/backend/postmaster/fork_process.c b/src/backend/postmaster/fork_process.c
index 2d1e75613de..aa54721f5a5 100644
--- a/src/backend/postmaster/fork_process.c
+++ b/src/backend/postmaster/fork_process.c
@@ -101,10 +101,10 @@ fork_process(void)
#endif /* LINUX_OOM_SCORE_ADJ */
/*
- * Older Linux kernels have oom_adj not oom_score_adj. This works
- * similarly except with a different scale of adjustment values.
- * If it's necessary to build Postgres to work with either API,
- * you can define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ.
+ * Older Linux kernels have oom_adj not oom_score_adj. This works
+ * similarly except with a different scale of adjustment values. If
+ * it's necessary to build Postgres to work with either API, you can
+ * define both LINUX_OOM_SCORE_ADJ and LINUX_OOM_ADJ.
*/
#ifdef LINUX_OOM_ADJ
{
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index ffd4830cb05..2bb572ef686 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -246,7 +246,7 @@ PgArchiverMain(int argc, char *argv[])
elog(FATAL, "setsid() failed: %m");
#endif
- InitializeLatchSupport(); /* needed for latch waits */
+ InitializeLatchSupport(); /* needed for latch waits */
InitLatch(&mainloop_latch); /* initialize latch used in main loop */
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 29d986a65a1..ac20dffd988 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -2586,7 +2586,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
{
/*
* track_activities is disabled, but we last reported a
- * non-disabled state. As our final update, change the state and
+ * non-disabled state. As our final update, change the state and
* clear fields we will not be updating anymore.
*/
beentry->st_changecount++;
@@ -4401,9 +4401,9 @@ pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len)
* request's cutoff time, update it; otherwise there's nothing to do.
*
* Note that if a request is found, we return early and skip the below
- * check for clock skew. This is okay, since the only way for a DB request
- * to be present in the list is that we have been here since the last write
- * round.
+ * check for clock skew. This is okay, since the only way for a DB
+ * request to be present in the list is that we have been here since the
+ * last write round.
*/
slist_foreach(iter, &last_statrequests)
{
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index f07ed76881f..87e60621396 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -183,7 +183,7 @@ static Backend *ShmemBackendArray;
* List of background workers.
*
* A worker that requests a database connection during registration will have
- * rw_backend set, and will be present in BackendList. Note: do not rely on
+ * rw_backend set, and will be present in BackendList. Note: do not rely on
* rw_backend being non-NULL for shmem-connected workers!
*/
typedef struct RegisteredBgWorker
@@ -197,7 +197,7 @@ typedef struct RegisteredBgWorker
int rw_cookie;
#endif
slist_node rw_lnode; /* list link */
-} RegisteredBgWorker;
+} RegisteredBgWorker;
static slist_head BackgroundWorkerList = SLIST_STATIC_INIT(BackgroundWorkerList);
@@ -207,8 +207,10 @@ BackgroundWorker *MyBgworkerEntry = NULL;
/* The socket number we are listening for connections on */
int PostPortNumber;
+
/* The directory names for Unix socket(s) */
char *Unix_socket_directories;
+
/* The TCP listen address(es) */
char *ListenAddresses;
@@ -446,7 +448,7 @@ typedef struct
HANDLE procHandle;
DWORD procId;
} win32_deadchild_waitinfo;
-#endif /* WIN32 */
+#endif /* WIN32 */
static pid_t backend_forkexec(Port *port);
static pid_t internal_forkexec(int argc, char *argv[], Port *port);
@@ -1022,7 +1024,7 @@ PostmasterMain(int argc, char *argv[])
/* syntax error in list */
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for \"unix_socket_directories\"")));
+ errmsg("invalid list syntax for \"unix_socket_directories\"")));
}
foreach(l, elemlist)
@@ -1212,8 +1214,8 @@ PostmasterMain(int argc, char *argv[])
/*
* We can start up without the IDENT file, although it means that you
* cannot log in using any of the authentication methods that need a
- * user name mapping. load_ident() already logged the details of
- * error to the log.
+ * user name mapping. load_ident() already logged the details of error
+ * to the log.
*/
}
@@ -1414,7 +1416,7 @@ checkDataDir(void)
* we don't actually sleep so that they are quickly serviced.
*/
static void
-DetermineSleepTime(struct timeval *timeout)
+DetermineSleepTime(struct timeval * timeout)
{
TimestampTz next_wakeup = 0;
@@ -2969,7 +2971,7 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
rw = slist_container(RegisteredBgWorker, rw_lnode, siter.cur);
if (rw->rw_pid == 0)
- continue; /* not running */
+ continue; /* not running */
if (rw->rw_pid == pid)
{
/*
@@ -3819,9 +3821,9 @@ BackendInitialize(Port *port)
remote_host[0] = '\0';
remote_port[0] = '\0';
if ((ret = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
- remote_host, sizeof(remote_host),
- remote_port, sizeof(remote_port),
- (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV)) != 0)
+ remote_host, sizeof(remote_host),
+ remote_port, sizeof(remote_port),
+ (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV)) != 0)
ereport(WARNING,
(errmsg_internal("pg_getnameinfo_all() failed: %s",
gai_strerror(ret))));
@@ -4503,7 +4505,7 @@ SubPostmasterMain(int argc, char *argv[])
/* Attach process to shared data structures */
CreateSharedMemoryAndSemaphores(false, 0);
- AuxiliaryProcessMain(argc - 2, argv + 2); /* does not return */
+ AuxiliaryProcessMain(argc - 2, argv + 2); /* does not return */
}
if (strcmp(argv[1], "--forkavlauncher") == 0)
{
@@ -4519,7 +4521,7 @@ SubPostmasterMain(int argc, char *argv[])
/* Attach process to shared data structures */
CreateSharedMemoryAndSemaphores(false, 0);
- AutoVacLauncherMain(argc - 2, argv + 2); /* does not return */
+ AutoVacLauncherMain(argc - 2, argv + 2); /* does not return */
}
if (strcmp(argv[1], "--forkavworker") == 0)
{
@@ -4535,7 +4537,7 @@ SubPostmasterMain(int argc, char *argv[])
/* Attach process to shared data structures */
CreateSharedMemoryAndSemaphores(false, 0);
- AutoVacWorkerMain(argc - 2, argv + 2); /* does not return */
+ AutoVacWorkerMain(argc - 2, argv + 2); /* does not return */
}
if (strncmp(argv[1], "--forkbgworker=", 15) == 0)
{
@@ -4564,7 +4566,7 @@ SubPostmasterMain(int argc, char *argv[])
/* Do not want to attach to shared memory */
- PgArchiverMain(argc, argv); /* does not return */
+ PgArchiverMain(argc, argv); /* does not return */
}
if (strcmp(argv[1], "--forkcol") == 0)
{
@@ -4573,7 +4575,7 @@ SubPostmasterMain(int argc, char *argv[])
/* Do not want to attach to shared memory */
- PgstatCollectorMain(argc, argv); /* does not return */
+ PgstatCollectorMain(argc, argv); /* does not return */
}
if (strcmp(argv[1], "--forklog") == 0)
{
@@ -4582,7 +4584,7 @@ SubPostmasterMain(int argc, char *argv[])
/* Do not want to attach to shared memory */
- SysLoggerMain(argc, argv); /* does not return */
+ SysLoggerMain(argc, argv); /* does not return */
}
abort(); /* shouldn't get here */
@@ -5214,11 +5216,11 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
}
/*
- * Enforce maximum number of workers. Note this is overly restrictive:
- * we could allow more non-shmem-connected workers, because these don't
- * count towards the MAX_BACKENDS limit elsewhere. This doesn't really
- * matter for practical purposes; several million processes would need to
- * run on a single server.
+ * Enforce maximum number of workers. Note this is overly restrictive: we
+ * could allow more non-shmem-connected workers, because these don't count
+ * towards the MAX_BACKENDS limit elsewhere. This doesn't really matter
+ * for practical purposes; several million processes would need to run on
+ * a single server.
*/
if (++numworkers > maxworkers)
{
@@ -6156,7 +6158,7 @@ ShmemBackendArrayRemove(Backend *bn)
#ifdef WIN32
/*
- * Subset implementation of waitpid() for Windows. We assume pid is -1
+ * Subset implementation of waitpid() for Windows. We assume pid is -1
* (that is, check all child processes) and options is WNOHANG (don't wait).
*/
static pid_t
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index d113011be7c..e3b61025162 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -252,7 +252,7 @@ SysLoggerMain(int argc, char *argv[])
elog(FATAL, "setsid() failed: %m");
#endif
- InitializeLatchSupport(); /* needed for latch waits */
+ InitializeLatchSupport(); /* needed for latch waits */
/* Initialize private latch for use by signal handlers */
InitLatch(&sysLoggerLatch);
@@ -583,8 +583,8 @@ SysLogger_Start(void)
/*
* The initial logfile is created right in the postmaster, to verify that
- * the Log_directory is writable. We save the reference time so that
- * the syslogger child process can recompute this file name.
+ * the Log_directory is writable. We save the reference time so that the
+ * syslogger child process can recompute this file name.
*
* It might look a bit strange to re-do this during a syslogger restart,
* but we must do so since the postmaster closed syslogFile after the
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index 05fe8b0808c..ae2dbe43fe8 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -1506,7 +1506,7 @@ compact(struct nfa * nfa,
for (s = nfa->states; s != NULL; s = s->next)
{
nstates++;
- narcs += s->nouts + 1; /* need one extra for endmarker */
+ narcs += s->nouts + 1; /* need one extra for endmarker */
}
cnfa->stflags = (char *) MALLOC(nstates * sizeof(char));
@@ -1810,7 +1810,7 @@ dumpcstate(int st,
struct cnfa * cnfa,
FILE *f)
{
- struct carc * ca;
+ struct carc *ca;
int pos;
fprintf(f, "%d%s", st, (cnfa->stflags[st] & CNFA_NOPROGRESS) ? ":" : ".");
diff --git a/src/backend/regex/regprefix.c b/src/backend/regex/regprefix.c
index d1b7974cafe..abda80c094e 100644
--- a/src/backend/regex/regprefix.c
+++ b/src/backend/regex/regprefix.c
@@ -20,7 +20,7 @@
* forward declarations
*/
static int findprefix(struct cnfa * cnfa, struct colormap * cm,
- chr *string, size_t *slength);
+ chr *string, size_t *slength);
/*
@@ -38,7 +38,7 @@ static int findprefix(struct cnfa * cnfa, struct colormap * cm,
*
* This function does not analyze all complex cases (such as lookahead
* constraints) exactly. Therefore it is possible that some strings matching
- * the reported prefix or exact-match string do not satisfy the regex. But
+ * the reported prefix or exact-match string do not satisfy the regex. But
* it should never be the case that a string satisfying the regex does not
* match the reported prefix or exact-match string.
*/
@@ -79,8 +79,8 @@ pg_regprefix(regex_t *re,
/*
* Since a correct NFA should never contain any exit-free loops, it should
- * not be possible for our traversal to return to a previously visited
- * NFA state. Hence we need at most nstates chrs in the output string.
+ * not be possible for our traversal to return to a previously visited NFA
+ * state. Hence we need at most nstates chrs in the output string.
*/
*string = (chr *) MALLOC(cnfa->nstates * sizeof(chr));
if (*string == NULL)
@@ -122,8 +122,8 @@ findprefix(struct cnfa * cnfa,
/*
* The "pre" state must have only BOS/BOL outarcs, else pattern isn't
- * anchored left. If we have both BOS and BOL, they must go to the
- * same next state.
+ * anchored left. If we have both BOS and BOL, they must go to the same
+ * next state.
*/
st = cnfa->pre;
nextst = -1;
@@ -150,7 +150,7 @@ findprefix(struct cnfa * cnfa,
* We could find a state with multiple out-arcs that are all labeled with
* the same singleton color; this comes from patterns like "^ab(cde|cxy)".
* In that case we add the chr "c" to the output string but then exit the
- * loop with nextst == -1. This leaves a little bit on the table: if the
+ * loop with nextst == -1. This leaves a little bit on the table: if the
* pattern is like "^ab(cde|cdy)", we won't notice that "d" could be added
* to the prefix. But chasing multiple parallel state chains doesn't seem
* worth the trouble.
@@ -201,14 +201,14 @@ findprefix(struct cnfa * cnfa,
/*
* Identify the color's sole member chr and add it to the prefix
- * string. In general the colormap data structure doesn't provide a
+ * string. In general the colormap data structure doesn't provide a
* way to find color member chrs, except by trying GETCOLOR() on each
* possible chr value, which won't do at all. However, for the cases
* we care about it should be sufficient to test the "firstchr" value,
* that is the first chr ever added to the color. There are cases
* where this might no longer be a member of the color (so we do need
* to test), but none of them are likely to arise for a character that
- * is a member of a common prefix. If we do hit such a corner case,
+ * is a member of a common prefix. If we do hit such a corner case,
* we just fall out without adding anything to the prefix string.
*/
c = cm->cd[thiscolor].firstchr;
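
The regprefix.c comments above describe walking the compacted NFA one state at a time, emitting a chr only while each state has a single viable successor, and deliberately giving up on cases like "^ab(cde|cdy)" where branches share more than one leading chr. A toy analogue over plain strings (not the real NFA walk) shows the prefix idea; note that, unlike the real traversal, it would also find the shared 'd' in cde/cdy that the comment says is left on the table:

    #include <stddef.h>

    /* Longest common prefix of two alternative branches, e.g.
     * "cde" vs "cxy" -> 1 (just 'c'), "cde" vs "cdy" -> 2. */
    static size_t
    toy_common_prefix_len(const char *a, const char *b)
    {
        size_t      n = 0;

        while (a[n] != '\0' && a[n] == b[n])
            n++;
        return n;
    }
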
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index ab5262adfbf..12b5e24cac5 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -58,7 +58,7 @@ static void base_backup_cleanup(int code, Datum arg);
static void perform_base_backup(basebackup_options *opt, DIR *tblspcdir);
static void parse_basebackup_options(List *options, basebackup_options *opt);
static void SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli);
-static int compareWalFileNames(const void *a, const void *b);
+static int compareWalFileNames(const void *a, const void *b);
/* Was the backup currently in-progress initiated in recovery mode? */
static bool backup_started_in_recovery = false;
@@ -249,8 +249,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
* I'd rather not worry about timelines here, so scan pg_xlog and
* include all WAL files in the range between 'startptr' and 'endptr',
* regardless of the timeline the file is stamped with. If there are
- * some spurious WAL files belonging to timelines that don't belong
- * in this server's history, they will be included too. Normally there
+ * some spurious WAL files belonging to timelines that don't belong in
+ * this server's history, they will be included too. Normally there
* shouldn't be such files, but if there are, there's little harm in
* including them.
*/
@@ -262,7 +262,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
dir = AllocateDir("pg_xlog");
if (!dir)
ereport(ERROR,
- (errmsg("could not open directory \"%s\": %m", "pg_xlog")));
+ (errmsg("could not open directory \"%s\": %m", "pg_xlog")));
while ((de = ReadDir(dir, "pg_xlog")) != NULL)
{
/* Does it look like a WAL segment, and is it in the range? */
@@ -290,9 +290,9 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
CheckXLogRemoved(startsegno, ThisTimeLineID);
/*
- * Put the WAL filenames into an array, and sort. We send the files
- * in order from oldest to newest, to reduce the chance that a file
- * is recycled before we get a chance to send it over.
+ * Put the WAL filenames into an array, and sort. We send the files in
+ * order from oldest to newest, to reduce the chance that a file is
+ * recycled before we get a chance to send it over.
*/
nWalFiles = list_length(walFileList);
walFiles = palloc(nWalFiles * sizeof(char *));
@@ -310,28 +310,31 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
XLogFromFileName(walFiles[0], &tli, &segno);
if (segno != startsegno)
{
- char startfname[MAXFNAMELEN];
+ char startfname[MAXFNAMELEN];
+
XLogFileName(startfname, ThisTimeLineID, startsegno);
ereport(ERROR,
(errmsg("could not find WAL file \"%s\"", startfname)));
}
for (i = 0; i < nWalFiles; i++)
{
- XLogSegNo currsegno = segno;
- XLogSegNo nextsegno = segno + 1;
+ XLogSegNo currsegno = segno;
+ XLogSegNo nextsegno = segno + 1;
XLogFromFileName(walFiles[i], &tli, &segno);
if (!(nextsegno == segno || currsegno == segno))
{
- char nextfname[MAXFNAMELEN];
+ char nextfname[MAXFNAMELEN];
+
XLogFileName(nextfname, ThisTimeLineID, nextsegno);
ereport(ERROR,
- (errmsg("could not find WAL file \"%s\"", nextfname)));
+ (errmsg("could not find WAL file \"%s\"", nextfname)));
}
}
if (segno != endsegno)
{
- char endfname[MAXFNAMELEN];
+ char endfname[MAXFNAMELEN];
+
XLogFileName(endfname, ThisTimeLineID, endsegno);
ereport(ERROR,
(errmsg("could not find WAL file \"%s\"", endfname)));
@@ -373,7 +376,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
CheckXLogRemoved(segno, tli);
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
+ errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
}
_tarWriteHeader(pathbuf, NULL, &statbuf);
@@ -396,7 +399,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
CheckXLogRemoved(segno, tli);
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
+ errmsg("unexpected WAL file size \"%s\"", walFiles[i])));
}
/* XLogSegSize is a multiple of 512, so no need for padding */
@@ -408,13 +411,14 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
* file is required for recovery, and even that only if there happens
* to be a timeline switch in the first WAL segment that contains the
* checkpoint record, or if we're taking a base backup from a standby
- * server and the target timeline changes while the backup is taken.
+ * server and the target timeline changes while the backup is taken.
* But they are small and highly useful for debugging purposes, so
* better include them all, always.
*/
foreach(lc, historyFileList)
{
- char *fname = lfirst(lc);
+ char *fname = lfirst(lc);
+
snprintf(pathbuf, MAXPGPATH, XLOGDIR "/%s", fname);
if (lstat(pathbuf, &statbuf) != 0)
@@ -438,8 +442,8 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
static int
compareWalFileNames(const void *a, const void *b)
{
- char *fna = *((char **) a);
- char *fnb = *((char **) b);
+ char *fna = *((char **) a);
+ char *fnb = *((char **) b);
return strcmp(fna + 8, fnb + 8);
}
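
The comparator just above sorts WAL file names while ignoring their first 8 hex digits, which encode the timeline ID, so segments order by log position across timelines. A hedged usage sketch (the array contents are invented for illustration):

    #include <stdlib.h>
    #include <string.h>

    static int
    toy_compare_wal_names(const void *a, const void *b)
    {
        const char *fna = *(const char *const *) a;
        const char *fnb = *(const char *const *) b;

        /* Skip the 8-character timeline prefix; compare the rest. */
        return strcmp(fna + 8, fnb + 8);
    }

    /* e.g. qsort(walFiles, nWalFiles, sizeof(char *), toy_compare_wal_names); */
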
@@ -657,11 +661,12 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
pq_sendstring(&buf, "tli");
pq_sendint(&buf, 0, 4); /* table oid */
pq_sendint(&buf, 0, 2); /* attnum */
+
/*
* int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned.
*/
- pq_sendint(&buf, INT8OID, 4); /* type oid */
+ pq_sendint(&buf, INT8OID, 4); /* type oid */
pq_sendint(&buf, -1, 2);
pq_sendint(&buf, 0, 4);
pq_sendint(&buf, 0, 2);
@@ -729,7 +734,7 @@ sendFileWithContent(const char *filename, const char *content)
/*
* Include the tablespace directory pointed to by 'path' in the output tar
- * stream. If 'sizeonly' is true, we just calculate a total length and return
+ * stream. If 'sizeonly' is true, we just calculate a total length and return
* it, without actually sending anything.
*/
static int64
@@ -747,7 +752,8 @@ sendTablespace(char *path, bool sizeonly)
TABLESPACE_VERSION_DIRECTORY);
/*
- * Store a directory entry in the tar file so we get the permissions right.
+ * Store a directory entry in the tar file so we get the permissions
+ * right.
*/
if (lstat(pathbuf, &statbuf) != 0)
{
@@ -762,7 +768,7 @@ sendTablespace(char *path, bool sizeonly)
}
if (!sizeonly)
_tarWriteHeader(TABLESPACE_VERSION_DIRECTORY, NULL, &statbuf);
- size = 512; /* Size of the header just added */
+ size = 512; /* Size of the header just added */
/* Send all the files in the tablespace version directory */
size += sendDir(pathbuf, strlen(path), sizeonly);
@@ -818,9 +824,9 @@ sendDir(char *path, int basepathlen, bool sizeonly)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("the standby was promoted during online backup"),
- errhint("This means that the backup being taken is corrupt "
- "and should not be used. "
- "Try taking another online backup.")));
+ errhint("This means that the backup being taken is corrupt "
+ "and should not be used. "
+ "Try taking another online backup.")));
snprintf(pathbuf, MAXPGPATH, "%s/%s", path, de->d_name);
@@ -923,7 +929,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
}
else if (S_ISREG(statbuf.st_mode))
{
- bool sent = false;
+ bool sent = false;
if (!sizeonly)
sent = sendFile(pathbuf, pathbuf + basepathlen + 1, &statbuf,
@@ -933,7 +939,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
{
/* Add size, rounded up to 512byte block */
size += ((statbuf.st_size + 511) & ~511);
- size += 512; /* Size of the header of the file */
+ size += 512; /* Size of the header of the file */
}
}
else
@@ -967,7 +973,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
* and the file did not exist.
*/
static bool
-sendFile(char *readfilename, char *tarfilename, struct stat *statbuf,
+sendFile(char *readfilename, char *tarfilename, struct stat * statbuf,
bool missing_ok)
{
FILE *fp;
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index f7cc6e3c2f5..6bc0aa1c12c 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -51,7 +51,7 @@ static void libpqrcv_identify_system(TimeLineID *primary_tli);
static void libpqrcv_readtimelinehistoryfile(TimeLineID tli, char **filename, char **content, int *len);
static bool libpqrcv_startstreaming(TimeLineID tli, XLogRecPtr startpoint);
static void libpqrcv_endstreaming(TimeLineID *next_tli);
-static int libpqrcv_receive(int timeout, char **buffer);
+static int libpqrcv_receive(int timeout, char **buffer);
static void libpqrcv_send(const char *buffer, int nbytes);
static void libpqrcv_disconnect(void);
@@ -209,12 +209,13 @@ libpqrcv_endstreaming(TimeLineID *next_tli)
if (PQputCopyEnd(streamConn, NULL) <= 0 || PQflush(streamConn))
ereport(ERROR,
- (errmsg("could not send end-of-streaming message to primary: %s",
- PQerrorMessage(streamConn))));
+ (errmsg("could not send end-of-streaming message to primary: %s",
+ PQerrorMessage(streamConn))));
/*
* After COPY is finished, we should receive a result set indicating the
- * next timeline's ID, or just CommandComplete if the server was shut down.
+ * next timeline's ID, or just CommandComplete if the server was shut
+ * down.
*
* If we had not yet received CopyDone from the backend, PGRES_COPY_IN
* would also be possible. However, at the moment this function is only
@@ -456,7 +457,7 @@ libpqrcv_disconnect(void)
* 0 if no data was available within timeout, or wait was interrupted
* by signal.
*
- * -1 if the server ended the COPY.
+ * -1 if the server ended the COPY.
*
* ereports on error.
*/
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 975ee214ab4..5424281b425 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -443,7 +443,7 @@ SyncRepReleaseWaiters(void)
elog(DEBUG3, "released %d procs up to write %X/%X, %d procs up to flush %X/%X",
numwrite, (uint32) (MyWalSnd->write >> 32), (uint32) MyWalSnd->write,
- numflush, (uint32) (MyWalSnd->flush >> 32), (uint32) MyWalSnd->flush);
+ numflush, (uint32) (MyWalSnd->flush >> 32), (uint32) MyWalSnd->flush);
/*
* If we are managing the highest priority standby, though we weren't
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 911a66ba887..9261449d706 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -86,7 +86,7 @@ walrcv_disconnect_type walrcv_disconnect = NULL;
* corresponding the filename of recvFile.
*/
static int recvFile = -1;
-static TimeLineID recvFileTLI = 0;
+static TimeLineID recvFileTLI = 0;
static XLogSegNo recvSegNo = 0;
static uint32 recvOff = 0;
@@ -107,8 +107,8 @@ static struct
XLogRecPtr Flush; /* last byte + 1 flushed in the standby */
} LogstreamResult;
-static StringInfoData reply_message;
-static StringInfoData incoming_message;
+static StringInfoData reply_message;
+static StringInfoData incoming_message;
/*
* About SIGTERM handling:
@@ -332,12 +332,13 @@ WalReceiverMain(void)
/*
* Get any missing history files. We do this always, even when we're
- * not interested in that timeline, so that if we're promoted to become
- * the master later on, we don't select the same timeline that was
- * already used in the current master. This isn't bullet-proof - you'll
- * need some external software to manage your cluster if you need to
- * ensure that a unique timeline id is chosen in every case, but let's
- * avoid the confusion of timeline id collisions where we can.
+ * not interested in that timeline, so that if we're promoted to
+ * become the master later on, we don't select the same timeline that
+ * was already used in the current master. This isn't bullet-proof -
+ * you'll need some external software to manage your cluster if you
+ * need to ensure that a unique timeline id is chosen in every case,
+ * but let's avoid the confusion of timeline id collisions where we
+ * can.
*/
WalRcvFetchTimeLineHistoryFiles(startpointTLI, primaryTLI);
@@ -356,18 +357,18 @@ WalReceiverMain(void)
ThisTimeLineID = startpointTLI;
if (walrcv_startstreaming(startpointTLI, startpoint))
{
- bool endofwal = false;
+ bool endofwal = false;
if (first_stream)
ereport(LOG,
(errmsg("started streaming WAL from primary at %X/%X on timeline %u",
- (uint32) (startpoint >> 32), (uint32) startpoint,
+ (uint32) (startpoint >> 32), (uint32) startpoint,
startpointTLI)));
else
ereport(LOG,
- (errmsg("restarted WAL streaming at %X/%X on timeline %u",
- (uint32) (startpoint >> 32), (uint32) startpoint,
- startpointTLI)));
+ (errmsg("restarted WAL streaming at %X/%X on timeline %u",
+ (uint32) (startpoint >> 32), (uint32) startpoint,
+ startpointTLI)));
first_stream = false;
/* Initialize LogstreamResult and buffers for processing messages */
@@ -387,7 +388,8 @@ WalReceiverMain(void)
/*
* Emergency bailout if postmaster has died. This is to avoid
- * the necessity for manual cleanup of all postmaster children.
+ * the necessity for manual cleanup of all postmaster
+ * children.
*/
if (!PostmasterIsAlive())
exit(1);
@@ -422,7 +424,10 @@ WalReceiverMain(void)
{
if (len > 0)
{
- /* Something was received from master, so reset timeout */
+ /*
+ * Something was received from master, so reset
+ * timeout
+ */
last_recv_timestamp = GetCurrentTimestamp();
ping_sent = false;
XLogWalRcvProcessMsg(buf[0], &buf[1], len - 1);
@@ -457,12 +462,13 @@ WalReceiverMain(void)
/*
* We didn't receive anything new. If we haven't heard
* anything from the server for more than
- * wal_receiver_timeout / 2, ping the server. Also, if it's
- * been longer than wal_receiver_status_interval since the
- * last update we sent, send a status update to the master
- * anyway, to report any progress in applying WAL.
+ * wal_receiver_timeout / 2, ping the server. Also, if
+ * it's been longer than wal_receiver_status_interval
+ * since the last update we sent, send a status update to
+ * the master anyway, to report any progress in applying
+ * WAL.
*/
- bool requestReply = false;
+ bool requestReply = false;
/*
* Check if time since last receive from standby has
@@ -482,13 +488,13 @@ WalReceiverMain(void)
(errmsg("terminating walreceiver due to timeout")));
/*
- * We didn't receive anything new, for half of receiver
- * replication timeout. Ping the server.
+ * We didn't receive anything new, for half of
+ * receiver replication timeout. Ping the server.
*/
if (!ping_sent)
{
timeout = TimestampTzPlusMilliseconds(last_recv_timestamp,
- (wal_receiver_timeout/2));
+ (wal_receiver_timeout / 2));
if (now >= timeout)
{
requestReply = true;
@@ -511,9 +517,9 @@ WalReceiverMain(void)
DisableWalRcvImmediateExit();
/*
- * If the server had switched to a new timeline that we didn't know
- * about when we began streaming, fetch its timeline history file
- * now.
+ * If the server had switched to a new timeline that we didn't
+ * know about when we began streaming, fetch its timeline history
+ * file now.
*/
WalRcvFetchTimeLineHistoryFiles(startpointTLI, primaryTLI);
}
@@ -614,8 +620,8 @@ WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
if (walrcv->walRcvState == WALRCV_STOPPING)
{
/*
- * We should've received SIGTERM if the startup process wants
- * us to die, but might as well check it here too.
+ * We should've received SIGTERM if the startup process wants us
+ * to die, but might as well check it here too.
*/
SpinLockRelease(&walrcv->mutex);
exit(1);
@@ -643,7 +649,7 @@ WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
static void
WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
{
- TimeLineID tli;
+ TimeLineID tli;
for (tli = first; tli <= last; tli++)
{
@@ -664,8 +670,9 @@ WalRcvFetchTimeLineHistoryFiles(TimeLineID first, TimeLineID last)
DisableWalRcvImmediateExit();
/*
- * Check that the filename on the master matches what we calculated
- * ourselves. This is just a sanity check, it should always match.
+ * Check that the filename on the master matches what we
+ * calculated ourselves. This is just a sanity check, it should
+ * always match.
*/
TLHistoryFileName(expectedfname, tli);
if (strcmp(fname, expectedfname) != 0)
@@ -791,7 +798,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
int hdrlen;
XLogRecPtr dataStart;
XLogRecPtr walEnd;
- TimestampTz sendTime;
+ TimestampTz sendTime;
bool replyRequested;
resetStringInfo(&incoming_message);
@@ -812,7 +819,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
dataStart = pq_getmsgint64(&incoming_message);
walEnd = pq_getmsgint64(&incoming_message);
sendTime = IntegerTimestampToTimestampTz(
- pq_getmsgint64(&incoming_message));
+ pq_getmsgint64(&incoming_message));
ProcessWalSndrMessage(walEnd, sendTime);
buf += hdrlen;
@@ -833,7 +840,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len)
/* read the fields */
walEnd = pq_getmsgint64(&incoming_message);
sendTime = IntegerTimestampToTimestampTz(
- pq_getmsgint64(&incoming_message));
+ pq_getmsgint64(&incoming_message));
replyRequested = pq_getmsgbyte(&incoming_message);
ProcessWalSndrMessage(walEnd, sendTime);
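
The 'k' (keepalive) body parsed above is an 8-byte WAL end position, an 8-byte integer timestamp, and a 1-byte reply-requested flag, all big-endian on the wire. A sketch of the same layout in standalone C; the struct and helper names are illustrative, not the backend's:

    #include <stdint.h>

    typedef struct
    {
        uint64_t    walEnd;         /* server's current WAL end position */
        int64_t     sendTime;       /* integer timestamp at send */
        uint8_t     replyRequested; /* nonzero: reply immediately */
    } KeepaliveSketch;

    static uint64_t
    get_be64(const uint8_t *p)
    {
        uint64_t    v = 0;

        for (int i = 0; i < 8; i++)
            v = (v << 8) | p[i];    /* network byte order, like pq_getmsgint64 */
        return v;
    }

    static KeepaliveSketch
    parse_keepalive(const uint8_t *buf)
    {
        KeepaliveSketch m;

        m.walEnd = get_be64(buf);
        m.sendTime = (int64_t) get_be64(buf + 8);
        m.replyRequested = buf[16];
        return m;
    }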
@@ -890,8 +897,8 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
XLogFileNameP(recvFileTLI, recvSegNo))));
/*
- * Create .done file forcibly to prevent the streamed segment from
- * being archived later.
+ * Create .done file forcibly to prevent the streamed segment
+ * from being archived later.
*/
XLogFileName(xlogfname, recvFileTLI, recvSegNo);
XLogArchiveForceDone(xlogfname);
@@ -920,9 +927,9 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
if (lseek(recvFile, (off_t) startoff, SEEK_SET) < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not seek in log segment %s, to offset %u: %m",
- XLogFileNameP(recvFileTLI, recvSegNo),
- startoff)));
+ errmsg("could not seek in log segment %s, to offset %u: %m",
+ XLogFileNameP(recvFileTLI, recvSegNo),
+ startoff)));
recvOff = startoff;
}
@@ -1110,7 +1117,7 @@ XLogWalRcvSendHSFeedback(bool immed)
* Send feedback at most once per wal_receiver_status_interval.
*/
if (!TimestampDifferenceExceeds(sendTime, now,
- wal_receiver_status_interval * 1000))
+ wal_receiver_status_interval * 1000))
return;
sendTime = now;
}
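
The check above throttles feedback to at most one message per wal_receiver_status_interval; the GUC is in seconds, hence the * 1000 to get milliseconds. A small sketch of the same gate, assuming timestamps are plain millisecond counters:

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    may_send_feedback(int64_t now_ms, int64_t *last_send_ms, int interval_s)
    {
        if (now_ms - *last_send_ms < (int64_t) interval_s * 1000)
            return false;           /* too soon since the last message */
        *last_send_ms = now_ms;     /* remember when we actually sent */
        return true;
    }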
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 1dcb0f57f44..717cbfd61c6 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -94,12 +94,13 @@ bool am_cascading_walsender = false; /* Am I cascading WAL to
/* User-settable parameters for walsender */
int max_wal_senders = 0; /* the maximum number of concurrent walsenders */
-int wal_sender_timeout = 60 * 1000; /* maximum time to send one
+int wal_sender_timeout = 60 * 1000; /* maximum time to send one
* WAL data message */
+
/*
* State for WalSndWakeupRequest
*/
-bool wake_wal_senders = false;
+bool wake_wal_senders = false;
/*
* These variables are used similarly to openLogFile/Id/Seg/Off,
@@ -110,7 +111,7 @@ static XLogSegNo sendSegNo = 0;
static uint32 sendOff = 0;
/* Timeline ID of the currently open file */
-static TimeLineID curFileTimeLine = 0;
+static TimeLineID curFileTimeLine = 0;
/*
* These variables keep track of the state of the timeline we're currently
@@ -118,10 +119,10 @@ static TimeLineID curFileTimeLine = 0;
* the timeline is not the latest timeline on this server, and the server's
* history forked off from that timeline at sendTimeLineValidUpto.
*/
-static TimeLineID sendTimeLine = 0;
-static TimeLineID sendTimeLineNextTLI = 0;
-static bool sendTimeLineIsHistoric = false;
-static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
+static TimeLineID sendTimeLine = 0;
+static TimeLineID sendTimeLineNextTLI = 0;
+static bool sendTimeLineIsHistoric = false;
+static XLogRecPtr sendTimeLineValidUpto = InvalidXLogRecPtr;
/*
* How far have we sent WAL already? This is also advertised in
@@ -138,8 +139,9 @@ static StringInfoData tmpbuf;
* Timestamp of the last receipt of the reply from the standby.
*/
static TimestampTz last_reply_timestamp;
+
/* Have we sent a heartbeat message asking for reply, since last reply? */
-static bool ping_sent = false;
+static bool ping_sent = false;
/*
* While streaming WAL in Copy mode, streamingDoneSending is set to true
@@ -147,8 +149,8 @@ static bool ping_sent = false;
* after that. streamingDoneReceiving is set to true when we receive CopyDone
* from the other end. When both become true, it's time to exit Copy mode.
*/
-static bool streamingDoneSending;
-static bool streamingDoneReceiving;
+static bool streamingDoneSending;
+static bool streamingDoneReceiving;
/* Flags set by signal handlers for later service in main loop */
static volatile sig_atomic_t got_SIGHUP = false;
@@ -322,8 +324,8 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
off_t bytesleft;
/*
- * Reply with a result set with one row, and two columns. The first col
- * is the name of the history file, 2nd is the contents.
+ * Reply with a result set with one row, and two columns. The first col is
+ * the name of the history file, 2nd is the contents.
*/
TLHistoryFileName(histfname, cmd->timeline);
@@ -343,7 +345,7 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
pq_sendint(&buf, 0, 2); /* format code */
/* second field */
- pq_sendstring(&buf, "content"); /* col name */
+ pq_sendstring(&buf, "content"); /* col name */
pq_sendint(&buf, 0, 4); /* table oid */
pq_sendint(&buf, 0, 2); /* attnum */
pq_sendint(&buf, BYTEAOID, 4); /* type oid */
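
Each field descriptor in the RowDescription ('T') message built above follows the same fixed wire layout. A sketch of one descriptor as a struct, for orientation only; the backend serializes these piecewise with pq_sendstring/pq_sendint rather than using such a struct:

    #include <stdint.h>

    typedef struct
    {
        const char *name;       /* column name, e.g. "filename" or "content" */
        uint32_t    tableoid;   /* 0: result is not from a table */
        uint16_t    attnum;     /* 0: not a table column */
        uint32_t    typeoid;    /* e.g. TEXTOID or BYTEAOID */
        int16_t     typlen;     /* -1: variable-length type */
        int32_t     typmod;     /* 0 here: no type modifier */
        int16_t     format;     /* 0: text format */
    } FieldDescSketch;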
@@ -355,7 +357,7 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
/* Send a DataRow message */
pq_beginmessage(&buf, 'D');
pq_sendint(&buf, 2, 2); /* # of columns */
- pq_sendint(&buf, strlen(histfname), 4); /* col1 len */
+ pq_sendint(&buf, strlen(histfname), 4); /* col1 len */
pq_sendbytes(&buf, histfname, strlen(histfname));
fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0666);
@@ -373,15 +375,15 @@ SendTimeLineHistory(TimeLineHistoryCmd *cmd)
if (lseek(fd, 0, SEEK_SET) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not seek to beginning of file \"%s\": %m", path)));
+ errmsg("could not seek to beginning of file \"%s\": %m", path)));
pq_sendint(&buf, histfilelen, 4); /* col2 len */
bytesleft = histfilelen;
while (bytesleft > 0)
{
- char rbuf[BLCKSZ];
- int nread;
+ char rbuf[BLCKSZ];
+ int nread;
nread = read(fd, rbuf, sizeof(rbuf));
if (nread <= 0)
@@ -407,7 +409,7 @@ static void
StartReplication(StartReplicationCmd *cmd)
{
StringInfoData buf;
- XLogRecPtr FlushPtr;
+ XLogRecPtr FlushPtr;
/*
* We assume here that we're logging enough information in the WAL for
@@ -420,8 +422,8 @@ StartReplication(StartReplicationCmd *cmd)
/*
* Select the timeline. If it was given explicitly by the client, use
- * that. Otherwise use the timeline of the last replayed record, which
- * is kept in ThisTimeLineID.
+ * that. Otherwise use the timeline of the last replayed record, which is
+ * kept in ThisTimeLineID.
*/
if (am_cascading_walsender)
{
@@ -448,8 +450,8 @@ StartReplication(StartReplicationCmd *cmd)
sendTimeLineIsHistoric = true;
/*
- * Check that the timeline the client requested exists, and the
- * requested start location is on that timeline.
+ * Check that the timeline the client requested exists, and
+ * the requested start location is on that timeline.
*/
timeLineHistory = readTimeLineHistory(ThisTimeLineID);
switchpoint = tliSwitchPoint(cmd->timeline, timeLineHistory,
@@ -461,14 +463,14 @@ StartReplication(StartReplicationCmd *cmd)
* requested startpoint is on that timeline in our history.
*
* This is quite loose on purpose. We only check that we didn't
- * fork off the requested timeline before the switchpoint. We don't
- * check that we switched *to* it before the requested starting
- * point. This is because the client can legitimately request to
- * start replication from the beginning of the WAL segment that
- * contains switchpoint, but on the new timeline, so that it
- * doesn't end up with a partial segment. If you ask for a too old
- * starting point, you'll get an error later when we fail to find
- * the requested WAL segment in pg_xlog.
+ * fork off the requested timeline before the switchpoint. We
+ * don't check that we switched *to* it before the requested
+ * starting point. This is because the client can legitimately
+ * request to start replication from the beginning of the WAL
+ * segment that contains switchpoint, but on the new timeline, so
+ * that it doesn't end up with a partial segment. If you ask for a
+ * too old starting point, you'll get an error later when we fail
+ * to find the requested WAL segment in pg_xlog.
*
* XXX: we could be more strict here and only allow a startpoint
* that's older than the switchpoint, if it's still in the same
@@ -503,12 +505,13 @@ StartReplication(StartReplicationCmd *cmd)
if (!sendTimeLineIsHistoric || cmd->startpoint < sendTimeLineValidUpto)
{
/*
- * When we first start replication the standby will be behind the primary.
- * For some applications, for example, synchronous replication, it is
- * important to have a clear state for this initial catchup mode, so we
- * can trigger actions when we change streaming state later. We may stay
- * in this state for a long time, which is exactly why we want to be able
- * to monitor whether or not we are still here.
+ * When we first start replication the standby will be behind the
+ * primary. For some applications, for example, synchronous
+ * replication, it is important to have a clear state for this initial
+ * catchup mode, so we can trigger actions when we change streaming
+ * state later. We may stay in this state for a long time, which is
+ * exactly why we want to be able to monitor whether or not we are
+ * still here.
*/
WalSndSetState(WALSNDSTATE_CATCHUP);
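
WALSNDSTATE_CATCHUP exists so that monitoring can distinguish the initial catch-up phase from steady-state streaming. A toy sketch of the transition, using an invented enum rather than the backend's WalSndState:

    #include <stdbool.h>

    typedef enum
    {
        SND_STARTUP,
        SND_CATCHUP,                /* standby still behind the primary */
        SND_STREAMING               /* caught up; sending as WAL arrives */
    } SndStateSketch;

    static SndStateSketch
    advance_state(SndStateSketch cur, bool caughtup)
    {
        if (cur == SND_CATCHUP && caughtup)
            return SND_STREAMING;   /* the observable state change */
        return cur;
    }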
@@ -568,20 +571,21 @@ StartReplication(StartReplicationCmd *cmd)
if (sendTimeLineIsHistoric)
{
char tli_str[11];
- char startpos_str[8+1+8+1];
+ char startpos_str[8 + 1 + 8 + 1];
snprintf(tli_str, sizeof(tli_str), "%u", sendTimeLineNextTLI);
snprintf(startpos_str, sizeof(startpos_str), "%X/%X",
(uint32) (sendTimeLineValidUpto >> 32),
(uint32) sendTimeLineValidUpto);
- pq_beginmessage(&buf, 'T'); /* RowDescription */
- pq_sendint(&buf, 2, 2); /* 2 fields */
+ pq_beginmessage(&buf, 'T'); /* RowDescription */
+ pq_sendint(&buf, 2, 2); /* 2 fields */
/* Field header */
pq_sendstring(&buf, "next_tli");
- pq_sendint(&buf, 0, 4); /* table oid */
- pq_sendint(&buf, 0, 2); /* attnum */
+ pq_sendint(&buf, 0, 4); /* table oid */
+ pq_sendint(&buf, 0, 2); /* attnum */
+
/*
* int8 may seem like a surprising data type for this, but in theory
* int4 would not be wide enough for this, as TimeLineID is unsigned.
@@ -592,8 +596,8 @@ StartReplication(StartReplicationCmd *cmd)
pq_sendint(&buf, 0, 2);
pq_sendstring(&buf, "next_tli_startpos");
- pq_sendint(&buf, 0, 4); /* table oid */
- pq_sendint(&buf, 0, 2); /* attnum */
+ pq_sendint(&buf, 0, 4); /* table oid */
+ pq_sendint(&buf, 0, 2); /* attnum */
pq_sendint(&buf, TEXTOID, 4); /* type oid */
pq_sendint(&buf, -1, 2);
pq_sendint(&buf, 0, 4);
@@ -602,12 +606,12 @@ StartReplication(StartReplicationCmd *cmd)
/* Data row */
pq_beginmessage(&buf, 'D');
- pq_sendint(&buf, 2, 2); /* number of columns */
+ pq_sendint(&buf, 2, 2); /* number of columns */
pq_sendint(&buf, strlen(tli_str), 4); /* length */
pq_sendbytes(&buf, tli_str, strlen(tli_str));
- pq_sendint(&buf, strlen(startpos_str), 4); /* length */
+ pq_sendint(&buf, strlen(startpos_str), 4); /* length */
pq_sendbytes(&buf, startpos_str, strlen(startpos_str));
pq_endmessage(&buf);
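
The int8 choice above is easy to demonstrate: TimeLineID is an unsigned 32-bit value, so its upper half would be mangled by a signed int4. A runnable illustration:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint32_t    tli = 0xFFFFFFFFu;  /* largest possible timeline id */

        printf("as int32: %d\n", (int32_t) tli);        /* -1: overflowed */
        printf("as int64: %lld\n", (long long) tli);    /* 4294967295: intact */
        return 0;
    }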
@@ -840,7 +844,7 @@ ProcessStandbyReplyMessage(void)
writePtr = pq_getmsgint64(&reply_message);
flushPtr = pq_getmsgint64(&reply_message);
applyPtr = pq_getmsgint64(&reply_message);
- (void) pq_getmsgint64(&reply_message); /* sendTime; not used ATM */
+ (void) pq_getmsgint64(&reply_message); /* sendTime; not used ATM */
replyRequested = pq_getmsgbyte(&reply_message);
elog(DEBUG2, "write %X/%X flush %X/%X apply %X/%X%s",
@@ -887,7 +891,7 @@ ProcessStandbyHSFeedbackMessage(void)
* Decipher the reply message. The caller already consumed the msgtype
* byte.
*/
- (void) pq_getmsgint64(&reply_message); /* sendTime; not used ATM */
+ (void) pq_getmsgint64(&reply_message); /* sendTime; not used ATM */
feedbackXmin = pq_getmsgint(&reply_message, 4);
feedbackEpoch = pq_getmsgint(&reply_message, 4);
@@ -932,11 +936,11 @@ ProcessStandbyHSFeedbackMessage(void)
* cleanup conflicts on the standby server.
*
* There is a small window for a race condition here: although we just
- * checked that feedbackXmin precedes nextXid, the nextXid could have gotten
- * advanced between our fetching it and applying the xmin below, perhaps
- * far enough to make feedbackXmin wrap around. In that case the xmin we
- * set here would be "in the future" and have no effect. No point in
- * worrying about this since it's too late to save the desired data
+ * checked that feedbackXmin precedes nextXid, the nextXid could have
+ * gotten advanced between our fetching it and applying the xmin below,
+ * perhaps far enough to make feedbackXmin wrap around. In that case the
+ * xmin we set here would be "in the future" and have no effect. No point
+ * in worrying about this since it's too late to save the desired data
* anyway. Assuming that the standby sends us an increasing sequence of
* xmins, this could only happen during the first reply cycle, else our
* own xmin would prevent nextXid from advancing so far.
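
The standby reports only a 32-bit xmin, so the master must reconstruct its epoch: if the reported xmin is numerically greater than the next xid, it can only belong to the previous epoch. A sketch of that inference, simplifying what ProcessStandbyHSFeedbackMessage does with the xid/epoch pair:

    #include <stdint.h>

    static uint32_t
    infer_feedback_epoch(uint32_t feedbackXmin, uint32_t nextXid,
                         uint32_t nextEpoch)
    {
        if (feedbackXmin <= nextXid)
            return nextEpoch;       /* same epoch as the next xid */
        return nextEpoch - 1;       /* numerically ahead: previous epoch */
    }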
@@ -969,8 +973,8 @@ WalSndLoop(void)
ping_sent = false;
/*
- * Loop until we reach the end of this timeline or the client requests
- * to stop streaming.
+ * Loop until we reach the end of this timeline or the client requests to
+ * stop streaming.
*/
for (;;)
{
@@ -1082,8 +1086,8 @@ WalSndLoop(void)
{
/*
* If half of wal_sender_timeout has lapsed without receiving
- * any reply from standby, send a keep-alive message to standby
- * requesting an immediate reply.
+ * any reply from standby, send a keep-alive message to
+ * standby requesting an immediate reply.
*/
timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
wal_sender_timeout / 2);
@@ -1133,6 +1137,7 @@ WalSndLoop(void)
return;
send_failure:
+
/*
* Get here on send failure. Clean up and exit.
*
@@ -1290,7 +1295,7 @@ retry:
curFileTimeLine = sendTimeLine;
if (sendTimeLineIsHistoric)
{
- XLogSegNo endSegNo;
+ XLogSegNo endSegNo;
XLByteToSeg(sendTimeLineValidUpto, endSegNo);
if (sendSegNo == endSegNo)
@@ -1311,7 +1316,7 @@ retry:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("requested WAL segment %s has already been removed",
- XLogFileNameP(curFileTimeLine, sendSegNo))));
+ XLogFileNameP(curFileTimeLine, sendSegNo))));
else
ereport(ERROR,
(errcode_for_file_access(),
@@ -1327,9 +1332,9 @@ retry:
if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not seek in log segment %s to offset %u: %m",
- XLogFileNameP(curFileTimeLine, sendSegNo),
- startoff)));
+ errmsg("could not seek in log segment %s to offset %u: %m",
+ XLogFileNameP(curFileTimeLine, sendSegNo),
+ startoff)));
sendOff = startoff;
}
@@ -1344,9 +1349,9 @@ retry:
{
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from log segment %s, offset %u, length %lu: %m",
- XLogFileNameP(curFileTimeLine, sendSegNo),
- sendOff, (unsigned long) segbytes)));
+ errmsg("could not read from log segment %s, offset %u, length %lu: %m",
+ XLogFileNameP(curFileTimeLine, sendSegNo),
+ sendOff, (unsigned long) segbytes)));
}
/* Update state for read */
@@ -1431,16 +1436,16 @@ XLogSend(bool *caughtup)
/*
* Streaming the latest timeline on a standby.
*
- * Attempt to send all WAL that has already been replayed, so that
- * we know it's valid. If we're receiving WAL through streaming
+ * Attempt to send all WAL that has already been replayed, so that we
+ * know it's valid. If we're receiving WAL through streaming
* replication, it's also OK to send any WAL that has been received
* but not replayed.
*
* The timeline we're recovering from can change, or we can be
- * promoted. In either case, the current timeline becomes historic.
- * We need to detect that so that we don't try to stream past the
- * point where we switched to another timeline. We check for promotion
- * or timeline switch after calculating FlushPtr, to avoid a race
+ * promoted. In either case, the current timeline becomes historic. We
+ * need to detect that so that we don't try to stream past the point
+ * where we switched to another timeline. We check for promotion or
+ * timeline switch after calculating FlushPtr, to avoid a race
* condition: if the timeline becomes historic just after we checked
+ * that it was still current, it's still OK to stream it up to the
* FlushPtr that was calculated before it became historic.
@@ -1496,7 +1501,7 @@ XLogSend(bool *caughtup)
*
* Attempt to send all data that's already been written out and
* fsync'd to disk. We cannot go further than what's been written out
- * given the current implementation of XLogRead(). And in any case
+ * given the current implementation of XLogRead(). And in any case
* it's unsafe to send WAL that is not securely down to disk on the
* master: if the master subsequently crashes and restarts, slaves
* must not have applied any WAL that gets lost on the master.
@@ -1509,13 +1514,14 @@ XLogSend(bool *caughtup)
* forked to the next timeline, stop streaming.
*
* Note: We might already have sent WAL > sendTimeLineValidUpto. The
- * startup process will normally replay all WAL that has been received from
- * the master, before promoting, but if the WAL streaming is terminated at
- * a WAL page boundary, the valid portion of the timeline might end in the
- * middle of a WAL record. We might've already sent the first half of that
- * partial WAL record to the cascading standby, so that sentPtr >
- * sendTimeLineValidUpto. That's OK; the cascading standby can't replay the
- * partial WAL record either, so it can still follow our timeline switch.
+ * startup process will normally replay all WAL that has been received
+ * from the master, before promoting, but if the WAL streaming is
+ * terminated at a WAL page boundary, the valid portion of the timeline
+ * might end in the middle of a WAL record. We might've already sent the
+ * first half of that partial WAL record to the cascading standby, so that
+ * sentPtr > sendTimeLineValidUpto. That's OK; the cascading standby can't
+ * replay the partial WAL record either, so it can still follow our
+ * timeline switch.
*/
if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
{
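
A sketch of the clamp the comment above implies: on a historic timeline, never stream past the point where the server's history forked off. Names are illustrative; an XLogRecPtr is just a 64-bit position here:

    #include <stdbool.h>
    #include <stdint.h>

    static uint64_t
    clamp_send_upto(uint64_t sendRqstPtr, bool isHistoric, uint64_t validUpto)
    {
        if (isHistoric && sendRqstPtr > validUpto)
            return validUpto;       /* stop exactly at the timeline fork */
        return sendRqstPtr;
    }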
@@ -1585,8 +1591,8 @@ XLogSend(bool *caughtup)
pq_sendbyte(&output_message, 'w');
pq_sendint64(&output_message, startptr); /* dataStart */
- pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
- pq_sendint64(&output_message, 0); /* sendtime, filled in last */
+ pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
+ pq_sendint64(&output_message, 0); /* sendtime, filled in last */
/*
* Read the log directly into the output buffer to avoid extra memcpy
@@ -1643,16 +1649,16 @@ XLogSend(bool *caughtup)
static XLogRecPtr
GetStandbyFlushRecPtr(void)
{
- XLogRecPtr replayPtr;
- TimeLineID replayTLI;
- XLogRecPtr receivePtr;
- TimeLineID receiveTLI;
+ XLogRecPtr replayPtr;
+ TimeLineID replayTLI;
+ XLogRecPtr receivePtr;
+ TimeLineID receiveTLI;
XLogRecPtr result;
/*
* We can safely send what's already been replayed. Also, if walreceiver
- * is streaming WAL from the same timeline, we can send anything that
- * it has streamed, but hasn't been replayed yet.
+ * is streaming WAL from the same timeline, we can send anything that it
+ * has streamed, but hasn't been replayed yet.
*/
receivePtr = GetWalRcvWriteRecPtr(NULL, &receiveTLI);
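
The rule in the comment reduces to a two-case maximum. A minimal sketch, assuming 64-bit WAL positions and 32-bit timeline ids:

    #include <stdint.h>

    static uint64_t
    standby_flush_ptr(uint64_t replayPtr, uint32_t replayTLI,
                      uint64_t receivePtr, uint32_t receiveTLI)
    {
        /* received-but-unreplayed WAL only counts on the same timeline */
        if (receiveTLI == replayTLI && receivePtr > replayPtr)
            return receivePtr;
        return replayPtr;
    }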
@@ -1742,8 +1748,8 @@ WalSndSignals(void)
pqsignal(SIGHUP, WalSndSigHupHandler); /* set flag to read config
* file */
pqsignal(SIGINT, SIG_IGN); /* not used */
- pqsignal(SIGTERM, die); /* request shutdown */
- pqsignal(SIGQUIT, quickdie); /* hard crash time */
+ pqsignal(SIGTERM, die); /* request shutdown */
+ pqsignal(SIGQUIT, quickdie); /* hard crash time */
InitializeTimeouts(); /* establishes SIGALRM handler */
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, WalSndXLogSendHandler); /* request WAL sending */
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index cb59f139e14..fb576219627 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -241,7 +241,7 @@ DefineQueryRewrite(char *rulename,
ListCell *l;
Query *query;
bool RelisBecomingView = false;
- Oid ruleId = InvalidOid;
+ Oid ruleId = InvalidOid;
/*
* If we are installing an ON SELECT rule, we had better grab
@@ -517,11 +517,11 @@ DefineQueryRewrite(char *rulename,
* If the relation is becoming a view:
* - delete the associated storage files
* - get rid of any system attributes in pg_attribute; a view shouldn't
- * have any of those
+ * have any of those
* - remove the toast table; there is no need for it anymore, and its
- * presence would make vacuum slightly more complicated
+ * presence would make vacuum slightly more complicated
* - set relkind to RELKIND_VIEW, and adjust other pg_class fields
- * to be appropriate for a view
+ * to be appropriate for a view
*
* NB: we had better have AccessExclusiveLock to do this ...
* ---------------------------------------------------------------------
@@ -541,9 +541,9 @@ DefineQueryRewrite(char *rulename,
DeleteSystemAttributeTuples(event_relid);
/*
- * Drop the toast table if any. (This won't take care of updating
- * the toast fields in the relation's own pg_class entry; we handle
- * that below.)
+ * Drop the toast table if any. (This won't take care of updating the
+ * toast fields in the relation's own pg_class entry; we handle that
+ * below.)
*/
if (OidIsValid(toastrelid))
{
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 83f26e3f42e..01875fcd45f 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -56,7 +56,7 @@ static void rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation,
static void rewriteTargetListUD(Query *parsetree, RangeTblEntry *target_rte,
Relation target_relation);
static void markQueryForLocking(Query *qry, Node *jtnode,
- LockClauseStrength strength, bool noWait, bool pushedDown);
+ LockClauseStrength strength, bool noWait, bool pushedDown);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
int varno, Query *parsetree);
static Query *fireRIRrules(Query *parsetree, List *activeRIRs,
@@ -131,9 +131,9 @@ AcquireRewriteLocks(Query *parsetree, bool forUpdatePushedDown)
*
* If the relation is the query's result relation, then we
* need RowExclusiveLock. Otherwise, check to see if the
- * relation is accessed FOR [KEY] UPDATE/SHARE or not. We can't
- * just grab AccessShareLock because then the executor would
- * be trying to upgrade the lock, leading to possible
+ * relation is accessed FOR [KEY] UPDATE/SHARE or not. We
+ * can't just grab AccessShareLock because then the executor
+ * would be trying to upgrade the lock, leading to possible
* deadlocks.
*/
if (rt_index == parsetree->resultRelation)
@@ -1375,8 +1375,8 @@ ApplyRetrieveRule(Query *parsetree,
}
/*
- * If FOR [KEY] UPDATE/SHARE of view, be sure we get right initial lock on the
- * relations it references.
+ * If FOR [KEY] UPDATE/SHARE of view, be sure we get right initial lock on
+ * the relations it references.
*/
rc = get_parse_rowmark(parsetree, rt_index);
forUpdatePushedDown |= (rc != NULL);
@@ -1423,9 +1423,9 @@ ApplyRetrieveRule(Query *parsetree,
rte->modifiedCols = NULL;
/*
- * If FOR [KEY] UPDATE/SHARE of view, mark all the contained tables as implicit
- * FOR [KEY] UPDATE/SHARE, the same as the parser would have done if the view's
- * subquery had been written out explicitly.
+ * If FOR [KEY] UPDATE/SHARE of view, mark all the contained tables as
+ * implicit FOR [KEY] UPDATE/SHARE, the same as the parser would have done
+ * if the view's subquery had been written out explicitly.
*
* Note: we don't consider forUpdatePushedDown here; such marks will be
* made by recursing from the upper level in markQueryForLocking.
@@ -2089,9 +2089,9 @@ relation_is_updatable(Oid reloid, int req_events)
/*
* If the relation doesn't exist, say "false" rather than throwing an
- * error. This is helpful since scanning an information_schema view
- * under MVCC rules can result in referencing rels that were just
- * deleted according to a SnapshotNow probe.
+ * error. This is helpful since scanning an information_schema view under
+ * MVCC rules can result in referencing rels that were just deleted
+ * according to a SnapshotNow probe.
*/
if (rel == NULL)
return false;
@@ -2378,7 +2378,7 @@ rewriteTargetView(Query *parsetree, Relation view)
* that does not correspond to what happens in ordinary SELECT usage of a
* view: all referenced columns must have read permission, even if
* optimization finds that some of them can be discarded during query
- * transformation. The flattening we're doing here is an optional
+ * transformation. The flattening we're doing here is an optional
* optimization, too. (If you are unpersuaded and want to change this,
* note that applying adjust_view_column_set to view_rte->selectedCols is
* clearly *not* the right answer, since that neglects base-rel columns
@@ -2680,10 +2680,10 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
parsetree = rewriteTargetView(parsetree, rt_entry_relation);
/*
- * At this point product_queries contains any DO ALSO rule actions.
- * Add the rewritten query before or after those. This must match
- * the handling the original query would have gotten below, if
- * we allowed it to be included again.
+ * At this point product_queries contains any DO ALSO rule
+ * actions. Add the rewritten query before or after those. This
+ * must match the handling the original query would have gotten
+ * below, if we allowed it to be included again.
*/
if (parsetree->commandType == CMD_INSERT)
product_queries = lcons(parsetree, product_queries);
@@ -2701,43 +2701,43 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
returning = true;
}
- /*
- * If we got any product queries, recursively rewrite them --- but
- * first check for recursion!
- */
- if (product_queries != NIL)
- {
- ListCell *n;
- rewrite_event *rev;
+ /*
+ * If we got any product queries, recursively rewrite them --- but
+ * first check for recursion!
+ */
+ if (product_queries != NIL)
+ {
+ ListCell *n;
+ rewrite_event *rev;
- foreach(n, rewrite_events)
- {
- rev = (rewrite_event *) lfirst(n);
- if (rev->relation == RelationGetRelid(rt_entry_relation) &&
- rev->event == event)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("infinite recursion detected in rules for relation \"%s\"",
+ foreach(n, rewrite_events)
+ {
+ rev = (rewrite_event *) lfirst(n);
+ if (rev->relation == RelationGetRelid(rt_entry_relation) &&
+ rev->event == event)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("infinite recursion detected in rules for relation \"%s\"",
RelationGetRelationName(rt_entry_relation))));
- }
-
- rev = (rewrite_event *) palloc(sizeof(rewrite_event));
- rev->relation = RelationGetRelid(rt_entry_relation);
- rev->event = event;
- rewrite_events = lcons(rev, rewrite_events);
+ }
- foreach(n, product_queries)
- {
- Query *pt = (Query *) lfirst(n);
- List *newstuff;
+ rev = (rewrite_event *) palloc(sizeof(rewrite_event));
+ rev->relation = RelationGetRelid(rt_entry_relation);
+ rev->event = event;
+ rewrite_events = lcons(rev, rewrite_events);
- newstuff = RewriteQuery(pt, rewrite_events);
- rewritten = list_concat(rewritten, newstuff);
- }
+ foreach(n, product_queries)
+ {
+ Query *pt = (Query *) lfirst(n);
+ List *newstuff;
- rewrite_events = list_delete_first(rewrite_events);
+ newstuff = RewriteQuery(pt, rewrite_events);
+ rewritten = list_concat(rewritten, newstuff);
}
+ rewrite_events = list_delete_first(rewrite_events);
+ }
+
/*
* If there is an INSTEAD, and the original query has a RETURNING, we
* have to have found a RETURNING in the rule(s), else fail. (Because
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 9c83614e141..6ea91f5b211 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -1221,7 +1221,7 @@ replace_rte_variables_mutator(Node *node,
* If the expression tree contains a whole-row Var for the target RTE,
* the Var is not changed but *found_whole_row is returned as TRUE.
* For most callers this is an error condition, but we leave it to the caller
- * to report the error so that useful context can be provided. (In some
+ * to report the error so that useful context can be provided. (In some
* usages it would be appropriate to modify the Var's vartype and insert a
* ConvertRowtypeExpr node to map back to the original vartype. We might
* someday extend this function's API to support that. For now, the only
@@ -1235,10 +1235,10 @@ replace_rte_variables_mutator(Node *node,
typedef struct
{
- int target_varno; /* RTE index to search for */
- int sublevels_up; /* (current) nesting depth */
+ int target_varno; /* RTE index to search for */
+ int sublevels_up; /* (current) nesting depth */
const AttrNumber *attno_map; /* map array for user attnos */
- int map_length; /* number of entries in attno_map[] */
+ int map_length; /* number of entries in attno_map[] */
bool *found_whole_row; /* output flag */
} map_variable_attnos_context;
@@ -1256,8 +1256,8 @@ map_variable_attnos_mutator(Node *node,
var->varlevelsup == context->sublevels_up)
{
/* Found a matching variable, make the substitution */
- Var *newvar = (Var *) palloc(sizeof(Var));
- int attno = var->varattno;
+ Var *newvar = (Var *) palloc(sizeof(Var));
+ int attno = var->varattno;
*newvar = *var;
if (attno > 0)
@@ -1406,13 +1406,14 @@ ReplaceVarsFromTargetList_callback(Var *var,
return (Node *) var;
case REPLACEVARS_SUBSTITUTE_NULL:
+
/*
* If Var is of domain type, we should add a CoerceToDomain
* node, in case there is a NOT NULL domain constraint.
*/
return coerce_to_domain((Node *) makeNullConst(var->vartype,
var->vartypmod,
- var->varcollid),
+ var->varcollid),
InvalidOid, -1,
var->vartype,
COERCE_IMPLICIT_CAST,
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 1c414281ae5..43eb7d59f46 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -110,7 +110,7 @@ static volatile BufferDesc *BufferAlloc(SMgrRelation smgr,
bool *foundPtr);
static void FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln);
static void AtProcExit_Buffers(int code, Datum arg);
-static int rnode_comparator(const void *p1, const void *p2);
+static int rnode_comparator(const void *p1, const void *p2);
/*
@@ -476,9 +476,9 @@ ReadBuffer_common(SMgrRelation smgr, char relpersistence, ForkNumber forkNum,
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page in block %u of relation %s",
- blockNum,
- relpath(smgr->smgr_rnode, forkNum))));
+ errmsg("invalid page in block %u of relation %s",
+ blockNum,
+ relpath(smgr->smgr_rnode, forkNum))));
}
}
}
@@ -1220,7 +1220,8 @@ BufferSync(int flags)
/*
* Unless this is a shutdown checkpoint, we write only permanent, dirty
- * buffers. But at shutdown or end of recovery, we write all dirty buffers.
+ * buffers. But at shutdown or end of recovery, we write all dirty
+ * buffers.
*/
if (!((flags & CHECKPOINT_IS_SHUTDOWN) || (flags & CHECKPOINT_END_OF_RECOVERY)))
mask |= BM_PERMANENT;
@@ -1918,7 +1919,7 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
instr_time io_start,
io_time;
Block bufBlock;
- char *bufToWrite;
+ char *bufToWrite;
/*
* Acquire the buffer's io_in_progress lock. If StartBufferIO returns
@@ -1964,14 +1965,14 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
* However, this rule does not apply to unlogged relations, which will be
* lost after a crash anyway. Most unlogged relation pages do not bear
* LSNs since we never emit WAL records for them, and therefore flushing
- * up through the buffer LSN would be useless, but harmless. However, GiST
- * indexes use LSNs internally to track page-splits, and therefore unlogged
- * GiST pages bear "fake" LSNs generated by GetFakeLSNForUnloggedRel. It
- * is unlikely but possible that the fake LSN counter could advance past
- * the WAL insertion point; and if it did happen, attempting to flush WAL
- * through that location would fail, with disastrous system-wide
- * consequences. To make sure that can't happen, skip the flush if the
- * buffer isn't permanent.
+ * up through the buffer LSN would be useless, but harmless. However,
+ * GiST indexes use LSNs internally to track page-splits, and therefore
+ * unlogged GiST pages bear "fake" LSNs generated by
+ * GetFakeLSNForUnloggedRel. It is unlikely but possible that the fake
+ * LSN counter could advance past the WAL insertion point; and if it did
+ * happen, attempting to flush WAL through that location would fail, with
+ * disastrous system-wide consequences. To make sure that can't happen,
+ * skip the flush if the buffer isn't permanent.
*/
if (buf->flags & BM_PERMANENT)
XLogFlush(recptr);
@@ -2076,8 +2077,8 @@ XLogRecPtr
BufferGetLSNAtomic(Buffer buffer)
{
volatile BufferDesc *bufHdr = &BufferDescriptors[buffer - 1];
- char *page = BufferGetPage(buffer);
- XLogRecPtr lsn;
+ char *page = BufferGetPage(buffer);
+ XLogRecPtr lsn;
/*
* If we don't need locking for correctness, fastpath out.
@@ -2181,7 +2182,7 @@ DropRelFileNodeBuffers(RelFileNodeBackend rnode, ForkNumber forkNum,
void
DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
{
- int i,
+ int i,
n = 0;
RelFileNode *nodes;
bool use_bsearch;
@@ -2189,7 +2190,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
if (nnodes == 0)
return;
- nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
+ nodes = palloc(sizeof(RelFileNode) * nnodes); /* non-local relations */
/* If it's a local relation, it's localbuf.c's problem. */
for (i = 0; i < nnodes; i++)
@@ -2204,8 +2205,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
}
/*
- * If there are no non-local relations, then we're done. Release the memory
- * and return.
+ * If there are no non-local relations, then we're done. Release the
+ * memory and return.
*/
if (n == 0)
{
@@ -2215,8 +2216,8 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
/*
* For a low number of relations to drop, just use a simple walk-through to
- * save the bsearch overhead. The threshold to use is rather a guess than an
- * exactly determined value, as it depends on many factors (CPU and RAM
+ * save the bsearch overhead. The threshold to use is rather a guess than
+ * an exactly determined value, as it depends on many factors (CPU and RAM
* speeds, amount of shared buffers etc.).
*/
use_bsearch = n > DROP_RELS_BSEARCH_THRESHOLD;
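
A sketch of that threshold decision in standalone C. The struct and the memcmp-based comparison are illustrative stand-ins (the backend's rnode_comparator compares the RelFileNode fields individually), and the caller is assumed to have qsort()ed the array before any bsearch:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { uint32_t spc, db, rel; } RNodeSketch;

    static int
    rnode_cmp(const void *p1, const void *p2)
    {
        return memcmp(p1, p2, sizeof(RNodeSketch));
    }

    static int
    node_is_dropped(const RNodeSketch *tag, RNodeSketch *nodes, int n,
                    int threshold)
    {
        if (n <= threshold)
        {
            for (int j = 0; j < n; j++)     /* cheap linear walk-through */
                if (rnode_cmp(tag, &nodes[j]) == 0)
                    return 1;
            return 0;
        }
        /* nodes[] must already be sorted with rnode_cmp */
        return bsearch(tag, nodes, n, sizeof(*nodes), rnode_cmp) != NULL;
    }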
@@ -2237,7 +2238,7 @@ DropRelFileNodesAllBuffers(RelFileNodeBackend *rnodes, int nnodes)
if (!use_bsearch)
{
- int j;
+ int j;
for (j = 0; j < n; j++)
{
@@ -2397,8 +2398,8 @@ FlushRelationBuffers(Relation rel)
if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
(bufHdr->flags & BM_VALID) && (bufHdr->flags & BM_DIRTY))
{
- ErrorContextCallback errcallback;
- Page localpage;
+ ErrorContextCallback errcallback;
+ Page localpage;
localpage = (char *) LocalBufHdrGetBlock(bufHdr);
@@ -2575,17 +2576,17 @@ IncrBufferRefCount(Buffer buffer)
* This is essentially the same as MarkBufferDirty, except:
*
* 1. The caller does not write WAL; so if checksums are enabled, we may need
- * to write an XLOG_HINT WAL record to protect against torn pages.
+ * to write an XLOG_HINT WAL record to protect against torn pages.
* 2. The caller might have only share-lock instead of exclusive-lock on the
- * buffer's content lock.
+ * buffer's content lock.
* 3. This function does not guarantee that the buffer is always marked dirty
- * (due to a race condition), so it cannot be used for important changes.
+ * (due to a race condition), so it cannot be used for important changes.
*/
void
MarkBufferDirtyHint(Buffer buffer)
{
volatile BufferDesc *bufHdr;
- Page page = BufferGetPage(buffer);
+ Page page = BufferGetPage(buffer);
if (!BufferIsValid(buffer))
elog(ERROR, "bad buffer ID: %d", buffer);
@@ -2605,13 +2606,13 @@ MarkBufferDirtyHint(Buffer buffer)
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
- * tuples. So, be as quick as we can if the buffer is already dirty. We do
- * this by not acquiring spinlock if it looks like the status bits are
+ * tuples. So, be as quick as we can if the buffer is already dirty. We
+ * do this by not acquiring spinlock if it looks like the status bits are
* already set. Since we make this test unlocked, there's a chance we
* might fail to notice that the flags have just been cleared, and fail
* to reset them, due to memory-ordering issues. But since this function
- * is only intended to be used in cases where failing to write out the data
- * would be harmless anyway, it doesn't really matter.
+ * is only intended to be used in cases where failing to write out the
+ * data would be harmless anyway, it doesn't really matter.
*/
if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
(BM_DIRTY | BM_JUST_DIRTIED))
@@ -2622,21 +2623,20 @@ MarkBufferDirtyHint(Buffer buffer)
/*
* If checksums are enabled, and the buffer is permanent, then a full
- * page image may be required even for some hint bit updates to protect
- * against torn pages. This full page image is only necessary if the
- * hint bit update is the first change to the page since the last
- * checkpoint.
+ * page image may be required even for some hint bit updates to
+ * protect against torn pages. This full page image is only necessary
+ * if the hint bit update is the first change to the page since the
+ * last checkpoint.
*
- * We don't check full_page_writes here because that logic is
- * included when we call XLogInsert() since the value changes
- * dynamically.
+ * We don't check full_page_writes here because that logic is included
+ * when we call XLogInsert() since the value changes dynamically.
*/
if (DataChecksumsEnabled() && (bufHdr->flags & BM_PERMANENT))
{
/*
* If we're in recovery we cannot dirty a page because of a hint.
- * We can set the hint, just not dirty the page as a result so
- * the hint is lost when we evict the page or shutdown.
+ * We can set the hint, just not dirty the page as a result so the
+ * hint is lost when we evict the page or shutdown.
*
* See src/backend/storage/page/README for longer discussion.
*/
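
The branches above amount to a small decision table for hint-bit writes. A hedged sketch, with an invented enum to name the outcomes; the real code leaves the FPI decision partly to the WAL layer:

    #include <stdbool.h>

    typedef enum
    {
        HINT_ONLY,              /* set the hint, don't dirty the page */
        HINT_AND_DIRTY,         /* set the hint and mark the buffer dirty */
        HINT_FPI_THEN_DIRTY     /* write an XLOG_HINT image first, then dirty */
    } HintActionSketch;

    static HintActionSketch
    hint_action(bool checksums, bool permanent, bool in_recovery,
                bool already_dirty)
    {
        if (!checksums || !permanent)
            return HINT_AND_DIRTY;      /* no torn-page risk to cover */
        if (in_recovery)
            return HINT_ONLY;           /* can't dirty pages for hints now */
        if (already_dirty)
            return HINT_AND_DIRTY;      /* an earlier image already covers us */
        return HINT_FPI_THEN_DIRTY;     /* first change since last checkpoint */
    }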
@@ -2646,21 +2646,21 @@ MarkBufferDirtyHint(Buffer buffer)
/*
* If the block is already dirty because we either made a change
* or set a hint already, then we don't need to write a full page
- * image. Note that aggressive cleaning of blocks
- * dirtied by hint bit setting would increase the call rate.
- * Bulk setting of hint bits would reduce the call rate...
+ * image. Note that aggressive cleaning of blocks dirtied by hint
+ * bit setting would increase the call rate. Bulk setting of hint
+ * bits would reduce the call rate...
*
* We must issue the WAL record before we mark the buffer dirty.
- * Otherwise we might write the page before we write the WAL.
- * That causes a race condition, since a checkpoint might occur
- * between writing the WAL record and marking the buffer dirty.
- * We solve that with a kluge, but one that is already in use
- * during transaction commit to prevent race conditions.
- * Basically, we simply prevent the checkpoint WAL record from
- * being written until we have marked the buffer dirty. We don't
- * start the checkpoint flush until we have marked dirty, so our
- * checkpoint must flush the change to disk successfully or the
- * checkpoint never gets written, so crash recovery will fix it.
+ * Otherwise we might write the page before we write the WAL. That
+ * causes a race condition, since a checkpoint might occur between
+ * writing the WAL record and marking the buffer dirty. We solve
+ * that with a kluge, but one that is already in use during
+ * transaction commit to prevent race conditions. Basically, we
+ * simply prevent the checkpoint WAL record from being written
+ * until we have marked the buffer dirty. We don't start the
+ * checkpoint flush until we have marked dirty, so our checkpoint
+ * must flush the change to disk successfully or the checkpoint
+ * never gets written, so crash recovery will fix it.
*
* It's possible we may enter here without an xid, so it is
* essential that CreateCheckpoint waits for virtual transactions
@@ -2677,13 +2677,13 @@ MarkBufferDirtyHint(Buffer buffer)
dirtied = true; /* Means "will be dirtied by this action" */
/*
- * Set the page LSN if we wrote a backup block. We aren't
- * supposed to set this when only holding a share lock but
- * as long as we serialise it somehow we're OK. We choose to
- * set LSN while holding the buffer header lock, which causes
- * any reader of an LSN who holds only a share lock to also
- * obtain a buffer header lock before using PageGetLSN(),
- * which is enforced in BufferGetLSNAtomic().
+ * Set the page LSN if we wrote a backup block. We aren't supposed
+ * to set this when only holding a share lock but as long as we
+ * serialise it somehow we're OK. We choose to set LSN while
+ * holding the buffer header lock, which causes any reader of an
+ * LSN who holds only a share lock to also obtain a buffer header
+ * lock before using PageGetLSN(), which is enforced in
+ * BufferGetLSNAtomic().
*
* If checksums are enabled, you might think we should reset the
* checksum here. That will happen when the page is written
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index c67271a4bdf..44eecee3cac 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -196,8 +196,8 @@ LocalBufferAlloc(SMgrRelation smgr, ForkNumber forkNum, BlockNumber blockNum,
*/
if (bufHdr->flags & BM_DIRTY)
{
- SMgrRelation oreln;
- Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
+ SMgrRelation oreln;
+ Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
/* Find smgr relation for buffer */
oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
@@ -509,7 +509,7 @@ AtEOXact_LocalBuffers(bool isCommit)
{
if (LocalRefCount[i] != 0)
{
- Buffer b = -i - 1;
+ Buffer b = -i - 1;
PrintBufferLeakWarning(b);
RefCountErrors++;
@@ -541,7 +541,7 @@ AtProcExit_LocalBuffers(void)
{
if (LocalRefCount[i] != 0)
{
- Buffer b = -i - 1;
+ Buffer b = -i - 1;
PrintBufferLeakWarning(b);
RefCountErrors++;
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 4308128c7fd..8605fe76707 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -400,7 +400,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+ pgxact->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
@@ -427,7 +427,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+ pgxact->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
Assert(pgxact->nxids == 0);
@@ -1429,11 +1429,11 @@ GetSnapshotData(Snapshot snapshot)
* depending upon when the snapshot was taken, or change normal
* snapshot processing so it matches.
*
- * Note: It is possible for recovery to end before we finish taking the
- * snapshot, and for newly assigned transaction ids to be added to the
- * ProcArray. xmax cannot change while we hold ProcArrayLock, so those
- * newly added transaction ids would be filtered away, so we need not
- * be concerned about them.
+ * Note: It is possible for recovery to end before we finish taking
+ * the snapshot, and for newly assigned transaction ids to be added to
+ * the ProcArray. xmax cannot change while we hold ProcArrayLock, so
+ * those newly added transaction ids would be filtered away, so we
+ * need not be concerned about them.
*/
subcount = KnownAssignedXidsGetAndSetXmin(snapshot->subxip, &xmin,
xmax);
@@ -1688,8 +1688,8 @@ GetRunningTransactionData(void)
/*
* Top-level XID of a transaction is always less than any of
- * its subxids, so we don't need to check if any of the subxids
- * are smaller than oldestRunningXid
+ * its subxids, so we don't need to check if any of the
+ * subxids are smaller than oldestRunningXid
*/
}
}
@@ -1811,9 +1811,9 @@ GetVirtualXIDsDelayingChkpt(int *nvxids)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (pgxact->delayChkpt)
{
@@ -1853,9 +1853,9 @@ HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids)
{
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
VirtualTransactionId vxid;
GET_VXID_FROM_PGPROC(vxid, *proc);
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index fcf08f42b36..615278b8ca2 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -443,10 +443,10 @@ ResolveRecoveryConflictWithBufferPin(void)
ProcWaitForSignal();
/*
- * Clear any timeout requests established above. We assume here that
- * the Startup process doesn't have any other timeouts than what this
- * function uses. If that stops being true, we could cancel the
- * timeouts individually, but that'd be slower.
+ * Clear any timeout requests established above. We assume here that the
+ * Startup process doesn't have any other timeouts than what this function
+ * uses. If that stops being true, we could cancel the timeouts
+ * individually, but that'd be slower.
*/
disable_all_timeouts(false);
}
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index f73c4ef5a56..8cd871f4b40 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1210,7 +1210,7 @@ SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
static void
RemoveLocalLock(LOCALLOCK *locallock)
{
- int i;
+ int i;
for (i = locallock->numLockOwners - 1; i >= 0; i--)
{
@@ -1988,7 +1988,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
/* If session lock is above array position 0, move it down to 0 */
- for (i = 0; i < locallock->numLockOwners ; i++)
+ for (i = 0; i < locallock->numLockOwners; i++)
{
if (lockOwners[i].owner == NULL)
lockOwners[0] = lockOwners[i];
@@ -2214,7 +2214,7 @@ LockReleaseCurrentOwner(LOCALLOCK **locallocks, int nlocks)
}
else
{
- int i;
+ int i;
for (i = nlocks - 1; i >= 0; i--)
ReleaseLockIfHeld(locallocks[i], false);
@@ -2313,7 +2313,7 @@ LockReassignCurrentOwner(LOCALLOCK **locallocks, int nlocks)
}
else
{
- int i;
+ int i;
for (i = nlocks - 1; i >= 0; i--)
LockReassignOwner(locallocks[i], parent);
@@ -2333,8 +2333,8 @@ LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
int ip = -1;
/*
- * Scan to see if there are any locks belonging to the current owner
- * or its parent
+ * Scan to see if there are any locks belonging to the current owner or
+ * its parent
*/
lockOwners = locallock->lockOwners;
for (i = locallock->numLockOwners - 1; i >= 0; i--)
@@ -2346,7 +2346,7 @@ LockReassignOwner(LOCALLOCK *locallock, ResourceOwner parent)
}
if (ic < 0)
- return; /* no current locks */
+ return; /* no current locks */
if (ip < 0)
{
@@ -2690,9 +2690,9 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
LWLockAcquire(proc->backendLock, LW_SHARED);
/*
- * If the target backend isn't referencing the same database as the
- * lock, then we needn't examine the individual relation IDs at
- * all; none of them can be relevant.
+ * If the target backend isn't referencing the same database as
+ * the lock, then we needn't examine the individual relation IDs
+ * at all; none of them can be relevant.
*
* See FastPathTransferLocks() for discussion of why we do this
* test after acquiring the lock.
@@ -3158,15 +3158,15 @@ PostPrepare_Locks(TransactionId xid)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. Instead
+ * the proclock would then be in the wrong hash chain. Instead
* use hash_update_hash_key. (We used to create a new hash entry,
* but that risks out-of-memory failure if other processes are
- * busy making proclocks too.) We must unlink the proclock from
+ * busy making proclocks too.) We must unlink the proclock from
* our procLink chain and put it into the new proc's chain, too.
*
* Note: the updated proclock hash key will still belong to the
- * same hash partition, cf proclock_hash(). So the partition
- * lock we already hold is sufficient for this.
+ * same hash partition, cf proclock_hash(). So the partition lock
+ * we already hold is sufficient for this.
*/
SHMQueueDelete(&proclock->procLink);
@@ -3177,9 +3177,9 @@ PostPrepare_Locks(TransactionId xid)
proclocktag.myProc = newproc;
/*
- * Update the proclock. We should not find any existing entry
- * for the same hash key, since there can be only one entry for
- * any given lock with my own proc.
+ * Update the proclock. We should not find any existing entry for
+ * the same hash key, since there can be only one entry for any
+ * given lock with my own proc.
*/
if (!hash_update_hash_key(LockMethodProcLockHash,
(void *) proclock,
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 6029cfb78e3..b012df1c5d9 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1575,8 +1575,8 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
/*
* Can't use serializable mode while recovery is still active, as it is,
- * for example, on a hot standby. We could get here despite the check
- * in check_XactIsoLevel() if default_transaction_isolation is set to
+ * for example, on a hot standby. We could get here despite the check in
+ * check_XactIsoLevel() if default_transaction_isolation is set to
* serializable, so phrase the hint accordingly.
*/
if (RecoveryInProgress())
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 5809a797986..6d72a637f74 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -186,8 +186,8 @@ InitProcGlobal(void)
* five separate consumers: (1) normal backends, (2) autovacuum workers
* and the autovacuum launcher, (3) background workers, (4) auxiliary
* processes, and (5) prepared transactions. Each PGPROC structure is
- * dedicated to exactly one of these purposes, and they do not move between
- * groups.
+ * dedicated to exactly one of these purposes, and they do not move
+ * between groups.
*/
procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
ProcGlobal->allProcs = procs;
@@ -291,7 +291,7 @@ InitProcess(void)
elog(ERROR, "you already exist");
/*
- * Initialize process-local latch support. This could fail if the kernel
+ * Initialize process-local latch support. This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
@@ -476,7 +476,7 @@ InitAuxiliaryProcess(void)
elog(ERROR, "you already exist");
/*
- * Initialize process-local latch support. This could fail if the kernel
+ * Initialize process-local latch support. This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
@@ -1153,25 +1153,25 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
int pid = autovac->pid;
StringInfoData locktagbuf;
- StringInfoData logbuf; /* errdetail for server log */
+ StringInfoData logbuf; /* errdetail for server log */
initStringInfo(&locktagbuf);
initStringInfo(&logbuf);
DescribeLockTag(&locktagbuf, &lock->tag);
appendStringInfo(&logbuf,
- _("Process %d waits for %s on %s."),
- MyProcPid,
- GetLockmodeName(lock->tag.locktag_lockmethodid,
- lockmode),
- locktagbuf.data);
+ _("Process %d waits for %s on %s."),
+ MyProcPid,
+ GetLockmodeName(lock->tag.locktag_lockmethodid,
+ lockmode),
+ locktagbuf.data);
/* release lock as quickly as possible */
LWLockRelease(ProcArrayLock);
ereport(LOG,
- (errmsg("sending cancel to blocking autovacuum PID %d",
- pid),
- errdetail_log("%s", logbuf.data)));
+ (errmsg("sending cancel to blocking autovacuum PID %d",
+ pid),
+ errdetail_log("%s", logbuf.data)));
pfree(logbuf.data);
pfree(locktagbuf.data);
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index 3d7a8f36a9c..5503925788e 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -51,7 +51,7 @@ SpinlockSemas(void)
int
SpinlockSemas(void)
{
- int nsemas;
+ int nsemas;
/*
* It would be cleaner to distribute this logic into the affected modules,
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index f0e365379a4..a5594bde64e 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -18,9 +18,9 @@
#include "access/xlog.h"
#include "storage/checksum.h"
-bool ignore_checksum_failure = false;
+bool ignore_checksum_failure = false;
-static char pageCopyData[BLCKSZ]; /* for checksum calculation */
+static char pageCopyData[BLCKSZ]; /* for checksum calculation */
static Page pageCopy = pageCopyData;
static uint16 PageCalcChecksum16(Page page, BlockNumber blkno);
@@ -101,16 +101,16 @@ PageIsVerified(Page page, BlockNumber blkno)
}
/*
- * The following checks don't prove the header is correct,
- * only that it looks sane enough to allow into the buffer pool.
- * Later usage of the block can still reveal problems,
- * which is why we offer the checksum option.
+ * The following checks don't prove the header is correct, only that
+ * it looks sane enough to allow into the buffer pool. Later usage of
+ * the block can still reveal problems, which is why we offer the
+ * checksum option.
*/
if ((p->pd_flags & ~PD_VALID_FLAG_BITS) == 0 &&
- p->pd_lower <= p->pd_upper &&
- p->pd_upper <= p->pd_special &&
- p->pd_special <= BLCKSZ &&
- p->pd_special == MAXALIGN(p->pd_special))
+ p->pd_lower <= p->pd_upper &&
+ p->pd_upper <= p->pd_special &&
+ p->pd_special <= BLCKSZ &&
+ p->pd_special == MAXALIGN(p->pd_special))
header_sane = true;
if (header_sane && !checksum_failure)
@@ -905,10 +905,10 @@ PageSetChecksumCopy(Page page, BlockNumber blkno)
/*
* We make a copy iff we need to calculate a checksum because other
- * backends may set hint bits on this page while we write, which
- * would mean the checksum differs from the page contents. It doesn't
- * matter if we include or exclude hints during the copy, as long
- * as we write a valid page and associated checksum.
+ * backends may set hint bits on this page while we write, which would
+ * mean the checksum differs from the page contents. It doesn't matter if
+ * we include or exclude hints during the copy, as long as we write a
+ * valid page and associated checksum.
*/
memcpy((char *) pageCopy, (char *) page, BLCKSZ);
PageSetChecksumInplace(pageCopy, blkno);
@@ -931,6 +931,7 @@ PageSetChecksumInplace(Page page, BlockNumber blkno)
if (DataChecksumsEnabled())
{
PageHeader p = (PageHeader) page;
+
p->pd_checksum = PageCalcChecksum16(page, blkno);
}
@@ -949,7 +950,7 @@ PageSetChecksumInplace(Page page, BlockNumber blkno)
static uint16
PageCalcChecksum16(Page page, BlockNumber blkno)
{
- PageHeader phdr = (PageHeader) page;
+ PageHeader phdr = (PageHeader) page;
uint16 save_checksum;
uint32 checksum;
@@ -958,9 +959,8 @@ PageCalcChecksum16(Page page, BlockNumber blkno)
/*
* Save pd_checksum and set it to zero, so that the checksum calculation
- * isn't affected by the checksum stored on the page. We do this to
- * allow optimization of the checksum calculation on the whole block
- * in one go.
+ * isn't affected by the checksum stored on the page. We do this to allow
+ * optimization of the checksum calculation on the whole block in one go.
*/
save_checksum = phdr->pd_checksum;
phdr->pd_checksum = 0;
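(Aside: a condensed sketch of the routine around this hunk. The save/zero/restore steps mirror the code above; the blkno mixing and the 16-bit reduction are assumptions about the surrounding, unshown lines, not part of this hunk.)

    /* Sketch: checksum the block with pd_checksum zeroed so the stored
     * value does not feed back into the calculation, then restore it so
     * the caller sees the page unchanged. */
    static uint16
    page_checksum_sketch(Page page, BlockNumber blkno)
    {
        PageHeader phdr = (PageHeader) page;
        uint16     save_checksum = phdr->pd_checksum;
        uint32     checksum;

        phdr->pd_checksum = 0;
        checksum = checksum_block((char *) page, BLCKSZ);
        phdr->pd_checksum = save_checksum;

        checksum ^= blkno;                        /* assumed: detect transposed pages */
        return (uint16) ((checksum % 65535) + 1); /* assumed: fold to nonzero 16 bits */
    }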
diff --git a/src/backend/storage/page/checksum.c b/src/backend/storage/page/checksum.c
index d9348ee3c29..41c8ae784de 100644
--- a/src/backend/storage/page/checksum.c
+++ b/src/backend/storage/page/checksum.c
@@ -23,7 +23,7 @@
* for Fowler/Noll/Vo). The primitive of a plain FNV-1a hash folds in data 1
* byte at a time according to the formula:
*
- * hash = (hash ^ value) * FNV_PRIME
+ * hash = (hash ^ value) * FNV_PRIME
*
* FNV-1a algorithm is described at http://www.isthe.com/chongo/tech/comp/fnv/
*
@@ -36,7 +36,7 @@
* avalanche into lower positions. For performance reasons we choose to combine
* 4 bytes at a time. The actual hash formula used as the basis is:
*
- * hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
+ * hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17)
*
* The main bottleneck in this calculation is the multiplication latency. To
* hide the latency and to make use of SIMD parallelism multiple hash values
@@ -131,19 +131,20 @@ static const uint32 checksumBaseOffsets[N_SUMS] = {
uint32
checksum_block(char *data, uint32 size)
{
- uint32 sums[N_SUMS];
- uint32 (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data;
- uint32 result = 0;
- int i, j;
+ uint32 sums[N_SUMS];
+ uint32 (*dataArr)[N_SUMS] = (uint32 (*)[N_SUMS]) data;
+ uint32 result = 0;
+ int i,
+ j;
/* ensure that the size is compatible with the algorithm */
- Assert((size % (sizeof(uint32)*N_SUMS)) == 0);
+ Assert((size % (sizeof(uint32) * N_SUMS)) == 0);
/* initialize partial checksums to their corresponding offsets */
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets));
/* main checksum calculation */
- for (i = 0; i < size/sizeof(uint32)/N_SUMS; i++)
+ for (i = 0; i < size / sizeof(uint32) / N_SUMS; i++)
for (j = 0; j < N_SUMS; j++)
CHECKSUM_COMP(sums[j], dataArr[i][j]);
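(Aside: a self-contained sketch of the mixing primitive described in the header comment of this file. FNV_PRIME = 16777619 and N_SUMS = 32 are assumed values, and the final fold is simplified here to a plain xor of the partial sums.)

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define FNV_PRIME 16777619u
    #define N_SUMS    32

    /* hash = (hash ^ value) * FNV_PRIME ^ ((hash ^ value) >> 17) */
    #define CHECKSUM_COMP(checksum, value) \
        do { \
            uint32_t __tmp = (checksum) ^ (value); \
            (checksum) = (__tmp * FNV_PRIME) ^ (__tmp >> 17); \
        } while (0)

    static uint32_t
    checksum_block_sketch(const char *data, size_t size,
                          const uint32_t base[N_SUMS])
    {
        uint32_t    sums[N_SUMS];
        const uint32_t (*arr)[N_SUMS] = (const uint32_t (*)[N_SUMS]) data;
        uint32_t    result = 0;
        size_t      i;
        int         j;

        assert(size % (sizeof(uint32_t) * N_SUMS) == 0);
        for (j = 0; j < N_SUMS; j++)
            sums[j] = base[j];          /* per-lane starting offsets */
        /* N_SUMS independent lanes hide the multiplication latency */
        for (i = 0; i < size / sizeof(uint32_t) / N_SUMS; i++)
            for (j = 0; j < N_SUMS; j++)
                CHECKSUM_COMP(sums[j], arr[i][j]);
        for (j = 0; j < N_SUMS; j++)
            result ^= sums[j];          /* simplified final fold */
        return result;
    }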
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 3aa6325481f..f7f1437dd8f 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -435,16 +435,16 @@ smgrdounlink(SMgrRelation reln, bool isRedo)
void
smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
{
- int i = 0;
+ int i = 0;
RelFileNodeBackend *rnodes;
- ForkNumber forknum;
+ ForkNumber forknum;
if (nrels == 0)
return;
/*
- * create an array which contains all relations to be dropped, and
- * close each relation's forks at the smgr level while at it
+ * create an array which contains all relations to be dropped, and close
+ * each relation's forks at the smgr level while at it
*/
rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
for (i = 0; i < nrels; i++)
@@ -460,14 +460,14 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
}
/*
- * Get rid of any remaining buffers for the relations. bufmgr will just
+ * Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
*/
DropRelFileNodesAllBuffers(rnodes, nrels);
/*
- * It'd be nice to tell the stats collector to forget them immediately, too.
- * But we can't because we don't know the OIDs.
+ * It'd be nice to tell the stats collector to forget them immediately,
+ * too. But we can't because we don't know the OIDs.
*/
/*
@@ -475,8 +475,8 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
* dangling smgr references they may have for these rels. We should do
* this before starting the actual unlinking, in case we fail partway
* through that step. Note that the sinval messages will eventually come
- * back to this backend, too, and thereby provide a backstop that we closed
- * our own smgr rel.
+ * back to this backend, too, and thereby provide a backstop that we
+ * closed our own smgr rel.
*/
for (i = 0; i < nrels; i++)
CacheInvalidateSmgr(rnodes[i]);
@@ -491,7 +491,8 @@ smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
for (i = 0; i < nrels; i++)
{
- int which = rels[i]->smgr_which;
+ int which = rels[i]->smgr_which;
+
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
(*(smgrsw[which].smgr_unlink)) (rnodes[i], forknum, isRedo);
}
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index f0783031808..31ea31304b4 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -3622,7 +3622,7 @@ PostgresMain(int argc, char *argv[],
pqsignal(SIGQUIT, quickdie); /* hard crash time */
else
pqsignal(SIGQUIT, die); /* cancel current query and exit */
- InitializeTimeouts(); /* establishes SIGALRM handler */
+ InitializeTimeouts(); /* establishes SIGALRM handler */
/*
* Ignore failure to write to frontend. Note: if frontend closes
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 2c3156a2e94..f8989f7a9e9 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1184,7 +1184,7 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
ProcessUtility(utilityStmt,
portal->sourceText,
- isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY,
+ isTopLevel ? PROCESS_UTILITY_TOPLEVEL : PROCESS_UTILITY_QUERY,
portal->portalParams,
dest,
completionTag);
diff --git a/src/backend/tsearch/ts_selfuncs.c b/src/backend/tsearch/ts_selfuncs.c
index b3fcadc1674..8fb8875b66d 100644
--- a/src/backend/tsearch/ts_selfuncs.c
+++ b/src/backend/tsearch/ts_selfuncs.c
@@ -319,7 +319,7 @@ tsquery_opr_selec(QueryItem *item, char *operand,
* exclusive. We treat occurrences as independent events.
*
* This is only a good plan if we have a pretty fair number of
- * MCELEMs available; we set the threshold at 100. If no stats or
+ * MCELEMs available; we set the threshold at 100. If no stats or
* insufficient stats, arbitrarily use DEFAULT_TS_MATCH_SEL*4.
*/
if (lookup == NULL || length < 100)
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index fca47d2e257..ae7bb8a8b81 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -388,8 +388,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If element type is pass-by-reference, we must copy it into
- * palloc'd space, so that we can release the array below.
- * (We do this so that the space needed for element values is
+ * palloc'd space, so that we can release the array below. (We
+ * do this so that the space needed for element values is
* limited by the size of the hashtable; if we kept all the
* array values around, it could be much more.)
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index f53a0d248a6..1d61d5c7c8d 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -5187,7 +5187,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5250,8 +5250,8 @@ array_replace_internal(ArrayType *array,
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -5259,7 +5259,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
@@ -5370,8 +5370,8 @@ array_replace_internal(ArrayType *array,
if (!AllocSizeIsValid(nbytes))
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("array size exceeds the maximum allowed (%d)",
- (int) MaxAllocSize)));
+ errmsg("array size exceeds the maximum allowed (%d)",
+ (int) MaxAllocSize)));
}
nresult++;
}
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 5dd27c4d650..8677520cb6f 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -2699,8 +2699,8 @@ timetz_izone(PG_FUNCTION_ARGS)
if (zone->month != 0 || zone->day != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not include months or days",
- DatumGetCString(DirectFunctionCall1(interval_out,
+ errmsg("interval time zone \"%s\" must not include months or days",
+ DatumGetCString(DirectFunctionCall1(interval_out,
PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 59805047b20..7a08b9279d9 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -945,6 +945,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
break;
case DTK_TIME:
+
/*
* This might be an ISO time following a "t" field.
*/
@@ -2180,7 +2181,7 @@ DecodeDate(char *str, int fmask, int *tmask, bool *is2digits,
str++;
if (*str == '\0')
- return DTERR_BAD_FORMAT; /* end of string after separator */
+ return DTERR_BAD_FORMAT; /* end of string after separator */
field[nf] = str;
if (isdigit((unsigned char) *str))
@@ -2894,7 +2895,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 81e3329ef60..7b854062f0d 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1045,7 +1045,6 @@ suff_search(char *str, KeySuffix *suf, int type)
static void
NUMDesc_prepare(NUMDesc *num, FormatNode *n)
{
-
if (n->type != NODE_TYPE_ACTION)
return;
@@ -2535,7 +2534,7 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
strcpy(s, str_toupper_z(localized_full_months[tm->tm_mon - 1], collid));
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
- asc_toupper_z(months_full[tm->tm_mon - 1]));
+ asc_toupper_z(months_full[tm->tm_mon - 1]));
s += strlen(s);
break;
case DCH_Month:
@@ -3561,17 +3560,17 @@ do_to_timestamp(text *date_txt, text *fmt,
}
else
/* find century year for dates ending in "00" */
- tm->tm_year = tmfc.cc * 100 + ((tmfc.cc >= 0) ? 0 : 1);
+ tm->tm_year = tmfc.cc * 100 + ((tmfc.cc >= 0) ? 0 : 1);
}
else
- /* If a 4-digit year is provided, we use that and ignore CC. */
+ /* If a 4-digit year is provided, we use that and ignore CC. */
{
tm->tm_year = tmfc.year;
if (tmfc.bc && tm->tm_year > 0)
tm->tm_year = -(tm->tm_year - 1);
}
}
- else if (tmfc.cc) /* use first year of century */
+ else if (tmfc.cc) /* use first year of century */
{
if (tmfc.bc)
tmfc.cc = -tmfc.cc;
@@ -3606,7 +3605,7 @@ do_to_timestamp(text *date_txt, text *fmt,
if (tmfc.w)
tmfc.dd = (tmfc.w - 1) * 7 + 1;
if (tmfc.d)
- tm->tm_wday = tmfc.d - 1; /* convert to native numbering */
+ tm->tm_wday = tmfc.d - 1; /* convert to native numbering */
if (tmfc.dd)
tm->tm_mday = tmfc.dd;
if (tmfc.ddd)
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 507c91ff97b..aaf99bddf27 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -46,7 +46,7 @@ typedef enum /* contexts of JSON parser */
JSON_PARSE_OBJECT_NEXT, /* saw object value, expecting ',' or '}' */
JSON_PARSE_OBJECT_COMMA, /* saw object ',', expecting next label */
JSON_PARSE_END /* saw the end of a document, expect nothing */
-} JsonParseContext;
+} JsonParseContext;
static inline void json_lex(JsonLexContext *lex);
static inline void json_lex_string(JsonLexContext *lex);
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 03378a3ea9b..dd625a4e47f 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -96,7 +96,7 @@ typedef enum
JSON_SEARCH_OBJECT = 1,
JSON_SEARCH_ARRAY,
JSON_SEARCH_PATH
-} JsonSearch;
+} JsonSearch;
/* state for json_object_keys */
typedef struct okeysState
@@ -682,10 +682,10 @@ get_array_start(void *state)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot extract field from a non-object")));
- /*
- * initialize array count for this nesting level
- * Note: the lex_level seen by array_start is one less than that seen by
- * the elements of the array.
+
+ /*
+ * initialize array count for this nesting level. Note: the lex_level seen
+ * by array_start is one less than that seen by the elements of the array.
*/
if (_state->search_type == JSON_SEARCH_PATH &&
lex_level < _state->npath)
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 4e38d7c06c2..829ce59888c 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -95,11 +95,11 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
- * we reach kill(), a process for which we get a valid proc here might have
- * terminated on its own. There's no way to acquire a lock on an arbitrary
- * process to prevent that. But since so far all the callers of this
- * mechanism involve some request for ending the process anyway, that it
- * might end on its own first is not a problem.
+ * we reach kill(), a process for which we get a valid proc here might
+ * have terminated on its own. There's no way to acquire a lock on an
+ * arbitrary process to prevent that. But since so far all the callers of
+ * this mechanism involve some request for ending the process anyway, that
+ * it might end on its own first is not a problem.
*/
if (proc == NULL)
{
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index b343b5fe0f6..b4d639428ac 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -3402,7 +3402,7 @@ init_var_from_num(Numeric num, NumericVar *dest)
dest->sign = NUMERIC_SIGN(num);
dest->dscale = NUMERIC_DSCALE(num);
dest->digits = NUMERIC_DIGITS(num);
- dest->buf = NULL; /* digits array is not palloc'd */
+ dest->buf = NULL; /* digits array is not palloc'd */
}
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 890aa198167..7081b00500b 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -718,13 +718,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -735,12 +735,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -784,7 +784,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index fe9d18d0f44..04650d8ba4a 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -293,7 +293,7 @@ trigger_out(PG_FUNCTION_ARGS)
/*
- * event_trigger_in - input routine for pseudo-type event_trigger.
+ * event_trigger_in - input routine for pseudo-type event_trigger.
*/
Datum
event_trigger_in(PG_FUNCTION_ARGS)
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 84a4aca16c0..cd5c5f6621c 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -737,7 +737,7 @@ bounds_adjacent(TypeCacheEntry *typcache, RangeBound boundA, RangeBound boundB)
cmp = range_cmp_bound_values(typcache, &boundA, &boundB);
if (cmp < 0)
{
- RangeType *r;
+ RangeType *r;
/*
* Bounds do not overlap; see if there are points in between.
@@ -764,7 +764,7 @@ bounds_adjacent(TypeCacheEntry *typcache, RangeBound boundA, RangeBound boundB)
else if (cmp == 0)
return boundA.inclusive != boundB.inclusive;
else
- return false; /* bounds overlap */
+ return false; /* bounds overlap */
}
/* adjacent to (but not overlapping)? (internal version) */
@@ -1877,7 +1877,7 @@ range_parse_flags(const char *flags_str)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid range bound flags"),
- errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+ errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
switch (flags_str[0])
{
@@ -1890,7 +1890,7 @@ range_parse_flags(const char *flags_str)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid range bound flags"),
- errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+ errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
}
switch (flags_str[1])
@@ -1904,7 +1904,7 @@ range_parse_flags(const char *flags_str)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid range bound flags"),
- errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
+ errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\".")));
}
return flags;
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index ea1251a5e65..464b37fe1fd 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -677,6 +677,7 @@ range_gist_same(PG_FUNCTION_ARGS)
else
{
TypeCacheEntry *typcache;
+
typcache = range_get_typcache(fcinfo, RangeTypeGetOid(r1));
*result = range_eq_internal(typcache, r1, r2);
@@ -781,36 +782,36 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_overright_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_OVERLEFT:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_after_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_OVERLAPS:
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERRIGHT:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_before_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_AFTER:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
return (!range_overleft_internal(typcache, key,
- DatumGetRangeType(query)));
+ DatumGetRangeType(query)));
case RANGESTRAT_ADJACENT:
if (RangeIsEmpty(key) || RangeIsEmpty(DatumGetRangeType(query)))
return false;
if (range_adjacent_internal(typcache, key,
- DatumGetRangeType(query)))
+ DatumGetRangeType(query)))
return true;
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS:
return range_contains_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINED_BY:
/*
@@ -821,7 +822,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
if (RangeIsOrContainsEmpty(key))
return true;
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS_ELEM:
return range_contains_elem_internal(typcache, key, query);
case RANGESTRAT_EQ:
@@ -833,10 +834,10 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
if (RangeIsEmpty(DatumGetRangeType(query)))
return RangeIsOrContainsEmpty(key);
return range_contains_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
default:
elog(ERROR, "unrecognized range strategy: %d", strategy);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
@@ -851,35 +852,35 @@ range_gist_consistent_leaf(TypeCacheEntry *typcache, StrategyNumber strategy,
{
case RANGESTRAT_BEFORE:
return range_before_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERLEFT:
return range_overleft_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERLAPS:
return range_overlaps_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_OVERRIGHT:
return range_overright_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_AFTER:
return range_after_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_ADJACENT:
return range_adjacent_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS:
return range_contains_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINED_BY:
return range_contained_by_internal(typcache, key,
- DatumGetRangeType(query));
+ DatumGetRangeType(query));
case RANGESTRAT_CONTAINS_ELEM:
return range_contains_elem_internal(typcache, key, query);
case RANGESTRAT_EQ:
return range_eq_internal(typcache, key, DatumGetRangeType(query));
default:
elog(ERROR, "unrecognized range strategy: %d", strategy);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
}
diff --git a/src/backend/utils/adt/rangetypes_selfuncs.c b/src/backend/utils/adt/rangetypes_selfuncs.c
index c450c6a1580..074d326b121 100644
--- a/src/backend/utils/adt/rangetypes_selfuncs.c
+++ b/src/backend/utils/adt/rangetypes_selfuncs.c
@@ -42,19 +42,19 @@ static float8 get_position(TypeCacheEntry *typcache, RangeBound *value,
RangeBound *hist1, RangeBound *hist2);
static float8 get_len_position(double value, double hist1, double hist2);
static float8 get_distance(TypeCacheEntry *typcache, RangeBound *bound1,
- RangeBound *bound2);
+ RangeBound *bound2);
static int length_hist_bsearch(Datum *length_hist_values,
int length_hist_nvalues, double value, bool equal);
static double calc_length_hist_frac(Datum *length_hist_values,
- int length_hist_nvalues, double length1, double length2, bool equal);
+ int length_hist_nvalues, double length1, double length2, bool equal);
static double calc_hist_selectivity_contained(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues);
+ Datum *length_hist_values, int length_hist_nvalues);
static double calc_hist_selectivity_contains(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues);
+ Datum *length_hist_values, int length_hist_nvalues);
/*
* Returns a default selectivity estimate for given operator, when we don't
@@ -73,6 +73,7 @@ default_range_selectivity(Oid operator)
return 0.005;
case OID_RANGE_CONTAINS_ELEM_OP:
+
/*
* "range @> elem" is more or less identical to a scalar
* inequality "A >= b AND A <= c".
@@ -162,8 +163,8 @@ rangesel(PG_FUNCTION_ARGS)
*
* If the operator is "range @> element", the constant should be of the
* element type of the range column. Convert it to a range that includes
- * only that single point, so that we don't need special handling for
- * that in what follows.
+ * only that single point, so that we don't need special handling for that
+ * in what follows.
*/
if (operator == OID_RANGE_CONTAINS_ELEM_OP)
{
@@ -171,7 +172,9 @@ rangesel(PG_FUNCTION_ARGS)
if (((Const *) other)->consttype == typcache->rngelemtype->type_id)
{
- RangeBound lower, upper;
+ RangeBound lower,
+ upper;
+
lower.inclusive = true;
lower.val = ((Const *) other)->constvalue;
lower.infinite = false;
@@ -193,8 +196,8 @@ rangesel(PG_FUNCTION_ARGS)
/*
* If we got a valid constant on one side of the operator, proceed to
- * estimate using statistics. Otherwise punt and return a default
- * constant estimate.
+ * estimate using statistics. Otherwise punt and return a default constant
+ * estimate.
*/
if (constrange)
selec = calc_rangesel(typcache, &vardata, constrange, operator);
@@ -214,7 +217,8 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
{
double hist_selec;
double selec;
- float4 empty_frac, null_frac;
+ float4 empty_frac,
+ null_frac;
/*
* First look up the fraction of NULLs and empty ranges from pg_statistic.
@@ -231,13 +235,13 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
/* Try to get fraction of empty ranges */
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
- STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM, InvalidOid,
+ STATISTIC_KIND_RANGE_LENGTH_HISTOGRAM, InvalidOid,
NULL,
NULL, NULL,
&numbers, &nnumbers))
{
if (nnumbers != 1)
- elog(ERROR, "invalid empty fraction statistic"); /* shouldn't happen */
+ elog(ERROR, "invalid empty fraction statistic"); /* shouldn't happen */
empty_frac = numbers[0];
}
else
@@ -250,8 +254,8 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
{
/*
* No stats are available. Follow through the calculations below
- * anyway, assuming no NULLs and no empty ranges. This still allows
- * us to give a better-than-nothing estimate based on whether the
+ * anyway, assuming no NULLs and no empty ranges. This still allows us
+ * to give a better-than-nothing estimate based on whether the
* constant is an empty range or not.
*/
null_frac = 0.0;
@@ -278,6 +282,7 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
case OID_RANGE_CONTAINED_OP:
case OID_RANGE_LESS_EQUAL_OP:
case OID_RANGE_GREATER_EQUAL_OP:
+
/*
* these return true when both args are empty, false if only
* one is empty
@@ -293,7 +298,7 @@ calc_rangesel(TypeCacheEntry *typcache, VariableStatData *vardata,
case OID_RANGE_CONTAINS_ELEM_OP:
default:
elog(ERROR, "unexpected operator %u", operator);
- selec = 0.0; /* keep compiler quiet */
+ selec = 0.0; /* keep compiler quiet */
break;
}
}
@@ -406,7 +411,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
/* Extract the bounds of the constant value. */
range_deserialize(typcache, constval, &const_lower, &const_upper, &empty);
- Assert (!empty);
+ Assert(!empty);
/*
* Calculate selectivity comparing the lower or upper bound of the
@@ -415,6 +420,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
switch (operator)
{
case OID_RANGE_LESS_OP:
+
/*
* The regular b-tree comparison operators (<, <=, >, >=) compare
* the lower bounds first, and the upper bounds for values with
@@ -476,11 +482,13 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
case OID_RANGE_OVERLAP_OP:
case OID_RANGE_CONTAINS_ELEM_OP:
+
/*
* A && B <=> NOT (A << B OR A >> B).
*
- * Since A << B and A >> B are mutually exclusive events we can sum
- * their probabilities to find probability of (A << B OR A >> B).
+ * Since A << B and A >> B are mutually exclusive events we can
+ * sum their probabilities to find probability of (A << B OR A >>
+ * B).
*
* "range @> elem" is equivalent to "range && [elem,elem]". The
* caller already constructed the singular range from the element
@@ -491,15 +499,15 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
nhist, false);
hist_selec +=
(1.0 - calc_hist_selectivity_scalar(typcache, &const_upper, hist_lower,
- nhist, true));
+ nhist, true));
hist_selec = 1.0 - hist_selec;
break;
case OID_RANGE_CONTAINS_OP:
hist_selec =
calc_hist_selectivity_contains(typcache, &const_lower,
- &const_upper, hist_lower, nhist,
- length_hist_values, length_nhist);
+ &const_upper, hist_lower, nhist,
+ length_hist_values, length_nhist);
break;
case OID_RANGE_CONTAINED_OP:
@@ -517,20 +525,20 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata,
{
hist_selec =
1.0 - calc_hist_selectivity_scalar(typcache, &const_lower,
- hist_lower, nhist, false);
+ hist_lower, nhist, false);
}
else
{
hist_selec =
calc_hist_selectivity_contained(typcache, &const_lower,
- &const_upper, hist_lower, nhist,
- length_hist_values, length_nhist);
+ &const_upper, hist_lower, nhist,
+ length_hist_values, length_nhist);
}
break;
default:
elog(ERROR, "unknown range operator %u", operator);
- hist_selec = -1.0; /* keep compiler quiet */
+ hist_selec = -1.0; /* keep compiler quiet */
break;
}
@@ -546,7 +554,7 @@ static double
calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound,
RangeBound *hist, int hist_nvalues, bool equal)
{
- Selectivity selec;
+ Selectivity selec;
int index;
/*
@@ -576,7 +584,7 @@ calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound,
*/
static int
rbound_bsearch(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist,
- int hist_length, bool equal)
+ int hist_length, bool equal)
{
int lower = -1,
upper = hist_length - 1,
@@ -613,7 +621,7 @@ length_hist_bsearch(Datum *length_hist_values, int length_hist_nvalues,
while (lower < upper)
{
- double middleval;
+ double middleval;
middle = (lower + upper + 1) / 2;
@@ -659,7 +667,7 @@ get_position(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist1,
hist2->val,
hist1->val));
if (bin_width <= 0.0)
- return 0.5; /* zero width bin */
+ return 0.5; /* zero width bin */
position = DatumGetFloat8(FunctionCall2Coll(
&typcache->rng_subdiff_finfo,
@@ -724,9 +732,8 @@ get_len_position(double value, double hist1, double hist2)
else if (is_infinite(hist1) && !is_infinite(hist2))
{
/*
- * Lower bin boundary is -infinite, upper is finite.
- * Return 1.0 to indicate the value is infinitely far from the lower
- * bound.
+ * Lower bin boundary is -infinite, upper is finite. Return 1.0 to
+ * indicate the value is infinitely far from the lower bound.
*/
return 1.0;
}
@@ -740,8 +747,8 @@ get_len_position(double value, double hist1, double hist2)
/*
* If both bin boundaries are infinite, they should be equal to each
* other, and the value should also be infinite and equal to both
- * bounds. (But don't Assert that, to avoid crashing unnecessarily
- * if the caller messes up)
+ * bounds. (But don't Assert that, to avoid crashing unnecessarily if
+ * the caller messes up)
*
* Assume the value to lie in the middle of the infinite bounds.
*/
@@ -755,7 +762,7 @@ get_len_position(double value, double hist1, double hist2)
static float8
get_distance(TypeCacheEntry *typcache, RangeBound *bound1, RangeBound *bound2)
{
- bool has_subdiff = OidIsValid(typcache->rng_subdiff_finfo.fn_oid);
+ bool has_subdiff = OidIsValid(typcache->rng_subdiff_finfo.fn_oid);
if (!bound1->infinite && !bound2->infinite)
{
@@ -797,7 +804,10 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
double length1, double length2, bool equal)
{
double frac;
- double A, B, PA, PB;
+ double A,
+ B,
+ PA,
+ PB;
double pos;
int i;
double area;
@@ -805,7 +815,7 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
Assert(length2 >= length1);
if (length2 < 0.0)
- return 0.0; /* shouldn't happen, but doesn't hurt to check */
+ return 0.0; /* shouldn't happen, but doesn't hurt to check */
/* All lengths in the table are <= infinite. */
if (is_infinite(length2) && equal)
@@ -815,25 +825,25 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
* The average of a function between A and B can be calculated by the
* formula:
*
- * B
- * 1 /
- * ------- | P(x)dx
- * B - A /
- * A
+ * B
+ * 1 /
+ * ------- | P(x)dx
+ * B - A /
+ * A
*
* The geometrical interpretation of the integral is the area under the
* graph of P(x). P(x) is defined by the length histogram. We calculate
* the area in a piecewise fashion, iterating through the length histogram
* bins. Each bin is a trapezoid:
*
- * P(x2)
- * /|
- * / |
+ * P(x2)
+ * /|
+ * / |
* P(x1)/ |
- * | |
- * | |
- * ---+---+--
- * x1 x2
+ * | |
+ * | |
+ * ---+---+--
+ * x1 x2
*
* where x1 and x2 are the boundaries of the current histogram bin, and
* P(x1) and P(x2) are the cumulative fraction of tuples at the boundaries.
@@ -845,7 +855,7 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
* boundary to calculate P(x1). Likewise for the last bin: we use linear
* interpolation to calculate P(x2). For the bins in between, x1 and x2
* lie on histogram bin boundaries, so P(x1) and P(x2) are simply:
- * P(x1) = (bin index) / (number of bins)
+ * P(x1) = (bin index) / (number of bins)
* P(x2) = (bin index + 1) / (number of bins)
*/
@@ -870,9 +880,9 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
B = length1;
/*
- * In the degenerate case that length1 == length2, simply return P(length1).
- * This is not merely an optimization: if length1 == length2, we'd divide
- * by zero later on.
+ * In the degenerate case that length1 == length2, simply return
+ * P(length1). This is not merely an optimization: if length1 == length2,
+ * we'd divide by zero later on.
*/
if (length2 == length1)
return PB;
@@ -885,32 +895,34 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
area = 0.0;
for (; i < length_hist_nvalues - 1; i++)
{
- double bin_upper = DatumGetFloat8(length_hist_values[i + 1]);
+ double bin_upper = DatumGetFloat8(length_hist_values[i + 1]);
/* check if we've reached the last bin */
if (!(bin_upper < length2 || (equal && bin_upper <= length2)))
break;
/* the upper bound of previous bin is the lower bound of this bin */
- A = B; PA = PB;
+ A = B;
+ PA = PB;
B = bin_upper;
PB = (double) i / (double) (length_hist_nvalues - 1);
/*
* Add the area of this trapezoid to the total. The point of the
- * if-check is to avoid NaN, in the corner case that PA == PB == 0, and
- * B - A == Inf. The area of a zero-height trapezoid (PA == PB == 0) is
- * zero, regardless of the width (B - A).
+ * if-check is to avoid NaN, in the corner case that PA == PB == 0,
+ * and B - A == Inf. The area of a zero-height trapezoid (PA == PB ==
+ * 0) is zero, regardless of the width (B - A).
*/
if (PA > 0 || PB > 0)
area += 0.5 * (PB + PA) * (B - A);
}
/* Last bin */
- A = B; PA = PB;
+ A = B;
+ PA = PB;
- B = length2; /* last bin ends at the query upper bound */
+ B = length2; /* last bin ends at the query upper bound */
if (i >= length_hist_nvalues - 1)
pos = 0.0;
else
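(Aside: the trapezoid accumulation these reflowed comments describe, as a simplified standalone analogue: plain doubles instead of Datums, no infinite bounds, no "equal" flag. P_of and avg_cumfrac are invented names, not the patch's functions.)

    /* P(x) by linear interpolation, with P(bound[i]) = i / (n - 1) */
    static double
    P_of(const double *bound, int n, double x)
    {
        int i;

        if (x <= bound[0])
            return 0.0;
        for (i = 0; i < n - 1; i++)
            if (x <= bound[i + 1])
                return (i + (x - bound[i]) / (bound[i + 1] - bound[i]))
                    / (double) (n - 1);
        return 1.0;
    }

    /* Average of P over [len1, len2]: area under the piecewise-linear
     * graph, one trapezoid per histogram bin, divided by the width. */
    static double
    avg_cumfrac(const double *bound, int n, double len1, double len2)
    {
        double A = len1;
        double PA = P_of(bound, n, len1);
        double area = 0.0;
        int    i;

        if (len2 == len1)
            return PA;          /* degenerate case: avoid dividing by zero */

        for (i = 0; i < n - 1; i++)
        {
            double B, PB;

            if (bound[i + 1] <= len1)
                continue;       /* bin entirely below the query range */
            B = (bound[i + 1] < len2) ? bound[i + 1] : len2;
            PB = P_of(bound, n, B);
            if (PA > 0 || PB > 0)       /* avoid 0 * Inf = NaN */
                area += 0.5 * (PB + PA) * (B - A);  /* one trapezoid */
            A = B;
            PA = PB;
            if (B >= len2)
                break;
        }
        if (A < len2)           /* tail beyond the last boundary, P == 1 */
            area += 0.5 * (1.0 + PA) * (len2 - A);
        return area / (len2 - len1);
    }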
@@ -953,8 +965,8 @@ calc_length_hist_frac(Datum *length_hist_values, int length_hist_nvalues,
static double
calc_hist_selectivity_contained(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
- RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues)
+ RangeBound *hist_lower, int hist_nvalues,
+ Datum *length_hist_values, int length_hist_nvalues)
{
int i,
upper_index;
@@ -1013,9 +1025,10 @@ calc_hist_selectivity_contained(TypeCacheEntry *typcache,
if (range_cmp_bounds(typcache, &hist_lower[i], lower) < 0)
{
dist = get_distance(typcache, lower, upper);
+
/*
- * Subtract from bin_width the portion of this bin that we want
- * to ignore.
+ * Subtract from bin_width the portion of this bin that we want to
+ * ignore.
*/
bin_width -= get_position(typcache, lower, &hist_lower[i],
&hist_lower[i + 1]);
@@ -1035,8 +1048,8 @@ calc_hist_selectivity_contained(TypeCacheEntry *typcache,
prev_dist, dist, true);
/*
- * Add the fraction of tuples in this bin, with a suitable length,
- * to the total.
+ * Add the fraction of tuples in this bin, with a suitable length, to
+ * the total.
*/
sum_frac += length_hist_frac * bin_width / (double) (hist_nvalues - 1);
@@ -1063,7 +1076,7 @@ static double
calc_hist_selectivity_contains(TypeCacheEntry *typcache,
RangeBound *lower, RangeBound *upper,
RangeBound *hist_lower, int hist_nvalues,
- Datum *length_hist_values, int length_hist_nvalues)
+ Datum *length_hist_values, int length_hist_nvalues)
{
int i,
lower_index;
@@ -1083,17 +1096,17 @@ calc_hist_selectivity_contains(TypeCacheEntry *typcache,
*/
if (lower_index >= 0 && lower_index < hist_nvalues - 1)
lower_bin_width = get_position(typcache, lower, &hist_lower[lower_index],
- &hist_lower[lower_index + 1]);
+ &hist_lower[lower_index + 1]);
else
lower_bin_width = 0.0;
/*
* Loop through all the lower bound bins, smaller than the query lower
- * bound. In the loop, dist and prev_dist are the distance of the "current"
- * bin's lower and upper bounds from the constant upper bound. We begin
- * from query lower bound, and walk backwards, so the first bin's upper
- * bound is the query lower bound, and its distance to the query upper
- * bound is the length of the query range.
+ * bound. In the loop, dist and prev_dist are the distance of the
+ * "current" bin's lower and upper bounds from the constant upper bound.
+ * We begin from query lower bound, and walk backwards, so the first bin's
+ * upper bound is the query lower bound, and its distance to the query
+ * upper bound is the length of the query range.
*
* bin_width represents the width of the current bin. Normally it is 1.0,
* meaning a full width bin, except for the first bin, which is only
@@ -1108,9 +1121,9 @@ calc_hist_selectivity_contains(TypeCacheEntry *typcache,
double length_hist_frac;
/*
- * dist -- distance from upper bound of query range to current
- * value of lower bound histogram or lower bound of query range (if
- * we've reach it).
+ * dist -- distance from upper bound of query range to current value
+ * of lower bound histogram or lower bound of query range (if we've
+ * reached it).
*/
dist = get_distance(typcache, &hist_lower[i], upper);
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9a7f20d9f37..0d47854974e 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -151,8 +151,8 @@ spg_range_quad_choose(PG_FUNCTION_ARGS)
/*
* A node with no centroid divides ranges purely on whether they're empty
- * or not. All empty ranges go to child node 0, all non-empty ranges go
- * to node 1.
+ * or not. All empty ranges go to child node 0, all non-empty ranges go to
+ * node 1.
*/
if (!in->hasPrefix)
{
@@ -307,8 +307,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* For adjacent search we need also previous centroid (if any) to improve
- * the precision of the consistent check. In this case needPrevious flag is
- * set and centroid is passed into reconstructedValues. This is not the
+ * the precision of the consistent check. In this case needPrevious flag
+ * is set and centroid is passed into reconstructedValues. This is not the
* intended purpose of reconstructedValues (because we already have the
* full value available at the leaf), but it's a convenient place to store
* state while traversing the tree.
@@ -370,18 +370,20 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_CONTAINS:
+
/*
- * All ranges contain an empty range. Only non-empty ranges
- * can contain a non-empty range.
+ * All ranges contain an empty range. Only non-empty
+ * ranges can contain a non-empty range.
*/
if (!empty)
which &= (1 << 2);
break;
case RANGESTRAT_CONTAINED_BY:
+
/*
- * Only an empty range is contained by an empty range. Both
- * empty and non-empty ranges can be contained by a
+ * Only an empty range is contained by an empty range.
+ * Both empty and non-empty ranges can be contained by a
* non-empty range.
*/
if (empty)
@@ -438,11 +440,13 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
upper;
bool empty;
RangeType *range = NULL;
+
/* Restrictions on range bounds according to scan strategy */
RangeBound *minLower = NULL,
*maxLower = NULL,
*minUpper = NULL,
*maxUpper = NULL;
+
/* Are the restrictions on range bounds inclusive? */
bool inclusive = true;
bool strictEmpty = true;
@@ -482,9 +486,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* Most strategies are handled by forming a bounding box from the
- * search key, defined by a minLower, maxLower, minUpper, maxUpper.
- * Some modify 'which' directly, to specify exactly which quadrants
- * need to be visited.
+ * search key, defined by a minLower, maxLower, minUpper,
+ * maxUpper. Some modify 'which' directly, to specify exactly
+ * which quadrants need to be visited.
*
* For most strategies, nothing matches an empty search key, and
* an empty range never matches a non-empty key. If a strategy
@@ -494,6 +498,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
switch (strategy)
{
case RANGESTRAT_BEFORE:
+
/*
* Range A is before range B if upper bound of A is lower
* than lower bound of B.
@@ -503,6 +508,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_OVERLEFT:
+
/*
* Range A is overleft to range B if upper bound of A is
* less or equal to upper bound of B.
@@ -511,6 +517,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_OVERLAPS:
+
/*
* Non-empty ranges overlap, if lower bound of each range
* is lower or equal to upper bound of the other range.
@@ -520,6 +527,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_OVERRIGHT:
+
/*
* Range A is overright to range B if lower bound of A is
* greater or equal to lower bound of B.
@@ -528,6 +536,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_AFTER:
+
/*
* Range A is after range B if lower bound of A is greater
* than upper bound of B.
@@ -538,12 +547,13 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
case RANGESTRAT_ADJACENT:
if (empty)
- break; /* Skip to strictEmpty check. */
+ break; /* Skip to strictEmpty check. */
/*
* which1 is bitmask for possibility to be adjacent with
* lower bound of argument. which2 is bitmask for
- * possibility to be adjacent with upper bound of argument.
+ * possibility to be adjacent with upper bound of
+ * argument.
*/
which1 = which2 = (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
@@ -622,9 +632,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* For a range's lower bound to be adjacent to the
* argument's upper bound, it will be found along the
- * line adjacent to (and just right of)
- * X=upper. Therefore, if the argument's upper bound is
- * less than (and not adjacent to) the centroid's upper
+ * line adjacent to (and just right of) X=upper.
+ * Therefore, if the argument's upper bound is less
+ * than (and not adjacent to) the centroid's upper
* bound, the line falls in quadrants 3 and 4; if
* greater or equal to, the line falls in quadrants 1
* and 2.
@@ -649,6 +659,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_CONTAINS:
+
/*
* Non-empty range A contains non-empty range B if lower
* bound of A is lower or equal to lower bound of range B
@@ -682,6 +693,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_EQ:
+
/*
* Equal range can be only in the same quadrant where
* argument would be placed to.
@@ -717,10 +729,10 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
if (minLower)
{
/*
- * If the centroid's lower bound is less than or equal to
- * the minimum lower bound, anything in the 3rd and 4th
- * quadrants will have an even smaller lower bound, and thus
- * can't match.
+ * If the centroid's lower bound is less than or equal to the
+ * minimum lower bound, anything in the 3rd and 4th quadrants
+ * will have an even smaller lower bound, and thus can't
+ * match.
*/
if (range_cmp_bounds(typcache, &centroidLower, minLower) <= 0)
which &= (1 << 1) | (1 << 2) | (1 << 5);
@@ -731,9 +743,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
* If the centroid's lower bound is greater than the maximum
* lower bound, anything in the 1st and 2nd quadrants will
* also have a greater than or equal lower bound, and thus
- * can't match. If the centroid's lower bound is equal to
- * the maximum lower bound, we can still exclude the 1st and
- * 2nd quadrants if we're looking for a value strictly greater
+ * can't match. If the centroid's lower bound is equal to the
+ * maximum lower bound, we can still exclude the 1st and 2nd
+ * quadrants if we're looking for a value strictly greater
* than the maximum.
*/
int cmp;
@@ -745,10 +757,10 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
if (minUpper)
{
/*
- * If the centroid's upper bound is less than or equal to
- * the minimum upper bound, anything in the 2nd and 3rd
- * quadrants will have an even smaller upper bound, and thus
- * can't match.
+ * If the centroid's upper bound is less than or equal to the
+ * minimum upper bound, anything in the 2nd and 3rd quadrants
+ * will have an even smaller upper bound, and thus can't
+ * match.
*/
if (range_cmp_bounds(typcache, &centroidUpper, minUpper) <= 0)
which &= (1 << 1) | (1 << 4) | (1 << 5);
@@ -759,9 +771,9 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
* If the centroid's upper bound is greater than the maximum
* upper bound, anything in the 1st and 4th quadrants will
* also have a greater than or equal upper bound, and thus
- * can't match. If the centroid's upper bound is equal to
- * the maximum upper bound, we can still exclude the 1st and
- * 4th quadrants if we're looking for a value strictly greater
+ * can't match. If the centroid's upper bound is equal to the
+ * maximum upper bound, we can still exclude the 1st and 4th
+ * quadrants if we're looking for a value strictly greater
* than the maximum.
*/
int cmp;
@@ -848,7 +860,7 @@ spg_range_quad_leaf_consistent(PG_FUNCTION_ARGS)
break;
case RANGESTRAT_ADJACENT:
res = range_adjacent_internal(typcache, leafRange,
- DatumGetRangeType(keyDatum));
+ DatumGetRangeType(keyDatum));
break;
case RANGESTRAT_CONTAINS:
res = range_contains_internal(typcache, leafRange,
diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c
index e111f8ff979..114bce015c6 100644
--- a/src/backend/utils/adt/rangetypes_typanalyze.c
+++ b/src/backend/utils/adt/rangetypes_typanalyze.c
@@ -29,8 +29,8 @@
#include "utils/builtins.h"
#include "utils/rangetypes.h"
-static int float8_qsort_cmp(const void *a1, const void *a2);
-static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
+static int float8_qsort_cmp(const void *a1, const void *a2);
+static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg);
static void compute_range_stats(VacAttrStats *stats,
AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows);
@@ -48,7 +48,7 @@ range_typanalyze(PG_FUNCTION_ARGS)
typcache = range_get_typcache(fcinfo, stats->attrtypid);
if (attr->attstattarget < 0)
- attr->attstattarget = default_statistics_target;
+ attr->attstattarget = default_statistics_target;
stats->compute_stats = compute_range_stats;
stats->extra_data = typcache;
@@ -81,9 +81,9 @@ float8_qsort_cmp(const void *a1, const void *a2)
static int
range_bound_qsort_cmp(const void *a1, const void *a2, void *arg)
{
- RangeBound *b1 = (RangeBound *)a1;
- RangeBound *b2 = (RangeBound *)a2;
- TypeCacheEntry *typcache = (TypeCacheEntry *)arg;
+ RangeBound *b1 = (RangeBound *) a1;
+ RangeBound *b2 = (RangeBound *) a2;
+ TypeCacheEntry *typcache = (TypeCacheEntry *) arg;
return range_cmp_bounds(typcache, b1, b2);
}
@@ -106,7 +106,8 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
int num_bins = stats->attr->attstattarget;
int num_hist;
float8 *lengths;
- RangeBound *lowers, *uppers;
+ RangeBound *lowers,
+ *uppers;
double total_width = 0;
/* Allocate memory to hold range bounds and lengths of the sample ranges. */
@@ -163,9 +164,9 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* and lower bound values.
*/
length = DatumGetFloat8(FunctionCall2Coll(
- &typcache->rng_subdiff_finfo,
- typcache->rng_collation,
- upper.val, lower.val));
+ &typcache->rng_subdiff_finfo,
+ typcache->rng_collation,
+ upper.val, lower.val));
}
else
{
@@ -227,13 +228,13 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* The object of this loop is to construct ranges from first and
* last entries in lowers[] and uppers[] along with evenly-spaced
- * values in between. So the i'th value is a range of
- * lowers[(i * (nvals - 1)) / (num_hist - 1)] and
- * uppers[(i * (nvals - 1)) / (num_hist - 1)]. But computing that
- * subscript directly risks integer overflow when the stats target
- * is more than a couple thousand. Instead we add
- * (nvals - 1) / (num_hist - 1) to pos at each step, tracking the
- * integral and fractional parts of the sum separately.
+ * values in between. So the i'th value is a range of lowers[(i *
+ * (nvals - 1)) / (num_hist - 1)] and uppers[(i * (nvals - 1)) /
+ * (num_hist - 1)]. But computing that subscript directly risks
+ * integer overflow when the stats target is more than a couple
+ * thousand. Instead we add (nvals - 1) / (num_hist - 1) to pos
+ * at each step, tracking the integral and fractional parts of the
+ * sum separately.
*/
delta = (non_empty_cnt - 1) / (num_hist - 1);
deltafrac = (non_empty_cnt - 1) % (num_hist - 1);
@@ -242,7 +243,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
for (i = 0; i < num_hist; i++)
{
bound_hist_values[i] = PointerGetDatum(range_serialize(
- typcache, &lowers[pos], &uppers[pos], false));
+ typcache, &lowers[pos], &uppers[pos], false));
pos += delta;
posfrac += deltafrac;
if (posfrac >= (num_hist - 1))
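(Aside: the overflow-free subscript stepping used in this loop, as a standalone sketch; emit_subscript is a hypothetical consumer.)

    /* Pick num_hist evenly spaced subscripts from [0, nvals - 1] without
     * ever forming i * (nvals - 1), which could overflow int.  pos takes
     * the integral step; posfrac carries the remainder separately. */
    static void
    evenly_spaced_subscripts(int nvals, int num_hist,
                             void (*emit_subscript)(int))
    {
        int delta = (nvals - 1) / (num_hist - 1);
        int deltafrac = (nvals - 1) % (num_hist - 1);
        int pos = 0,
            posfrac = 0,
            i;

        for (i = 0; i < num_hist; i++)
        {
            emit_subscript(pos);    /* == (i * (nvals - 1)) / (num_hist - 1) */
            pos += delta;
            posfrac += deltafrac;
            if (posfrac >= num_hist - 1)
            {
                /* fractional part wrapped: take one extra whole step */
                pos++;
                posfrac -= num_hist - 1;
            }
        }
    }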
@@ -281,10 +282,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* The object of this loop is to copy the first and last lengths[]
* entries along with evenly-spaced values in between. So the i'th
* value is lengths[(i * (nvals - 1)) / (num_hist - 1)]. But
- * computing that subscript directly risks integer overflow when the
- * stats target is more than a couple thousand. Instead we add
- * (nvals - 1) / (num_hist - 1) to pos at each step, tracking the
- * integral and fractional parts of the sum separately.
+ * computing that subscript directly risks integer overflow when
+ * the stats target is more than a couple thousand. Instead we
+ * add (nvals - 1) / (num_hist - 1) to pos at each step, tracking
+ * the integral and fractional parts of the sum separately.
*/
delta = (non_empty_cnt - 1) / (num_hist - 1);
deltafrac = (non_empty_cnt - 1) % (num_hist - 1);
@@ -342,9 +343,10 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/* We found only nulls; assume the column is entirely null */
stats->stats_valid = true;
stats->stanullfrac = 1.0;
- stats->stawidth = 0; /* "unknown" */
- stats->stadistinct = 0.0; /* "unknown" */
+ stats->stawidth = 0; /* "unknown" */
+ stats->stadistinct = 0.0; /* "unknown" */
}
+
/*
* We don't need to bother cleaning up any of our temporary palloc's. The
* hashtable should also go away, as it used a child memory context.
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 700247e4741..0d1ff61bf9f 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -319,7 +319,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -698,7 +698,8 @@ format_operator_internal(Oid operator_oid, bool force_qualify)
/*
* Would this oper be found (given the right args) by regoperatorin?
- * If not, or if caller explicitely requests it, we need to qualify it.
+ * If not, or if caller explicitly requests it, we need to qualify
+ * it.
*/
if (force_qualify || !OperatorIsVisible(operator_oid))
{
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 43228447ea4..65edc1fb04e 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -81,8 +81,8 @@
#define RI_PLAN_RESTRICT_UPD_CHECKREF 6
#define RI_PLAN_SETNULL_DEL_DOUPDATE 7
#define RI_PLAN_SETNULL_UPD_DOUPDATE 8
-#define RI_PLAN_SETDEFAULT_DEL_DOUPDATE 9
-#define RI_PLAN_SETDEFAULT_UPD_DOUPDATE 10
+#define RI_PLAN_SETDEFAULT_DEL_DOUPDATE 9
+#define RI_PLAN_SETDEFAULT_UPD_DOUPDATE 10
#define MAX_QUOTED_NAME_LEN (NAMEDATALEN*2+3)
#define MAX_QUOTED_REL_NAME_LEN (MAX_QUOTED_NAME_LEN*2)
@@ -135,7 +135,7 @@ typedef struct RI_ConstraintInfo
typedef struct RI_QueryKey
{
Oid constr_id; /* OID of pg_constraint entry */
- int32 constr_queryno; /* query type ID, see RI_PLAN_XXX above */
+ int32 constr_queryno; /* query type ID, see RI_PLAN_XXX above */
} RI_QueryKey;
@@ -403,7 +403,7 @@ RI_FKey_check(TriggerData *trigdata)
/* ----------
* The query string built is
* SELECT 1 FROM ONLY <pktable> x WHERE pkatt1 = $1 [AND ...]
- * FOR KEY SHARE OF x
+ * FOR KEY SHARE OF x
* The type id's for the $ parameters are those of the
* corresponding FK attributes.
* ----------
@@ -539,7 +539,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
/* ----------
* The query string built is
* SELECT 1 FROM ONLY <pktable> x WHERE pkatt1 = $1 [AND ...]
- * FOR KEY SHARE OF x
+ * FOR KEY SHARE OF x
* The type id's for the $ parameters are those of the
* PK attributes themselves.
* ----------
@@ -697,8 +697,8 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
}
/*
- * If another PK row now exists providing the old key values,
- * we should not do anything. However, this check should only be
+ * If another PK row now exists providing the old key values, we
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -729,7 +729,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/* ----------
* The query string built is
* SELECT 1 FROM ONLY <fktable> x WHERE $1 = fkatt1 [AND ...]
- * FOR KEY SHARE OF x
+ * FOR KEY SHARE OF x
* The type id's for the $ parameters are those of the
* corresponding PK attributes.
* ----------
@@ -921,8 +921,8 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
}
/*
- * If another PK row now exists providing the old key values,
- * we should not do anything. However, this check should only be
+ * If another PK row now exists providing the old key values, we
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2150,6 +2150,7 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
switch (riinfo->confmatchtype)
{
case FKCONSTR_MATCH_SIMPLE:
+
/*
* If any new key value is NULL, the row must satisfy the
* constraint, so no check is needed.
@@ -2176,6 +2177,7 @@ RI_FKey_fk_upd_check_required(Trigger *trigger, Relation fk_rel,
return true;
case FKCONSTR_MATCH_FULL:
+
/*
* If all new key values are NULL, the row must satisfy the
* constraint, so no check is needed. On the other hand, if only
@@ -2449,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -2676,8 +2678,8 @@ ri_BuildQueryKey(RI_QueryKey *key, const RI_ConstraintInfo *riinfo,
int32 constr_queryno)
{
/*
- * We assume struct RI_QueryKey contains no padding bytes, else we'd
- * need to use memset to clear them.
+ * We assume struct RI_QueryKey contains no padding bytes, else we'd need
+ * to use memset to clear them.
*/
key->constr_id = riinfo->constraint_id;
key->constr_queryno = constr_queryno;
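
A note on the padding assumption above: the key is hashed as raw bytes, so any padding would have to be cleared before hashing. A standalone sketch of the defensive variant the comment alludes to (DemoKey is a hypothetical stand-in for the Oid/int32 pair):

#include <stdio.h>
#include <string.h>

typedef struct
{
    unsigned int constr_id;     /* stands in for the Oid field */
    int          constr_queryno;
} DemoKey;

static void
build_key(DemoKey *key, unsigned int id, int queryno)
{
    /* Defensive variant: clear the whole struct first so that hashing
     * its raw bytes gives a stable result even if padding existed. */
    memset(key, 0, sizeof(DemoKey));
    key->constr_id = id;
    key->constr_queryno = queryno;
}

int main(void)
{
    DemoKey k;

    build_key(&k, 16384, 1);
    printf("%u/%d\n", k.constr_id, k.constr_queryno);
    return 0;
}
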
@@ -2812,14 +2814,14 @@ ri_LoadConstraintInfo(Oid constraintOid)
elog(ERROR, "cache lookup failed for constraint %u", constraintOid);
conForm = (Form_pg_constraint) GETSTRUCT(tup);
- if (conForm->contype != CONSTRAINT_FOREIGN) /* should not happen */
+ if (conForm->contype != CONSTRAINT_FOREIGN) /* should not happen */
elog(ERROR, "constraint %u is not a foreign key constraint",
constraintOid);
/* And extract data */
Assert(riinfo->constraint_id == constraintOid);
riinfo->oidHashValue = GetSysCacheHashValue1(CONSTROID,
- ObjectIdGetDatum(constraintOid));
+ ObjectIdGetDatum(constraintOid));
memcpy(&riinfo->conname, &conForm->conname, sizeof(NameData));
riinfo->pk_relid = conForm->confrelid;
riinfo->fk_relid = conForm->conrelid;
@@ -3020,10 +3022,10 @@ ri_PerformCheck(const RI_ConstraintInfo *riinfo,
/*
* The values for the query are taken from the table on which the trigger
- * is called - it is normally the other one with respect to query_rel.
- * An exception is ri_Check_Pk_Match(), which uses the PK table for both
- * (and sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK). We might
- * eventually need some less klugy way to determine this.
+ * is called - it is normally the other one with respect to query_rel. An
+ * exception is ri_Check_Pk_Match(), which uses the PK table for both (and
+ * sets queryno to RI_PLAN_CHECK_LOOKUPPK_FROM_PK). We might eventually
+ * need some less klugy way to determine this.
*/
if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK)
{
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 043baf3c790..a1ed7813f24 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -1258,7 +1258,7 @@ pg_get_constraintdef(PG_FUNCTION_ARGS)
prettyFlags = PRETTYFLAG_INDENT;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
false,
- prettyFlags)));
+ prettyFlags)));
}
Datum
@@ -1271,7 +1271,7 @@ pg_get_constraintdef_ext(PG_FUNCTION_ARGS)
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : PRETTYFLAG_INDENT;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
false,
- prettyFlags)));
+ prettyFlags)));
}
/* Internal version that returns a palloc'd C string; no pretty-printing */
@@ -4229,19 +4229,19 @@ get_select_query_def(Query *query, deparse_context *context,
{
case LCS_FORKEYSHARE:
appendContextKeyword(context, " FOR KEY SHARE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORSHARE:
appendContextKeyword(context, " FOR SHARE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORNOKEYUPDATE:
appendContextKeyword(context, " FOR NO KEY UPDATE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
case LCS_FORUPDATE:
appendContextKeyword(context, " FOR UPDATE",
- -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ -PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
break;
}
@@ -5340,8 +5340,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with columns that were merged by USING or NATURAL clauses in a
* FULL JOIN; we took pains previously to make the unqualified column name
* unique in such cases.)
@@ -8550,7 +8550,7 @@ generate_relation_name(Oid relid, List *namespaces)
* means a FuncExpr and not some other way of calling the function), then
* was_variadic must specify whether VARIADIC appeared in the original call,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, was_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, was_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 0d5cafba962..da66f347078 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -194,10 +194,10 @@ static Selectivity prefix_selectivity(PlannerInfo *root,
VariableStatData *vardata,
Oid vartype, Oid opfamily, Const *prefixcon);
static Selectivity like_selectivity(const char *patt, int pattlen,
- bool case_insensitive);
+ bool case_insensitive);
static Selectivity regex_selectivity(const char *patt, int pattlen,
- bool case_insensitive,
- int fixed_prefix_len);
+ bool case_insensitive,
+ int fixed_prefix_len);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
static Const *string_to_bytea_const(const char *str, size_t str_len);
@@ -1123,7 +1123,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
Pattern_Prefix_Status pstatus;
Const *patt;
Const *prefix = NULL;
- Selectivity rest_selec = 0;
+ Selectivity rest_selec = 0;
double result;
/*
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
@@ -1867,17 +1867,17 @@ scalararraysel(PlannerInfo *root,
s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
Int16GetDatum(jointype),
- PointerGetDatum(sjinfo)));
+ PointerGetDatum(sjinfo)));
else
s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
if (useOr)
{
@@ -1934,17 +1934,17 @@ scalararraysel(PlannerInfo *root,
s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
Int16GetDatum(jointype),
- PointerGetDatum(sjinfo)));
+ PointerGetDatum(sjinfo)));
else
s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc,
clause->inputcollid,
PointerGetDatum(root),
- ObjectIdGetDatum(operator),
+ ObjectIdGetDatum(operator),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
if (useOr)
{
@@ -5293,7 +5293,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
if (rest_selec != NULL)
{
- char *patt = TextDatumGetCString(patt_const->constvalue);
+ char *patt = TextDatumGetCString(patt_const->constvalue);
*rest_selec = regex_selectivity(patt, strlen(patt),
case_insensitive,
@@ -5315,7 +5315,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation,
}
else
{
- char *patt = TextDatumGetCString(patt_const->constvalue);
+ char *patt = TextDatumGetCString(patt_const->constvalue);
*rest_selec = regex_selectivity(patt, strlen(patt),
case_insensitive,
@@ -5928,7 +5928,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
@@ -5941,15 +5941,15 @@ typedef struct
{
/* These are the values the cost estimator must return to the planner */
Cost indexStartupCost; /* index-related startup cost */
- Cost indexTotalCost; /* total index-related scan cost */
- Selectivity indexSelectivity; /* selectivity of index */
+ Cost indexTotalCost; /* total index-related scan cost */
+ Selectivity indexSelectivity; /* selectivity of index */
double indexCorrelation; /* order correlation of index */
/* Intermediate values we obtain along the way */
- double numIndexPages; /* number of leaf pages visited */
- double numIndexTuples; /* number of leaf tuples visited */
+ double numIndexPages; /* number of leaf pages visited */
+ double numIndexTuples; /* number of leaf tuples visited */
double spc_random_page_cost; /* relevant random_page_cost value */
- double num_sa_scans; /* # indexscans from ScalarArrayOps */
+ double num_sa_scans; /* # indexscans from ScalarArrayOps */
} GenericCosts;
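
To illustrate the calling pattern this struct supports (names and numbers below are invented; only the shape follows the comment): the generic estimator fills in preliminary outputs plus intermediates, and a type-specific wrapper then refines them.

#include <stdio.h>

typedef struct
{
    double indexTotalCost;      /* preliminary estimate */
    double numIndexPages;       /* intermediate exposed to the caller */
} DemoCosts;

static void
demo_generic_estimate(double pages, DemoCosts *costs)
{
    costs->numIndexPages = pages * 0.5;     /* pretend half are visited */
    costs->indexTotalCost = costs->numIndexPages * 4.0;    /* 4.0: random_page_cost default */
}

int main(void)
{
    DemoCosts costs;

    demo_generic_estimate(100.0, &costs);
    /* A type-specific estimator refines the preliminary number using
     * the exposed intermediates, e.g. adding a descent charge. */
    costs.indexTotalCost += 2.5;
    printf("pages=%g cost=%g\n", costs.numIndexPages, costs.indexTotalCost);
    return 0;
}
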
static void
@@ -5963,7 +5963,7 @@ genericcostestimate(PlannerInfo *root,
List *indexOrderBys = path->indexorderbys;
Cost indexStartupCost;
Cost indexTotalCost;
- Selectivity indexSelectivity;
+ Selectivity indexSelectivity;
double indexCorrelation;
double numIndexPages;
double numIndexTuples;
@@ -6048,7 +6048,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6570,7 +6570,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6617,7 +6617,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
* Although this computation isn't really expensive enough to require
* caching, we might as well use index->tree_height to cache it.
*/
- if (index->tree_height < 0) /* unknown? */
+ if (index->tree_height < 0) /* unknown? */
{
if (index->pages > 1) /* avoid computing log(0) */
index->tree_height = (int) (log(index->pages) / log(100.0));
@@ -6626,9 +6626,9 @@ gistcostestimate(PG_FUNCTION_ARGS)
}
/*
- * Add a CPU-cost component to represent the costs of initial descent.
- * We just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * Add a CPU-cost component to represent the costs of initial descent. We
+ * just use log(N) here not log2(N) since the branching factor isn't
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6679,7 +6679,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
* Although this computation isn't really expensive enough to require
* caching, we might as well use index->tree_height to cache it.
*/
- if (index->tree_height < 0) /* unknown? */
+ if (index->tree_height < 0) /* unknown? */
{
if (index->pages > 1) /* avoid computing log(0) */
index->tree_height = (int) (log(index->pages) / log(100.0));
@@ -6688,9 +6688,9 @@ spgcostestimate(PG_FUNCTION_ARGS)
}
/*
- * Add a CPU-cost component to represent the costs of initial descent.
- * We just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * Add a CPU-cost component to represent the costs of initial descent. We
+ * just use log(N) here not log2(N) since the branching factor isn't
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
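
The identical comments in the gist and spg hunks above describe the same two-part estimate. A standalone sketch of the arithmetic, using the formulas visible in the hunks and the default cpu_operator_cost of 0.0025 (compile with -lm; the exact descent charge in the backend may differ slightly):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double pages = 10000.0;
    double tuples = 1000000.0;
    double cpu_operator_cost = 0.0025;  /* default GUC value */

    /* Height estimate from the hunks above: the branching factor is
     * assumed to be about 100, hence log base 100. */
    int tree_height = pages > 1 ? (int) (log(pages) / log(100.0)) : 0;

    /* Descent charge: plain log(N), not log2(N), since the branching
     * factor isn't necessarily two; charged once per scan. */
    double descent_cost = tuples > 1 ? ceil(log(tuples)) * cpu_operator_cost : 0.0;

    printf("tree_height=%d descent_cost=%g\n", tree_height, descent_cost);
    return 0;
}
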
@@ -6801,14 +6801,14 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
collation = DEFAULT_COLLATION_OID;
OidFunctionCall7Coll(extractProcOid,
- collation,
- query,
- PointerGetDatum(&nentries),
- UInt16GetDatum(strategy_op),
- PointerGetDatum(&partial_matches),
- PointerGetDatum(&extra_data),
- PointerGetDatum(&nullFlags),
- PointerGetDatum(&searchMode));
+ collation,
+ query,
+ PointerGetDatum(&nentries),
+ UInt16GetDatum(strategy_op),
+ PointerGetDatum(&partial_matches),
+ PointerGetDatum(&extra_data),
+ PointerGetDatum(&nullFlags),
+ PointerGetDatum(&searchMode));
if (nentries <= 0 && searchMode == GIN_SEARCH_MODE_DEFAULT)
{
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 60f29533b71..94b2a3608a6 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -1296,7 +1296,7 @@ GetCurrentTimestamp(void)
int64
GetCurrentIntegerTimestamp(void)
{
- int64 result;
+ int64 result;
struct timeval tp;
gettimeofday(&tp, NULL);
@@ -3759,7 +3759,7 @@ interval_trunc(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("interval units \"%s\" not supported "
- "because months usually have fractional weeks",
+ "because months usually have fractional weeks",
lowunits)));
else
ereport(ERROR,
@@ -4608,8 +4608,8 @@ timestamp_izone(PG_FUNCTION_ARGS)
if (zone->month != 0 || zone->day != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not include months or days",
- DatumGetCString(DirectFunctionCall1(interval_out,
+ errmsg("interval time zone \"%s\" must not include months or days",
+ DatumGetCString(DirectFunctionCall1(interval_out,
PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
@@ -4781,8 +4781,8 @@ timestamptz_izone(PG_FUNCTION_ARGS)
if (zone->month != 0 || zone->day != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not include months or days",
- DatumGetCString(DirectFunctionCall1(interval_out,
+ errmsg("interval time zone \"%s\" must not include months or days",
+ DatumGetCString(DirectFunctionCall1(interval_out,
PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
diff --git a/src/backend/utils/adt/tsquery_rewrite.c b/src/backend/utils/adt/tsquery_rewrite.c
index 6d3f618e8fd..a301f8fc180 100644
--- a/src/backend/utils/adt/tsquery_rewrite.c
+++ b/src/backend/utils/adt/tsquery_rewrite.c
@@ -46,7 +46,6 @@ addone(int *counters, int last, int total)
static QTNode *
findeq(QTNode *node, QTNode *ex, QTNode *subs, bool *isfind)
{
-
if ((node->sign & ex->sign) != ex->sign ||
node->valnode->type != ex->valnode->type)
return node;
@@ -196,7 +195,6 @@ dofindsubquery(QTNode *root, QTNode *ex, QTNode *subs, bool *isfind)
static QTNode *
dropvoidsubtree(QTNode *root)
{
-
if (!root)
return NULL;
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index bb85faf1a7b..56349e7e2aa 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -4245,7 +4245,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4274,8 +4274,8 @@ text_format(PG_FUNCTION_ARGS)
/* should not get here, because of previous check */
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized conversion type specifier \"%c\"",
- *cp)));
+ errmsg("unrecognized conversion type specifier \"%c\"",
+ *cp)));
break;
}
}
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 9c5daec31e9..25ab79b1979 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1499,7 +1499,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
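
A standalone sketch of that approach with libxml2 (demoEntityLoader is a hypothetical name; the callback signature and the empty-string input stream mirror what the comment describes):

#include <libxml/parser.h>
#include <libxml/parserInternals.h>

static xmlParserInputPtr
demoEntityLoader(const char *url, const char *id, xmlParserCtxtPtr ctxt)
{
    (void) url;
    (void) id;
    /* Expand every external entity to an empty string rather than
     * fetching the URL or raising an error. */
    return xmlNewStringInputStream(ctxt, (const xmlChar *) "");
}

int main(void)
{
    xmlSetExternalEntityLoader(demoEntityLoader);
    /* ... parse documents here; external entities now expand to "" ... */
    return 0;
}
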
@@ -1609,6 +1609,7 @@ xml_errorHandler(void *data, xmlErrorPtr error)
case XML_FROM_NONE:
case XML_FROM_MEMORY:
case XML_FROM_IO:
+
/*
* Suppress warnings about undeclared entities. We need to do
* this to avoid problems due to not loading DTD definitions.
@@ -2002,8 +2003,8 @@ map_sql_value_to_xml_value(Datum value, Oid type, bool xml_escape_strings)
char *str;
/*
- * Flatten domains; the special-case treatments below should apply
- * to, eg, domains over boolean not just boolean.
+ * Flatten domains; the special-case treatments below should apply to,
+ * eg, domains over boolean not just boolean.
*/
type = getBaseType(type);
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 25f50e56670..cc91406582b 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -291,7 +291,7 @@ CatalogCacheComputeTupleHashValue(CatCache *cache, HeapTuple tuple)
static void
CatCachePrintStats(int code, Datum arg)
{
- slist_iter iter;
+ slist_iter iter;
long cc_searches = 0;
long cc_hits = 0;
long cc_neg_hits = 0;
@@ -444,7 +444,7 @@ CatCacheRemoveCList(CatCache *cache, CatCList *cl)
void
CatalogCacheIdInvalidate(int cacheId, uint32 hashValue)
{
- slist_iter cache_iter;
+ slist_iter cache_iter;
CACHE1_elog(DEBUG2, "CatalogCacheIdInvalidate: called");
@@ -554,12 +554,12 @@ AtEOXact_CatCache(bool isCommit)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- slist_iter cache_iter;
+ slist_iter cache_iter;
slist_foreach(cache_iter, &CacheHdr->ch_caches)
{
CatCache *ccp = slist_container(CatCache, cc_next, cache_iter.cur);
- dlist_iter iter;
+ dlist_iter iter;
int i;
/* Check CatCLists */
@@ -649,7 +649,7 @@ ResetCatalogCache(CatCache *cache)
void
ResetCatalogCaches(void)
{
- slist_iter iter;
+ slist_iter iter;
CACHE1_elog(DEBUG2, "ResetCatalogCaches called");
@@ -679,7 +679,7 @@ ResetCatalogCaches(void)
void
CatalogCacheFlushCatalog(Oid catId)
{
- slist_iter iter;
+ slist_iter iter;
CACHE2_elog(DEBUG2, "CatalogCacheFlushCatalog called for %u", catId);
@@ -1343,7 +1343,7 @@ SearchCatCacheList(CatCache *cache,
{
ScanKeyData cur_skey[CATCACHE_MAXKEYS];
uint32 lHashValue;
- dlist_iter iter;
+ dlist_iter iter;
CatCList *cl;
CatCTup *ct;
List *volatile ctlist;
@@ -1789,7 +1789,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple newtuple,
void (*function) (int, uint32, Oid))
{
- slist_iter iter;
+ slist_iter iter;
Oid reloid;
CACHE1_elog(DEBUG2, "PrepareToInvalidateCacheTuple: called");
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index bbd3ae369d3..2180f2abcc1 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -40,7 +40,7 @@ typedef enum
typedef struct
{
- EventTriggerEvent event;
+ EventTriggerEvent event;
List *triggerlist;
} EventTriggerCacheEntry;
@@ -51,7 +51,7 @@ static EventTriggerCacheStateType EventTriggerCacheState = ETCS_NEEDS_REBUILD;
static void BuildEventTriggerCache(void);
static void InvalidateEventCacheCallback(Datum arg,
int cacheid, uint32 hashvalue);
-static int DecodeTextArrayToCString(Datum array, char ***cstringp);
+static int DecodeTextArrayToCString(Datum array, char ***cstringp);
/*
* Search the event cache by trigger event.
@@ -77,12 +77,12 @@ EventCacheLookup(EventTriggerEvent event)
static void
BuildEventTriggerCache(void)
{
- HASHCTL ctl;
- HTAB *cache;
- MemoryContext oldcontext;
- Relation rel;
- Relation irel;
- SysScanDesc scan;
+ HASHCTL ctl;
+ HTAB *cache;
+ MemoryContext oldcontext;
+ Relation rel;
+ Relation irel;
+ SysScanDesc scan;
if (EventTriggerCacheContext != NULL)
{
@@ -96,8 +96,8 @@ BuildEventTriggerCache(void)
else
{
/*
- * This is our first time attempting to build the cache, so we need
- * to set up the memory context and register a syscache callback to
+ * This is our first time attempting to build the cache, so we need to
+ * set up the memory context and register a syscache callback to
* capture future invalidation events.
*/
if (CacheMemoryContext == NULL)
@@ -129,24 +129,24 @@ BuildEventTriggerCache(void)
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
- * Prepare to scan pg_event_trigger in name order. We use an MVCC
- * snapshot to avoid getting inconsistent results if the table is
- * being concurrently updated.
+ * Prepare to scan pg_event_trigger in name order. We use an MVCC
+ * snapshot to avoid getting inconsistent results if the table is being
+ * concurrently updated.
*/
rel = relation_open(EventTriggerRelationId, AccessShareLock);
irel = index_open(EventTriggerNameIndexId, AccessShareLock);
scan = systable_beginscan_ordered(rel, irel, GetLatestSnapshot(), 0, NULL);
/*
- * Build a cache item for each pg_event_trigger tuple, and append each
- * one to the appropriate cache entry.
+ * Build a cache item for each pg_event_trigger tuple, and append each one
+ * to the appropriate cache entry.
*/
for (;;)
{
- HeapTuple tup;
- Form_pg_event_trigger form;
+ HeapTuple tup;
+ Form_pg_event_trigger form;
char *evtevent;
- EventTriggerEvent event;
+ EventTriggerEvent event;
EventTriggerCacheItem *item;
Datum evttags;
bool evttags_isnull;
@@ -257,9 +257,9 @@ static void
InvalidateEventCacheCallback(Datum arg, int cacheid, uint32 hashvalue)
{
/*
- * If the cache isn't valid, then there might be a rebuild in progress,
- * so we can't immediately blow it away. But it's advantageous to do
- * this when possible, so as to immediately free memory.
+ * If the cache isn't valid, then there might be a rebuild in progress, so
+ * we can't immediately blow it away. But it's advantageous to do this
+ * when possible, so as to immediately free memory.
*/
if (EventTriggerCacheState == ETCS_VALID)
{
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index c4960d597e0..26cae97d955 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -216,7 +216,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* in that context.
*
* A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
* invalidation, so plan use must be completed in the current transaction,
* and DDL that might invalidate the querytree_list must be avoided as well.
*
@@ -373,9 +373,9 @@ CompleteCachedPlan(CachedPlanSource *plansource,
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
* should not generate much extra cruft either, since almost certainly
- * the path is already valid.) Again, we don't really need this for
+ * the path is already valid.) Again, we don't really need this for
* one-shot plans; and we *must* skip this for transaction control
* commands, because this could result in catalog accesses.
*/
@@ -554,9 +554,9 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
/*
* For one-shot plans, we do not support revalidation checking; it's
* assumed the query is parsed, planned, and executed in one transaction,
- * so that no lock re-acquisition is necessary. Also, there is never
- * any need to revalidate plans for transaction control commands (and
- * we mustn't risk any catalog accesses when handling those).
+ * so that no lock re-acquisition is necessary. Also, there is never any
+ * need to revalidate plans for transaction control commands (and we
+ * mustn't risk any catalog accesses when handling those).
*/
if (plansource->is_oneshot || IsTransactionStmtPlan(plansource))
{
@@ -725,7 +725,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
* not generate much extra cruft either, since almost certainly the path
* is already valid.)
*/
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 7888d387234..f1140385883 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2313,7 +2313,7 @@ AtEOXact_RelationCache(bool isCommit)
* For simplicity, eoxact_list[] entries are not deleted till end of
* top-level transaction, even though we could remove them at
* subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
* case that list entries are not found in the hashtable; if not, there's
* nothing to do for them.
*/
@@ -2354,66 +2354,66 @@ AtEOXact_RelationCache(bool isCommit)
static void
AtEOXact_cleanup(Relation relation, bool isCommit)
{
- /*
- * The relcache entry's ref count should be back to its normal
- * not-in-a-transaction state: 0 unless it's nailed in cache.
- *
- * In bootstrap mode, this is NOT true, so don't check it --- the
- * bootstrap code expects relations to stay open across start/commit
- * transaction calls. (That seems bogus, but it's not worth fixing.)
- *
- * Note: ideally this check would be applied to every relcache entry,
- * not just those that have eoxact work to do. But it's not worth
- * forcing a scan of the whole relcache just for this. (Moreover,
- * doing so would mean that assert-enabled testing never tests the
- * hash_search code path above, which seems a bad idea.)
- */
+ /*
+ * The relcache entry's ref count should be back to its normal
+ * not-in-a-transaction state: 0 unless it's nailed in cache.
+ *
+ * In bootstrap mode, this is NOT true, so don't check it --- the
+ * bootstrap code expects relations to stay open across start/commit
+ * transaction calls. (That seems bogus, but it's not worth fixing.)
+ *
+ * Note: ideally this check would be applied to every relcache entry, not
+ * just those that have eoxact work to do. But it's not worth forcing a
+ * scan of the whole relcache just for this. (Moreover, doing so would
+ * mean that assert-enabled testing never tests the hash_search code path
+ * above, which seems a bad idea.)
+ */
#ifdef USE_ASSERT_CHECKING
- if (!IsBootstrapProcessingMode())
- {
- int expected_refcnt;
+ if (!IsBootstrapProcessingMode())
+ {
+ int expected_refcnt;
- expected_refcnt = relation->rd_isnailed ? 1 : 0;
- Assert(relation->rd_refcnt == expected_refcnt);
- }
+ expected_refcnt = relation->rd_isnailed ? 1 : 0;
+ Assert(relation->rd_refcnt == expected_refcnt);
+ }
#endif
- /*
- * Is it a relation created in the current transaction?
- *
- * During commit, reset the flag to zero, since we are now out of the
- * creating transaction. During abort, simply delete the relcache
- * entry --- it isn't interesting any longer. (NOTE: if we have
- * forgotten the new-ness of a new relation due to a forced cache
- * flush, the entry will get deleted anyway by shared-cache-inval
- * processing of the aborted pg_class insertion.)
- */
- if (relation->rd_createSubid != InvalidSubTransactionId)
+ /*
+ * Is it a relation created in the current transaction?
+ *
+ * During commit, reset the flag to zero, since we are now out of the
+ * creating transaction. During abort, simply delete the relcache entry
+ * --- it isn't interesting any longer. (NOTE: if we have forgotten the
+ * new-ness of a new relation due to a forced cache flush, the entry will
+ * get deleted anyway by shared-cache-inval processing of the aborted
+ * pg_class insertion.)
+ */
+ if (relation->rd_createSubid != InvalidSubTransactionId)
+ {
+ if (isCommit)
+ relation->rd_createSubid = InvalidSubTransactionId;
+ else
{
- if (isCommit)
- relation->rd_createSubid = InvalidSubTransactionId;
- else
- {
- RelationClearRelation(relation, false);
- return;
- }
+ RelationClearRelation(relation, false);
+ return;
}
+ }
- /*
- * Likewise, reset the hint about the relfilenode being new.
- */
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+ /*
+ * Likewise, reset the hint about the relfilenode being new.
+ */
+ relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- /*
- * Flush any temporary index list.
- */
- if (relation->rd_indexvalid == 2)
- {
- list_free(relation->rd_indexlist);
- relation->rd_indexlist = NIL;
- relation->rd_oidindex = InvalidOid;
- relation->rd_indexvalid = 0;
- }
+ /*
+ * Flush any temporary index list.
+ */
+ if (relation->rd_indexvalid == 2)
+ {
+ list_free(relation->rd_indexlist);
+ relation->rd_indexlist = NIL;
+ relation->rd_oidindex = InvalidOid;
+ relation->rd_indexvalid = 0;
+ }
}
/*
@@ -2474,45 +2474,44 @@ static void
AtEOSubXact_cleanup(Relation relation, bool isCommit,
SubTransactionId mySubid, SubTransactionId parentSubid)
{
- /*
- * Is it a relation created in the current subtransaction?
- *
- * During subcommit, mark it as belonging to the parent, instead.
- * During subabort, simply delete the relcache entry.
- */
- if (relation->rd_createSubid == mySubid)
+ /*
+ * Is it a relation created in the current subtransaction?
+ *
+ * During subcommit, mark it as belonging to the parent, instead. During
+ * subabort, simply delete the relcache entry.
+ */
+ if (relation->rd_createSubid == mySubid)
+ {
+ if (isCommit)
+ relation->rd_createSubid = parentSubid;
+ else
{
- if (isCommit)
- relation->rd_createSubid = parentSubid;
- else
- {
- RelationClearRelation(relation, false);
- return;
- }
+ RelationClearRelation(relation, false);
+ return;
}
+ }
- /*
- * Likewise, update or drop any new-relfilenode-in-subtransaction
- * hint.
- */
- if (relation->rd_newRelfilenodeSubid == mySubid)
- {
- if (isCommit)
- relation->rd_newRelfilenodeSubid = parentSubid;
- else
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- }
+ /*
+ * Likewise, update or drop any new-relfilenode-in-subtransaction hint.
+ */
+ if (relation->rd_newRelfilenodeSubid == mySubid)
+ {
+ if (isCommit)
+ relation->rd_newRelfilenodeSubid = parentSubid;
+ else
+ relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+ }
- /*
- * Flush any temporary index list.
- */
- if (relation->rd_indexvalid == 2)
- {
- list_free(relation->rd_indexlist);
- relation->rd_indexlist = NIL;
- relation->rd_oidindex = InvalidOid;
- relation->rd_indexvalid = 0;
- }
+ /*
+ * Flush any temporary index list.
+ */
+ if (relation->rd_indexvalid == 2)
+ {
+ list_free(relation->rd_indexlist);
+ relation->rd_indexlist = NIL;
+ relation->rd_oidindex = InvalidOid;
+ relation->rd_indexvalid = 0;
+ }
}
@@ -2699,8 +2698,8 @@ RelationBuildLocalRelation(const char *relname,
RelationCacheInsert(rel);
/*
- * Flag relation as needing eoxact cleanup (to clear rd_createSubid).
- * We can't do this before storing relid in it.
+ * Flag relation as needing eoxact cleanup (to clear rd_createSubid). We
+ * can't do this before storing relid in it.
*/
EOXactListAdd(rel);
@@ -3847,8 +3846,8 @@ RelationGetIndexAttrBitmap(Relation relation, bool keyAttrs)
/* Can this index be referenced by a foreign key? */
isKey = indexInfo->ii_Unique &&
- indexInfo->ii_Expressions == NIL &&
- indexInfo->ii_Predicate == NIL;
+ indexInfo->ii_Expressions == NIL &&
+ indexInfo->ii_Predicate == NIL;
/* Collect simple attribute references */
for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
@@ -3861,7 +3860,7 @@ RelationGetIndexAttrBitmap(Relation relation, bool keyAttrs)
attrnum - FirstLowInvalidHeapAttributeNumber);
if (isKey)
uindexattrs = bms_add_member(uindexattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
}
}
@@ -4030,7 +4029,7 @@ errtable(Relation rel)
get_namespace_name(RelationGetNamespace(rel)));
err_generic_string(PG_DIAG_TABLE_NAME, RelationGetRelationName(rel));
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
/*
@@ -4061,7 +4060,7 @@ errtablecol(Relation rel, int attnum)
* given directly rather than extracted from the relation's catalog data.
*
* Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
* TABLE, for instance.
*/
int
@@ -4070,7 +4069,7 @@ errtablecolname(Relation rel, const char *colname)
errtable(rel);
err_generic_string(PG_DIAG_COLUMN_NAME, colname);
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
/*
@@ -4083,7 +4082,7 @@ errtableconstraint(Relation rel, const char *conname)
errtable(rel);
err_generic_string(PG_DIAG_CONSTRAINT_NAME, conname);
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index bfc3c86aa8c..ecb0f96d467 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -382,7 +382,7 @@ static const struct cachedesc cacheinfo[] = {
},
256
},
- {EventTriggerRelationId, /* EVENTTRIGGERNAME */
+ {EventTriggerRelationId, /* EVENTTRIGGERNAME */
EventTriggerNameIndexId,
1,
{
@@ -393,7 +393,7 @@ static const struct cachedesc cacheinfo[] = {
},
8
},
- {EventTriggerRelationId, /* EVENTTRIGGEROID */
+ {EventTriggerRelationId, /* EVENTTRIGGEROID */
EventTriggerOidIndexId,
1,
{
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index f8cf190e652..e9eb3d5be8c 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -741,7 +741,7 @@ errcode_for_socket_access(void)
StringInfoData buf; \
/* Internationalize the error format string */ \
if (!in_error_recursion_trouble()) \
- fmt = dngettext((domain), fmt_singular, fmt_plural, n); \
+ fmt = dngettext((domain), fmt_singular, fmt_plural, n); \
else \
fmt = (n == 1 ? fmt_singular : fmt_plural); \
/* Expand %m in format string */ \
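
dngettext here is the standard gettext plural-aware lookup. A standalone sketch of its fallback behavior when no catalog is bound for the domain, which is also what the else branch above emulates during error recursion:

#include <libintl.h>
#include <stdio.h>

int main(void)
{
    for (unsigned long n = 1; n <= 2; n++)
    {
        /* With no catalog bound for "demo", dngettext simply returns
         * the untranslated singular or plural form based on n. */
        const char *fmt = dngettext("demo", "%lu file", "%lu files", n);

        printf(fmt, n);
        printf("\n");
    }
    return 0;
}
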
@@ -1151,7 +1151,7 @@ err_generic_string(int field, const char *str)
break;
}
- return 0; /* return value does not matter */
+ return 0; /* return value does not matter */
}
/*
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 5454befe152..7c3f9206e5e 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -1042,9 +1042,9 @@ hash_update_hash_key(HTAB *hashp,
hashp->tabname);
/*
- * Lookup the existing element using its saved hash value. We need to
- * do this to be able to unlink it from its hash chain, but as a side
- * benefit we can verify the validity of the passed existingEntry pointer.
+ * Lookup the existing element using its saved hash value. We need to do
+ * this to be able to unlink it from its hash chain, but as a side benefit
+ * we can verify the validity of the passed existingEntry pointer.
*/
bucket = calc_bucket(hctl, existingElement->hashvalue);
@@ -1074,8 +1074,8 @@ hash_update_hash_key(HTAB *hashp,
oldPrevPtr = prevBucketPtr;
/*
- * Now perform the equivalent of a HASH_ENTER operation to locate the
- * hash chain we want to put the entry into.
+ * Now perform the equivalent of a HASH_ENTER operation to locate the hash
+ * chain we want to put the entry into.
*/
newhashvalue = hashp->hash(newKeyPtr, hashp->keysize);
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
/*
* If old and new hash values belong to the same bucket, we need not
* change any chain links, and indeed should not since this simplistic
- * update will corrupt the list if currBucket is the last element. (We
+ * update will corrupt the list if currBucket is the last element. (We
* cannot fall out earlier, however, since we need to scan the bucket to
* check for duplicate keys.)
*/
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 493e91ca610..cb78caf8ebd 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -498,8 +498,8 @@ void
InitializeSessionUserIdStandalone(void)
{
/*
- * This function should only be called in single-user mode, in
- * autovacuum workers, and in background workers.
+ * This function should only be called in single-user mode, in autovacuum
+ * workers, and in background workers.
*/
AssertState(!IsUnderPostmaster || IsAutoVacuumWorkerProcess() || IsBackgroundWorker);
@@ -894,7 +894,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Successfully created the file, now fill it. See comment in miscadmin.h
- * about the contents. Note that we write the same first five lines into
+ * about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
*/
@@ -948,9 +948,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Arrange to unlink the lock file(s) at proc_exit. If this is the
- * first one, set up the on_proc_exit function to do it; then add this
- * lock file to the list of files to unlink.
+ * Arrange to unlink the lock file(s) at proc_exit. If this is the first
+ * one, set up the on_proc_exit function to do it; then add this lock file
+ * to the list of files to unlink.
*/
if (lock_files == NIL)
on_proc_exit(UnlinkLockFiles, 0);
@@ -1077,8 +1077,8 @@ AddToDataDirLockFile(int target_line, const char *str)
srcbuffer[len] = '\0';
/*
- * Advance over lines we are not supposed to rewrite, then copy them
- * to destbuffer.
+ * Advance over lines we are not supposed to rewrite, then copy them to
+ * destbuffer.
*/
srcptr = srcbuffer;
for (lineno = 1; lineno < target_line; lineno++)
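
A standalone sketch of the skip-then-rewrite loop the comment describes (the buffer contents and target_line are hypothetical; the real lock file line layout is documented in miscadmin.h):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char srcbuffer[] = "pid\ndatadir\nstart time\nport\nsocket dir\n";
    int target_line = 4;        /* hypothetical line to rewrite */
    char *srcptr = srcbuffer;

    /* Advance over the lines that are kept as-is, exactly as the
     * loop in the hunk above does. */
    for (int lineno = 1; lineno < target_line; lineno++)
    {
        char *eol = strchr(srcptr, '\n');

        if (eol == NULL)
            break;              /* fewer lines than expected */
        srcptr = eol + 1;
    }
    printf("rewriting from: %s", srcptr);
    return 0;
}
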
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 5b52bd27973..e0abff1145a 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -203,9 +203,9 @@ PerformAuthentication(Port *port)
{
/*
* It is ok to continue if we fail to load the IDENT file, although it
- * means that you cannot log in using any of the authentication methods
- * that need a user name mapping. load_ident() already logged the
- * details of error to the log.
+ * means that you cannot log in using any of the authentication
+ * methods that need a user name mapping. load_ident() already logged
+ * the details of error to the log.
*/
}
#endif
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 287ff808fc1..4582219af73 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -714,14 +714,14 @@ pg_encoding_mb2wchar_with_len(int encoding,
int
pg_wchar2mb(const pg_wchar *from, char *to)
{
- return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *)to, pg_wchar_strlen(from));
+ return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *) to, pg_wchar_strlen(from));
}
/* convert a wchar string to a multibyte with a limited length */
int
pg_wchar2mb_with_len(const pg_wchar *from, char *to, int len)
{
- return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *)to, len);
+ return (*pg_wchar_table[DatabaseEncoding->encoding].wchar2mb_with_len) (from, (unsigned char *) to, len);
}
/* same, with any encoding */
@@ -729,7 +729,7 @@ int
pg_encoding_wchar2mb_with_len(int encoding,
const pg_wchar *from, char *to, int len)
{
- return (*pg_wchar_table[encoding].wchar2mb_with_len) (from, (unsigned char *)to, len);
+ return (*pg_wchar_table[encoding].wchar2mb_with_len) (from, (unsigned char *) to, len);
}
/* returns the byte length of a multibyte character */
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 2fc17feb5eb..45bc3c1604b 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -98,7 +98,7 @@ pg_euc2wchar_with_len(const unsigned char *from, pg_wchar *to, int len)
*to |= *from++;
len -= 2;
}
- else /* must be ASCII */
+ else /* must be ASCII */
{
*to = *from++;
len--;
@@ -513,7 +513,7 @@ pg_wchar2utf_with_len(const pg_wchar *from, unsigned char *to, int len)
while (len > 0 && *from)
{
- int char_len;
+ int char_len;
unicode_to_utf8(*from, to);
char_len = pg_utf_mblen(to);
@@ -1721,7 +1721,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
*-------------------------------------------------------------------
*/
pg_wchar_tbl pg_wchar_table[] = {
- {pg_ascii2wchar_with_len, pg_wchar2single_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* PG_SQL_ASCII */
+ {pg_ascii2wchar_with_len, pg_wchar2single_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* PG_SQL_ASCII */
{pg_eucjp2wchar_with_len, pg_wchar2euc_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* PG_EUC_JP */
{pg_euccn2wchar_with_len, pg_wchar2euc_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 2}, /* PG_EUC_CN */
{pg_euckr2wchar_with_len, pg_wchar2euc_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* PG_EUC_KR */
@@ -1756,13 +1756,13 @@ pg_wchar_tbl pg_wchar_table[] = {
{pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* PG_WIN1255 */
{pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* PG_WIN1257 */
{pg_latin12wchar_with_len, pg_wchar2single_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* PG_KOI8U */
- {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* PG_SJIS */
- {0, 0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* PG_BIG5 */
- {0, 0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* PG_GBK */
- {0, 0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* PG_UHC */
- {0, 0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4}, /* PG_GB18030 */
- {0, 0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* PG_JOHAB */
- {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2} /* PG_SHIFT_JIS_2004 */
+ {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* PG_SJIS */
+ {0, 0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* PG_BIG5 */
+ {0, 0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* PG_GBK */
+ {0, 0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* PG_UHC */
+ {0, 0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 4}, /* PG_GB18030 */
+ {0, 0, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* PG_JOHAB */
+ {0, 0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2} /* PG_SHIFT_JIS_2004 */
};
/* returns the byte length of a word for mule internal code */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 22ba35fef93..ea16c64619f 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -814,8 +814,8 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Detection of a checksum failure normally causes PostgreSQL to "
"report an error, aborting the current transaction. Setting "
"ignore_checksum_failure to true causes the system to ignore the failure "
- "(but still report a warning), and continue processing. This "
- "behavior could cause crashes or other serious problems. Only "
+ "(but still report a warning), and continue processing. This "
+ "behavior could cause crashes or other serious problems. Only "
"has an effect if checksums are enabled."),
GUC_NOT_IN_SAMPLE
},
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 6c3f965151a..e7ec3931f12 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -62,8 +62,8 @@ typedef struct ResourceOwnerData
int maxbuffers; /* currently allocated array size */
/* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */
- int nlocks; /* number of owned locks */
- LOCALLOCK *locks[MAX_RESOWNER_LOCKS]; /* list of owned locks */
+ int nlocks; /* number of owned locks */
+ LOCALLOCK *locks[MAX_RESOWNER_LOCKS]; /* list of owned locks */
/* We have built-in support for remembering catcache references */
int ncatrefs; /* number of owned catcache pins */
@@ -641,10 +641,10 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
* the entry.
*/
void
-ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK * locallock)
+ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
{
if (owner->nlocks > MAX_RESOWNER_LOCKS)
- return; /* we have already overflowed */
+ return; /* we have already overflowed */
if (owner->nlocks < MAX_RESOWNER_LOCKS)
owner->locks[owner->nlocks] = locallock;
@@ -664,7 +664,7 @@ ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
int i;
if (owner->nlocks > MAX_RESOWNER_LOCKS)
- return; /* we have overflowed */
+ return; /* we have overflowed */
Assert(owner->nlocks > 0);
for (i = owner->nlocks - 1; i >= 0; i--)
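
The overflow convention in these two functions is compact enough to get lost in the reindent: the counter is allowed to run one past the array to mark "stopped tracking". A standalone sketch (MAX_DEMO_LOCKS is a hypothetical stand-in for MAX_RESOWNER_LOCKS):

#include <stdio.h>

#define MAX_DEMO_LOCKS 8        /* stand-in for MAX_RESOWNER_LOCKS */

typedef struct
{
    int nlocks;
    int locks[MAX_DEMO_LOCKS];
} DemoOwner;

static void
remember_lock(DemoOwner *owner, int lock)
{
    if (owner->nlocks > MAX_DEMO_LOCKS)
        return;                 /* already overflowed: tracking disabled */
    if (owner->nlocks < MAX_DEMO_LOCKS)
        owner->locks[owner->nlocks] = lock;
    owner->nlocks++;            /* going one past the max marks overflow */
}

int main(void)
{
    DemoOwner owner = {0};

    for (int i = 0; i < 12; i++)
        remember_lock(&owner, i);
    printf("nlocks=%d overflowed=%s\n", owner.nlocks,
           owner.nlocks > MAX_DEMO_LOCKS ? "yes" : "no");
    return 0;
}
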
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 57d0d3f5e8b..ea9bc04823d 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -575,8 +575,8 @@ grow_memtuples(Tuplestorestate *state)
* strategy and instead increase as much as we safely can.
*
* To stay within allowedMem, we can't increase memtupsize by more
- * than availMem / sizeof(void *) elements. In practice, we want
- * to increase it by considerably less, because we need to leave some
+ * than availMem / sizeof(void *) elements. In practice, we want to
+ * increase it by considerably less, because we need to leave some
* space for the tuples to which the new array slots will refer. We
* assume the new tuples will be about the same size as the tuples
* we've already seen, and thus we can extrapolate from the space
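
A standalone sketch of the growth cap being described (availMem, memtupsize, and the average tuple size are hypothetical; the real code extrapolates the average from memory actually consumed so far):

#include <stdio.h>

int main(void)
{
    long availMem = 1024 * 1024;    /* bytes still allowed */
    long memtupsize = 1024;         /* current slot-array size */
    long avg_tuple_size = 64;       /* hypothetical running average */

    /* Hard ceiling from the comment: each new slot costs one pointer,
     * so at most availMem / sizeof(void *) slots could be added... */
    long hard_cap = availMem / (long) sizeof(void *);

    /* ...but grow by considerably less, leaving room for the tuples
     * the new slots will point to. */
    long grow_by = availMem / ((long) sizeof(void *) + avg_tuple_size);

    printf("hard_cap=%ld grow_by=%ld newsize=%ld\n",
           hard_cap, grow_by, memtupsize + grow_by);
    return 0;
}
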
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 24384b49890..ab4020a710b 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -214,12 +214,12 @@ HeapTupleSatisfiesSelf(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -270,7 +270,7 @@ HeapTupleSatisfiesSelf(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
@@ -405,12 +405,12 @@ HeapTupleSatisfiesNow(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -464,7 +464,7 @@ HeapTupleSatisfiesNow(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
@@ -682,12 +682,12 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return HeapTupleMayBeUpdated;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return HeapTupleMayBeUpdated;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -699,9 +699,11 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
else
{
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return HeapTupleSelfUpdated; /* updated after scan
+ * started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return HeapTupleInvisible; /* updated before scan
+ * started */
}
}
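
The cmax-versus-curcid rule that pgindent is rewrapping here is worth a concrete illustration. A standalone sketch (classify is a hypothetical helper; the real code returns HTSU_Result values):

#include <stdio.h>

typedef unsigned int CommandId;

static const char *
classify(CommandId cmax, CommandId curcid)
{
    if (cmax >= curcid)
        return "HeapTupleSelfUpdated";  /* deleted after this scan started */
    return "HeapTupleInvisible";        /* deleted before this scan started */
}

int main(void)
{
    printf("%s\n", classify(5, 3));     /* 5 >= 3: updated after scan start */
    printf("%s\n", classify(2, 3));     /* 2 <  3: updated before scan start */
    return 0;
}
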
@@ -746,14 +748,13 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
{
/*
- * If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK
- * is set, it cannot possibly be running. Otherwise need to
- * check.
+ * If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
+ * set, it cannot possibly be running. Otherwise need to check.
*/
if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -777,9 +778,9 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (TransactionIdIsCurrentTransactionId(xmax))
{
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan started */
+ return HeapTupleInvisible; /* updated before scan started */
}
if (MultiXactIdIsRunning(HeapTupleHeaderGetRawXmax(tuple)))
@@ -902,12 +903,12 @@ HeapTupleSatisfiesDirty(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -962,7 +963,7 @@ HeapTupleSatisfiesDirty(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
@@ -1094,12 +1095,12 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return true;
- if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
+ if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask)) /* not deleter */
return true;
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleGetUpdateXid(tuple);
if (!TransactionIdIsValid(xmax))
@@ -1111,7 +1112,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
else if (HeapTupleHeaderGetCmax(tuple) >= snapshot->curcid)
return true; /* updated after scan started */
else
- return false; /* updated before scan started */
+ return false; /* updated before scan started */
}
if (!TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmax(tuple)))
@@ -1156,7 +1157,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
- TransactionId xmax;
+ TransactionId xmax;
/* already checked above */
Assert(!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask));
@@ -1354,9 +1355,9 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * We don't really care whether xmax did commit, abort or crash.
- * We know that xmax did lock the tuple, but it did not and will
- * never actually update it.
+ * We don't really care whether xmax did commit, abort or crash. We
+ * know that xmax did lock the tuple, but it did not and will never
+ * actually update it.
*/
return HEAPTUPLE_LIVE;
@@ -1629,7 +1630,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
bool
HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
{
- TransactionId xmax;
+ TransactionId xmax;
/* if there's no valid Xmax, then there's obviously no update either */
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1643,8 +1644,8 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
return true;
/*
- * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this
- * must necessarily have been updated
+ * if HEAP_XMAX_LOCK_ONLY is not set and not a multi, then this must
+ * necessarily have been updated
*/
if (!(tuple->t_infomask & HEAP_XMAX_IS_MULTI))
return false;
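
A standalone sketch of the flag logic in this function up to the multixact branch, which the real code resolves by extracting the update xid from the multixact (the DEMO_* bit values are hypothetical; the real flags live in the heap tuple header definitions):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_XMAX_INVALID    0x0800
#define DEMO_XMAX_LOCK_ONLY  0x0080
#define DEMO_XMAX_IS_MULTI   0x1000

static bool
demo_only_locked(int infomask)
{
    if (infomask & DEMO_XMAX_INVALID)
        return true;            /* no valid xmax: no update either */
    if (infomask & DEMO_XMAX_LOCK_ONLY)
        return true;            /* flagged as lock-only */
    if (!(infomask & DEMO_XMAX_IS_MULTI))
        return false;           /* plain xid, not lock-only: an update */
    return false;               /* multixact: the real code digs further */
}

int main(void)
{
    printf("%d %d %d\n",
           demo_only_locked(DEMO_XMAX_INVALID),
           demo_only_locked(DEMO_XMAX_LOCK_ONLY),
           demo_only_locked(0));
    return 0;
}
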