author      Bruce Momjian   2001-03-22 04:01:46 +0000
committer   Bruce Momjian   2001-03-22 04:01:46 +0000
commit      9e1552607a9dc6bc23e43d46770a9063ade4f3f0 (patch)
tree        6a230d81917ebc004e40cd46c48f2aa27eec153e /src/backend
parent      6cf8707b828b14b5c2336076ce358b18b67829d6 (diff)
pgindent run. Make it all clean.
Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/common/heaptuple.c | 12
-rw-r--r--  src/backend/access/common/indextuple.c | 28
-rw-r--r--  src/backend/access/common/printtup.c | 27
-rw-r--r--  src/backend/access/common/tupdesc.c | 18
-rw-r--r--  src/backend/access/gist/gist.c | 698
-rw-r--r--  src/backend/access/gist/gistget.c | 4
-rw-r--r--  src/backend/access/gist/gistscan.c | 14
-rw-r--r--  src/backend/access/hash/hash.c | 57
-rw-r--r--  src/backend/access/hash/hashfunc.c | 14
-rw-r--r--  src/backend/access/heap/heapam.c | 384
-rw-r--r--  src/backend/access/heap/hio.c | 12
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 472
-rw-r--r--  src/backend/access/index/istrat.c | 16
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 11
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 606
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 60
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 271
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 93
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 103
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 49
-rw-r--r--  src/backend/access/rtree/rtget.c | 6
-rw-r--r--  src/backend/access/rtree/rtproc.c | 19
-rw-r--r--  src/backend/access/rtree/rtree.c | 111
-rw-r--r--  src/backend/access/rtree/rtscan.c | 14
-rw-r--r--  src/backend/access/transam/rmgr.c | 34
-rw-r--r--  src/backend/access/transam/transam.c | 6
-rw-r--r--  src/backend/access/transam/transsup.c | 6
-rw-r--r--  src/backend/access/transam/varsup.c | 31
-rw-r--r--  src/backend/access/transam/xact.c | 98
-rw-r--r--  src/backend/access/transam/xid.c | 7
-rw-r--r--  src/backend/access/transam/xlog.c | 643
-rw-r--r--  src/backend/access/transam/xlogutils.c | 143
-rw-r--r--  src/backend/catalog/aclchk.c | 18
-rw-r--r--  src/backend/catalog/catalog.c | 14
-rw-r--r--  src/backend/catalog/heap.c | 64
-rw-r--r--  src/backend/catalog/index.c | 198
-rw-r--r--  src/backend/catalog/indexing.c | 4
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 22
-rw-r--r--  src/backend/catalog/pg_largeobject.c | 16
-rw-r--r--  src/backend/catalog/pg_operator.c | 10
-rw-r--r--  src/backend/catalog/pg_proc.c | 35
-rw-r--r--  src/backend/catalog/pg_type.c | 54
-rw-r--r--  src/backend/commands/analyze.c | 113
-rw-r--r--  src/backend/commands/async.c | 10
-rw-r--r--  src/backend/commands/cluster.c | 43
-rw-r--r--  src/backend/commands/command.c | 661
-rw-r--r--  src/backend/commands/comment.c | 4
-rw-r--r--  src/backend/commands/copy.c | 84
-rw-r--r--  src/backend/commands/creatinh.c | 59
-rw-r--r--  src/backend/commands/dbcommands.c | 73
-rw-r--r--  src/backend/commands/define.c | 63
-rw-r--r--  src/backend/commands/explain.c | 4
-rw-r--r--  src/backend/commands/indexcmds.c | 86
-rw-r--r--  src/backend/commands/proclang.c | 2
-rw-r--r--  src/backend/commands/remove.c | 6
-rw-r--r--  src/backend/commands/rename.c | 19
-rw-r--r--  src/backend/commands/sequence.c | 87
-rw-r--r--  src/backend/commands/trigger.c | 78
-rw-r--r--  src/backend/commands/user.c | 17
-rw-r--r--  src/backend/commands/vacuum.c | 243
-rw-r--r--  src/backend/commands/variable.c | 123
-rw-r--r--  src/backend/commands/view.c | 26
-rw-r--r--  src/backend/executor/execAmi.c | 10
-rw-r--r--  src/backend/executor/execJunk.c | 3
-rw-r--r--  src/backend/executor/execMain.c | 116
-rw-r--r--  src/backend/executor/execQual.c | 162
-rw-r--r--  src/backend/executor/execScan.c | 6
-rw-r--r--  src/backend/executor/execTuples.c | 18
-rw-r--r--  src/backend/executor/execUtils.c | 20
-rw-r--r--  src/backend/executor/functions.c | 29
-rw-r--r--  src/backend/executor/nodeAgg.c | 151
-rw-r--r--  src/backend/executor/nodeGroup.c | 14
-rw-r--r--  src/backend/executor/nodeHash.c | 4
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 12
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 29
-rw-r--r--  src/backend/executor/nodeLimit.c | 18
-rw-r--r--  src/backend/executor/nodeMaterial.c | 18
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 128
-rw-r--r--  src/backend/executor/nodeNestloop.c | 20
-rw-r--r--  src/backend/executor/nodeResult.c | 4
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 8
-rw-r--r--  src/backend/executor/nodeSetOp.c | 29
-rw-r--r--  src/backend/executor/nodeSubplan.c | 10
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 7
-rw-r--r--  src/backend/executor/nodeTidscan.c | 3
-rw-r--r--  src/backend/executor/spi.c | 39
-rw-r--r--  src/backend/lib/bit.c | 8
-rw-r--r--  src/backend/libpq/auth.c | 45
-rw-r--r--  src/backend/libpq/be-fsstubs.c | 14
-rw-r--r--  src/backend/libpq/crypt.c | 4
-rw-r--r--  src/backend/libpq/password.c | 10
-rw-r--r--  src/backend/libpq/pqcomm.c | 55
-rw-r--r--  src/backend/libpq/pqpacket.c | 6
-rw-r--r--  src/backend/libpq/pqsignal.c | 9
-rw-r--r--  src/backend/main/main.c | 59
-rw-r--r--  src/backend/nodes/copyfuncs.c | 127
-rw-r--r--  src/backend/nodes/equalfuncs.c | 13
-rw-r--r--  src/backend/nodes/list.c | 15
-rw-r--r--  src/backend/nodes/makefuncs.c | 9
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 4
-rw-r--r--  src/backend/nodes/outfuncs.c | 15
-rw-r--r--  src/backend/nodes/print.c | 4
-rw-r--r--  src/backend/nodes/read.c | 27
-rw-r--r--  src/backend/nodes/readfuncs.c | 759
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 6
-rw-r--r--  src/backend/optimizer/geqo/geqo_main.c | 48
-rw-r--r--  src/backend/optimizer/path/_deadcode/predmig.c | 4
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 127
-rw-r--r--  src/backend/optimizer/path/clausesel.c | 30
-rw-r--r--  src/backend/optimizer/path/costsize.c | 40
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 65
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 133
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 94
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 80
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 96
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 134
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 16
-rw-r--r--  src/backend/optimizer/plan/planner.c | 215
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 34
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 46
-rw-r--r--  src/backend/optimizer/prep/prepkeyset.c | 5
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 22
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 39
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 196
-rw-r--r--  src/backend/optimizer/util/clauses.c | 85
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 4
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 11
-rw-r--r--  src/backend/optimizer/util/plancat.c | 38
-rw-r--r--  src/backend/optimizer/util/tlist.c | 3
-rw-r--r--  src/backend/optimizer/util/var.c | 13
-rw-r--r--  src/backend/parser/analyze.c | 499
-rw-r--r--  src/backend/parser/keywords.c | 16
-rw-r--r--  src/backend/parser/parse_clause.c | 158
-rw-r--r--  src/backend/parser/parse_coerce.c | 48
-rw-r--r--  src/backend/parser/parse_expr.c | 25
-rw-r--r--  src/backend/parser/parse_func.c | 82
-rw-r--r--  src/backend/parser/parse_node.c | 8
-rw-r--r--  src/backend/parser/parse_oper.c | 56
-rw-r--r--  src/backend/parser/parse_relation.c | 82
-rw-r--r--  src/backend/parser/parse_target.c | 8
-rw-r--r--  src/backend/parser/parse_type.c | 10
-rw-r--r--  src/backend/parser/parser.c | 5
-rw-r--r--  src/backend/port/beos/sem.c | 266
-rw-r--r--  src/backend/port/beos/shm.c | 124
-rw-r--r--  src/backend/port/beos/support.c | 277
-rw-r--r--  src/backend/port/darwin/sem.c | 104
-rw-r--r--  src/backend/port/dynloader/aix.c | 2
-rw-r--r--  src/backend/port/dynloader/aix.h | 10
-rw-r--r--  src/backend/port/dynloader/beos.c | 53
-rw-r--r--  src/backend/port/dynloader/darwin.c | 24
-rw-r--r--  src/backend/port/dynloader/darwin.h | 6
-rw-r--r--  src/backend/port/dynloader/hpux.c | 5
-rw-r--r--  src/backend/port/dynloader/solaris.h | 4
-rw-r--r--  src/backend/port/qnx4/shm.c | 8
-rw-r--r--  src/backend/port/strtol.c | 2
-rw-r--r--  src/backend/postmaster/postmaster.c | 270
-rw-r--r--  src/backend/regex/engine.c | 53
-rw-r--r--  src/backend/regex/regcomp.c | 77
-rw-r--r--  src/backend/regex/regexec.c | 2
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 60
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 123
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 91
-rw-r--r--  src/backend/rewrite/rewriteRemove.c | 19
-rw-r--r--  src/backend/rewrite/rewriteSupport.c | 5
-rw-r--r--  src/backend/storage/buffer/buf_init.c | 15
-rw-r--r--  src/backend/storage/buffer/buf_table.c | 6
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 116
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 16
-rw-r--r--  src/backend/storage/buffer/s_lock.c | 47
-rw-r--r--  src/backend/storage/file/buffile.c | 5
-rw-r--r--  src/backend/storage/file/fd.c | 18
-rw-r--r--  src/backend/storage/ipc/ipc.c | 153
-rw-r--r--  src/backend/storage/ipc/ipci.c | 4
-rw-r--r--  src/backend/storage/ipc/shmem.c | 17
-rw-r--r--  src/backend/storage/ipc/shmqueue.c | 5
-rw-r--r--  src/backend/storage/ipc/sinval.c | 5
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 10
-rw-r--r--  src/backend/storage/ipc/spin.c | 76
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 161
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 188
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 5
-rw-r--r--  src/backend/storage/lmgr/lock.c | 203
-rw-r--r--  src/backend/storage/lmgr/proc.c | 86
-rw-r--r--  src/backend/storage/page/bufpage.c | 35
-rw-r--r--  src/backend/storage/smgr/md.c | 56
-rw-r--r--  src/backend/storage/smgr/smgr.c | 53
-rw-r--r--  src/backend/tcop/dest.c | 4
-rw-r--r--  src/backend/tcop/fastpath.c | 50
-rw-r--r--  src/backend/tcop/postgres.c | 300
-rw-r--r--  src/backend/tcop/pquery.c | 9
-rw-r--r--  src/backend/tcop/utility.c | 67
-rw-r--r--  src/backend/tioga/tgRecipe.c | 14
-rw-r--r--  src/backend/tioga/tgRecipe.h | 3
-rw-r--r--  src/backend/utils/adt/acl.c | 24
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 145
-rw-r--r--  src/backend/utils/adt/ascii.c | 106
-rw-r--r--  src/backend/utils/adt/bool.c | 10
-rw-r--r--  src/backend/utils/adt/cash.c | 19
-rw-r--r--  src/backend/utils/adt/date.c | 136
-rw-r--r--  src/backend/utils/adt/datetime.c | 142
-rw-r--r--  src/backend/utils/adt/datum.c | 5
-rw-r--r--  src/backend/utils/adt/float.c | 26
-rw-r--r--  src/backend/utils/adt/format_type.c | 20
-rw-r--r--  src/backend/utils/adt/formatting.c | 813
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 202
-rw-r--r--  src/backend/utils/adt/inet_net_ntop.c | 6
-rw-r--r--  src/backend/utils/adt/int.c | 14
-rw-r--r--  src/backend/utils/adt/int8.c | 17
-rw-r--r--  src/backend/utils/adt/like.c | 224
-rw-r--r--  src/backend/utils/adt/mac.c | 46
-rw-r--r--  src/backend/utils/adt/misc.c | 5
-rw-r--r--  src/backend/utils/adt/nabstime.c | 196
-rw-r--r--  src/backend/utils/adt/network.c | 49
-rw-r--r--  src/backend/utils/adt/not_in.c | 6
-rw-r--r--  src/backend/utils/adt/numeric.c | 28
-rw-r--r--  src/backend/utils/adt/numutils.c | 10
-rw-r--r--  src/backend/utils/adt/oid.c | 55
-rw-r--r--  src/backend/utils/adt/oracle_compat.c | 22
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 6
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 16
-rw-r--r--  src/backend/utils/adt/quote.c | 146
-rw-r--r--  src/backend/utils/adt/regexp.c | 37
-rw-r--r--  src/backend/utils/adt/regproc.c | 4
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 10
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 110
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 350
-rw-r--r--  src/backend/utils/adt/sets.c | 20
-rw-r--r--  src/backend/utils/adt/tid.c | 39
-rw-r--r--  src/backend/utils/adt/timestamp.c | 183
-rw-r--r--  src/backend/utils/adt/varbit.c | 122
-rw-r--r--  src/backend/utils/adt/varchar.c | 84
-rw-r--r--  src/backend/utils/adt/varlena.c | 13
-rw-r--r--  src/backend/utils/cache/catcache.c | 177
-rw-r--r--  src/backend/utils/cache/fcache.c | 7
-rw-r--r--  src/backend/utils/cache/inval.c | 87
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 41
-rw-r--r--  src/backend/utils/cache/relcache.c | 328
-rw-r--r--  src/backend/utils/cache/syscache.c | 131
-rw-r--r--  src/backend/utils/cache/temprel.c | 39
-rw-r--r--  src/backend/utils/error/elog.c | 126
-rw-r--r--  src/backend/utils/error/exc.c | 3
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 16
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 288
-rw-r--r--  src/backend/utils/hash/dynahash.c | 4
-rw-r--r--  src/backend/utils/hash/pg_crc.c | 6
-rw-r--r--  src/backend/utils/init/globals.c | 4
-rw-r--r--  src/backend/utils/init/miscinit.c | 77
-rw-r--r--  src/backend/utils/init/postinit.c | 44
-rw-r--r--  src/backend/utils/mb/conv.c | 189
-rw-r--r--  src/backend/utils/mb/liketest.c | 143
-rw-r--r--  src/backend/utils/mb/palloc.c | 2
-rw-r--r--  src/backend/utils/mb/utftest.c | 4
-rw-r--r--  src/backend/utils/mb/wchar.c | 48
-rw-r--r--  src/backend/utils/misc/database.c | 17
-rw-r--r--  src/backend/utils/misc/guc.c | 625
-rw-r--r--  src/backend/utils/misc/ps_status.c | 170
-rw-r--r--  src/backend/utils/mmgr/aset.c | 161
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 99
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 6
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 4
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 50
261 files changed, 10924 insertions, 9675 deletions
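
The hunks below are purely mechanical re-indentation; pgindent changes layout, not logic. As an illustrative sketch only (this snippet is hypothetical, not taken from the commit), the conventions the tool enforces, and which recur throughout the diff, look roughly like this:

/*
 * Hypothetical example in pgindent style: declarations one per line with
 * names aligned, braces on their own lines, no blanks just inside
 * parentheses, no space after a cast, and block comments re-wrapped to the
 * standard width.
 */
static int
example_sum(int *values, int len)
{
	int			i;
	int			total = 0;

	for (i = 0; i < len; i++)
		total += values[i];

	return total;
}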
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 03d180e36fe..9bb08054943 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.69 2001/01/24 19:42:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -306,8 +306,8 @@ nocachegetattr(HeapTuple tuple,
int j;
/*
- * In for(), we test <= and not < because we want to see
- * if we can go past it in initializing offsets.
+ * In for(), we test <= and not < because we want to see if we
+ * can go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
@@ -321,9 +321,9 @@ nocachegetattr(HeapTuple tuple,
}
/*
- * If slow is false, and we got here, we know that we have a tuple with
- * no nulls or varlenas before the target attribute. If possible, we
- * also want to initialize the remainder of the attribute cached
+ * If slow is false, and we got here, we know that we have a tuple
+ * with no nulls or varlenas before the target attribute. If possible,
+ * we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index e503d9b888d..da8129f307f 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.52 2001/02/22 21:48:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,9 +45,11 @@ index_formtuple(TupleDesc tupleDescriptor,
bool hasnull = false;
uint16 tupmask = 0;
int numberOfAttributes = tupleDescriptor->natts;
+
#ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS];
+
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
@@ -57,7 +59,7 @@ index_formtuple(TupleDesc tupleDescriptor,
#ifdef TOAST_INDEX_HACK
for (i = 0; i < numberOfAttributes; i++)
{
- Form_pg_attribute att = tupleDescriptor->attrs[i];
+ Form_pg_attribute att = tupleDescriptor->attrs[i];
untoasted_value[i] = value[i];
untoasted_free[i] = false;
@@ -73,20 +75,20 @@ index_formtuple(TupleDesc tupleDescriptor,
if (VARATT_IS_EXTERNAL(value[i]))
{
untoasted_value[i] = PointerGetDatum(
- heap_tuple_fetch_attr(
- (varattrib *) DatumGetPointer(value[i])));
+ heap_tuple_fetch_attr(
+ (varattrib *) DatumGetPointer(value[i])));
untoasted_free[i] = true;
}
/*
- * If value is above size target, and is of a compressible datatype,
- * try to compress it in-line.
+ * If value is above size target, and is of a compressible
+ * datatype, try to compress it in-line.
*/
if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_value[i]) &&
(att->attstorage == 'x' || att->attstorage == 'm'))
{
- Datum cvalue = toast_compress_datum(untoasted_value[i]);
+ Datum cvalue = toast_compress_datum(untoasted_value[i]);
if (DatumGetPointer(cvalue) != NULL)
{
@@ -146,8 +148,8 @@ index_formtuple(TupleDesc tupleDescriptor,
/*
* We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The
- * only relevant info is the "has variable attributes" field.
- * We have already set the hasnull bit above.
+ * only relevant info is the "has variable attributes" field. We have
+ * already set the hasnull bit above.
*/
if (tupmask & HEAP_HASVARLENA)
@@ -315,9 +317,9 @@ nocache_index_getattr(IndexTuple tup,
}
/*
- * If slow is false, and we got here, we know that we have a tuple with
- * no nulls or varlenas before the target attribute. If possible, we
- * also want to initialize the remainder of the attribute cached
+ * If slow is false, and we got here, we know that we have a tuple
+ * with no nulls or varlenas before the target attribute. If possible,
+ * we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
@@ -391,9 +393,7 @@ nocache_index_getattr(IndexTuple tup,
usecache = false;
}
else
- {
off += att[i]->attlen;
- }
}
off = att_align(off, att[attnum]->attlen, att[attnum]->attalign);
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 4f47ef0d451..d44bfe973e0 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.57 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ getTypeOutputInfo(Oid type, Oid *typOutput, Oid *typElem,
*typOutput = pt->typoutput;
*typElem = pt->typelem;
- *typIsVarlena = (! pt->typbyval) && (pt->typlen == -1);
+ *typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
ReleaseSysCache(typeTuple);
return OidIsValid(*typOutput);
}
@@ -200,9 +200,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
if (OidIsValid(thisState->typoutput))
{
+
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to
+ * avoid memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -210,9 +211,9 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
attr = origattr;
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
- attr,
- ObjectIdGetDatum(thisState->typelem),
- Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
+ attr,
+ ObjectIdGetDatum(thisState->typelem),
+ Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr));
@@ -308,9 +309,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena))
{
+
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to
+ * avoid memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -318,9 +320,9 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
attr = origattr;
value = DatumGetCString(OidFunctionCall3(typoutput,
- attr,
- ObjectIdGetDatum(typelem),
- Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
+ attr,
+ ObjectIdGetDatum(typelem),
+ Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -405,6 +407,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */
if (thisState->typisvarlena)
{
+
/*
* If we have a toasted datum, must detoast before sending.
*/
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 86bc1a56f82..e07c6296d15 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.71 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -242,9 +242,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We do not need to check every single field here, and in fact
* some fields such as attdispersion probably shouldn't be
- * compared. We can also disregard attnum (it was used to
- * place the row in the attrs array) and everything derived
- * from the column datatype.
+ * compared. We can also disregard attnum (it was used to place
+ * the row in the attrs array) and everything derived from the
+ * column datatype.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@@ -276,8 +276,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We can't assume that the items are always read from the
- * system catalogs in the same order; so use the adnum field to
- * identify the matching item to compare.
+ * system catalogs in the same order; so use the adnum field
+ * to identify the matching item to compare.
*/
for (j = 0; j < n; defval2++, j++)
{
@@ -298,9 +298,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check;
/*
- * Similarly, don't assume that the checks are always read
- * in the same order; match them up by name and contents.
- * (The name *should* be unique, but...)
+ * Similarly, don't assume that the checks are always read in
+ * the same order; match them up by name and contents. (The
+ * name *should* be unique, but...)
*/
for (j = 0; j < n; check2++, j++)
{
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 9e3f935bd67..1c5577b88a0 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.71 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.72 2001/03/22 03:59:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,61 +25,62 @@
#include "access/xlogutils.h"
-/* result's status */
+/* result's status */
#define INSERTED 0x01
#define SPLITED 0x02
/* non-export function prototypes */
-static void gistdoinsert(Relation r,
- IndexTuple itup,
- InsertIndexResult *res,
- GISTSTATE *GISTstate);
-static int gistlayerinsert( Relation r, BlockNumber blkno,
- IndexTuple **itup,
- int *len,
- InsertIndexResult *res,
- GISTSTATE *giststate );
-static OffsetNumber gistwritebuffer( Relation r,
- Page page,
- IndexTuple *itup,
- int len,
- OffsetNumber off,
- GISTSTATE *giststate );
-static int gistnospace( Page page,
- IndexTuple *itvec, int len );
-static IndexTuple * gistreadbuffer( Relation r,
- Buffer buffer, int *len );
-static IndexTuple * gistjoinvector(
- IndexTuple *itvec, int *len,
- IndexTuple *additvec, int addlen );
-static IndexTuple gistunion( Relation r, IndexTuple *itvec,
- int len, GISTSTATE *giststate );
-static IndexTuple gistgetadjusted( Relation r,
- IndexTuple oldtup,
- IndexTuple addtup,
- GISTSTATE *giststate );
-static IndexTuple * gistSplit(Relation r,
- Buffer buffer,
- IndexTuple *itup,
- int *len,
- GISTSTATE *giststate,
- InsertIndexResult *res);
-static void gistnewroot(GISTSTATE *giststate, Relation r,
+static void gistdoinsert(Relation r,
+ IndexTuple itup,
+ InsertIndexResult *res,
+ GISTSTATE *GISTstate);
+static int gistlayerinsert(Relation r, BlockNumber blkno,
+ IndexTuple **itup,
+ int *len,
+ InsertIndexResult *res,
+ GISTSTATE *giststate);
+static OffsetNumber gistwritebuffer(Relation r,
+ Page page,
+ IndexTuple *itup,
+ int len,
+ OffsetNumber off,
+ GISTSTATE *giststate);
+static int gistnospace(Page page,
+ IndexTuple *itvec, int len);
+static IndexTuple *gistreadbuffer(Relation r,
+ Buffer buffer, int *len);
+static IndexTuple *gistjoinvector(
+ IndexTuple *itvec, int *len,
+ IndexTuple *additvec, int addlen);
+static IndexTuple gistunion(Relation r, IndexTuple *itvec,
+ int len, GISTSTATE *giststate);
+static IndexTuple gistgetadjusted(Relation r,
+ IndexTuple oldtup,
+ IndexTuple addtup,
+ GISTSTATE *giststate);
+static IndexTuple *gistSplit(Relation r,
+ Buffer buffer,
+ IndexTuple *itup,
+ int *len,
+ GISTSTATE *giststate,
+ InsertIndexResult *res);
+static void gistnewroot(GISTSTATE *giststate, Relation r,
IndexTuple *itup, int len);
static void GISTInitBuffer(Buffer b, uint32 f);
-static OffsetNumber gistchoose(Relation r, Page p,
- IndexTuple it,
- GISTSTATE *giststate);
-static IndexTuple gist_tuple_replacekey(Relation r,
- GISTENTRY entry, IndexTuple t);
-static void gistcentryinit(GISTSTATE *giststate,
- GISTENTRY *e, char *pr,
- Relation r, Page pg,
- OffsetNumber o, int b, bool l);
+static OffsetNumber gistchoose(Relation r, Page p,
+ IndexTuple it,
+ GISTSTATE *giststate);
+static IndexTuple gist_tuple_replacekey(Relation r,
+ GISTENTRY entry, IndexTuple t);
+static void gistcentryinit(GISTSTATE *giststate,
+ GISTENTRY *e, char *pr,
+ Relation r, Page pg,
+ OffsetNumber o, int b, bool l);
#undef GISTDEBUG
#ifdef GISTDEBUG
static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff);
+
#endif
/*
@@ -88,12 +89,14 @@ static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber c
Datum
gistbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -105,9 +108,11 @@ gistbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
GISTSTATE giststate;
@@ -181,6 +186,7 @@ gistbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -262,9 +268,7 @@ gistbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -297,7 +301,7 @@ gistbuild(PG_FUNCTION_ARGS)
}
#ifdef GISTDEBUG
-gist_dumptree(index, 0, GISTP_ROOT, 0);
+ gist_dumptree(index, 0, GISTP_ROOT, 0);
#endif
PG_RETURN_VOID();
@@ -312,12 +316,14 @@ gist_dumptree(index, 0, GISTP_ROOT, 0);
Datum
gistinsert(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -380,7 +386,7 @@ gistPageAddItem(GISTSTATE *giststate,
{
GISTENTRY tmpcentry;
IndexTuple itup = (IndexTuple) item;
- OffsetNumber retval;
+ OffsetNumber retval;
/*
* recompress the item given that we now know the exact page and
@@ -394,7 +400,7 @@ gistPageAddItem(GISTSTATE *giststate,
offsetNumber, dentry->bytes, FALSE);
*newtup = gist_tuple_replacekey(r, tmpcentry, itup);
retval = PageAddItem(page, (Item) *newtup, IndexTupleSize(*newtup),
- offsetNumber, flags);
+ offsetNumber, flags);
if (retval == InvalidOffsetNumber)
elog(ERROR, "gist: failed to add index item to %s",
RelationGetRelationName(r));
@@ -405,189 +411,213 @@ gistPageAddItem(GISTSTATE *giststate,
return (retval);
}
-static void
-gistdoinsert( Relation r,
- IndexTuple itup,
- InsertIndexResult *res,
- GISTSTATE *giststate ) {
+static void
+gistdoinsert(Relation r,
+ IndexTuple itup,
+ InsertIndexResult *res,
+ GISTSTATE *giststate)
+{
IndexTuple *instup;
- int i,ret,len = 1;
+ int i,
+ ret,
+ len = 1;
+
+ instup = (IndexTuple *) palloc(sizeof(IndexTuple));
+ instup[0] = (IndexTuple) palloc(IndexTupleSize(itup));
+ memcpy(instup[0], itup, IndexTupleSize(itup));
- instup = ( IndexTuple* ) palloc( sizeof(IndexTuple) );
- instup[0] = ( IndexTuple ) palloc( IndexTupleSize( itup ) );
- memcpy( instup[0], itup, IndexTupleSize( itup ) );
-
ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate);
- if ( ret & SPLITED )
- gistnewroot( giststate, r, instup, len );
+ if (ret & SPLITED)
+ gistnewroot(giststate, r, instup, len);
- for(i=0;i<len;i++)
- pfree( instup[i] );
- pfree( instup );
+ for (i = 0; i < len; i++)
+ pfree(instup[i]);
+ pfree(instup);
}
static int
-gistlayerinsert( Relation r, BlockNumber blkno,
- IndexTuple **itup, /* in - out, has compressed entry */
- int *len , /* in - out */
- InsertIndexResult *res, /* out */
- GISTSTATE *giststate ) {
- Buffer buffer;
- Page page;
- OffsetNumber child;
- int ret;
+gistlayerinsert(Relation r, BlockNumber blkno,
+ IndexTuple **itup, /* in - out, has compressed entry */
+ int *len, /* in - out */
+ InsertIndexResult *res, /* out */
+ GISTSTATE *giststate)
+{
+ Buffer buffer;
+ Page page;
+ OffsetNumber child;
+ int ret;
GISTPageOpaque opaque;
buffer = ReadBuffer(r, blkno);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
- if (!(opaque->flags & F_LEAF)) {
+ if (!(opaque->flags & F_LEAF))
+ {
/* internal page, so we must walk on tree */
/* len IS equial 1 */
- ItemId iid;
+ ItemId iid;
BlockNumber nblkno;
ItemPointerData oldtid;
- IndexTuple oldtup;
-
- child = gistchoose( r, page, *(*itup), giststate );
+ IndexTuple oldtup;
+
+ child = gistchoose(r, page, *(*itup), giststate);
iid = PageGetItemId(page, child);
oldtup = (IndexTuple) PageGetItem(page, iid);
nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid));
- /*
- * After this call:
- * 1. if child page was splited, then itup contains
- * keys for each page
- * 2. if child page wasn't splited, then itup contains
- * additional for adjustement of current key
+ /*
+ * After this call: 1. if child page was splited, then itup
+ * contains keys for each page 2. if child page wasn't splited,
+ * then itup contains additional for adjustement of current key
*/
- ret = gistlayerinsert( r, nblkno, itup, len, res, giststate );
+ ret = gistlayerinsert(r, nblkno, itup, len, res, giststate);
/* nothing inserted in child */
- if ( ! (ret & INSERTED) ) {
+ if (!(ret & INSERTED))
+ {
ReleaseBuffer(buffer);
- return 0x00;
+ return 0x00;
}
- /* child does not splited */
- if ( ! (ret & SPLITED) ) {
- IndexTuple newtup = gistgetadjusted( r, oldtup, (*itup)[0], giststate );
- if ( ! newtup ) {
+ /* child does not splited */
+ if (!(ret & SPLITED))
+ {
+ IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
+
+ if (!newtup)
+ {
/* not need to update key */
ReleaseBuffer(buffer);
return 0x00;
}
- pfree( (*itup)[0] ); /* !!! */
+ pfree((*itup)[0]); /* !!! */
(*itup)[0] = newtup;
}
- /* key is modified, so old version must be deleted */
+ /* key is modified, so old version must be deleted */
ItemPointerSet(&oldtid, blkno, child);
DirectFunctionCall2(gistdelete,
- PointerGetDatum(r),
- PointerGetDatum(&oldtid));
+ PointerGetDatum(r),
+ PointerGetDatum(&oldtid));
}
- ret = INSERTED;
+ ret = INSERTED;
- if ( gistnospace(page, (*itup), *len) ) {
+ if (gistnospace(page, (*itup), *len))
+ {
/* no space for insertion */
IndexTuple *itvec;
- int tlen;
+ int tlen;
ret |= SPLITED;
- itvec = gistreadbuffer( r, buffer, &tlen );
- itvec = gistjoinvector( itvec, &tlen, (*itup), *len );
- pfree( (*itup) );
- (*itup) = gistSplit( r, buffer, itvec, &tlen, giststate,
- (opaque->flags & F_LEAF) ? res : NULL ); /*res only for inserting in leaf*/
- ReleaseBuffer( buffer );
- pfree( itvec );
- *len = tlen; /* now tlen >= 2 */
- } else {
+ itvec = gistreadbuffer(r, buffer, &tlen);
+ itvec = gistjoinvector(itvec, &tlen, (*itup), *len);
+ pfree((*itup));
+ (*itup) = gistSplit(r, buffer, itvec, &tlen, giststate,
+ (opaque->flags & F_LEAF) ? res : NULL); /* res only for
+ * inserting in leaf */
+ ReleaseBuffer(buffer);
+ pfree(itvec);
+ *len = tlen; /* now tlen >= 2 */
+ }
+ else
+ {
/* enogth space */
- OffsetNumber off, l;
+ OffsetNumber off,
+ l;
- off = ( PageIsEmpty(page) ) ?
- FirstOffsetNumber
+ off = (PageIsEmpty(page)) ?
+ FirstOffsetNumber
:
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
- l = gistwritebuffer( r, page, (*itup), *len, off, giststate );
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ l = gistwritebuffer(r, page, (*itup), *len, off, giststate);
WriteBuffer(buffer);
- /* set res if insert into leaf page, in
- this case, len = 1 always */
- if ( res && (opaque->flags & F_LEAF) )
+ /*
+ * set res if insert into leaf page, in this case, len = 1 always
+ */
+ if (res && (opaque->flags & F_LEAF))
ItemPointerSet(&((*res)->pointerData), blkno, l);
- if ( *len > 1 ) { /* previos insert ret & SPLITED != 0 */
- int i;
- /* child was splited, so we must form union
- * for insertion in parent */
- IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
- for(i=0; i<*len; i++)
- pfree( (*itup)[i] );
+ if (*len > 1)
+ { /* previos insert ret & SPLITED != 0 */
+ int i;
+
+ /*
+ * child was splited, so we must form union for insertion in
+ * parent
+ */
+ IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
+
+ for (i = 0; i < *len; i++)
+ pfree((*itup)[i]);
(*itup)[0] = newtup;
*len = 1;
}
}
-
- return ret;
-}
-/*
+ return ret;
+}
+
+/*
* Write itup vector to page, has no control of free space
*/
static OffsetNumber
-gistwritebuffer( Relation r, Page page, IndexTuple *itup,
- int len, OffsetNumber off, GISTSTATE *giststate) {
+gistwritebuffer(Relation r, Page page, IndexTuple *itup,
+ int len, OffsetNumber off, GISTSTATE *giststate)
+{
OffsetNumber l = InvalidOffsetNumber;
- int i;
- GISTENTRY tmpdentry;
- IndexTuple newtup;
-
- for(i=0; i<len; i++) {
- l = gistPageAddItem(giststate, r, page,
- (Item) itup[i], IndexTupleSize(itup[i]),
- off, LP_USED, &tmpdentry, &newtup);
- off = OffsetNumberNext( off );
+ int i;
+ GISTENTRY tmpdentry;
+ IndexTuple newtup;
+
+ for (i = 0; i < len; i++)
+ {
+ l = gistPageAddItem(giststate, r, page,
+ (Item) itup[i], IndexTupleSize(itup[i]),
+ off, LP_USED, &tmpdentry, &newtup);
+ off = OffsetNumberNext(off);
if (tmpdentry.pred != (((char *) itup[i]) + sizeof(IndexTupleData)) && tmpdentry.pred)
pfree(tmpdentry.pred);
if (itup[i] != newtup)
pfree(newtup);
}
- return l;
+ return l;
}
/*
* Check space for itup vector on page
*/
-static int
-gistnospace( Page page, IndexTuple *itvec, int len ) {
- int size = 0;
- int i;
- for(i=0; i<len; i++)
- size += IndexTupleSize( itvec[i] )+4; /* ??? */
+static int
+gistnospace(Page page, IndexTuple *itvec, int len)
+{
+ int size = 0;
+ int i;
- return (PageGetFreeSpace(page) < size);
-}
+ for (i = 0; i < len; i++)
+ size += IndexTupleSize(itvec[i]) + 4; /* ??? */
+
+ return (PageGetFreeSpace(page) < size);
+}
/*
* Read buffer into itup vector
*/
static IndexTuple *
-gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
- OffsetNumber i, maxoff;
- IndexTuple *itvec;
- Page p = (Page) BufferGetPage(buffer);
+gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
+{
+ OffsetNumber i,
+ maxoff;
+ IndexTuple *itvec;
+ Page p = (Page) BufferGetPage(buffer);
- *len=0;
+ *len = 0;
maxoff = PageGetMaxOffsetNumber(p);
- itvec = palloc( sizeof(IndexTuple) * maxoff );
- for(i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
- itvec[ (*len)++ ] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
+ itvec = palloc(sizeof(IndexTuple) * maxoff);
+ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
+ itvec[(*len)++] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
return itvec;
}
@@ -596,9 +626,10 @@ gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
* join two vectors into one
*/
static IndexTuple *
-gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen ) {
- itvec = (IndexTuple*) repalloc( (void*)itvec, sizeof(IndexTuple) * ( (*len) + addlen ) );
- memmove( &itvec[*len], additvec, sizeof(IndexTuple) * addlen );
+gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
+{
+ itvec = (IndexTuple *) repalloc((void *) itvec, sizeof(IndexTuple) * ((*len) + addlen));
+ memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen;
return itvec;
}
@@ -607,115 +638,124 @@ gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen )
* return union of itup vector
*/
static IndexTuple
-gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
- bytea *evec;
- char *datum;
- int datumsize, i;
- GISTENTRY centry;
- char isnull;
- IndexTuple newtup;
+gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
+{
+ bytea *evec;
+ char *datum;
+ int datumsize,
+ i;
+ GISTENTRY centry;
+ char isnull;
+ IndexTuple newtup;
evec = (bytea *) palloc(len * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = len * sizeof(GISTENTRY) + VARHDRSZ;
- for ( i = 0 ; i< len ; i++ )
+ for (i = 0; i < len; i++)
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[i],
- (char*) itvec[i] + sizeof(IndexTupleData),
- (Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
- IndexTupleSize((IndexTuple)itvec[i]) - sizeof(IndexTupleData), FALSE);
+ (char *) itvec[i] + sizeof(IndexTupleData),
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ IndexTupleSize((IndexTuple) itvec[i]) - sizeof(IndexTupleData), FALSE);
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize)));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize)));
+
+ for (i = 0; i < len; i++)
+ if (((GISTENTRY *) VARDATA(evec))[i].pred &&
+ ((GISTENTRY *) VARDATA(evec))[i].pred !=
+ ((char *) (itvec[i]) + sizeof(IndexTupleData)))
+ pfree(((GISTENTRY *) VARDATA(evec))[i].pred);
- for ( i = 0 ; i< len ; i++ )
- if ( ((GISTENTRY *) VARDATA(evec))[i].pred &&
- ((GISTENTRY *) VARDATA(evec))[i].pred !=
- ((char*)( itvec[i] )+ sizeof(IndexTupleData)) )
- pfree( ((GISTENTRY *) VARDATA(evec))[i].pred );
-
- pfree( evec );
+ pfree(evec);
- gistcentryinit(giststate, &centry, datum,
- (Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
- datumsize, FALSE);
+ gistcentryinit(giststate, &centry, datum,
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
- newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
+ newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
if (centry.pred != datum)
- pfree( datum );
+ pfree(datum);
return newtup;
-}
+}
/*
* Forms union of oldtup and addtup, if union == oldtup then return NULL
*/
static IndexTuple
-gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate ) {
- bytea *evec;
- char *datum;
- int datumsize;
- bool result;
- char isnull;
- GISTENTRY centry, *ev0p, *ev1p;
- IndexTuple newtup = NULL;
-
+gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
+{
+ bytea *evec;
+ char *datum;
+ int datumsize;
+ bool result;
+ char isnull;
+ GISTENTRY centry,
+ *ev0p,
+ *ev1p;
+ IndexTuple newtup = NULL;
+
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[0],
- (char*) oldtup + sizeof(IndexTupleData), (Relation) NULL,
- (Page) NULL, (OffsetNumber) 0,
- IndexTupleSize((IndexTuple)oldtup) - sizeof(IndexTupleData), FALSE);
+ (char *) oldtup + sizeof(IndexTupleData), (Relation) NULL,
+ (Page) NULL, (OffsetNumber) 0,
+ IndexTupleSize((IndexTuple) oldtup) - sizeof(IndexTupleData), FALSE);
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[1],
- (char*) addtup + sizeof(IndexTupleData), (Relation) NULL,
- (Page) NULL, (OffsetNumber) 0,
- IndexTupleSize((IndexTuple)addtup) - sizeof(IndexTupleData), FALSE);
+ (char *) addtup + sizeof(IndexTupleData), (Relation) NULL,
+ (Page) NULL, (OffsetNumber) 0,
+ IndexTupleSize((IndexTuple) addtup) - sizeof(IndexTupleData), FALSE);
ev1p = &((GISTENTRY *) VARDATA(evec))[1];
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize)));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize)));
- if ( ! ( ev0p->pred && ev1p->pred ) ) {
- result = ( ev0p->pred == NULL && ev1p->pred == NULL );
- } else {
+ if (!(ev0p->pred && ev1p->pred))
+ result = (ev0p->pred == NULL && ev1p->pred == NULL);
+ else
+ {
FunctionCall3(&giststate->equalFn,
- PointerGetDatum(ev0p->pred),
- PointerGetDatum(datum),
- PointerGetDatum(&result));
+ PointerGetDatum(ev0p->pred),
+ PointerGetDatum(datum),
+ PointerGetDatum(&result));
}
- if ( result ) {
+ if (result)
+ {
/* not need to update key */
- pfree( datum );
- } else {
+ pfree(datum);
+ }
+ else
+ {
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
- ev0p->offset, datumsize, FALSE);
+ ev0p->offset, datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
- newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
- newtup->t_tid = oldtup->t_tid;
+ newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
+ newtup->t_tid = oldtup->t_tid;
if (centry.pred != datum)
- pfree( datum );
+ pfree(datum);
}
- if ( ev0p->pred &&
- ev0p->pred != (char*) oldtup + sizeof(IndexTupleData) )
- pfree( ev0p->pred );
- if ( ev1p->pred &&
- ev1p->pred != (char*) addtup + sizeof(IndexTupleData) )
- pfree( ev1p->pred );
- pfree( evec );
+ if (ev0p->pred &&
+ ev0p->pred != (char *) oldtup + sizeof(IndexTupleData))
+ pfree(ev0p->pred);
+ if (ev1p->pred &&
+ ev1p->pred != (char *) addtup + sizeof(IndexTupleData))
+ pfree(ev1p->pred);
+ pfree(evec);
- return newtup;
+ return newtup;
}
-
+
/*
* gistSplit -- split a page in the tree.
*/
@@ -728,19 +768,27 @@ gistSplit(Relation r,
InsertIndexResult *res)
{
Page p;
- Buffer leftbuf, rightbuf;
- Page left, right;
- OffsetNumber *spl_left, *spl_right;
- IndexTuple *lvectup, *rvectup, *newtup;
- int leftoff, rightoff;
- BlockNumber lbknum, rbknum;
+ Buffer leftbuf,
+ rightbuf;
+ Page left,
+ right;
+ OffsetNumber *spl_left,
+ *spl_right;
+ IndexTuple *lvectup,
+ *rvectup,
+ *newtup;
+ int leftoff,
+ rightoff;
+ BlockNumber lbknum,
+ rbknum;
GISTPageOpaque opaque;
- char isnull;
+ char isnull;
GIST_SPLITVEC v;
bytea *entryvec;
bool *decompvec;
GISTENTRY tmpentry;
- int i, nlen;
+ int i,
+ nlen;
p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@@ -773,17 +821,17 @@ gistSplit(Relation r,
right = (Page) BufferGetPage(rightbuf);
/* generate the item array */
- entryvec = (bytea *) palloc(VARHDRSZ + (*len+1) * sizeof(GISTENTRY));
- decompvec = (bool *) palloc(VARHDRSZ + (*len+1) * sizeof(bool));
- VARATT_SIZEP(entryvec) = (*len+1) * sizeof(GISTENTRY) + VARHDRSZ;
+ entryvec = (bytea *) palloc(VARHDRSZ + (*len + 1) * sizeof(GISTENTRY));
+ decompvec = (bool *) palloc(VARHDRSZ + (*len + 1) * sizeof(bool));
+ VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++)
{
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i],
- (((char *) itup[i-1]) + sizeof(IndexTupleData)),
+ (((char *) itup[i - 1]) + sizeof(IndexTupleData)),
r, p, i,
- IndexTupleSize(itup[i-1]) - sizeof(IndexTupleData), FALSE);
+ IndexTupleSize(itup[i - 1]) - sizeof(IndexTupleData), FALSE);
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred)
- == (((char *) itup[i-1]) + sizeof(IndexTupleData)))
+ == (((char *) itup[i - 1]) + sizeof(IndexTupleData)))
decompvec[i] = FALSE;
else
decompvec[i] = TRUE;
@@ -791,8 +839,8 @@ gistSplit(Relation r,
/* now let the user-defined picksplit function set up the split vector */
FunctionCall2(&giststate->picksplitFn,
- PointerGetDatum(entryvec),
- PointerGetDatum(&v));
+ PointerGetDatum(entryvec),
+ PointerGetDatum(&v));
/* clean up the entry vector: its preds need to be deleted, too */
for (i = 1; i <= *len; i++)
@@ -801,35 +849,43 @@ gistSplit(Relation r,
pfree(entryvec);
pfree(decompvec);
- spl_left = v.spl_left; spl_right = v.spl_right;
-
+ spl_left = v.spl_left;
+ spl_right = v.spl_right;
+
/* form left and right vector */
- lvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nleft );
- rvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nright );
+ lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
+ rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
leftoff = rightoff = 0;
- for( i=1; i <= *len; i++ ) {
- if (i == *(spl_left) || ( i==*len && *(spl_left) != FirstOffsetNumber ) ) {
- lvectup[ leftoff++ ] = itup[ i-1 ];
+ for (i = 1; i <= *len; i++)
+ {
+ if (i == *(spl_left) || (i == *len && *(spl_left) != FirstOffsetNumber))
+ {
+ lvectup[leftoff++] = itup[i - 1];
spl_left++;
- } else {
- rvectup[ rightoff++ ] = itup[ i-1 ];
+ }
+ else
+ {
+ rvectup[rightoff++] = itup[i - 1];
spl_right++;
}
}
/* write on disk (may be need another split) */
- if ( gistnospace(right, rvectup, v.spl_nright) ) {
+ if (gistnospace(right, rvectup, v.spl_nright))
+ {
nlen = v.spl_nright;
- newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
- ( res && rvectup[ nlen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
- ReleaseBuffer( rightbuf );
- } else {
+ newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
+ (res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
+ ReleaseBuffer(rightbuf);
+ }
+ else
+ {
OffsetNumber l;
-
- l = gistwritebuffer( r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate );
+
+ l = gistwritebuffer(r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate);
WriteBuffer(rightbuf);
- if ( res )
+ if (res)
ItemPointerSet(&((*res)->pointerData), rbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_rdatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@@ -839,32 +895,35 @@ gistSplit(Relation r,
v.spl_rdatum = tmpentry.pred;
nlen = 1;
- newtup = (IndexTuple*) palloc( sizeof(IndexTuple) * 1);
- isnull = ( v.spl_rdatum ) ? ' ' : 'n';
+ newtup = (IndexTuple *) palloc(sizeof(IndexTuple) * 1);
+ isnull = (v.spl_rdatum) ? ' ' : 'n';
newtup[0] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_rdatum), &isnull);
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
}
- if ( gistnospace(left, lvectup, v.spl_nleft) ) {
- int llen = v.spl_nleft;
+ if (gistnospace(left, lvectup, v.spl_nleft))
+ {
+ int llen = v.spl_nleft;
IndexTuple *lntup;
- lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
- ( res && lvectup[ llen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
- ReleaseBuffer( leftbuf );
+ lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
+ (res && lvectup[llen - 1] == itup[*len - 1]) ? res : NULL);
+ ReleaseBuffer(leftbuf);
- newtup = gistjoinvector( newtup, &nlen, lntup, llen );
- pfree( lntup );
- } else {
+ newtup = gistjoinvector(newtup, &nlen, lntup, llen);
+ pfree(lntup);
+ }
+ else
+ {
OffsetNumber l;
-
- l = gistwritebuffer( r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate );
- if ( BufferGetBlockNumber(buffer) != GISTP_ROOT)
+
+ l = gistwritebuffer(r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate);
+ if (BufferGetBlockNumber(buffer) != GISTP_ROOT)
PageRestoreTempPage(left, p);
WriteBuffer(leftbuf);
- if ( res )
+ if (res)
ItemPointerSet(&((*res)->pointerData), lbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_ldatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@@ -874,10 +933,10 @@ gistSplit(Relation r,
v.spl_ldatum = tmpentry.pred;
nlen += 1;
- newtup = (IndexTuple*) repalloc( (void*)newtup, sizeof(IndexTuple) * nlen);
- isnull = ( v.spl_ldatum ) ? ' ' : 'n';
- newtup[nlen-1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
- ItemPointerSet(&(newtup[nlen-1]->t_tid), lbknum, 1);
+ newtup = (IndexTuple *) repalloc((void *) newtup, sizeof(IndexTuple) * nlen);
+ isnull = (v.spl_ldatum) ? ' ' : 'n';
+ newtup[nlen - 1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
+ ItemPointerSet(&(newtup[nlen - 1]->t_tid), lbknum, 1);
}
@@ -885,10 +944,10 @@ gistSplit(Relation r,
gistadjscans(r, GISTOP_SPLIT, BufferGetBlockNumber(buffer), FirstOffsetNumber);
/* !!! pfree */
- pfree( rvectup );
- pfree( lvectup );
- pfree( v.spl_left );
- pfree( v.spl_right );
+ pfree(rvectup);
+ pfree(lvectup);
+ pfree(v.spl_left);
+ pfree(v.spl_right);
*len = nlen;
return newtup;
@@ -903,8 +962,8 @@ gistnewroot(GISTSTATE *giststate, Relation r, IndexTuple *itup, int len)
b = ReadBuffer(r, GISTP_ROOT);
GISTInitBuffer(b, 0);
p = BufferGetPage(b);
-
- gistwritebuffer( r, p, itup, len, FirstOffsetNumber, giststate );
+
+ gistwritebuffer(r, p, itup, len, FirstOffsetNumber, giststate);
WriteBuffer(b);
}
@@ -1000,8 +1059,8 @@ gistfreestack(GISTSTACK *s)
Datum
gistdelete(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
@@ -1101,7 +1160,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
char *datum = (((char *) t) + sizeof(IndexTupleData));
/* if new entry fits in index tuple, copy it in */
- if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0 )
+ if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0)
{
memcpy(datum, entry.pred, entry.bytes);
/* clear out old size */
@@ -1116,9 +1175,9 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
/* generate a new index tuple for the compressed entry */
TupleDesc tupDesc = r->rd_att;
IndexTuple newtup;
- char isnull;
+ char isnull;
- isnull = ( entry.pred ) ? ' ' : 'n';
+ isnull = (entry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple(tupDesc,
(Datum *) &(entry.pred),
&isnull);
@@ -1181,38 +1240,40 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
Page page;
GISTPageOpaque opaque;
IndexTuple which;
- ItemId iid;
- OffsetNumber i,maxoff;
- BlockNumber cblk;
- char *pred;
+ ItemId iid;
+ OffsetNumber i,
+ maxoff;
+ BlockNumber cblk;
+ char *pred;
- pred = (char*) palloc( sizeof(char)*level+1 );
+ pred = (char *) palloc(sizeof(char) * level + 1);
MemSet(pred, '\t', level);
- pred[level]='\0';
+ pred[level] = '\0';
buffer = ReadBuffer(r, blk);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
-
- maxoff = PageGetMaxOffsetNumber( page );
-
- elog(NOTICE,"%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, ( opaque->flags & F_LEAF ) ? "LEAF" : "INTE", (int)blk, (int)maxoff, PageGetFreeSpace(page));
-
- for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
+
+ maxoff = PageGetMaxOffsetNumber(page);
+
+ elog(NOTICE, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk, (int) maxoff, PageGetFreeSpace(page));
+
+ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
+ {
iid = PageGetItemId(page, i);
which = (IndexTuple) PageGetItem(page, iid);
cblk = ItemPointerGetBlockNumber(&(which->t_tid));
-#ifdef PRINTTUPLE
- elog(NOTICE,"%s Tuple. blk: %d size: %d", pred, (int)cblk, IndexTupleSize( which ) );
-#endif
-
- if ( ! ( opaque->flags & F_LEAF ) ) {
- gist_dumptree( r, level+1, cblk, i );
- }
+#ifdef PRINTTUPLE
+ elog(NOTICE, "%s Tuple. blk: %d size: %d", pred, (int) cblk, IndexTupleSize(which));
+#endif
+
+ if (!(opaque->flags & F_LEAF))
+ gist_dumptree(r, level + 1, cblk, i);
}
ReleaseBuffer(buffer);
pfree(pred);
}
+
#endif /* defined GISTDEBUG */
void
@@ -1220,15 +1281,14 @@ gist_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_redo: unimplemented");
}
-
+
void
gist_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_undo: unimplemented");
}
-
+
void
-gist_desc(char *buf, uint8 xl_info, char* rec)
+gist_desc(char *buf, uint8 xl_info, char *rec)
{
}
-
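
The gist.c hunks above re-indent gistdoinsert() and gistlayerinsert(); the comments they touch describe a small bitmask protocol between the two. Condensed from the code shown above (a trimmed excerpt for illustration, not a complete function):

	/*
	 * gistlayerinsert() returns INSERTED and/or SPLITED (the #defines near
	 * the top of gist.c); a split that propagates to the top level forces
	 * the collected keys into a new root page.
	 */
	ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate);
	if (ret & SPLITED)
		gistnewroot(giststate, r, instup, len);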
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index f7b49430d07..8f3b5dd475c 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -32,8 +32,8 @@ static bool gistindex_keytest(IndexTuple tuple, TupleDesc tupdesc,
Datum
gistgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/* if we have it cached in the scan desc, just return the value */
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index d37a8c07763..ba21fee3c33 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -72,9 +72,9 @@ gistbeginscan(PG_FUNCTION_ARGS)
Datum
gistrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- bool fromEnd = PG_GETARG_BOOL(1);
- ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ bool fromEnd = PG_GETARG_BOOL(1);
+ ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
GISTScanOpaque p;
int i;
@@ -160,7 +160,7 @@ gistrescan(PG_FUNCTION_ARGS)
Datum
gistmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
GISTScanOpaque p;
GISTSTACK *o,
*n,
@@ -196,7 +196,7 @@ gistmarkpos(PG_FUNCTION_ARGS)
Datum
gistrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
GISTScanOpaque p;
GISTSTACK *o,
*n,
@@ -232,8 +232,8 @@ gistrestrpos(PG_FUNCTION_ARGS)
Datum
gistendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- GISTScanOpaque p;
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ GISTScanOpaque p;
p = (GISTScanOpaque) s->opaque;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 44a8b225e8f..aa76ba232a0 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.49 2001/02/22 21:48:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.50 2001/03/22 03:59:12 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -41,12 +41,14 @@ bool BuildingHash = false;
Datum
hashbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -59,9 +61,11 @@ hashbuild(PG_FUNCTION_ARGS)
nitups;
HashItem hitem;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -117,6 +121,7 @@ hashbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -191,9 +196,7 @@ hashbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -241,12 +244,14 @@ hashbuild(PG_FUNCTION_ARGS)
Datum
hashinsert(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
HashItem hitem;
@@ -276,8 +281,8 @@ hashinsert(PG_FUNCTION_ARGS)
Datum
hashgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/*
@@ -326,11 +331,13 @@ hashbeginscan(PG_FUNCTION_ARGS)
Datum
hashrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
- bool fromEnd = PG_GETARG_BOOL(1);
+ bool fromEnd = PG_GETARG_BOOL(1);
+
#endif
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
HashScanOpaque so;
@@ -367,7 +374,7 @@ hashrescan(PG_FUNCTION_ARGS)
Datum
hashendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -405,7 +412,7 @@ hashendscan(PG_FUNCTION_ARGS)
Datum
hashmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -437,7 +444,7 @@ hashmarkpos(PG_FUNCTION_ARGS)
Datum
hashrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -468,8 +475,8 @@ hashrestrpos(PG_FUNCTION_ARGS)
Datum
hashdelete(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
/* adjust any active scans that will be affected by this deletion */
_hash_adjscans(rel, tid);
@@ -491,8 +498,8 @@ hash_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "hash_undo: unimplemented");
}
-
+
void
-hash_desc(char *buf, uint8 xl_info, char* rec)
+hash_desc(char *buf, uint8 xl_info, char *rec)
{
}
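
The hash.c hunks above are almost entirely pgindent re-aligning the argument-unpacking boilerplate at the top of each access-method entry point (PG_GETARG_POINTER, PG_GETARG_BOOL, the #ifdef NOT_USED arguments, and so on). As a rough illustration of that calling convention — a standalone toy with invented names, not the real fmgr.h — each entry point receives one call-info struct and pulls typed arguments out of a Datum array:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-ins for Datum and the fmgr call-info struct (hypothetical names). */
typedef uintptr_t Datum;

typedef struct ToyFunctionCallInfo
{
    int    nargs;
    Datum  arg[8];
} ToyFunctionCallInfo;

/* Hypothetical equivalents of PG_GETARG_POINTER / PG_GETARG_BOOL. */
#define TOY_GETARG_POINTER(fcinfo, n)  ((void *) (fcinfo)->arg[(n)])
#define TOY_GETARG_BOOL(fcinfo, n)     ((int) (fcinfo)->arg[(n)])

/* A toy "scan" object, standing in for IndexScanDesc. */
typedef struct ToyScan
{
    const char *relname;
} ToyScan;

/* Shaped like hashrescan(PG_FUNCTION_ARGS): unpack the arguments, then work. */
static Datum
toy_rescan(ToyFunctionCallInfo *fcinfo)
{
    ToyScan *scan = (ToyScan *) TOY_GETARG_POINTER(fcinfo, 0);
    int      fromEnd = TOY_GETARG_BOOL(fcinfo, 1);

    printf("rescan %s (fromEnd=%d)\n", scan->relname, fromEnd);
    return (Datum) 0;
}

int
main(void)
{
    ToyScan scan = {"toy_index"};
    ToyFunctionCallInfo fcinfo = {2, {(Datum) &scan, (Datum) 1}};

    toy_rescan(&fcinfo);
    return 0;
}

The real macros also deal with null flags and pass-by-value versus pass-by-reference types; the sketch only shows why every function body begins with the same block of casts that pgindent keeps re-aligning.
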
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 30defc1a57b..4cb157c702c 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.29 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.30 2001/03/22 03:59:13 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -25,32 +25,32 @@
Datum
hashchar(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_CHAR(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_CHAR(0)));
}
Datum
hashint2(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT16(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_INT16(0)));
}
Datum
hashint4(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ PG_GETARG_UINT32(0));
+ PG_RETURN_UINT32(~PG_GETARG_UINT32(0));
}
Datum
hashint8(PG_FUNCTION_ARGS)
{
/* we just use the low 32 bits... */
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT64(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_INT64(0)));
}
Datum
hashoid(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_OID(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_OID(0)));
}
Datum
@@ -93,7 +93,7 @@ hashint2vector(PG_FUNCTION_ARGS)
Datum
hashname(PG_FUNCTION_ARGS)
{
- char *key = NameStr(* PG_GETARG_NAME(0));
+ char *key = NameStr(*PG_GETARG_NAME(0));
return hash_any((char *) key, NAMEDATALEN);
}
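
The hashfunc.c hunks only remove the space after the '~' operator, but they do show how this version of the hash AM hashes fixed-width keys: the hash is simply the bitwise complement of the value widened to 32 bits (int8 keeps only its low 32 bits; variable-length keys go through hash_any instead). A minimal standalone sketch of that shortcut, with invented function names:

#include <stdio.h>
#include <stdint.h>

/* Complement-style hashes for fixed-width keys, as in hashint2/hashint4/hashoid. */
static uint32_t
toy_hash_int16(int16_t key)
{
    return ~((uint32_t) key);
}

static uint32_t
toy_hash_int64(int64_t key)
{
    /* like hashint8: just use the low 32 bits of the value */
    return ~((uint32_t) key);
}

int
main(void)
{
    printf("hash(7)  = 0x%08x\n", (unsigned) toy_hash_int16(7));
    printf("hash(-1) = 0x%08x\n", (unsigned) toy_hash_int16(-1));
    printf("hash(0x1122334455667788) = 0x%08x\n",
           (unsigned) toy_hash_int64(0x1122334455667788LL));
    return 0;
}
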
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9748daa194d..b55717744c1 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,14 +8,14 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.110 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
* heapgettup - fetch next heap tuple from a scan
* heap_open - open a heap relation by relationId
* heap_openr - open a heap relation by name
- * heap_open[r]_nofail - same, but return NULL on failure instead of elog
+ * heap_open[r]_nofail - same, but return NULL on failure instead of elog
* heap_close - close a heap relation
* heap_beginscan - begin relation scan
* heap_rescan - restart a relation scan
@@ -88,16 +88,16 @@
#include "access/xlogutils.h"
-XLogRecPtr log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup);
-XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
- char *unused, int unlen);
+XLogRecPtr log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup);
+XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
+ char *unused, int unlen);
/* comments are in heap_update */
-static xl_heaptid _locked_tuple_;
+static xl_heaptid _locked_tuple_;
static void _heap_unlock_tuple(void *data);
-static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
- ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
+static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
+ ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
/* ----------------------------------------------------------------
@@ -249,7 +249,7 @@ heapgettup(Relation relation,
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
- (ItemPointer) NULL : &(tuple->t_self);
+ (ItemPointer) NULL : &(tuple->t_self);
/* ----------------
* increment access statistics
@@ -286,7 +286,7 @@ heapgettup(Relation relation,
if (!ItemPointerIsValid(tid))
Assert(!PointerIsValid(tid));
-
+
tuple->t_tableOid = relation->rd_id;
/* ----------------
@@ -538,9 +538,9 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
(
(tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
(
- fetchatt((tupleDesc)->attrs[(attnum) - 1],
- (char *) (tup)->t_data + (tup)->t_data->t_hoff +
- (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
+ fetchatt((tupleDesc)->attrs[(attnum) - 1],
+ (char *) (tup)->t_data + (tup)->t_data->t_hoff +
+ (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
)
:
nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
@@ -564,7 +564,8 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
)
);
}
-#endif /* defined(DISABLE_COMPLEX_MACRO)*/
+
+#endif /* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------------------------------------------------------
@@ -791,8 +792,8 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys;
/*
- * we do this here instead of in initscan() because heap_rescan
- * also calls initscan() and we don't want to allocate memory again
+ * we do this here instead of in initscan() because heap_rescan also
+ * calls initscan() and we don't want to allocate memory again
*/
if (nkeys)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
@@ -1316,7 +1317,7 @@ heap_get_latest_tid(Relation relation,
Oid
heap_insert(Relation relation, HeapTuple tup)
{
- Buffer buffer;
+ Buffer buffer;
/* increment access statistics */
IncrHeapAccessStat(local_insert);
@@ -1350,7 +1351,7 @@ heap_insert(Relation relation, HeapTuple tup)
* toasted attributes from some other relation, invoke the toaster.
* ----------
*/
- if (HeapTupleHasExtended(tup) ||
+ if (HeapTupleHasExtended(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
heap_tuple_toast_attrs(relation, tup, NULL);
#endif
@@ -1364,17 +1365,17 @@ heap_insert(Relation relation, HeapTuple tup)
/* XLOG stuff */
{
- xl_heap_insert xlrec;
- xl_heap_header xlhdr;
- XLogRecPtr recptr;
- XLogRecData rdata[3];
- Page page = BufferGetPage(buffer);
- uint8 info = XLOG_HEAP_INSERT;
+ xl_heap_insert xlrec;
+ xl_heap_header xlhdr;
+ XLogRecPtr recptr;
+ XLogRecData rdata[3];
+ Page page = BufferGetPage(buffer);
+ uint8 info = XLOG_HEAP_INSERT;
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tup->t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapInsert;
rdata[0].next = &(rdata[1]);
@@ -1383,12 +1384,12 @@ heap_insert(Relation relation, HeapTuple tup)
xlhdr.t_hoff = tup->t_data->t_hoff;
xlhdr.mask = tup->t_data->t_infomask;
rdata[1].buffer = buffer;
- rdata[1].data = (char*)&xlhdr;
+ rdata[1].data = (char *) &xlhdr;
rdata[1].len = SizeOfHeapHeader;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = buffer;
- rdata[2].data = (char*) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
+ rdata[2].data = (char *) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[2].len = tup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[2].next = NULL;
@@ -1411,10 +1412,10 @@ heap_insert(Relation relation, HeapTuple tup)
WriteBuffer(buffer);
/*
- * If tuple is cachable, mark it for rollback from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "tup" data structure is all
- * in local memory, not in the shared buffer.
+ * If tuple is cachable, mark it for rollback from the caches in case
+ * we abort. Note it is OK to do this after WriteBuffer releases the
+ * buffer, because the "tup" data structure is all in local memory,
+ * not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, tup);
@@ -1513,14 +1514,14 @@ l1:
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
/* XLOG stuff */
{
- xl_heap_delete xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_heap_delete xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tp.t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapDelete;
rdata[0].next = &(rdata[1]);
@@ -1551,9 +1552,10 @@ l1:
#endif
/*
- * Mark tuple for invalidation from system caches at next command boundary.
- * We have to do this before WriteBuffer because we need to look at the
- * contents of the tuple, so we need to hold our refcount on the buffer.
+ * Mark tuple for invalidation from system caches at next command
+ * boundary. We have to do this before WriteBuffer because we need to
+ * look at the contents of the tuple, so we need to hold our refcount
+ * on the buffer.
*/
RelationInvalidateHeapTuple(relation, &tp);
@@ -1567,7 +1569,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via elog().
*/
void
@@ -1636,6 +1638,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
oldtup.t_len = ItemIdGetLength(lp);
oldtup.t_self = *otid;
+
/*
* Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite
@@ -1701,23 +1704,24 @@ l2:
/*
* If the toaster needs to be activated, OR if the new tuple will not
- * fit on the same page as the old, then we need to release the context
- * lock (but not the pin!) on the old tuple's buffer while we are off
- * doing TOAST and/or table-file-extension work. We must mark the old
- * tuple to show that it's already being updated, else other processes
- * may try to update it themselves. To avoid second XLOG log record,
- * we use xact mgr hook to unlock old tuple without reading log if xact
- * will abort before update is logged. In the event of crash prio logging,
- * TQUAL routines will see HEAP_XMAX_UNLOGGED flag...
+ * fit on the same page as the old, then we need to release the
+ * context lock (but not the pin!) on the old tuple's buffer while we
+ * are off doing TOAST and/or table-file-extension work. We must mark
+ * the old tuple to show that it's already being updated, else other
+ * processes may try to update it themselves. To avoid second XLOG log
+ * record, we use xact mgr hook to unlock old tuple without reading
+ * log if xact will abort before update is logged. In the event of
+ * crash prio logging, TQUAL routines will see HEAP_XMAX_UNLOGGED
+ * flag...
*
- * NOTE: this trick is useless currently but saved for future
- * when we'll implement UNDO and will re-use transaction IDs
- * after postmaster startup.
+ * NOTE: this trick is useless currently but saved for future when we'll
+ * implement UNDO and will re-use transaction IDs after postmaster
+ * startup.
*
* We need to invoke the toaster if there are already any toasted values
* present, or if the new tuple is over-threshold.
*/
- need_toast = (HeapTupleHasExtended(&oldtup) ||
+ need_toast = (HeapTupleHasExtended(&oldtup) ||
HeapTupleHasExtended(newtup) ||
(MAXALIGN(newtup->t_len) > TOAST_TUPLE_THRESHOLD));
@@ -1726,7 +1730,7 @@ l2:
{
_locked_tuple_.node = relation->rd_node;
_locked_tuple_.tid = oldtup.t_self;
- XactPushRollback(_heap_unlock_tuple, (void*) &_locked_tuple_);
+ XactPushRollback(_heap_unlock_tuple, (void *) &_locked_tuple_);
TransactionIdStore(GetCurrentTransactionId(),
&(oldtup.t_data->t_xmax));
@@ -1762,7 +1766,7 @@ l2:
/* NO ELOG(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
if (already_marked)
{
@@ -1784,7 +1788,7 @@ l2:
/* XLOG stuff */
{
- XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
+ XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
newbuf, newtup, false);
if (newbuf != buffer)
@@ -1814,10 +1818,10 @@ l2:
WriteBuffer(buffer);
/*
- * If new tuple is cachable, mark it for rollback from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "newtup" data structure is all
- * in local memory, not in the shared buffer.
+ * If new tuple is cachable, mark it for rollback from the caches in
+ * case we abort. Note it is OK to do this after WriteBuffer releases
+ * the buffer, because the "newtup" data structure is all in local
+ * memory, not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, newtup);
@@ -1829,7 +1833,7 @@ l2:
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via elog().
*/
void
@@ -2129,14 +2133,14 @@ heap_restrpos(HeapScanDesc scan)
XLogRecPtr
log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
{
- xl_heap_clean xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[3];
+ xl_heap_clean xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[3];
xlrec.node = reln->rd_node;
xlrec.block = BufferGetBlockNumber(buffer);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapClean;
rdata[0].next = &(rdata[1]);
@@ -2157,27 +2161,27 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CLEAN, rdata);
- return(recptr);
+ return (recptr);
}
static XLogRecPtr
-log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
+log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
Buffer newbuf, HeapTuple newtup, bool move)
{
- char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
- xl_heap_update xlrec;
- xl_heap_header *xlhdr = (xl_heap_header*) tbuf;
- int hsize = SizeOfHeapHeader;
- XLogRecPtr recptr;
- XLogRecData rdata[4];
- Page page = BufferGetPage(newbuf);
- uint8 info = (move) ? XLOG_HEAP_MOVE : XLOG_HEAP_UPDATE;
+ char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
+ xl_heap_update xlrec;
+ xl_heap_header *xlhdr = (xl_heap_header *) tbuf;
+ int hsize = SizeOfHeapHeader;
+ XLogRecPtr recptr;
+ XLogRecData rdata[4];
+ Page page = BufferGetPage(newbuf);
+ uint8 info = (move) ? XLOG_HEAP_MOVE : XLOG_HEAP_UPDATE;
xlrec.target.node = reln->rd_node;
xlrec.target.tid = from;
xlrec.newtid = newtup->t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapUpdate;
rdata[0].next = &(rdata[1]);
@@ -2190,9 +2194,9 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlhdr->t_natts = newtup->t_data->t_natts;
xlhdr->t_hoff = newtup->t_data->t_hoff;
xlhdr->mask = newtup->t_data->t_infomask;
- if (move) /* remember xmin & xmax */
+ if (move) /* remember xmin & xmax */
{
- TransactionId xmax;
+ TransactionId xmax;
if (newtup->t_data->t_infomask & HEAP_XMAX_INVALID ||
newtup->t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
@@ -2200,17 +2204,17 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
else
xmax = newtup->t_data->t_xmax;
memcpy(tbuf + hsize, &xmax, sizeof(TransactionId));
- memcpy(tbuf + hsize + sizeof(TransactionId),
- &(newtup->t_data->t_xmin), sizeof(TransactionId));
+ memcpy(tbuf + hsize + sizeof(TransactionId),
+ &(newtup->t_data->t_xmin), sizeof(TransactionId));
hsize += (2 * sizeof(TransactionId));
}
rdata[2].buffer = newbuf;
- rdata[2].data = (char*)xlhdr;
+ rdata[2].data = (char *) xlhdr;
rdata[2].len = hsize;
rdata[2].next = &(rdata[3]);
rdata[3].buffer = newbuf;
- rdata[3].data = (char*) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
+ rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[3].next = NULL;
@@ -2224,23 +2228,23 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
recptr = XLogInsert(RM_HEAP_ID, info, rdata);
- return(recptr);
+ return (recptr);
}
XLogRecPtr
-log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup)
+log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup)
{
- return(log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
+ return (log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
}
static void
heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_clean *xlrec = (xl_heap_clean*) XLogRecGetData(record);
- Relation reln;
- Buffer buffer;
- Page page;
+ xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
+ Relation reln;
+ Buffer buffer;
+ Page page;
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2266,15 +2270,15 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfHeapClean)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- char *unend;
- ItemId lp;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ char *unend;
+ ItemId lp;
- memcpy(unbuf, (char*)xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
+ memcpy(unbuf, (char *) xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
unend = unbuf + (record->xl_len - SizeOfHeapClean);
- while((char*)unused < unend)
+ while ((char *) unused < unend)
{
lp = ((PageHeader) page)->pd_linp + *unused;
lp->lp_flags &= ~LP_USED;
@@ -2289,13 +2293,13 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_delete *xlrec = (xl_heap_delete*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId lp = NULL;
- HeapTupleHeader htup;
+ xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2303,7 +2307,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
+ buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
@@ -2320,7 +2324,8 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
return;
}
}
- else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@@ -2337,7 +2342,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_xmax = record->xl_xid;
htup->t_cmax = FirstCommandId;
htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
- HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+ HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@@ -2350,12 +2355,12 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_insert *xlrec = (xl_heap_insert*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- HeapTupleHeader htup;
+ xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ HeapTupleHeader htup;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2363,7 +2368,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer((redo) ? true : false, reln,
+ buffer = XLogReadBuffer((redo) ? true : false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
return;
@@ -2375,9 +2380,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
- char tbuf[MaxTupleSize];
- xl_heap_header xlhdr;
- uint32 newlen;
+ char tbuf[MaxTupleSize];
+ xl_heap_header xlhdr;
+ uint32 newlen;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
@@ -2396,9 +2401,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_insert_redo: invalid max offset number");
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
- memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
- memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
- (char*)xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
+ memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
+ memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
+ (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@@ -2408,19 +2413,20 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_cmin = FirstCommandId;
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
-
- offnum = PageAddItem(page, (Item)htup, newlen, offnum,
- LP_USED | OverwritePageMode);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum,
+ LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_insert_redo: failed to add tuple");
PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID); /* prev sui */
+ PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
return;
}
/* undo insert */
- if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_insert_undo: bad page LSN");
elog(STOP, "heap_insert_undo: unimplemented");
@@ -2432,16 +2438,16 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
{
- xl_heap_update *xlrec = (xl_heap_update*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- bool samepage =
- (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
- Page page;
- OffsetNumber offnum;
- ItemId lp = NULL;
- HeapTupleHeader htup;
+ xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ bool samepage =
+ (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
if (!RelationIsValid(reln))
return;
@@ -2451,7 +2457,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
/* Deal with old tuple version */
- buffer = XLogReadBuffer(false, reln,
+ buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "heap_update_%sdo: no block", (redo) ? "re" : "un");
@@ -2470,7 +2476,8 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
goto newt;
}
}
- else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@@ -2487,7 +2494,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
if (move)
{
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
- htup->t_infomask &=
+ htup->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
htup->t_infomask |= HEAP_MOVED_OFF;
}
@@ -2496,7 +2503,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
htup->t_xmax = record->xl_xid;
htup->t_cmax = FirstCommandId;
htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
- HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+ HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
}
if (samepage)
goto newsame;
@@ -2514,11 +2521,11 @@ newt:;
if (redo &&
((record->xl_info & XLR_BKP_BLOCK_2) ||
- ((record->xl_info & XLR_BKP_BLOCK_1) && samepage)))
+ ((record->xl_info & XLR_BKP_BLOCK_1) && samepage)))
return;
- buffer = XLogReadBuffer((redo) ? true : false, reln,
- ItemPointerGetBlockNumber(&(xlrec->newtid)));
+ buffer = XLogReadBuffer((redo) ? true : false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->newtid)));
if (!BufferIsValid(buffer))
return;
@@ -2531,10 +2538,10 @@ newsame:;
if (redo)
{
- char tbuf[MaxTupleSize];
- xl_heap_header xlhdr;
- int hsize;
- uint32 newlen;
+ char tbuf[MaxTupleSize];
+ xl_heap_header xlhdr;
+ int hsize;
+ uint32 newlen;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
@@ -2557,9 +2564,9 @@ newsame:;
hsize += (2 * sizeof(TransactionId));
newlen = record->xl_len - hsize;
- memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
- memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
- (char*)xlrec + hsize, newlen);
+ memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
+ memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
+ (char *) xlrec + hsize, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@@ -2568,13 +2575,13 @@ newsame:;
if (move)
{
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
- memcpy(&(htup->t_xmax), (char*)xlrec + hsize, sizeof(TransactionId));
- memcpy(&(htup->t_xmin),
- (char*)xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
+ memcpy(&(htup->t_xmax), (char *) xlrec + hsize, sizeof(TransactionId));
+ memcpy(&(htup->t_xmin),
+ (char *) xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
htup->t_infomask = xlhdr.mask;
- htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
+ htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+ HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
htup->t_infomask |= HEAP_MOVED_IN;
}
else
@@ -2584,19 +2591,20 @@ newsame:;
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
}
-
- offnum = PageAddItem(page, (Item)htup, newlen, offnum,
- LP_USED | OverwritePageMode);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum,
+ LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_update_redo: failed to add tuple");
PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID); /* prev sui */
+ PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
return;
}
/* undo */
- if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_update_undo: bad new tuple page LSN");
elog(STOP, "heap_update_undo: unimplemented");
@@ -2606,19 +2614,19 @@ newsame:;
static void
_heap_unlock_tuple(void *data)
{
- xl_heaptid *xltid = (xl_heaptid*) data;
- Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId lp;
- HeapTupleHeader htup;
+ xl_heaptid *xltid = (xl_heaptid *) data;
+ Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp;
+ HeapTupleHeader htup;
if (!RelationIsValid(reln))
elog(STOP, "_heap_unlock_tuple: can't open relation");
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xltid->tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xltid->tid)));
if (!BufferIsValid(buffer))
elog(STOP, "_heap_unlock_tuple: can't read buffer");
@@ -2636,8 +2644,8 @@ _heap_unlock_tuple(void *data)
htup = (HeapTupleHeader) PageGetItem(page, lp);
- if (htup->t_xmax != GetCurrentTransactionId() ||
- htup->t_cmax != GetCurrentCommandId())
+ if (htup->t_xmax != GetCurrentTransactionId() ||
+ htup->t_cmax != GetCurrentCommandId())
elog(STOP, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
htup->t_infomask &= ~HEAP_XMAX_UNLOGGED;
htup->t_infomask |= HEAP_XMAX_INVALID;
@@ -2645,9 +2653,10 @@ _heap_unlock_tuple(void *data)
return;
}
-void heap_redo(XLogRecPtr lsn, XLogRecord *record)
+void
+heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
@@ -2664,9 +2673,10 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_redo: unknown op code %u", info);
}
-void heap_undo(XLogRecPtr lsn, XLogRecord *record)
+void
+heap_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
@@ -2687,46 +2697,50 @@ static void
out_target(char *buf, xl_heaptid *target)
{
sprintf(buf + strlen(buf), "node %u/%u; tid %u/%u",
- target->node.tblNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.tblNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
-
+
void
-heap_desc(char *buf, uint8 xl_info, char* rec)
+heap_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
{
- xl_heap_insert *xlrec = (xl_heap_insert*) rec;
+ xl_heap_insert *xlrec = (xl_heap_insert *) rec;
+
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_DELETE)
{
- xl_heap_delete *xlrec = (xl_heap_delete*) rec;
+ xl_heap_delete *xlrec = (xl_heap_delete *) rec;
+
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE)
{
- xl_heap_update *xlrec = (xl_heap_update*) rec;
+ xl_heap_update *xlrec = (xl_heap_update *) rec;
+
if (info == XLOG_HEAP_UPDATE)
strcat(buf, "update: ");
else
strcat(buf, "move: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_CLEAN)
{
- xl_heap_clean *xlrec = (xl_heap_clean*) rec;
+ xl_heap_clean *xlrec = (xl_heap_clean *) rec;
+
sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
+ xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
}
else
strcat(buf, "UNKNOWN");
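
Much of the heapam.c churn above sits inside the WAL-logging blocks, where heap_insert, heap_delete and log_heap_update each build a short chain of record fragments — rdata[0] for the fixed xl_heap_* header, later entries pointing at the tuple header and body — and then hand the chain to XLogInsert. The sketch below models only that chained-fragment idea; the struct and function names are invented and this is not the real xlog API:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* A toy version of the chained record-data fragments used by heap_insert. */
typedef struct ToyRecData
{
    const void        *data;   /* fragment payload */
    uint32_t           len;    /* payload length */
    struct ToyRecData *next;   /* next fragment, or NULL */
} ToyRecData;

/* Flatten the chain into one buffer, the way a WAL insert routine might. */
static uint32_t
toy_assemble(const ToyRecData *rdata, char *buf, uint32_t bufsize)
{
    uint32_t total = 0;

    for (; rdata != NULL; rdata = rdata->next)
    {
        if (total + rdata->len > bufsize)
            return 0;            /* record too big for caller's buffer */
        memcpy(buf + total, rdata->data, rdata->len);
        total += rdata->len;
    }
    return total;
}

int
main(void)
{
    /* Stand-ins for xl_heap_insert, xl_heap_header and the tuple body. */
    struct { uint32_t block; uint16_t offnum; } xlrec = {42, 7};
    struct { uint16_t natts; uint8_t hoff; } xlhdr = {3, 32};
    const char tupbody[] = "tuple-bits";

    ToyRecData rdata[3];
    char       buf[256];
    uint32_t   len;

    rdata[0].data = &xlrec;
    rdata[0].len = sizeof(xlrec);
    rdata[0].next = &rdata[1];
    rdata[1].data = &xlhdr;
    rdata[1].len = sizeof(xlhdr);
    rdata[1].next = &rdata[2];
    rdata[2].data = tupbody;
    rdata[2].len = sizeof(tupbody);
    rdata[2].next = NULL;

    len = toy_assemble(rdata, buf, sizeof(buf));
    printf("assembled %u bytes from 3 fragments\n", (unsigned) len);
    return 0;
}

Chaining fragments lets the caller log data that lives in several places (local structs plus the shared buffer) without first copying it into one contiguous record.
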
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 64e7262e868..94dedbf87b9 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.35 2001/01/24 19:42:48 momjian Exp $
+ * $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
#include "access/hio.h"
/*
- * RelationPutHeapTuple - place tuple at specified page
+ * RelationPutHeapTuple - place tuple at specified page
*
* !!! ELOG(ERROR) IS DISALLOWED HERE !!!
*
@@ -69,7 +69,7 @@ RelationPutHeapTuple(Relation relation,
*
* Returns (locked) buffer with free space >= given len.
*
- * Note that we use LockPage to lock relation for extension. We can
+ * Note that we use LockPage to lock relation for extension. We can
* do this as long as in all other places we use page-level locking
* for indices only. Alternatively, we could define pseudo-table as
* we do for transactions with XactLockTable.
@@ -92,7 +92,7 @@ RelationGetBufferForTuple(Relation relation, Size len)
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %lu, max size %ld",
- (unsigned long)len, MaxTupleSize);
+ (unsigned long) len, MaxTupleSize);
if (!relation->rd_myxactonly)
LockPage(relation, 0, ExclusiveLock);
@@ -140,13 +140,13 @@ RelationGetBufferForTuple(Relation relation, Size len)
{
/* We should not get here given the test at the top */
elog(STOP, "Tuple is too big: size %lu",
- (unsigned long)len);
+ (unsigned long) len);
}
}
if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock);
- return(buffer);
+ return (buffer);
}
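
The hio.c hunks touch RelationGetBufferForTuple, which first rejects tuples larger than MaxTupleSize and then looks for (or adds) a page with enough free space for the MAXALIGN'ed tuple. A back-of-the-envelope version of that fit test, using invented constants rather than the real page-layout macros:

#include <stdio.h>
#include <stddef.h>

/* Invented stand-ins for BLCKSZ, page overhead and MAXALIGN(). */
#define TOY_BLCKSZ         8192
#define TOY_PAGE_OVERHEAD  (24 + 4)     /* page header plus one line pointer */
#define TOY_MAXALIGN(len)  (((len) + 7) & ~((size_t) 7))
#define TOY_MAX_TUPLE      (TOY_BLCKSZ - TOY_PAGE_OVERHEAD)

/* Would a tuple of this length fit on a page with 'freespace' bytes left? */
static int
toy_tuple_fits(size_t len, size_t freespace)
{
    if (TOY_MAXALIGN(len) > TOY_MAX_TUPLE)
        return -1;               /* caller would elog(ERROR): tuple too big */
    return TOY_MAXALIGN(len) + TOY_PAGE_OVERHEAD <= freespace;
}

int
main(void)
{
    printf("200-byte tuple, 512 free: %d\n", toy_tuple_fits(200, 512));
    printf("500-byte tuple, 512 free: %d\n", toy_tuple_fits(500, 512));
    printf("9000-byte tuple:          %d\n", toy_tuple_fits(9000, TOY_BLCKSZ));
    return 0;
}
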
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index a3cf6ae7116..d0e60681e77 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.17 2001/02/15 20:57:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -41,12 +41,12 @@
#undef TOAST_DEBUG
-static void toast_delete(Relation rel, HeapTuple oldtup);
-static void toast_delete_datum(Relation rel, Datum value);
-static void toast_insert_or_update(Relation rel, HeapTuple newtup,
- HeapTuple oldtup);
-static Datum toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value);
-static varattrib *toast_fetch_datum(varattrib *attr);
+static void toast_delete(Relation rel, HeapTuple oldtup);
+static void toast_delete_datum(Relation rel, Datum value);
+static void toast_insert_or_update(Relation rel, HeapTuple newtup,
+ HeapTuple oldtup);
+static Datum toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value);
+static varattrib *toast_fetch_datum(varattrib *attr);
/* ----------
@@ -70,14 +70,14 @@ heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
/* ----------
* heap_tuple_fetch_attr -
*
- * Public entry point to get back a toasted value
+ * Public entry point to get back a toasted value
* external storage (possibly still in compressed format).
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_fetch_attr(varattrib *attr)
{
- varattrib *result;
+ varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -94,7 +94,7 @@ heap_tuple_fetch_attr(varattrib *attr)
* ----------
*/
result = attr;
- }
+ }
return result;
}
@@ -107,10 +107,10 @@ heap_tuple_fetch_attr(varattrib *attr)
* or external storage.
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_untoast_attr(varattrib *attr)
{
- varattrib *result;
+ varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -121,14 +121,14 @@ heap_tuple_untoast_attr(varattrib *attr)
* Fetch it from the toast heap and decompress.
* ----------
*/
- varattrib *tmp;
+ varattrib *tmp;
tmp = toast_fetch_datum(attr);
- result = (varattrib *)palloc(attr->va_content.va_external.va_rawsize
- + VARHDRSZ);
+ result = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_external.va_rawsize
- + VARHDRSZ;
- pglz_decompress((PGLZ_Header *)tmp, VARATT_DATA(result));
+ + VARHDRSZ;
+ pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(result));
pfree(tmp);
}
@@ -147,11 +147,11 @@ heap_tuple_untoast_attr(varattrib *attr)
* This is a compressed value inside of the main tuple
* ----------
*/
- result = (varattrib *)palloc(attr->va_content.va_compressed.va_rawsize
- + VARHDRSZ);
+ result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_compressed.va_rawsize
- + VARHDRSZ;
- pglz_decompress((PGLZ_Header *)attr, VARATT_DATA(result));
+ + VARHDRSZ;
+ pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
}
else
/* ----------
@@ -173,21 +173,21 @@ heap_tuple_untoast_attr(varattrib *attr)
static void
toast_delete(Relation rel, HeapTuple oldtup)
{
- TupleDesc tupleDesc;
- Form_pg_attribute *att;
- int numAttrs;
- int i;
- Datum value;
- bool isnull;
+ TupleDesc tupleDesc;
+ Form_pg_attribute *att;
+ int numAttrs;
+ int i;
+ Datum value;
+ bool isnull;
/* ----------
* Get the tuple descriptor, the number of and attribute
* descriptors.
* ----------
*/
- tupleDesc = rel->rd_att;
- numAttrs = tupleDesc->natts;
- att = tupleDesc->attrs;
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+ att = tupleDesc->attrs;
/* ----------
* Check for external stored attributes and delete them
@@ -216,35 +216,35 @@ toast_delete(Relation rel, HeapTuple oldtup)
static void
toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
{
- TupleDesc tupleDesc;
- Form_pg_attribute *att;
- int numAttrs;
- int i;
- bool old_isnull;
- bool new_isnull;
-
- bool need_change = false;
- bool need_free = false;
- bool need_delold = false;
- bool has_nulls = false;
-
- Size maxDataLen;
-
- char toast_action[MaxHeapAttributeNumber];
- char toast_nulls[MaxHeapAttributeNumber];
- Datum toast_values[MaxHeapAttributeNumber];
- int32 toast_sizes[MaxHeapAttributeNumber];
- bool toast_free[MaxHeapAttributeNumber];
- bool toast_delold[MaxHeapAttributeNumber];
+ TupleDesc tupleDesc;
+ Form_pg_attribute *att;
+ int numAttrs;
+ int i;
+ bool old_isnull;
+ bool new_isnull;
+
+ bool need_change = false;
+ bool need_free = false;
+ bool need_delold = false;
+ bool has_nulls = false;
+
+ Size maxDataLen;
+
+ char toast_action[MaxHeapAttributeNumber];
+ char toast_nulls[MaxHeapAttributeNumber];
+ Datum toast_values[MaxHeapAttributeNumber];
+ int32 toast_sizes[MaxHeapAttributeNumber];
+ bool toast_free[MaxHeapAttributeNumber];
+ bool toast_delold[MaxHeapAttributeNumber];
/* ----------
* Get the tuple descriptor, the number of and attribute
* descriptors and the location of the tuple values.
* ----------
*/
- tupleDesc = rel->rd_att;
- numAttrs = tupleDesc->natts;
- att = tupleDesc->attrs;
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+ att = tupleDesc->attrs;
/* ----------
* Then collect information about the values given
@@ -255,14 +255,14 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* 'x' incompressible, but OK to move off
* ----------
*/
- memset(toast_action, ' ', numAttrs * sizeof(char));
- memset(toast_nulls, ' ', numAttrs * sizeof(char));
- memset(toast_free, 0, numAttrs * sizeof(bool));
- memset(toast_delold, 0, numAttrs * sizeof(bool));
+ memset(toast_action, ' ', numAttrs * sizeof(char));
+ memset(toast_nulls, ' ', numAttrs * sizeof(char));
+ memset(toast_free, 0, numAttrs * sizeof(bool));
+ memset(toast_delold, 0, numAttrs * sizeof(bool));
for (i = 0; i < numAttrs; i++)
{
- varattrib *old_value;
- varattrib *new_value;
+ varattrib *old_value;
+ varattrib *new_value;
if (oldtup != NULL)
{
@@ -270,25 +270,25 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For UPDATE get the old and new values of this attribute
* ----------
*/
- old_value = (varattrib *)DatumGetPointer(
- heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
- toast_values[i] =
- heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
- new_value = (varattrib *)DatumGetPointer(toast_values[i]);
+ old_value = (varattrib *) DatumGetPointer(
+ heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
+ toast_values[i] =
+ heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
+ new_value = (varattrib *) DatumGetPointer(toast_values[i]);
/* ----------
* If the old value is an external stored one, check if it
* has changed so we have to delete it later.
* ----------
*/
- if (!old_isnull && att[i]->attlen == -1 &&
- VARATT_IS_EXTERNAL(old_value))
+ if (!old_isnull && att[i]->attlen == -1 &&
+ VARATT_IS_EXTERNAL(old_value))
{
if (new_isnull || !VARATT_IS_EXTERNAL(new_value) ||
- old_value->va_content.va_external.va_rowid !=
- new_value->va_content.va_external.va_rowid ||
- old_value->va_content.va_external.va_attno !=
- new_value->va_content.va_external.va_attno)
+ old_value->va_content.va_external.va_rowid !=
+ new_value->va_content.va_external.va_rowid ||
+ old_value->va_content.va_external.va_attno !=
+ new_value->va_content.va_external.va_attno)
{
/* ----------
* The old external store value isn't needed any
@@ -318,8 +318,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For INSERT simply get the new value
* ----------
*/
- toast_values[i] =
- heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
+ toast_values[i] =
+ heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
}
/* ----------
@@ -356,7 +356,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{
toast_values[i] = PointerGetDatum(heap_tuple_untoast_attr(
- (varattrib *)DatumGetPointer(toast_values[i])));
+ (varattrib *) DatumGetPointer(toast_values[i])));
toast_free[i] = true;
need_change = true;
need_free = true;
@@ -366,7 +366,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Remember the size of this attribute
* ----------
*/
- toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
+ toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
}
else
{
@@ -375,7 +375,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
toast_action[i] = 'p';
- toast_sizes[i] = att[i]->attlen;
+ toast_sizes[i] = att[i]->attlen;
}
}
@@ -384,7 +384,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*
* 1: Inline compress attributes with attstorage 'x'
* 2: Store attributes with attstorage 'x' or 'e' external
- * 3: Inline compress attributes with attstorage 'm'
+ * 3: Inline compress attributes with attstorage 'm'
* 4: Store attributes with attstorage 'm' external
* ----------
*/
@@ -398,12 +398,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen)
+ maxDataLen)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
- Datum new_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
+ Datum new_value;
/* ----------
* Search for the biggest yet uncompressed internal attribute
@@ -420,7 +420,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -431,24 +431,28 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Attempt to compress it inline
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- new_value = toast_compress_datum(old_value);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ new_value = toast_compress_datum(old_value);
if (DatumGetPointer(new_value) != NULL)
{
/* successful compression */
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_values[i] = new_value;
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
- need_change = true;
- need_free = true;
+ toast_values[i] = new_value;
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ need_change = true;
+ need_free = true;
}
else
{
- /* incompressible data, ignore on subsequent compression passes */
+
+ /*
+ * incompressible data, ignore on subsequent compression
+ * passes
+ */
toast_action[i] = 'x';
}
}
@@ -459,11 +463,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
+ maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
/* ----------
* Search for the biggest yet inlined attribute with
@@ -481,7 +485,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -492,21 +496,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Store this external
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- toast_action[i] = 'p';
- toast_values[i] = toast_save_datum(rel,
- newtup->t_data->t_oid,
- i + 1,
- toast_values[i]);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ toast_action[i] = 'p';
+ toast_values[i] = toast_save_datum(rel,
+ newtup->t_data->t_oid,
+ i + 1,
+ toast_values[i]);
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
need_change = true;
- need_free = true;
+ need_free = true;
}
/* ----------
@@ -515,12 +519,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen)
+ maxDataLen)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
- Datum new_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
+ Datum new_value;
/* ----------
* Search for the biggest yet uncompressed internal attribute
@@ -537,7 +541,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -548,24 +552,28 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Attempt to compress it inline
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- new_value = toast_compress_datum(old_value);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ new_value = toast_compress_datum(old_value);
if (DatumGetPointer(new_value) != NULL)
{
/* successful compression */
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_values[i] = new_value;
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
- need_change = true;
- need_free = true;
+ toast_values[i] = new_value;
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ need_change = true;
+ need_free = true;
}
else
{
- /* incompressible data, ignore on subsequent compression passes */
+
+ /*
+ * incompressible data, ignore on subsequent compression
+ * passes
+ */
toast_action[i] = 'x';
}
}
@@ -575,11 +583,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
+ maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
/* ----------
* Search for the biggest yet inlined attribute with
@@ -597,7 +605,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -608,21 +616,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Store this external
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- toast_action[i] = 'p';
- toast_values[i] = toast_save_datum(rel,
- newtup->t_data->t_oid,
- i + 1,
- toast_values[i]);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ toast_action[i] = 'p';
+ toast_values[i] = toast_save_datum(rel,
+ newtup->t_data->t_oid,
+ i + 1,
+ toast_values[i]);
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
need_change = true;
- need_free = true;
+ need_free = true;
}
/* ----------
@@ -632,10 +640,10 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*/
if (need_change)
{
- char *new_data;
- int32 new_len;
- MemoryContext oldcxt;
- HeapTupleHeader olddata;
+ char *new_data;
+ int32 new_len;
+ MemoryContext oldcxt;
+ HeapTupleHeader olddata;
/* ----------
* Calculate the new size of the tuple
@@ -662,24 +670,24 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
- newtup->t_data = (HeapTupleHeader)new_data;
+ newtup->t_data = (HeapTupleHeader) new_data;
newtup->t_len = new_len;
- DataFill((char *)(MAXALIGN((long)new_data +
- offsetof(HeapTupleHeaderData, t_bits) +
- ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
- tupleDesc,
- toast_values,
- toast_nulls,
- &(newtup->t_data->t_infomask),
- has_nulls ? newtup->t_data->t_bits : NULL);
+ DataFill((char *) (MAXALIGN((long) new_data +
+ offsetof(HeapTupleHeaderData, t_bits) +
+ ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
+ tupleDesc,
+ toast_values,
+ toast_nulls,
+ &(newtup->t_data->t_infomask),
+ has_nulls ? newtup->t_data->t_bits : NULL);
/* ----------
* In the case we modified a previously modified tuple again,
* free the memory from the previous run
* ----------
*/
- if ((char *)olddata != ((char *)newtup + HEAPTUPLESIZE))
+ if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
/* ----------
@@ -723,7 +731,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
Datum
toast_compress_datum(Datum value)
{
- varattrib *tmp;
+ varattrib *tmp;
tmp = (varattrib *) palloc(sizeof(PGLZ_Header) + VARATT_SIZE(value));
pglz_compress(VARATT_DATA(value), VARATT_SIZE(value) - VARHDRSZ,
@@ -754,45 +762,45 @@ toast_compress_datum(Datum value)
static Datum
toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
{
- Relation toastrel;
- Relation toastidx;
- HeapTuple toasttup;
- InsertIndexResult idxres;
- TupleDesc toasttupDesc;
- Datum t_values[3];
- char t_nulls[3];
- varattrib *result;
- char chunk_data[VARHDRSZ + TOAST_MAX_CHUNK_SIZE];
- int32 chunk_size;
- int32 chunk_seq = 0;
- char *data_p;
- int32 data_todo;
+ Relation toastrel;
+ Relation toastidx;
+ HeapTuple toasttup;
+ InsertIndexResult idxres;
+ TupleDesc toasttupDesc;
+ Datum t_values[3];
+ char t_nulls[3];
+ varattrib *result;
+ char chunk_data[VARHDRSZ + TOAST_MAX_CHUNK_SIZE];
+ int32 chunk_size;
+ int32 chunk_seq = 0;
+ char *data_p;
+ int32 data_todo;
/* ----------
* Create the varattrib reference
* ----------
*/
- result = (varattrib *)palloc(sizeof(varattrib));
+ result = (varattrib *) palloc(sizeof(varattrib));
- result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
+ result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
if (VARATT_IS_COMPRESSED(value))
{
result->va_header |= VARATT_FLAG_COMPRESSED;
- result->va_content.va_external.va_rawsize =
- ((varattrib *)value)->va_content.va_compressed.va_rawsize;
+ result->va_content.va_external.va_rawsize =
+ ((varattrib *) value)->va_content.va_compressed.va_rawsize;
}
else
result->va_content.va_external.va_rawsize = VARATT_SIZE(value);
-
- result->va_content.va_external.va_extsize =
- VARATT_SIZE(value) - VARHDRSZ;
- result->va_content.va_external.va_valueid = newoid();
- result->va_content.va_external.va_toastrelid =
- rel->rd_rel->reltoastrelid;
- result->va_content.va_external.va_toastidxid =
- rel->rd_rel->reltoastidxid;
- result->va_content.va_external.va_rowid = mainoid;
- result->va_content.va_external.va_attno = attno;
+
+ result->va_content.va_external.va_extsize =
+ VARATT_SIZE(value) - VARHDRSZ;
+ result->va_content.va_external.va_valueid = newoid();
+ result->va_content.va_external.va_toastrelid =
+ rel->rd_rel->reltoastrelid;
+ result->va_content.va_external.va_toastidxid =
+ rel->rd_rel->reltoastidxid;
+ result->va_content.va_external.va_rowid = mainoid;
+ result->va_content.va_external.va_attno = attno;
/* ----------
* Initialize constant parts of the tuple data
@@ -808,8 +816,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
* Get the data to process
* ----------
*/
- data_p = VARATT_DATA(value);
- data_todo = VARATT_SIZE(value) - VARHDRSZ;
+ data_p = VARATT_DATA(value);
+ data_todo = VARATT_SIZE(value) - VARHDRSZ;
/* ----------
* Open the toast relation
@@ -818,9 +826,9 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(rel->rd_rel->reltoastidxid);
-
+
/* ----------
- * Split up the item into chunks
+ * Split up the item into chunks
* ----------
*/
while (data_todo > 0)
@@ -848,8 +856,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
*/
heap_insert(toastrel, toasttup);
idxres = index_insert(toastidx, t_values, t_nulls,
- &(toasttup->t_self),
- toastrel);
+ &(toasttup->t_self),
+ toastrel);
if (idxres == NULL)
elog(ERROR, "Failed to insert index entry for TOAST tuple");
@@ -888,14 +896,14 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
static void
toast_delete_datum(Relation rel, Datum value)
{
- register varattrib *attr = (varattrib *)value;
- Relation toastrel;
- Relation toastidx;
- ScanKeyData toastkey;
- IndexScanDesc toastscan;
- HeapTupleData toasttup;
- RetrieveIndexResult indexRes;
- Buffer buffer;
+ register varattrib *attr = (varattrib *) value;
+ Relation toastrel;
+ Relation toastidx;
+ ScanKeyData toastkey;
+ IndexScanDesc toastscan;
+ HeapTupleData toasttup;
+ RetrieveIndexResult indexRes;
+ Buffer buffer;
if (!VARATT_IS_EXTERNAL(attr))
return;
@@ -904,8 +912,8 @@ toast_delete_datum(Relation rel, Datum value)
* Open the toast relation and it's index
* ----------
*/
- toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
- RowExclusiveLock);
+ toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
+ RowExclusiveLock);
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
/* ----------
@@ -913,10 +921,10 @@ toast_delete_datum(Relation rel, Datum value)
* ----------
*/
ScanKeyEntryInitialize(&toastkey,
- (bits16) 0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ (bits16) 0,
+ (AttrNumber) 1,
+ (RegProcedure) F_OIDEQ,
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/* ----------
* Read the chunks by index
@@ -961,36 +969,36 @@ toast_delete_datum(Relation rel, Datum value)
static varattrib *
toast_fetch_datum(varattrib *attr)
{
- Relation toastrel;
- Relation toastidx;
- ScanKeyData toastkey;
- IndexScanDesc toastscan;
- HeapTupleData toasttup;
- HeapTuple ttup;
- TupleDesc toasttupDesc;
- RetrieveIndexResult indexRes;
- Buffer buffer;
-
- varattrib *result;
- int32 ressize;
- int32 residx;
- int numchunks;
- Pointer chunk;
- bool isnull;
- int32 chunksize;
-
- char *chunks_found;
- char *chunks_expected;
+ Relation toastrel;
+ Relation toastidx;
+ ScanKeyData toastkey;
+ IndexScanDesc toastscan;
+ HeapTupleData toasttup;
+ HeapTuple ttup;
+ TupleDesc toasttupDesc;
+ RetrieveIndexResult indexRes;
+ Buffer buffer;
+
+ varattrib *result;
+ int32 ressize;
+ int32 residx;
+ int numchunks;
+ Pointer chunk;
+ bool isnull;
+ int32 chunksize;
+
+ char *chunks_found;
+ char *chunks_expected;
ressize = attr->va_content.va_external.va_extsize;
- numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
+ numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
- chunks_found = palloc(numchunks);
+ chunks_found = palloc(numchunks);
chunks_expected = palloc(numchunks);
- memset(chunks_found, 0, numchunks);
+ memset(chunks_found, 0, numchunks);
memset(chunks_expected, 1, numchunks);
- result = (varattrib *)palloc(ressize + VARHDRSZ);
+ result = (varattrib *) palloc(ressize + VARHDRSZ);
VARATT_SIZEP(result) = ressize + VARHDRSZ;
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
@@ -999,8 +1007,8 @@ toast_fetch_datum(varattrib *attr)
* Open the toast relation and it's index
* ----------
*/
- toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
- AccessShareLock);
+ toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
+ AccessShareLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
@@ -1009,10 +1017,10 @@ toast_fetch_datum(varattrib *attr)
* ----------
*/
ScanKeyEntryInitialize(&toastkey,
- (bits16) 0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ (bits16) 0,
+ (AttrNumber) 1,
+ (RegProcedure) F_OIDEQ,
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/* ----------
* Read the chunks by index
@@ -1049,7 +1057,7 @@ toast_fetch_datum(varattrib *attr)
elog(ERROR, "unexpected chunk number %d for toast value %d",
residx,
attr->va_content.va_external.va_valueid);
- if (residx < numchunks-1)
+ if (residx < numchunks - 1)
{
if (chunksize != TOAST_MAX_CHUNK_SIZE)
elog(ERROR, "unexpected chunk size %d in chunk %d for toast value %d",
@@ -1072,7 +1080,7 @@ toast_fetch_datum(varattrib *attr)
* Copy the data into proper place in our result
* ----------
*/
- memcpy(((char *)VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
+ memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk),
chunksize);
@@ -1085,7 +1093,7 @@ toast_fetch_datum(varattrib *attr)
*/
if (memcmp(chunks_found, chunks_expected, numchunks) != 0)
elog(ERROR, "not all toast chunks found for value %d",
- attr->va_content.va_external.va_valueid);
+ attr->va_content.va_external.va_valueid);
pfree(chunks_expected);
pfree(chunks_found);
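The chunk bookkeeping in toast_save_datum() and toast_fetch_datum() above reduces to simple arithmetic: the chunk count, the size of the last chunk, and the reassembly offset all follow from the raw size and TOAST_MAX_CHUNK_SIZE. A minimal sketch of just that arithmetic, with a made-up MAX_CHUNK constant and plain buffers standing in for the toast relation and its index:

    #include <string.h>

    #define MAX_CHUNK 2000                /* stand-in for TOAST_MAX_CHUNK_SIZE */

    /*
     * Split "size" bytes at "data" into chunks and copy them back into "out",
     * using the same chunk count, per-chunk size and offset math as the
     * save/fetch routines shown above.
     */
    static int
    chunk_roundtrip(const char *data, int size, char *out)
    {
        int     numchunks = ((size - 1) / MAX_CHUNK) + 1;
        int     residx;

        for (residx = 0; residx < numchunks; residx++)
        {
            /* every chunk but the last is full-size; the last takes the rest */
            int     chunksize = (residx < numchunks - 1) ?
                MAX_CHUNK : size - residx * MAX_CHUNK;

            memcpy(out + residx * MAX_CHUNK, data + residx * MAX_CHUNK, chunksize);
        }
        return numchunks;
    }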
diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c
index 1cc2c42c01c..3b016392068 100644
--- a/src/backend/access/index/istrat.c
+++ b/src/backend/access/index/istrat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.48 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.49 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -239,8 +239,8 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE:
- result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
- left, right));
+ result = !DatumGetBool(FunctionCall2(&entry->sk_func,
+ left, right));
break;
case SK_COMMUTE:
@@ -249,8 +249,8 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE | SK_COMMUTE:
- result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
- right, left));
+ result = !DatumGetBool(FunctionCall2(&entry->sk_func,
+ right, left));
break;
default:
@@ -263,6 +263,7 @@ StrategyTermEvaluate(StrategyTerm term,
return result;
}
+
#endif
/* ----------------
@@ -465,6 +466,7 @@ RelationInvokeStrategy(Relation relation,
}
+
#endif
/* ----------------
@@ -519,7 +521,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
if (!RegProcedureIsValid(entry->sk_procedure))
elog(ERROR,
- "OperatorRelationFillScanKeyEntry: no procedure for operator %u",
+ "OperatorRelationFillScanKeyEntry: no procedure for operator %u",
operatorObjectId);
fmgr_info(entry->sk_procedure, &entry->sk_func);
@@ -597,9 +599,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
}
if (cachesearch)
- {
ReleaseSysCache(tuple);
- }
else
{
heap_endscan(scan);
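The SK_NEGATE and SK_COMMUTE cases reindented above amount to two small tweaks around one comparison call: commute swaps the operands, negate inverts the result. A minimal sketch with an ordinary function pointer standing in for the fmgr call; the MY_* flags and cmp_fn type are invented for illustration:

    #include <stdbool.h>

    #define MY_NEGATE   0x1
    #define MY_COMMUTE  0x2

    typedef bool (*cmp_fn) (int left, int right);

    static bool
    evaluate_term(cmp_fn fn, int flags, int left, int right)
    {
        bool    result;

        if (flags & MY_COMMUTE)
            result = fn(right, left);   /* swap operands, as SK_COMMUTE does */
        else
            result = fn(left, right);

        if (flags & MY_NEGATE)
            result = !result;           /* invert, as SK_NEGATE does */

        return result;
    }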
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 435a7f72dde..fc85906d9b2 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.40 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.41 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
*
@@ -150,8 +150,8 @@ btoidvectorcmp(PG_FUNCTION_ARGS)
Datum
btabstimecmp(PG_FUNCTION_ARGS)
{
- AbsoluteTime a = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime b = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime a = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime b = PG_GETARG_ABSOLUTETIME(1);
if (AbsoluteTimeIsBefore(a, b))
PG_RETURN_INT32(-1);
@@ -236,9 +236,10 @@ bttextcmp(PG_FUNCTION_ARGS)
if (res == 0 && VARSIZE(a) != VARSIZE(b))
{
+
/*
- * The two strings are the same in the first len bytes,
- * and they are of different lengths.
+ * The two strings are the same in the first len bytes, and they
+ * are of different lengths.
*/
if (VARSIZE(a) < VARSIZE(b))
res = -1;
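The tie-break this hunk rewraps compares the shared prefix first and only then falls back to the lengths. A minimal sketch on counted strings, with memcmp standing in for the locale-aware comparison the real function performs:

    #include <string.h>

    /* Compare two counted strings: common prefix first, then shorter sorts first. */
    static int
    varlen_cmp(const char *a, int alen, const char *b, int blen)
    {
        int     minlen = (alen < blen) ? alen : blen;
        int     res = memcmp(a, b, minlen);

        if (res == 0 && alen != blen)
            res = (alen < blen) ? -1 : 1;
        return res;
    }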
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 325e585e3cc..f2112de6777 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.81 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.82 2001/03/22 03:59:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,23 +23,23 @@
typedef struct
{
/* context data for _bt_checksplitloc */
- Size newitemsz; /* size of new item to be inserted */
- bool non_leaf; /* T if splitting an internal node */
+ Size newitemsz; /* size of new item to be inserted */
+ bool non_leaf; /* T if splitting an internal node */
- bool have_split; /* found a valid split? */
+ bool have_split; /* found a valid split? */
/* these fields valid only if have_split is true */
- bool newitemonleft; /* new item on left or right of best split */
+ bool newitemonleft; /* new item on left or right of best split */
OffsetNumber firstright; /* best split point */
- int best_delta; /* best size delta so far */
+ int best_delta; /* best size delta so far */
} FindSplitData;
extern bool FixBTree;
-Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
+Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
static void _bt_fixtree(Relation rel, BlockNumber blkno);
-static void _bt_fixbranch(Relation rel, BlockNumber lblkno,
- BlockNumber rblkno, BTStack true_stack);
+static void _bt_fixbranch(Relation rel, BlockNumber lblkno,
+ BlockNumber rblkno, BTStack true_stack);
static void _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit);
static void _bt_fixup(Relation rel, Buffer buf);
static OffsetNumber _bt_getoff(Page page, BlockNumber blkno);
@@ -47,34 +47,34 @@ static OffsetNumber _bt_getoff(Page page, BlockNumber blkno);
static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
static TransactionId _bt_check_unique(Relation rel, BTItem btitem,
- Relation heapRel, Buffer buf,
- ScanKey itup_scankey);
+ Relation heapRel, Buffer buf,
+ ScanKey itup_scankey);
static InsertIndexResult _bt_insertonpg(Relation rel, Buffer buf,
- BTStack stack,
- int keysz, ScanKey scankey,
- BTItem btitem,
- OffsetNumber afteritem);
-static void _bt_insertuple(Relation rel, Buffer buf,
- Size itemsz, BTItem btitem, OffsetNumber newitemoff);
+ BTStack stack,
+ int keysz, ScanKey scankey,
+ BTItem btitem,
+ OffsetNumber afteritem);
+static void _bt_insertuple(Relation rel, Buffer buf,
+ Size itemsz, BTItem btitem, OffsetNumber newitemoff);
static Buffer _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
- OffsetNumber newitemoff, Size newitemsz,
- BTItem newitem, bool newitemonleft,
- OffsetNumber *itup_off, BlockNumber *itup_blkno);
+ OffsetNumber newitemoff, Size newitemsz,
+ BTItem newitem, bool newitemonleft,
+ OffsetNumber *itup_off, BlockNumber *itup_blkno);
static OffsetNumber _bt_findsplitloc(Relation rel, Page page,
- OffsetNumber newitemoff,
- Size newitemsz,
- bool *newitemonleft);
+ OffsetNumber newitemoff,
+ Size newitemsz,
+ bool *newitemonleft);
static void _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
- int leftfree, int rightfree,
- bool newitemonleft, Size firstrightitemsz);
+ int leftfree, int rightfree,
+ bool newitemonleft, Size firstrightitemsz);
static Buffer _bt_getstackbuf(Relation rel, BTStack stack, int access);
static void _bt_pgaddtup(Relation rel, Page page,
- Size itemsize, BTItem btitem,
- OffsetNumber itup_off, const char *where);
+ Size itemsize, BTItem btitem,
+ OffsetNumber itup_off, const char *where);
static bool _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum,
- int keysz, ScanKey scankey);
+ int keysz, ScanKey scankey);
-static Relation _xlheapRel; /* temporary hack */
+static Relation _xlheapRel; /* temporary hack */
/*
* _bt_doinsert() -- Handle insertion of a single btitem in the tree.
@@ -114,8 +114,8 @@ top:
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
/*
- * If we're not allowing duplicates, make sure the key isn't
- * already in the index. XXX this belongs somewhere else, likely
+ * If we're not allowing duplicates, make sure the key isn't already
+ * in the index. XXX this belongs somewhere else, likely
*/
if (index_is_unique)
{
@@ -134,7 +134,7 @@ top:
}
}
- _xlheapRel = heapRel; /* temporary hack */
+ _xlheapRel = heapRel; /* temporary hack */
/* do the insertion */
res = _bt_insertonpg(rel, buf, stack, natts, itup_scankey, btitem, 0);
@@ -150,7 +150,7 @@ top:
* _bt_check_unique() -- Check for violation of unique index constraint
*
* Returns NullTransactionId if there is no conflict, else an xact ID we
- * must wait for to see if it commits a conflicting tuple. If an actual
+ * must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just elog().
*/
static TransactionId
@@ -171,8 +171,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page);
/*
- * Find first item >= proposed new item. Note we could also get
- * a pointer to end-of-page here.
+ * Find first item >= proposed new item. Note we could also get a
+ * pointer to end-of-page here.
*/
offset = _bt_binsrch(rel, buf, natts, itup_scankey);
@@ -187,24 +187,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
BlockNumber nblkno;
/*
- * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
- * how we handling NULLs - and so we must not use _bt_compare
- * in real comparison, but only for ordering/finding items on
- * pages. - vadim 03/24/97
+ * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's how we
+ * handling NULLs - and so we must not use _bt_compare in real
+ * comparison, but only for ordering/finding items on pages. -
+ * vadim 03/24/97
*
- * make sure the offset points to an actual key
- * before trying to compare it...
+ * make sure the offset points to an actual key before trying to
+ * compare it...
*/
if (offset <= maxoff)
{
- if (! _bt_isequal(itupdesc, page, offset, natts, itup_scankey))
+ if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
/*
- * Have to check is inserted heap tuple deleted one (i.e.
- * just moved to another place by vacuum)! We only need to
- * do this once, but don't want to do it at all unless
- * we see equal tuples, so as not to slow down unequal case.
+ * Have to check is inserted heap tuple deleted one (i.e. just
+ * moved to another place by vacuum)! We only need to do this
+ * once, but don't want to do it at all unless we see equal
+ * tuples, so as not to slow down unequal case.
*/
if (chtup)
{
@@ -220,11 +220,11 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
cbti = (BTItem) PageGetItem(page, PageGetItemId(page, offset));
htup.t_self = cbti->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
- if (htup.t_data != NULL) /* it is a duplicate */
+ if (htup.t_data != NULL) /* it is a duplicate */
{
TransactionId xwait =
- (TransactionIdIsValid(SnapshotDirty->xmin)) ?
- SnapshotDirty->xmin : SnapshotDirty->xmax;
+ (TransactionIdIsValid(SnapshotDirty->xmin)) ?
+ SnapshotDirty->xmin : SnapshotDirty->xmax;
/*
* If this tuple is being updated by other transaction
@@ -238,6 +238,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/* Tell _bt_doinsert to wait... */
return xwait;
}
+
/*
* Otherwise we have a definite conflict.
*/
@@ -304,7 +305,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
* NOTE: if the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -358,16 +359,14 @@ _bt_insertonpg(Relation rel,
*/
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
- (unsigned long)itemsz,
- (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
+ (unsigned long) itemsz,
+ (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
/*
* Determine exactly where new item will go.
*/
if (afteritem > 0)
- {
newitemoff = afteritem + 1;
- }
else
{
/*----------
@@ -383,12 +382,12 @@ _bt_insertonpg(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
*/
- bool movedright = false;
+ bool movedright = false;
while (PageGetFreeSpace(page) < itemsz &&
!P_RIGHTMOST(lpageop) &&
@@ -396,7 +395,7 @@ _bt_insertonpg(Relation rel,
random() > (MAX_RANDOM_VALUE / 100))
{
/* step right one page */
- BlockNumber rblkno = lpageop->btpo_next;
+ BlockNumber rblkno = lpageop->btpo_next;
_bt_relbuf(rel, buf, BT_WRITE);
buf = _bt_getbuf(rel, rblkno, BT_WRITE);
@@ -404,10 +403,11 @@ _bt_insertonpg(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
movedright = true;
}
+
/*
- * Now we are on the right page, so find the insert position.
- * If we moved right at all, we know we should insert at the
- * start of the page, else must find the position by searching.
+ * Now we are on the right page, so find the insert position. If
+ * we moved right at all, we know we should insert at the start of
+ * the page, else must find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
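The loop above keeps stepping right only while the page is too full, a right sibling exists, the high key still matches, and random() clears MAX_RANDOM_VALUE / 100, i.e. with probability of roughly 0.99 per step. A minimal sketch of just that test, with MY_MAX_RANDOM and the boolean flags standing in for the real page checks:

    #include <stdlib.h>

    #define MY_MAX_RANDOM 0x7FFFFFFF    /* stand-in for MAX_RANDOM_VALUE */

    /* "Get tired" test: move right ~99 times out of 100 while the other conditions hold. */
    static int
    keep_moving_right(int page_is_full, int has_right_sibling, int highkey_equal)
    {
        return page_is_full && has_right_sibling && highkey_equal &&
            random() > (MY_MAX_RANDOM / 100);
    }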
@@ -418,9 +418,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its
- * result, so this comparison is correct even though we appear to
- * be accounting only for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
+ * so this comparison is correct even though we appear to be
+ * accounting only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@@ -468,7 +468,7 @@ _bt_insertonpg(Relation rel,
if (is_root)
{
- Buffer rootbuf;
+ Buffer rootbuf;
Assert(stack == (BTStack) NULL);
/* create a new root node and release the split buffers */
@@ -481,7 +481,7 @@ _bt_insertonpg(Relation rel,
{
InsertIndexResult newres;
BTItem new_item;
- BTStackData fakestack;
+ BTStackData fakestack;
BTItem ritem;
Buffer pbuf;
@@ -489,10 +489,11 @@ _bt_insertonpg(Relation rel,
if (stack == (BTStack) NULL)
{
elog(DEBUG, "btree: concurrent ROOT page split");
+
/*
* If root page splitter failed to create new root page
- * then old root' btpo_parent still points to metapage.
- * We have to fix root page in this case.
+ * then old root' btpo_parent still points to metapage. We
+ * have to fix root page in this case.
*/
if (BTreeInvalidParent(lpageop))
{
@@ -531,9 +532,9 @@ _bt_insertonpg(Relation rel,
* item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97
*
- * Interestingly, this means we didn't *really* need to stack
- * the parent key at all; all we really care about is the
- * saved block and offset as a starting point for our search...
+ * Interestingly, this means we didn't *really* need to stack the
+ * parent key at all; all we really care about is the saved
+ * block and offset as a starting point for our search...
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -583,25 +584,26 @@ formres:;
}
static void
-_bt_insertuple(Relation rel, Buffer buf,
- Size itemsz, BTItem btitem, OffsetNumber newitemoff)
+_bt_insertuple(Relation rel, Buffer buf,
+ Size itemsz, BTItem btitem, OffsetNumber newitemoff)
{
- Page page = BufferGetPage(buf);
- BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+ Page page = BufferGetPage(buf);
+ BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
START_CRIT_SECTION();
_bt_pgaddtup(rel, page, itemsz, btitem, newitemoff, "page");
/* XLOG stuff */
{
- xl_btree_insert xlrec;
- uint8 flag = XLOG_BTREE_INSERT;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
- BTItemData truncitem;
- xlrec.target.node = rel->rd_node;
+ xl_btree_insert xlrec;
+ uint8 flag = XLOG_BTREE_INSERT;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
+ BTItemData truncitem;
+
+ xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeInsert;
rdata[0].next = &(rdata[1]);
@@ -610,14 +612,14 @@ _bt_insertuple(Relation rel, Buffer buf,
{
truncitem = *btitem;
truncitem.bti_itup.t_info = sizeof(BTItemData);
- rdata[1].data = (char*)&truncitem;
+ rdata[1].data = (char *) &truncitem;
rdata[1].len = sizeof(BTItemData);
}
else
{
- rdata[1].data = (char*)btitem;
- rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
- (sizeof(BTItemData) - sizeof(IndexTupleData));
+ rdata[1].data = (char *) btitem;
+ rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
}
rdata[1].buffer = buf;
rdata[1].next = NULL;
@@ -700,8 +702,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* If the page we're splitting is not the rightmost page at its level
- * in the tree, then the first entry on the page is the high key
- * for the page. We need to copy that to the right half. Otherwise
+ * in the tree, then the first entry on the page is the high key for
+ * the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), all the items on the right half
* will be user data.
*/
@@ -779,13 +781,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
if (i < firstright)
{
_bt_pgaddtup(rel, leftpage, itemsz, item, leftoff,
- "left sibling");
+ "left sibling");
leftoff = OffsetNumberNext(leftoff);
}
else
{
_bt_pgaddtup(rel, rightpage, itemsz, item, rightoff,
- "right sibling");
+ "right sibling");
rightoff = OffsetNumberNext(rightoff);
}
}
@@ -812,11 +814,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
- * We have to grab the right sibling (if any) and fix the prev
- * pointer there. We are guaranteed that this is deadlock-free
- * since no other writer will be holding a lock on that page
- * and trying to move left, and all readers release locks on a page
- * before trying to fetch its neighbors.
+ * We have to grab the right sibling (if any) and fix the prev pointer
+ * there. We are guaranteed that this is deadlock-free since no other
+ * writer will be holding a lock on that page and trying to move left,
+ * and all readers release locks on a page before trying to fetch its
+ * neighbors.
*/
if (!P_RIGHTMOST(ropaque))
@@ -834,12 +836,12 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*/
START_CRIT_SECTION();
{
- xl_btree_split xlrec;
- int flag = (newitemonleft) ?
- XLOG_BTREE_SPLEFT : XLOG_BTREE_SPLIT;
- BlockNumber blkno;
- XLogRecPtr recptr;
- XLogRecData rdata[4];
+ xl_btree_split xlrec;
+ int flag = (newitemonleft) ?
+ XLOG_BTREE_SPLEFT : XLOG_BTREE_SPLIT;
+ BlockNumber blkno;
+ XLogRecPtr recptr;
+ XLogRecData rdata[4];
xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), *itup_blkno, *itup_off);
@@ -856,31 +858,33 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent);
BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev);
BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next);
- /*
- * Dirrect access to page is not good but faster - we should
+
+ /*
+ * Dirrect access to page is not good but faster - we should
* implement some new func in page API.
*/
- xlrec.leftlen = ((PageHeader)leftpage)->pd_special -
- ((PageHeader)leftpage)->pd_upper;
+ xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
+ ((PageHeader) leftpage)->pd_upper;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeSplit;
rdata[0].next = &(rdata[1]);
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)leftpage + ((PageHeader)leftpage)->pd_upper;
+ rdata[1].data = (char *) leftpage + ((PageHeader) leftpage)->pd_upper;
rdata[1].len = xlrec.leftlen;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = InvalidBuffer;
- rdata[2].data = (char*)rightpage + ((PageHeader)rightpage)->pd_upper;
- rdata[2].len = ((PageHeader)rightpage)->pd_special -
- ((PageHeader)rightpage)->pd_upper;
+ rdata[2].data = (char *) rightpage + ((PageHeader) rightpage)->pd_upper;
+ rdata[2].len = ((PageHeader) rightpage)->pd_special -
+ ((PageHeader) rightpage)->pd_upper;
rdata[2].next = NULL;
if (!P_RIGHTMOST(ropaque))
{
- BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
+ BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
+
sopaque->btpo_prev = BufferGetBlockNumber(rbuf);
rdata[2].next = &(rdata[3]);
@@ -942,7 +946,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -968,23 +972,23 @@ _bt_findsplitloc(Relation rel,
/* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
newitemsz += sizeof(ItemIdData);
state.newitemsz = newitemsz;
- state.non_leaf = ! P_ISLEAF(opaque);
+ state.non_leaf = !P_ISLEAF(opaque);
state.have_split = false;
/* Total free space available on a btree page, after fixed overhead */
leftspace = rightspace =
PageGetPageSize(page) - sizeof(PageHeaderData) -
MAXALIGN(sizeof(BTPageOpaqueData))
- + sizeof(ItemIdData);
+ +sizeof(ItemIdData);
/*
- * Finding the best possible split would require checking all the possible
- * split points, because of the high-key and left-key special cases.
- * That's probably more work than it's worth; instead, stop as soon as
- * we find a "good-enough" split, where good-enough is defined as an
- * imbalance in free space of no more than pagesize/16 (arbitrary...)
- * This should let us stop near the middle on most pages, instead of
- * plowing to the end.
+ * Finding the best possible split would require checking all the
+ * possible split points, because of the high-key and left-key special
+ * cases. That's probably more work than it's worth; instead, stop as
+ * soon as we find a "good-enough" split, where good-enough is defined
+ * as an imbalance in free space of no more than pagesize/16
+ * (arbitrary...) This should let us stop near the middle on most
+ * pages, instead of plowing to the end.
*/
goodenough = leftspace / 16;
@@ -1024,6 +1028,7 @@ _bt_findsplitloc(Relation rel,
*/
leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft);
+
/*
* Will the new item go to left or right of split?
*/
@@ -1051,10 +1056,10 @@ _bt_findsplitloc(Relation rel,
}
/*
- * I believe it is not possible to fail to find a feasible split,
- * but just in case ...
+ * I believe it is not possible to fail to find a feasible split, but
+ * just in case ...
*/
- if (! state.have_split)
+ if (!state.have_split)
elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
RelationGetRelationName(rel));
@@ -1071,6 +1076,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
int leftfree, int rightfree,
bool newitemonleft, Size firstrightitemsz)
{
+
/*
* Account for the new item on whichever side it is to be put.
*/
@@ -1078,19 +1084,21 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
leftfree -= (int) state->newitemsz;
else
rightfree -= (int) state->newitemsz;
+
/*
- * If we are not on the leaf level, we will be able to discard the
- * key data from the first item that winds up on the right page.
+ * If we are not on the leaf level, we will be able to discard the key
+ * data from the first item that winds up on the right page.
*/
if (state->non_leaf)
rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
+
/*
* If feasible split point, remember best delta.
*/
if (leftfree >= 0 && rightfree >= 0)
{
- int delta = leftfree - rightfree;
+ int delta = leftfree - rightfree;
if (delta < 0)
delta = -delta;
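The candidate bookkeeping reindented here is the core of the split search: a split point is feasible when both halves keep non-negative free space, and the best one minimizes the imbalance |leftfree - rightfree|, with the caller stopping early once that drops under pagesize/16. A minimal sketch of just this bookkeeping, leaving out the new-item and non-leaf adjustments shown above:

    #include <stdbool.h>

    typedef struct
    {
        bool    have_split;     /* found any feasible split yet? */
        int     best_delta;     /* smallest imbalance seen so far */
        int     best_point;     /* split point that produced it */
    } SplitState;

    static void
    check_split(SplitState *state, int point, int leftfree, int rightfree)
    {
        if (leftfree >= 0 && rightfree >= 0)
        {
            int     delta = leftfree - rightfree;

            if (delta < 0)
                delta = -delta;
            if (!state->have_split || delta < state->best_delta)
            {
                state->have_split = true;
                state->best_delta = delta;
                state->best_point = point;
            }
        }
    }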
@@ -1134,10 +1142,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page);
start = stack->bts_offset;
+
/*
- * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
- * case of concurrent ROOT page split. Also, watch out for
- * possibility that page has a high key now when it didn't before.
+ * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the case of
+ * concurrent ROOT page split. Also, watch out for possibility that
+ * page has a high key now when it didn't before.
*/
if (start < P_FIRSTDATAKEY(opaque))
start = P_FIRSTDATAKEY(opaque);
@@ -1159,11 +1168,15 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
return buf;
}
}
- /* by here, the item we're looking for moved right at least one page */
+
+ /*
+ * by here, the item we're looking for moved right at least one
+ * page
+ */
if (P_RIGHTMOST(opaque))
{
_bt_relbuf(rel, buf, access);
- return(InvalidBuffer);
+ return (InvalidBuffer);
}
blkno = opaque->btpo_next;
@@ -1190,27 +1203,27 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
*
* On entry, lbuf (the old root) and rbuf (its new peer) are write-
* locked. On exit, a new root page exists with entries for the
- * two new children, metapage is updated and unlocked/unpinned.
- * The new root buffer is returned to caller which has to unlock/unpin
- * lbuf, rbuf & rootbuf.
+ * two new children, metapage is updated and unlocked/unpinned.
+ * The new root buffer is returned to caller which has to unlock/unpin
+ * lbuf, rbuf & rootbuf.
*/
static Buffer
_bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
{
- Buffer rootbuf;
- Page lpage,
- rpage,
- rootpage;
- BlockNumber lbkno,
- rbkno;
- BlockNumber rootblknum;
- BTPageOpaque rootopaque;
- ItemId itemid;
- BTItem item;
- Size itemsz;
- BTItem new_item;
- Buffer metabuf;
- Page metapg;
+ Buffer rootbuf;
+ Page lpage,
+ rpage,
+ rootpage;
+ BlockNumber lbkno,
+ rbkno;
+ BlockNumber rootblknum;
+ BTPageOpaque rootopaque;
+ ItemId itemid;
+ BTItem item;
+ Size itemsz;
+ BTItem new_item;
+ Buffer metabuf;
+ Page metapg;
BTMetaPageData *metad;
/* get a new root page */
@@ -1236,9 +1249,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf);
/*
- * Make sure pages in old root level have valid parent links --- we will
- * need this in _bt_insertonpg() if a concurrent root split happens (see
- * README).
+ * Make sure pages in old root level have valid parent links --- we
+ * will need this in _bt_insertonpg() if a concurrent root split
+ * happens (see README).
*/
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
@@ -1264,8 +1277,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
pfree(new_item);
/*
- * Create downlink item for right page. The key for it is obtained from
- * the "high key" position in the left page.
+ * Create downlink item for right page. The key for it is obtained
+ * from the "high key" position in the left page.
*/
itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@@ -1285,26 +1298,26 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* XLOG stuff */
{
- xl_btree_newroot xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_btree_newroot xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.node = rel->rd_node;
xlrec.level = metad->btm_level;
BlockIdSet(&(xlrec.rootblk), rootblknum);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeNewroot;
rdata[0].next = &(rdata[1]);
- /*
- * Dirrect access to page is not good but faster - we should
+ /*
+ * Dirrect access to page is not good but faster - we should
* implement some new func in page API.
*/
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)rootpage + ((PageHeader) rootpage)->pd_upper;
- rdata[1].len = ((PageHeader)rootpage)->pd_special -
- ((PageHeader)rootpage)->pd_upper;
+ rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper;
+ rdata[1].len = ((PageHeader) rootpage)->pd_special -
+ ((PageHeader) rootpage)->pd_upper;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, rdata);
@@ -1325,7 +1338,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* write and let go of metapage buffer */
_bt_wrtbuf(rel, metabuf);
- return(rootbuf);
+ return (rootbuf);
}
/*
@@ -1339,24 +1352,31 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
Buffer
_bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
{
- Buffer rootbuf;
- BlockNumber rootblk;
- Page rootpage;
- XLogRecPtr rootLSN;
- Page oldrootpage = BufferGetPage(oldrootbuf);
- BTPageOpaque oldrootopaque = (BTPageOpaque)
- PageGetSpecialPointer(oldrootpage);
- Buffer buf, leftbuf, rightbuf;
- Page page, leftpage, rightpage;
- BTPageOpaque opaque, leftopaque, rightopaque;
- OffsetNumber newitemoff;
- BTItem btitem, ritem;
- Size itemsz;
-
- if (! P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
+ Buffer rootbuf;
+ BlockNumber rootblk;
+ Page rootpage;
+ XLogRecPtr rootLSN;
+ Page oldrootpage = BufferGetPage(oldrootbuf);
+ BTPageOpaque oldrootopaque = (BTPageOpaque)
+ PageGetSpecialPointer(oldrootpage);
+ Buffer buf,
+ leftbuf,
+ rightbuf;
+ Page page,
+ leftpage,
+ rightpage;
+ BTPageOpaque opaque,
+ leftopaque,
+ rightopaque;
+ OffsetNumber newitemoff;
+ BTItem btitem,
+ ritem;
+ Size itemsz;
+
+ if (!P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
elog(ERROR, "bt_fixroot: not valid old root page");
- /* Read right neighbor and create new root page*/
+ /* Read right neighbor and create new root page */
leftbuf = _bt_getbuf(rel, oldrootopaque->btpo_next, BT_WRITE);
leftpage = BufferGetPage(leftbuf);
leftopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
@@ -1377,26 +1397,26 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
*
* If concurrent process will split one of pages on this level then it
* will see either btpo_parent == metablock or btpo_parent == rootblk.
- * In first case it will give up its locks and walk to the leftmost page
- * (oldrootbuf) in _bt_fixup() - ie it will wait for us and let us
- * continue. In second case it will try to lock rootbuf keeping its locks
- * on buffers we already passed, also waiting for us. If we'll have to
- * unlock rootbuf (split it) and that process will have to split page
- * of new level we created (level of rootbuf) then it will wait while
- * we create upper level. Etc.
+ * In first case it will give up its locks and walk to the leftmost
+ * page (oldrootbuf) in _bt_fixup() - ie it will wait for us and let
+ * us continue. In second case it will try to lock rootbuf keeping its
+ * locks on buffers we already passed, also waiting for us. If we'll
+ * have to unlock rootbuf (split it) and that process will have to
+ * split page of new level we created (level of rootbuf) then it will
+ * wait while we create upper level. Etc.
*/
- while(! P_RIGHTMOST(leftopaque))
+ while (!P_RIGHTMOST(leftopaque))
{
rightbuf = _bt_getbuf(rel, leftopaque->btpo_next, BT_WRITE);
rightpage = BufferGetPage(rightbuf);
rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
/*
- * Update LSN & StartUpID of child page buffer to ensure that
- * it will be written on disk after flushing log record for new
- * root creation. Unfortunately, for the moment (?) we do not
- * log this operation and so possibly break our rule to log entire
- * page content on first after checkpoint modification.
+ * Update LSN & StartUpID of child page buffer to ensure that it
+ * will be written on disk after flushing log record for new root
+ * creation. Unfortunately, for the moment (?) we do not log this
+ * operation and so possibly break our rule to log entire page
+ * content on first after checkpoint modification.
*/
HOLD_INTERRUPTS();
rightopaque->btpo_parent = rootblk;
@@ -1416,17 +1436,17 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (PageGetFreeSpace(page) < itemsz)
{
- Buffer newbuf;
- OffsetNumber firstright;
- OffsetNumber itup_off;
- BlockNumber itup_blkno;
- bool newitemonleft;
+ Buffer newbuf;
+ OffsetNumber firstright;
+ OffsetNumber itup_off;
+ BlockNumber itup_blkno;
+ bool newitemonleft;
firstright = _bt_findsplitloc(rel, page,
- newitemoff, itemsz, &newitemonleft);
+ newitemoff, itemsz, &newitemonleft);
newbuf = _bt_split(rel, buf, firstright,
- newitemoff, itemsz, btitem, newitemonleft,
- &itup_off, &itup_blkno);
+ newitemoff, itemsz, btitem, newitemonleft,
+ &itup_off, &itup_blkno);
/* Keep lock on new "root" buffer ! */
if (buf != rootbuf)
_bt_relbuf(rel, buf, BT_WRITE);
@@ -1450,10 +1470,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
/*
* Here we hold locks on old root buffer, new root buffer we've
- * created with _bt_newroot() - rootbuf, - and buf we've used
- * for last insert ops - buf. If rootbuf != buf then we have to
- * create at least one more level. And if "release" is TRUE
- * then we give up oldrootbuf.
+ * created with _bt_newroot() - rootbuf, - and buf we've used for last
+ * insert ops - buf. If rootbuf != buf then we have to create at least
+ * one more level. And if "release" is TRUE then we give up
+ * oldrootbuf.
*/
if (release)
_bt_wrtbuf(rel, oldrootbuf);
@@ -1461,10 +1481,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (rootbuf != buf)
{
_bt_wrtbuf(rel, buf);
- return(_bt_fixroot(rel, rootbuf, true));
+ return (_bt_fixroot(rel, rootbuf, true));
}
- return(rootbuf);
+ return (rootbuf);
}
/*
@@ -1474,17 +1494,17 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
static void
_bt_fixtree(Relation rel, BlockNumber blkno)
{
- Buffer buf;
- Page page;
- BTPageOpaque opaque;
- BlockNumber pblkno;
+ Buffer buf;
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber pblkno;
- for ( ; ; )
+ for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- if (! P_LEFTMOST(opaque) || P_ISLEAF(opaque))
+ if (!P_LEFTMOST(opaque) || P_ISLEAF(opaque))
elog(ERROR, "bt_fixtree[%s]: invalid start page (need to recreate index)", RelationGetRelationName(rel));
pblkno = opaque->btpo_parent;
@@ -1534,25 +1554,26 @@ _bt_fixtree(Relation rel, BlockNumber blkno)
static void
_bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
{
- BlockNumber blkno = BufferGetBlockNumber(buf);
- Page page;
- BTPageOpaque opaque;
- BlockNumber cblkno[3];
- OffsetNumber coff[3];
- Buffer cbuf[3];
- Page cpage[3];
- BTPageOpaque copaque[3];
- BTItem btitem;
- int cidx, i;
- bool goodbye = false;
- char tbuf[BLCKSZ];
+ BlockNumber blkno = BufferGetBlockNumber(buf);
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber cblkno[3];
+ OffsetNumber coff[3];
+ Buffer cbuf[3];
+ Page cpage[3];
+ BTPageOpaque copaque[3];
+ BTItem btitem;
+ int cidx,
+ i;
+ bool goodbye = false;
+ char tbuf[BLCKSZ];
page = BufferGetPage(buf);
/* copy page to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_READ);
- page = (Page)tbuf;
+ page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Initialize first child data */
@@ -1564,20 +1585,21 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
cbuf[0] = _bt_getbuf(rel, cblkno[0], BT_READ);
cpage[0] = BufferGetPage(cbuf[0]);
copaque[0] = (BTPageOpaque) PageGetSpecialPointer(cpage[0]);
- if (P_LEFTMOST(opaque) && ! P_LEFTMOST(copaque[0]))
+ if (P_LEFTMOST(opaque) && !P_LEFTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: non-leftmost child page of leftmost parent (need to recreate index)", RelationGetRelationName(rel));
/* caller should take care and avoid this */
if (P_RIGHTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: invalid start child (need to recreate index)", RelationGetRelationName(rel));
- for ( ; ; )
+ for (;;)
{
+
/*
- * Read up to 2 more child pages and look for pointers
- * to them in *saved* parent page
+ * Read up to 2 more child pages and look for pointers to them in
+ * *saved* parent page
*/
coff[1] = coff[2] = InvalidOffsetNumber;
- for (cidx = 0; cidx < 2; )
+ for (cidx = 0; cidx < 2;)
{
cidx++;
cblkno[cidx] = (copaque[cidx - 1])->btpo_next;
@@ -1609,20 +1631,20 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (coff[1] == InvalidOffsetNumber ||
(cidx == 2 && coff[2] == InvalidOffsetNumber))
{
- Buffer newbuf;
- Page newpage;
- BTPageOpaque newopaque;
- BTItem ritem;
- Size itemsz;
- OffsetNumber newitemoff;
- BlockNumber parblk[3];
- BTStackData stack;
+ Buffer newbuf;
+ Page newpage;
+ BTPageOpaque newopaque;
+ BTItem ritem;
+ Size itemsz;
+ OffsetNumber newitemoff;
+ BlockNumber parblk[3];
+ BTStackData stack;
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
stack.bts_offset = InvalidOffsetNumber;
ItemPointerSet(&(stack.bts_btitem.bti_itup.t_tid),
- cblkno[0], P_HIKEY);
+ cblkno[0], P_HIKEY);
buf = _bt_getstackbuf(rel, &stack, BT_WRITE);
if (buf == InvalidBuffer)
@@ -1644,19 +1666,19 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (coff[i] != InvalidOffsetNumber)
{
if (parblk[i] == parblk[i - 1] &&
- coff[i] != coff[i - 1] + 1)
+ coff[i] != coff[i - 1] + 1)
elog(ERROR, "bt_fixlevel[%s]: invalid item order(2) (need to recreate index)", RelationGetRelationName(rel));
continue;
}
/* Have to check next page ? */
- if ((! P_RIGHTMOST(opaque)) &&
- coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
+ if ((!P_RIGHTMOST(opaque)) &&
+ coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
{
newbuf = _bt_getbuf(rel, opaque->btpo_next, BT_WRITE);
newpage = BufferGetPage(newbuf);
newopaque = (BTPageOpaque) PageGetSpecialPointer(newpage);
coff[i] = _bt_getoff(newpage, cblkno[i]);
- if (coff[i] != InvalidOffsetNumber) /* found ! */
+ if (coff[i] != InvalidOffsetNumber) /* found ! */
{
if (coff[i] != P_FIRSTDATAKEY(newopaque))
elog(ERROR, "bt_fixlevel[%s]: invalid item order(3) (need to recreate index)", RelationGetRelationName(rel));
@@ -1673,7 +1695,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
}
/* insert pointer */
ritem = (BTItem) PageGetItem(cpage[i - 1],
- PageGetItemId(cpage[i - 1], P_HIKEY));
+ PageGetItemId(cpage[i - 1], P_HIKEY));
btitem = _bt_formitem(&(ritem->bti_itup));
ItemPointerSet(&(btitem->bti_itup.t_tid), cblkno[i], P_HIKEY);
itemsz = IndexTupleDSize(btitem->bti_itup)
@@ -1684,16 +1706,16 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (PageGetFreeSpace(page) < itemsz)
{
- OffsetNumber firstright;
- OffsetNumber itup_off;
- BlockNumber itup_blkno;
- bool newitemonleft;
+ OffsetNumber firstright;
+ OffsetNumber itup_off;
+ BlockNumber itup_blkno;
+ bool newitemonleft;
firstright = _bt_findsplitloc(rel, page,
- newitemoff, itemsz, &newitemonleft);
+ newitemoff, itemsz, &newitemonleft);
newbuf = _bt_split(rel, buf, firstright,
- newitemoff, itemsz, btitem, newitemonleft,
- &itup_off, &itup_blkno);
+ newitemoff, itemsz, btitem, newitemonleft,
+ &itup_off, &itup_blkno);
/* what buffer we need in ? */
if (newitemonleft)
_bt_relbuf(rel, newbuf, BT_WRITE);
@@ -1720,7 +1742,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
/* copy page with pointer to cblkno[cidx] to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_WRITE);
- page = (Page)tbuf;
+ page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
}
@@ -1760,18 +1782,19 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
* but it doesn't guarantee full consistency of tree.)
*/
static void
-_bt_fixbranch(Relation rel, BlockNumber lblkno,
- BlockNumber rblkno, BTStack true_stack)
+_bt_fixbranch(Relation rel, BlockNumber lblkno,
+ BlockNumber rblkno, BTStack true_stack)
{
- BlockNumber blkno = true_stack->bts_blkno;
- BTStackData stack;
- BTPageOpaque opaque;
- Buffer buf, rbuf;
- Page page;
- OffsetNumber offnum;
+ BlockNumber blkno = true_stack->bts_blkno;
+ BTStackData stack;
+ BTPageOpaque opaque;
+ Buffer buf,
+ rbuf;
+ Page page;
+ OffsetNumber offnum;
true_stack = true_stack->bts_parent;
- for ( ; ; )
+ for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -1779,8 +1802,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
_bt_fixlevel(rel, buf, rblkno);
/*
- * Here parent level should have pointers for both
- * lblkno and rblkno and we have to find them.
+ * Here parent level should have pointers for both lblkno and
+ * rblkno and we have to find them.
*/
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
@@ -1792,7 +1815,7 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
page = BufferGetPage(buf);
offnum = _bt_getoff(page, rblkno);
- if (offnum != InvalidOffsetNumber) /* right pointer found */
+ if (offnum != InvalidOffsetNumber) /* right pointer found */
{
if (offnum <= stack.bts_offset)
elog(ERROR, "bt_fixbranch[%s]: invalid item order (need to recreate index)", RelationGetRelationName(rel));
@@ -1829,10 +1852,10 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
}
/*
- * Well, we are on the level that was root or unexistent when
- * we started traversing tree down. If btpo_parent is updated
- * then we'll use it to continue, else we'll fix/restore upper
- * levels entirely.
+ * Well, we are on the level that was root or unexistent when we
+ * started traversing tree down. If btpo_parent is updated then
+ * we'll use it to continue, else we'll fix/restore upper levels
+ * entirely.
*/
if (!BTreeInvalidParent(opaque))
{
@@ -1874,18 +1897,18 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
static void
_bt_fixup(Relation rel, Buffer buf)
{
- Page page;
- BTPageOpaque opaque;
- BlockNumber blkno;
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber blkno;
- for ( ; ; )
+ for (;;)
{
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * If someone else already created parent pages
- * then it's time for _bt_fixtree() to check upper
- * levels and fix them, if required.
+ * If someone else already created parent pages then it's time for
+ * _bt_fixtree() to check upper levels and fix them, if required.
*/
if (!BTreeInvalidParent(opaque))
{
@@ -1904,13 +1927,12 @@ _bt_fixup(Relation rel, Buffer buf)
}
/*
- * Ok, we are on the leftmost page, it's write locked
- * by us and its btpo_parent points to meta page - time
- * for _bt_fixroot().
+ * Ok, we are on the leftmost page, it's write locked by us and its
+ * btpo_parent points to meta page - time for _bt_fixroot().
*/
elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel));
- buf = _bt_fixroot(rel, buf, true);
- _bt_relbuf(rel, buf, BT_WRITE);
+ buf = _bt_fixroot(rel, buf, true);
+ _bt_relbuf(rel, buf, BT_WRITE);
return;
}
@@ -1918,23 +1940,23 @@ _bt_fixup(Relation rel, Buffer buf)
static OffsetNumber
_bt_getoff(Page page, BlockNumber blkno)
{
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
- OffsetNumber offnum = P_FIRSTDATAKEY(opaque);
- BlockNumber curblkno;
- ItemId itemid;
- BTItem item;
-
- for ( ; offnum <= maxoff; offnum++)
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
+ OffsetNumber offnum = P_FIRSTDATAKEY(opaque);
+ BlockNumber curblkno;
+ ItemId itemid;
+ BTItem item;
+
+ for (; offnum <= maxoff; offnum++)
{
itemid = PageGetItemId(page, offnum);
item = (BTItem) PageGetItem(page, itemid);
curblkno = ItemPointerGetBlockNumber(&(item->bti_itup.t_tid));
if (curblkno == blkno)
- return(offnum);
+ return (offnum);
}
- return(InvalidOffsetNumber);
+ return (InvalidOffsetNumber);
}
/*
@@ -1961,9 +1983,9 @@ _bt_pgaddtup(Relation rel,
const char *where)
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- BTItemData truncitem;
+ BTItemData truncitem;
- if (! P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
+ if (!P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 4c854fe7913..460d6c834c1 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.50 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.51 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -28,7 +28,7 @@
#include "miscadmin.h"
#include "storage/lmgr.h"
-extern bool FixBTree; /* comments in nbtree.c */
+extern bool FixBTree; /* comments in nbtree.c */
extern Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
/*
@@ -100,7 +100,7 @@ _bt_metapinit(Relation rel)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -178,20 +178,20 @@ _bt_getroot(Relation rel, int access)
/* XLOG stuff */
{
- xl_btree_newroot xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata;
+ xl_btree_newroot xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata;
xlrec.node = rel->rd_node;
xlrec.level = 1;
BlockIdSet(&(xlrec.rootblk), rootblkno);
rdata.buffer = InvalidBuffer;
- rdata.data = (char*)&xlrec;
+ rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot;
rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID,
- XLOG_BTREE_NEWROOT|XLOG_BTREE_LEAF, &rdata);
+ XLOG_BTREE_NEWROOT | XLOG_BTREE_LEAF, &rdata);
PageSetLSN(rootpage, recptr);
PageSetSUI(rootpage, ThisStartUpID);
@@ -212,6 +212,7 @@ _bt_getroot(Relation rel, int access)
}
else
{
+
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
@@ -232,30 +233,31 @@ _bt_getroot(Relation rel, int access)
/*
* Race condition: If the root page split between the time we looked
* at the metadata page and got the root buffer, then we got the wrong
- * buffer. Release it and try again.
+ * buffer. Release it and try again.
*/
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
- if (! P_ISROOT(rootopaque))
+ if (!P_ISROOT(rootopaque))
{
+
/*
- * It happened, but if root page splitter failed to create
- * new root page then we'll go in loop trying to call
- * _bt_getroot again and again.
+ * It happened, but if root page splitter failed to create new
+ * root page then we'll go in loop trying to call _bt_getroot
+ * again and again.
*/
if (FixBTree)
{
- Buffer newrootbuf;
+ Buffer newrootbuf;
-check_parent:;
- if (BTreeInvalidParent(rootopaque)) /* unupdated! */
+ check_parent:;
+ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_WRITE);
/* handle concurrent fix of root page */
- if (BTreeInvalidParent(rootopaque)) /* unupdated! */
+ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
elog(NOTICE, "bt_getroot[%s]: fixing root page", RelationGetRelationName(rel));
newrootbuf = _bt_fixroot(rel, rootbuf, true);
@@ -266,20 +268,22 @@ check_parent:;
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* New root might be splitted while changing lock */
if (P_ISROOT(rootopaque))
- return(rootbuf);
+ return (rootbuf);
/* rootbuf is read locked */
goto check_parent;
}
- else /* someone else already fixed root */
+ else
+/* someone else already fixed root */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
}
}
+
/*
- * Ok, here we have old root page with btpo_parent pointing
- * to upper level - check parent page because of there is
- * good chance that parent is root page.
+ * Ok, here we have old root page with btpo_parent pointing to
+ * upper level - check parent page because of there is good
+ * chance that parent is root page.
*/
newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ);
_bt_relbuf(rel, rootbuf, BT_READ);
@@ -287,7 +291,7 @@ check_parent:;
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (P_ISROOT(rootopaque))
- return(rootbuf);
+ return (rootbuf);
/* no luck -:( */
}
@@ -366,7 +370,7 @@ _bt_relbuf(Relation rel, Buffer buf, int access)
* and a pin on the buffer.
*
* NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here, the real I/O happens later. Since we can't persuade the
+ * dirty here, the real I/O happens later. Since we can't persuade the
* Unix kernel to schedule disk writes in a particular order, there's not
* much point in worrying about this. The most we can say is that all the
* writes will occur before commit.
@@ -468,14 +472,14 @@ _bt_pagedel(Relation rel, ItemPointer tid)
PageIndexTupleDelete(page, offno);
/* XLOG stuff */
{
- xl_btree_delete xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_btree_delete xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.target.node = rel->rd_node;
xlrec.target.tid = *tid;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].next = &(rdata[1]);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index f02dfcbd128..97d99da4fde 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.78 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.79 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,8 @@
bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead */
- /* of insertion build */
+
+ /* of insertion build */
/*
@@ -52,12 +53,14 @@ static void _bt_restscan(IndexScanDesc scan);
Datum
btbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -69,9 +72,11 @@ btbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -79,15 +84,16 @@ btbuild(PG_FUNCTION_ARGS)
BTItem btitem;
bool usefast;
Snapshot snapshot;
- TransactionId XmaxRecent;
+ TransactionId XmaxRecent;
+
/*
- * spool2 is needed only when the index is an unique index.
- * Dead tuples are put into spool2 instead of spool in
- * order to avoid uniqueness check.
+ * spool2 is needed only when the index is an unique index. Dead
+ * tuples are put into spool2 instead of spool in order to avoid
+ * uniqueness check.
*/
- BTSpool *spool2 = NULL;
+ BTSpool *spool2 = NULL;
bool tupleIsAlive;
- int dead_count;
+ int dead_count;
/* note that this is a new btree */
BuildingBtree = true;
@@ -103,7 +109,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifdef BTREE_BUILD_STATS
if (Show_btree_build_stats)
ResetUsage();
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/* initialize the btree index metadata page (if this is a new index) */
if (oldPred == NULL)
@@ -155,10 +161,10 @@ btbuild(PG_FUNCTION_ARGS)
if (usefast)
{
spool = _bt_spoolinit(index, indexInfo->ii_Unique);
+
/*
- * Different from spool,the uniqueness isn't checked
- * for spool2.
- */
+ * Different from spool,the uniqueness isn't checked for spool2.
+ */
if (indexInfo->ii_Unique)
spool2 = _bt_spoolinit(index, false);
}
@@ -187,12 +193,13 @@ btbuild(PG_FUNCTION_ARGS)
}
else
tupleIsAlive = true;
-
+
MemoryContextReset(econtext->ecxt_per_tuple_memory);
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -253,8 +260,7 @@ btbuild(PG_FUNCTION_ARGS)
* btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
* Sure, it's just rule for placing/finding items and no more -
* keytest'll return FALSE for a = 5 for items having 'a' isNULL.
- * Look at _bt_compare for how it works.
- * - vadim 03/23/97
+ * Look at _bt_compare for how it works. - vadim 03/23/97
*
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/
@@ -271,7 +277,8 @@ btbuild(PG_FUNCTION_ARGS)
{
if (tupleIsAlive || !spool2)
_bt_spool(btitem, spool);
- else /* dead tuples are put into spool2 */
+ else
+/* dead tuples are put into spool2 */
{
dead_count++;
_bt_spool(btitem, spool2);
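The branch rewrapped here routes each heap tuple to one of two spools: live tuples go to the main spool, which enforces uniqueness, dead ones to spool2, which skips that check. A minimal sketch with a toy counter standing in for BTSpool:

    typedef struct
    {
        int     ntuples;
    } Spool;    /* toy stand-in for BTSpool */

    static void
    route_tuple(int tuple_is_alive, Spool *spool, Spool *spool2, int *dead_count)
    {
        if (tuple_is_alive || spool2 == NULL)
            spool->ntuples++;       /* normal path: will be uniqueness-checked */
        else
        {
            (*dead_count)++;
            spool2->ntuples++;      /* dead tuple: bypass the uniqueness check */
        }
    }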
@@ -288,7 +295,7 @@ btbuild(PG_FUNCTION_ARGS)
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
- if (spool2 && !dead_count) /* spool2 was found to be unnecessary */
+ if (spool2 && !dead_count) /* spool2 was found to be unnecessary */
{
_bt_spooldestroy(spool2);
spool2 = NULL;
@@ -296,9 +303,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -322,7 +327,7 @@ btbuild(PG_FUNCTION_ARGS)
ShowUsage();
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/*
* Since we just counted the tuples in the heap, we update its stats
@@ -368,11 +373,11 @@ btbuild(PG_FUNCTION_ARGS)
Datum
btinsert(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
InsertIndexResult res;
BTItem btitem;
IndexTuple itup;
@@ -396,8 +401,8 @@ btinsert(PG_FUNCTION_ARGS)
Datum
btgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/*
@@ -408,10 +413,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
+
/*
* Restore scan position using heap TID returned by previous call
- * to btgettuple(). _bt_restscan() re-grabs the read lock on
- * the buffer, too.
+ * to btgettuple(). _bt_restscan() re-grabs the read lock on the
+ * buffer, too.
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@@ -421,8 +427,8 @@ btgettuple(PG_FUNCTION_ARGS)
/*
* Save heap TID to use it in _bt_restscan. Then release the read
- * lock on the buffer so that we aren't blocking other backends.
- * NOTE: we do keep the pin on the buffer!
+ * lock on the buffer so that we aren't blocking other backends. NOTE:
+ * we do keep the pin on the buffer!
*/
if (res)
{
@@ -461,11 +467,13 @@ btbeginscan(PG_FUNCTION_ARGS)
Datum
btrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
- bool fromEnd = PG_GETARG_BOOL(1);
+ bool fromEnd = PG_GETARG_BOOL(1);
+
#endif
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
BTScanOpaque so;
@@ -540,7 +548,7 @@ btmovescan(IndexScanDesc scan, Datum v)
Datum
btendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -578,7 +586,7 @@ btendscan(PG_FUNCTION_ARGS)
Datum
btmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -610,7 +618,7 @@ btmarkpos(PG_FUNCTION_ARGS)
Datum
btrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -640,8 +648,8 @@ btrestrpos(PG_FUNCTION_ARGS)
Datum
btdelete(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
/* adjust any active scans that will be affected by this deletion */
_bt_adjscans(rel, tid);
@@ -671,8 +679,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
- * Get back the read lock we were holding on the buffer.
- * (We still have a reference-count pin on it, though.)
+ * Get back the read lock we were holding on the buffer. (We still
+ * have a reference-count pin on it, though.)
*/
LockBuffer(buf, BT_READ);
@@ -689,13 +697,13 @@ _bt_restscan(IndexScanDesc scan)
if (!ItemPointerIsValid(&target))
{
ItemPointerSetOffsetNumber(current,
- OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
+ OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
return;
}
/*
- * The item we were on may have moved right due to insertions.
- * Find it again.
+ * The item we were on may have moved right due to insertions. Find it
+ * again.
*/
for (;;)
{
@@ -717,7 +725,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
- * By here, the item we're looking for moved right at least one page
+ * By here, the item we're looking for moved right at least one
+ * page
*/
if (P_RIGHTMOST(opaque))
elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!"
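Taken together, the _bt_restscan hunks describe a simple recovery walk. The sketch below paraphrases it; the per-page search body is elided in the diff, so the PageGetMaxOffsetNumber / ItemPointerEquals details here are assumptions about that elided code, not a quotation of it:

    /* Re-locate the saved heap TID after re-taking the read lock. */
    for (;;)
    {
        maxoff = PageGetMaxOffsetNumber(page);
        for (offnum = P_FIRSTDATAKEY(opaque);
             offnum <= maxoff;
             offnum = OffsetNumberNext(offnum))
        {
            item = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
            if (ItemPointerEquals(&(item->bti_itup.t_tid), &target))
            {
                ItemPointerSetOffsetNumber(current, offnum);
                return;             /* scan position restored */
            }
        }

        /* Not on this page: insertions pushed the item right.  Running
         * off the rightmost page would mean it vanished entirely. */
        if (P_RIGHTMOST(opaque))
            elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!");
        /* ...release this page, read opaque->btpo_next, and loop... */
    }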
@@ -742,14 +751,14 @@ _bt_restore_page(Page page, char *from, int len)
Size itemsz;
char *end = from + len;
- for ( ; from < end; )
+ for (; from < end;)
{
memcpy(&btdata, from, sizeof(BTItemData));
itemsz = IndexTupleDSize(btdata.bti_itup) +
- (sizeof(BTItemData) - sizeof(IndexTupleData));
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item) from, itemsz,
- FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "_bt_restore_page: can't add item to page");
from += itemsz;
}
@@ -758,20 +767,20 @@ _bt_restore_page(Page page, char *from, int len)
static void
btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_delete *xlrec;
- Relation reln;
- Buffer buffer;
- Page page;
+ xl_btree_delete *xlrec;
+ Relation reln;
+ Buffer buffer;
+ Page page;
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
- xlrec = (xl_btree_delete*) XLogRecGetData(record);
+ xlrec = (xl_btree_delete *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "btree_delete_redo: block unfound");
page = (Page) BufferGetPage(buffer);
@@ -796,21 +805,21 @@ btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_insert *xlrec;
- Relation reln;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
+ xl_btree_insert *xlrec;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
- xlrec = (xl_btree_insert*) XLogRecGetData(record);
+ xlrec = (xl_btree_insert *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@@ -825,11 +834,11 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
UnlockAndReleaseBuffer(buffer);
return;
}
- if (PageAddItem(page, (Item)((char*)xlrec + SizeOfBtreeInsert),
- record->xl_len - SizeOfBtreeInsert,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
- LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree_insert_redo: failed to add item");
+ if (PageAddItem(page, (Item) ((char *) xlrec + SizeOfBtreeInsert),
+ record->xl_len - SizeOfBtreeInsert,
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ LP_USED) == InvalidOffsetNumber)
+ elog(STOP, "btree_insert_redo: failed to add item");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -840,7 +849,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_insert_undo: bad page LSN");
- if (! P_ISLEAF(pageop))
+ if (!P_ISLEAF(pageop))
{
UnlockAndReleaseBuffer(buffer);
return;
@@ -855,14 +864,14 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_split *xlrec = (xl_btree_split*) XLogRecGetData(record);
- Relation reln;
- BlockNumber blkno;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
- char *op = (redo) ? "redo" : "undo";
- bool isleaf = (record->xl_info & XLOG_BTREE_LEAF);
+ xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
+ Relation reln;
+ BlockNumber blkno;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
+ char *op = (redo) ? "redo" : "undo";
+ bool isleaf = (record->xl_info & XLOG_BTREE_LEAF);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
@@ -870,7 +879,7 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
/* Left (original) sibling */
blkno = (onleft) ? ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
- BlockIdGetBlockNumber(&(xlrec->otherblk));
+ BlockIdGetBlockNumber(&(xlrec->otherblk));
buffer = XLogReadBuffer(false, reln, blkno);
if (!BufferIsValid(buffer))
elog(STOP, "btree_split_%s: lost left sibling", op);
@@ -892,13 +901,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_next = ItemPointerGetBlockNumber(&(xlrec->target.tid));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
- _bt_restore_page(page, (char*)xlrec + SizeOfBtreeSplit, xlrec->leftlen);
+ _bt_restore_page(page, (char *) xlrec + SizeOfBtreeSplit, xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
- else /* undo */
+ else
+/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad left sibling LSN");
@@ -906,8 +916,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
}
/* Right (new) sibling */
- blkno = (onleft) ? BlockIdGetBlockNumber(&(xlrec->otherblk)) :
- ItemPointerGetBlockNumber(&(xlrec->target.tid));
+ blkno = (onleft) ? BlockIdGetBlockNumber(&(xlrec->otherblk)) :
+ ItemPointerGetBlockNumber(&(xlrec->target.tid));
buffer = XLogReadBuffer((redo) ? true : false, reln, blkno);
if (!BufferIsValid(buffer))
elog(STOP, "btree_split_%s: lost right sibling", op);
@@ -922,21 +932,22 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
pageop->btpo_parent = BlockIdGetBlockNumber(&(xlrec->parentblk));
- pageop->btpo_prev = (onleft) ?
- ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
- BlockIdGetBlockNumber(&(xlrec->otherblk));
+ pageop->btpo_prev = (onleft) ?
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
+ BlockIdGetBlockNumber(&(xlrec->otherblk));
pageop->btpo_next = BlockIdGetBlockNumber(&(xlrec->rightblk));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page,
- (char*)xlrec + SizeOfBtreeSplit + xlrec->leftlen,
- record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
+ (char *) xlrec + SizeOfBtreeSplit + xlrec->leftlen,
+ record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
- else /* undo */
+ else
+/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad right sibling LSN");
@@ -965,9 +976,9 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
return;
}
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
- pageop->btpo_prev = (onleft) ?
- BlockIdGetBlockNumber(&(xlrec->otherblk)) :
- ItemPointerGetBlockNumber(&(xlrec->target.tid));
+ pageop->btpo_prev = (onleft) ?
+ BlockIdGetBlockNumber(&(xlrec->otherblk)) :
+ ItemPointerGetBlockNumber(&(xlrec->target.tid));
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
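The xlog hunks above (delete, insert, split) all follow one skeleton, and condensing it makes the per-record differences easier to see. Everything below is lifted from the hunks themselves except the "apply" placeholder; the undo-side error paths and the page-LSN checks that live in elided context are omitted:

    xlrec = (xl_btree_insert *) XLogRecGetData(record);
    reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
    if (!RelationIsValid(reln))
        return;

    buffer = XLogReadBuffer(false, reln,
                            ItemPointerGetBlockNumber(&(xlrec->target.tid)));
    if (!BufferIsValid(buffer))
        elog(STOP, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
    page = (Page) BufferGetPage(buffer);

    /* ...apply or undo the logged change on "page"... */

    PageSetLSN(page, lsn);
    PageSetSUI(page, ThisStartUpID);
    UnlockAndWriteBuffer(buffer);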
@@ -977,14 +988,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_newroot *xlrec = (xl_btree_newroot*) XLogRecGetData(record);
- Relation reln;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
- Buffer metabuf;
- Page metapg;
- BTMetaPageData md;
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) XLogRecGetData(record);
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
+ Buffer metabuf;
+ Page metapg;
+ BTMetaPageData md;
if (!redo)
return;
@@ -1011,8 +1022,8 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfBtreeNewroot)
_bt_restore_page(page,
- (char*)xlrec + SizeOfBtreeNewroot,
- record->xl_len - SizeOfBtreeNewroot);
+ (char *) xlrec + SizeOfBtreeNewroot,
+ record->xl_len - SizeOfBtreeNewroot);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -1037,7 +1048,7 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
void
btree_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_DELETE)
@@ -1045,9 +1056,9 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(true, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
- btree_xlog_split(true, false, lsn, record); /* new item on the right */
+ btree_xlog_split(true, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
- btree_xlog_split(true, true, lsn, record); /* new item on the left */
+ btree_xlog_split(true, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(true, lsn, record);
else
@@ -1057,7 +1068,7 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
void
btree_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_DELETE)
@@ -1065,9 +1076,9 @@ btree_undo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(false, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
- btree_xlog_split(false, false, lsn, record);/* new item on the right */
+ btree_xlog_split(false, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
- btree_xlog_split(false, true, lsn, record); /* new item on the left */
+ btree_xlog_split(false, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(false, lsn, record);
else
@@ -1078,45 +1089,49 @@ static void
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "node %u/%u; tid %u/%u",
- target->node.tblNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.tblNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
-
+
void
-btree_desc(char *buf, uint8 xl_info, char* rec)
+btree_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_INSERT)
{
- xl_btree_insert *xlrec = (xl_btree_insert*) rec;
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_DELETE)
{
- xl_btree_delete *xlrec = (xl_btree_delete*) rec;
+ xl_btree_delete *xlrec = (xl_btree_delete *) rec;
+
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT)
{
- xl_btree_split *xlrec = (xl_btree_split*) rec;
- sprintf(buf + strlen(buf), "split(%s): ",
- (info == XLOG_BTREE_SPLIT) ? "right" : "left");
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
+
+ sprintf(buf + strlen(buf), "split(%s): ",
+ (info == XLOG_BTREE_SPLIT) ? "right" : "left");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- BlockIdGetBlockNumber(&xlrec->otherblk),
- BlockIdGetBlockNumber(&xlrec->rightblk));
+ BlockIdGetBlockNumber(&xlrec->otherblk),
+ BlockIdGetBlockNumber(&xlrec->rightblk));
}
else if (info == XLOG_BTREE_NEWROOT)
{
- xl_btree_newroot *xlrec = (xl_btree_newroot*) rec;
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
+
sprintf(buf + strlen(buf), "root: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- BlockIdGetBlockNumber(&xlrec->rootblk));
+ xlrec->node.tblNode, xlrec->node.relNode,
+ BlockIdGetBlockNumber(&xlrec->rootblk));
}
else
strcat(buf, "UNKNOWN");
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 6f41ab9c847..d8b8e0682a0 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.63 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,20 +32,20 @@ static RetrieveIndexResult _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
_bt_search(Relation rel, int keysz, ScanKey scankey,
Buffer *bufP, int access)
{
- BTStack stack_in = NULL;
+ BTStack stack_in = NULL;
/* Get the root page to start with */
*bufP = _bt_getroot(rel, access);
/* If index is empty and access = BT_READ, no root page is created. */
- if (! BufferIsValid(*bufP))
+ if (!BufferIsValid(*bufP))
return (BTStack) NULL;
/* Loop iterates once per level descended in the tree */
@@ -79,13 +79,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
- * We need to save the bit image of the index entry we chose in the
- * parent page on a stack. In case we split the tree, we'll use this
- * bit image to figure out what our real parent page is, in case the
- * parent splits while we're working lower in the tree. See the paper
- * by Lehman and Yao for how this is detected and handled. (We use the
- * child link to disambiguate duplicate keys in the index -- Lehman
- * and Yao disallow duplicate keys.)
+ * We need to save the bit image of the index entry we chose in
+ * the parent page on a stack. In case we split the tree, we'll
+ * use this bit image to figure out what our real parent page is,
+ * in case the parent splits while we're working lower in the
+ * tree. See the paper by Lehman and Yao for how this is detected
+ * and handled. (We use the child link to disambiguate duplicate
+ * keys in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -98,9 +98,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getbuf(rel, blkno, BT_READ);
/*
- * Race -- the page we just grabbed may have split since we read its
- * pointer in the parent. If it has, we may need to move right to its
- * new sibling. Do that.
+ * Race -- the page we just grabbed may have split since we read
+ * its pointer in the parent. If it has, we may need to move
+ * right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@@ -127,7 +127,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*
* On entry, we have the buffer pinned and a lock of the proper type.
* If we move right, we release the buffer and lock and acquire the
- * same on the right sibling. Return value is the buffer we stop at.
+ * same on the right sibling. Return value is the buffer we stop at.
*/
Buffer
_bt_moveright(Relation rel,
@@ -153,7 +153,7 @@ _bt_moveright(Relation rel,
_bt_compare(rel, keysz, scankey, page, P_HIKEY) > 0)
{
/* step right one page */
- BlockNumber rblkno = opaque->btpo_next;
+ BlockNumber rblkno = opaque->btpo_next;
_bt_relbuf(rel, buf, access);
buf = _bt_getbuf(rel, rblkno, access);
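The hunk above shows the core of the Lehman and Yao "move right" rule referenced throughout this file: if the page's high key is still smaller than the scan key, the page split after its pointer was read in the parent, and the wanted key now lives on a right sibling. A condensed sketch; the loop wrapper and the two lines after _bt_getbuf are assumed, the rest appears in the hunk:

    while (!P_RIGHTMOST(opaque) &&
           _bt_compare(rel, keysz, scankey, page, P_HIKEY) > 0)
    {
        /* step right one page */
        BlockNumber rblkno = opaque->btpo_next;

        _bt_relbuf(rel, buf, access);
        buf = _bt_getbuf(rel, rblkno, access);
        page = BufferGetPage(buf);
        opaque = (BTPageOpaque) PageGetSpecialPointer(page);
    }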
@@ -184,7 +184,7 @@ _bt_moveright(Relation rel,
* find all leaf keys >= given scankey.
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -299,7 +299,7 @@ _bt_compare(Relation rel,
* Force result ">" if target item is first data item on an internal
* page --- see NOTE above.
*/
- if (! P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
+ if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
return 1;
btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
@@ -327,7 +327,7 @@ _bt_compare(Relation rel,
datum = index_getattr(itup, entry->sk_attno, itupdesc, &isNull);
/* see comments about NULLs handling in btbuild */
- if (entry->sk_flags & SK_ISNULL) /* key is NULL */
+ if (entry->sk_flags & SK_ISNULL) /* key is NULL */
{
if (isNull)
result = 0; /* NULL "=" NULL */
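The "comments about NULLs handling in btbuild" referred to here are the ones earlier in this diff: for placement purposes NULLs sort greater than NOT_NULLs, and NULL equals NULL. Spelled out as the three-way result _bt_compare needs; only the first branch is visible in this hunk, the others follow from that stated rule:

    if (entry->sk_flags & SK_ISNULL)    /* scan key is NULL */
    {
        if (isNull)
            result = 0;                 /* NULL "=" NULL */
        else
            result = 1;                 /* NULL ">" NOT_NULL */
    }
    else if (isNull)                    /* key NOT_NULL, item NULL */
        result = -1;                    /* NOT_NULL "<" NULL */
    else
    {
        /* both non-NULL: fall through to the datatype's own three-way
         * comparison support proc (call not shown in this hunk) */
    }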
@@ -458,10 +458,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so);
/*
- * Quit now if _bt_orderkeys() discovered that the scan keys can
- * never be satisfied (eg, x == 1 AND x > 2).
+ * Quit now if _bt_orderkeys() discovered that the scan keys can never
+ * be satisfied (eg, x == 1 AND x > 2).
*/
- if (! so->qual_ok)
+ if (!so->qual_ok)
return (RetrieveIndexResult) NULL;
/*
@@ -484,17 +484,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
strat = _bt_getstrat(rel, attno,
so->keyData[i].sk_procedure);
+
/*
* Can we use this key as a starting boundary for this attr?
*
- * We can use multiple keys if they look like, say, = >= =
- * but we have to stop after accepting a > or < boundary.
+ * We can use multiple keys if they look like, say, = >= = but we
+ * have to stop after accepting a > or < boundary.
*/
if (strat == strat_total ||
strat == BTEqualStrategyNumber)
- {
nKeyIs[keysCount++] = i;
- }
else if (ScanDirectionIsBackward(dir) &&
(strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber))
@@ -536,7 +535,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
for (i = 0; i < keysCount; i++)
{
j = nKeyIs[i];
- /* _bt_orderkeys disallows it, but it's place to add some code later */
+
+ /*
+ * _bt_orderkeys disallows it, but it's place to add some code
+ * later
+ */
if (so->keyData[j].sk_flags & SK_ISNULL)
{
pfree(nKeyIs);
@@ -562,7 +565,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* don't need to keep the stack around... */
_bt_freestack(stack);
- if (! BufferIsValid(buf))
+ if (!BufferIsValid(buf))
{
/* Only get here if index is completely empty */
ItemPointerSetInvalid(current);
@@ -601,6 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
switch (strat_total)
{
case BTLessStrategyNumber:
+
/*
* Back up one to arrive at last item < scankey
*/
@@ -612,6 +616,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTLessEqualStrategyNumber:
+
/*
* We need to find the last item <= scankey, so step forward
* till we find one > scankey, then step back one.
@@ -645,9 +650,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTEqualStrategyNumber:
+
/*
- * Make sure we are on the first equal item; might have to step
- * forward if currently at end of page.
+ * Make sure we are on the first equal item; might have to
+ * step forward if currently at end of page.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@@ -661,7 +667,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
result = _bt_compare(rel, keysCount, scankeys, page, offnum);
if (result != 0)
- goto nomatches; /* no equal items! */
+ goto nomatches; /* no equal items! */
+
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@@ -685,6 +692,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterEqualStrategyNumber:
+
/*
* We want the first item >= scankey, which is where we are...
* unless we're not anywhere at all...
@@ -700,9 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterStrategyNumber:
+
/*
- * We want the first item > scankey, so make sure we are on
- * an item and then step over any equal items.
+ * We want the first item > scankey, so make sure we are on an
+ * item and then step over any equal items.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
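Because the hunks show only fragments of each case, the positioning rules of this switch are easier to read as a comment-level summary; the actual _bt_step and _bt_compare calls are elided:

    switch (strat_total)
    {
        case BTLessStrategyNumber:
            /* located first item >= key; back up one to get last item < key */
            break;
        case BTLessEqualStrategyNumber:
            /* step forward past all items <= key, then back up one */
            break;
        case BTEqualStrategyNumber:
            /* must be on an item = key, else there are no matches; for a
             * backward scan, advance to the last equal item instead */
            break;
        case BTGreaterEqualStrategyNumber:
            /* the descent already left us on the first item >= key */
            break;
        case BTGreaterStrategyNumber:
            /* make sure we are on an item, then step over items = key */
            break;
    }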
@@ -850,11 +859,12 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
* If the adjacent page just split, then we have to walk
- * right to find the block that's now adjacent to where
- * we were. Because pages only split right, we don't have
- * to worry about this failing to terminate.
+ * right to find the block that's now adjacent to where we
+ * were. Because pages only split right, we don't have to
+ * worry about this failing to terminate.
*/
while (opaque->btpo_next != obknum)
{
@@ -912,12 +922,12 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
/*
* Scan down to the leftmost or rightmost leaf page. This is a
- * simplified version of _bt_search(). We don't maintain a stack
+ * simplified version of _bt_search(). We don't maintain a stack
* since we know we won't need it.
*/
buf = _bt_getroot(rel, BT_READ);
- if (! BufferIsValid(buf))
+ if (!BufferIsValid(buf))
{
/* empty index... */
ItemPointerSetInvalid(current);
@@ -981,7 +991,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
- if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */
+ if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
+ * page */
start = P_FIRSTDATAKEY(opaque);
}
else
@@ -995,8 +1006,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf;
/*
- * Left/rightmost page could be empty due to deletions,
- * if so step till we find a nonempty page.
+ * Left/rightmost page could be empty due to deletions, if so step
+ * till we find a nonempty page.
*/
if (start > maxoff)
{
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index e9224a485af..2aca6bf7cfc 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -6,7 +6,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.59 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -73,11 +73,13 @@ typedef struct BTPageState
{
Buffer btps_buf; /* current buffer & page */
Page btps_page;
- BTItem btps_minkey; /* copy of minimum key (first item) on page */
+ BTItem btps_minkey; /* copy of minimum key (first item) on
+ * page */
OffsetNumber btps_lastoff; /* last item offset loaded */
int btps_level; /* tree level (0 = leaf) */
- Size btps_full; /* "full" if less than this much free space */
- struct BTPageState *btps_next; /* link to parent level, if any */
+ Size btps_full; /* "full" if less than this much free
+ * space */
+ struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
@@ -92,7 +94,7 @@ static void _bt_blnewpage(Relation index, Buffer *buf, Page *page, int flags);
static BTPageState *_bt_pagestate(Relation index, int flags, int level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
- BTItem btitem, OffsetNumber itup_off);
+ BTItem btitem, OffsetNumber itup_off);
static void _bt_buildadd(Relation index, BTPageState *state, BTItem bti);
static void _bt_uppershutdown(Relation index, BTPageState *state);
static void _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2);
@@ -162,7 +164,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
ShowUsage();
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
tuplesort_performsort(btspool->sortstate);
if (btspool2)
@@ -269,9 +271,9 @@ _bt_sortaddtup(Page page,
OffsetNumber itup_off)
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- BTItemData truncitem;
+ BTItemData truncitem;
- if (! P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
+ if (!P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
@@ -290,7 +292,7 @@ _bt_sortaddtup(Page page,
* We must be careful to observe the page layout conventions of nbtsearch.c:
* - rightmost pages start data items at P_HIKEY instead of at P_FIRSTKEY.
* - on non-leaf pages, the key portion of the first item need not be
- * stored, we should store only the link.
+ * stored, we should store only the link.
*
* A leaf page being built looks like:
*
@@ -347,11 +349,12 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
*/
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
- (unsigned long)btisz,
- (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
+ (unsigned long) btisz,
+ (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
if (pgspc < btisz || pgspc < state->btps_full)
{
+
/*
* Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out.
@@ -388,9 +391,9 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
- * Link the old buffer into its parent, using its minimum key.
- * If we don't have a parent, we have to create one;
- * this adds a new btree level.
+ * Link the old buffer into its parent, using its minimum key. If
+ * we don't have a parent, we have to create one; this adds a new
+ * btree level.
*/
if (state->btps_next == (BTPageState *) NULL)
{
@@ -405,8 +408,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* Save a copy of the minimum key for the new page. We have to
- * copy it off the old page, not the new one, in case we are
- * not at leaf level.
+ * copy it off the old page, not the new one, in case we are not
+ * at leaf level.
*/
state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@@ -414,13 +417,13 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
* Set the sibling links for both pages, and parent links too.
*
* It's not necessary to set the parent link at all, because it's
- * only used for handling concurrent root splits, but we may as well
- * do it as a debugging aid. Note we set new page's link as well
- * as old's, because if the new page turns out to be the last of
- * the level, _bt_uppershutdown won't change it. The links may be
- * out of date by the time the build finishes, but that's OK; they
- * need only point to a left-sibling of the true parent. See the
- * README file for more info.
+ * only used for handling concurrent root splits, but we may as
+ * well do it as a debugging aid. Note we set new page's link as
+ * well as old's, because if the new page turns out to be the last
+ * of the level, _bt_uppershutdown won't change it. The links may
+ * be out of date by the time the build finishes, but that's OK;
+ * they need only point to a left-sibling of the true parent. See
+ * the README file for more info.
*/
{
BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
@@ -434,7 +437,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
}
/*
- * Write out the old page. We never want to see it again, so we
+ * Write out the old page. We never want to see it again, so we
* can give up our lock (if we had one; most likely BuildingBtree
* is set, so we aren't locking).
*/
@@ -449,8 +452,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* If the new item is the first for its page, stash a copy for later.
* Note this will only happen for the first item on a level; on later
- * pages, the first item for a page is copied from the prior page
- * in the code above.
+ * pages, the first item for a page is copied from the prior page in
+ * the code above.
*/
if (last_off == P_HIKEY)
{
@@ -493,8 +496,8 @@ _bt_uppershutdown(Relation index, BTPageState *state)
*
* If we're at the top, it's the root, so attach it to the metapage.
* Otherwise, add an entry for it to its parent using its minimum
- * key. This may cause the last page of the parent level to split,
- * but that's not a problem -- we haven't gotten to it yet.
+ * key. This may cause the last page of the parent level to
+ * split, but that's not a problem -- we haven't gotten to it yet.
*/
if (s->btps_next == (BTPageState *) NULL)
{
@@ -513,7 +516,7 @@ _bt_uppershutdown(Relation index, BTPageState *state)
/*
* This is the rightmost page, so the ItemId array needs to be
- * slid back one slot. Then we can dump out the page.
+ * slid back one slot. Then we can dump out the page.
*/
_bt_slideleft(index, s->btps_buf, s->btps_page);
_bt_wrtbuf(index, s->btps_buf);
@@ -529,22 +532,29 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
{
BTPageState *state = NULL;
bool merge = (btspool2 != NULL);
- BTItem bti, bti2 = NULL;
- bool should_free, should_free2, load1;
+ BTItem bti,
+ bti2 = NULL;
+ bool should_free,
+ should_free2,
+ load1;
TupleDesc tupdes = RelationGetDescr(index);
- int i, keysz = RelationGetNumberOfAttributes(index);
+ int i,
+ keysz = RelationGetNumberOfAttributes(index);
ScanKey indexScanKey = NULL;
if (merge)
{
+
/*
- * Another BTSpool for dead tuples exists.
- * Now we have to merge btspool and btspool2.
- */
- ScanKey entry;
- Datum attrDatum1, attrDatum2;
- bool isFirstNull, isSecondNull;
- int32 compare;
+ * Another BTSpool for dead tuples exists. Now we have to merge
+ * btspool and btspool2.
+ */
+ ScanKey entry;
+ Datum attrDatum1,
+ attrDatum2;
+ bool isFirstNull,
+ isSecondNull;
+ int32 compare;
/* the preparation of merge */
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free);
@@ -552,7 +562,7 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
indexScanKey = _bt_mkscankey_nodata(index);
for (;;)
{
- load1 = true; /* load BTSpool next ? */
+ load1 = true; /* load BTSpool next ? */
if (NULL == bti2)
{
if (NULL == bti)
@@ -564,8 +574,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
for (i = 1; i <= keysz; i++)
{
entry = indexScanKey + i - 1;
- attrDatum1 = index_getattr((IndexTuple)bti, i, tupdes, &isFirstNull);
- attrDatum2 = index_getattr((IndexTuple)bti2, i, tupdes, &isSecondNull);
+ attrDatum1 = index_getattr((IndexTuple) bti, i, tupdes, &isFirstNull);
+ attrDatum2 = index_getattr((IndexTuple) bti2, i, tupdes, &isSecondNull);
if (isFirstNull)
{
if (!isSecondNull)
@@ -586,7 +596,7 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
else if (compare < 0)
break;
- }
+ }
}
}
else
@@ -613,7 +623,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
_bt_freeskey(indexScanKey);
}
- else /* merge is unnecessary */
+ else
+/* merge is unnecessary */
{
while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL)
{
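The reindented merge loop in _bt_load is easier to follow in skeleton form. The sketch keeps the names from the diff; the per-attribute comparison body (index_getattr on both tuples, NULLs sorting last, then the ordering proc that sets "compare") is collapsed to a comment, and the trailing else branch is assumed:

    for (;;)
    {
        load1 = true;                   /* load from the main spool next? */
        if (bti2 == NULL)
        {
            if (bti == NULL)
                break;                  /* both spools exhausted: done */
        }
        else if (bti != NULL)
        {
            for (i = 1; i <= keysz; i++)
            {
                /* compare attribute i of bti and bti2 (NULLs last),
                 * giving "compare" < 0, = 0, or > 0 */
                if (compare > 0)
                {
                    load1 = false;      /* bti2 sorts first */
                    break;
                }
                else if (compare < 0)
                    break;              /* bti sorts first */
            }
        }
        else
            load1 = false;              /* main spool empty: drain spool2 */

        /* pass the chosen item to _bt_buildadd, then fetch the next
         * tuple from whichever spool was just consumed */
    }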
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 507205f2be7..2a37147d68e 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.42 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.43 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ _bt_freestack(BTStack stack)
* Construct a BTItem from a plain IndexTuple.
*
* This is now useless code, since a BTItem *is* an index tuple with
- * no extra stuff. We hang onto it for the moment to preserve the
+ * no extra stuff. We hang onto it for the moment to preserve the
* notational distinction, in case we want to add some extra stuff
* again someday.
*/
@@ -165,7 +165,7 @@ _bt_formitem(IndexTuple itup)
* are "x = 1 AND y < 4 AND z < 5", then _bt_checkkeys will reject a tuple
* (1,2,7), but we must continue the scan in case there are tuples (1,3,z).
* But once we reach tuples like (1,4,z) we can stop scanning because no
- * later tuples could match. This is reflected by setting
+ * later tuples could match. This is reflected by setting
* so->numberOfRequiredKeys to the number of leading keys that must be
* matched to continue the scan. numberOfRequiredKeys is equal to the
* number of leading "=" keys plus the key(s) for the first non "="
@@ -178,7 +178,7 @@ _bt_formitem(IndexTuple itup)
*
* XXX this routine is one of many places that fail to handle SK_COMMUTE
* scankeys properly. Currently, the planner is careful never to generate
- * any indexquals that would require SK_COMMUTE to be set. Someday we ought
+ * any indexquals that would require SK_COMMUTE to be set. Someday we ought
* to try to fix this, though it's not real critical as long as indexable
* operators all have commutators...
*
@@ -191,7 +191,7 @@ _bt_formitem(IndexTuple itup)
void
_bt_orderkeys(Relation relation, BTScanOpaque so)
{
- ScanKeyData xform[BTMaxStrategyNumber];
+ ScanKeyData xform[BTMaxStrategyNumber];
bool init[BTMaxStrategyNumber];
uint16 numberOfKeys = so->numberOfKeys;
ScanKey key;
@@ -240,14 +240,14 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/*
* Initialize for processing of keys for attr 1.
*
- * xform[i] holds a copy of the current scan key of strategy type i+1,
- * if any; init[i] is TRUE if we have found such a key for this attr.
+ * xform[i] holds a copy of the current scan key of strategy type i+1, if
+ * any; init[i] is TRUE if we have found such a key for this attr.
*/
attno = 1;
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
BTMaxStrategyNumber,
attno);
- MemSet(xform, 0, sizeof(xform)); /* not really necessary */
+ MemSet(xform, 0, sizeof(xform)); /* not really necessary */
MemSet(init, 0, sizeof(init));
/*
@@ -255,7 +255,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* pass to handle after-last-key processing. Actual exit from the
* loop is at the "break" statement below.
*/
- for (i = 0; ; cur++, i++)
+ for (i = 0;; cur++, i++)
{
if (i < numberOfKeys)
{
@@ -263,7 +263,9 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (cur->sk_flags & SK_ISNULL)
{
so->qual_ok = false;
- /* Quit processing so we don't try to invoke comparison
+
+ /*
+ * Quit processing so we don't try to invoke comparison
* routines on NULLs.
*/
return;
@@ -271,8 +273,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
/*
- * If we are at the end of the keys for a particular attr,
- * finish up processing and emit the cleaned-up keys.
+ * If we are at the end of the keys for a particular attr, finish
+ * up processing and emit the cleaned-up keys.
*/
if (i == numberOfKeys || cur->sk_attno != attno)
{
@@ -296,7 +298,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
eq = &xform[BTEqualStrategyNumber - 1];
for (j = BTMaxStrategyNumber; --j >= 0;)
{
- if (! init[j] ||
+ if (!init[j] ||
j == (BTEqualStrategyNumber - 1))
continue;
chk = &xform[j];
@@ -313,6 +315,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
else
{
+
/*
* No "=" for this key, so we're done with required keys
*/
@@ -355,8 +358,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* Emit the cleaned-up keys back into the key[] array in the
* correct order. Note we are overwriting our input here!
* It's OK because (a) xform[] is a physical copy of the keys
- * we want, (b) we cannot emit more keys than we input, so
- * we won't overwrite as-yet-unprocessed keys.
+ * we want, (b) we cannot emit more keys than we input, so we
+ * won't overwrite as-yet-unprocessed keys.
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
{
@@ -383,7 +386,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
BTMaxStrategyNumber,
attno);
- MemSet(xform, 0, sizeof(xform)); /* not really necessary */
+ MemSet(xform, 0, sizeof(xform)); /* not really necessary */
MemSet(init, 0, sizeof(init));
}
@@ -409,7 +412,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (DatumGetBool(test))
xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1))
- so->qual_ok = false; /* key == a && key == b, but a != b */
+ so->qual_ok = false; /* key == a && key == b, but a !=
+ * b */
}
else
{
@@ -473,16 +477,18 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull)
{
+
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual
- * is one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if (keysok < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir))
*continuescan = false;
+
/*
* In any case, this indextuple doesn't match the qual.
*/
@@ -498,9 +504,10 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
{
+
/*
- * Tuple fails this qual. If it's a required qual, then
- * we can conclude no further tuples will pass, either.
+ * Tuple fails this qual. If it's a required qual, then we
+ * can conclude no further tuples will pass, either.
*/
if (keysok < so->numberOfRequiredKeys)
*continuescan = false;
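The two hunks above carry the key-checking rule that the earlier _bt_orderkeys comments set up: failing any qual rejects the tuple, but only a failure in one of the leading "required" keys lets the whole scan stop early. A condensed sketch of one iteration; the FunctionCall2 invocation and the surrounding declarations are assumptions based on the fragments shown, not a quotation:

    if (isNull)
    {
        /* NULLs sort after non-NULLs: on a forward scan, a required key
         * hitting the NULL region means nothing later can match either */
        if (keysok < so->numberOfRequiredKeys &&
            ScanDirectionIsForward(dir))
            *continuescan = false;
        return false;                   /* this tuple doesn't match */
    }

    test = FunctionCall2(&key->sk_func, datum, key->sk_argument);
    if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
    {
        /* tuple fails this qual; if it is a required qual, no further
         * tuples can pass either */
        if (keysok < so->numberOfRequiredKeys)
            *continuescan = false;
        return false;
    }
    keysok++;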
diff --git a/src/backend/access/rtree/rtget.c b/src/backend/access/rtree/rtget.c
index df0f5e9c80e..c8fa6b18d68 100644
--- a/src/backend/access/rtree/rtget.c
+++ b/src/backend/access/rtree/rtget.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.24 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.25 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,8 +30,8 @@ static ItemPointer rtheapptr(Relation r, ItemPointer itemp);
Datum
rtgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/* if we have it cached in the scan desc, just return the value */
diff --git a/src/backend/access/rtree/rtproc.c b/src/backend/access/rtree/rtproc.c
index fd610caebe1..400be10ccb3 100644
--- a/src/backend/access/rtree/rtproc.c
+++ b/src/backend/access/rtree/rtproc.c
@@ -6,7 +6,7 @@
* NOTE: for largely-historical reasons, the intersection functions should
* return a NULL pointer (*not* an SQL null value) to indicate "no
* intersection". The size functions must be prepared to accept such
- * a pointer and return 0. This convention means that only pass-by-reference
+ * a pointer and return 0. This convention means that only pass-by-reference
* data types can be used as the output of the union and intersection
* routines, but that's not a big problem.
*
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.31 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.32 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,6 +70,7 @@ Datum
rt_box_size(PG_FUNCTION_ARGS)
{
BOX *a = PG_GETARG_BOX_P(0);
+
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
@@ -98,8 +99,8 @@ rt_bigbox_size(PG_FUNCTION_ARGS)
Datum
rt_poly_union(PG_FUNCTION_ARGS)
{
- POLYGON *a = PG_GETARG_POLYGON_P(0);
- POLYGON *b = PG_GETARG_POLYGON_P(1);
+ POLYGON *a = PG_GETARG_POLYGON_P(0);
+ POLYGON *b = PG_GETARG_POLYGON_P(1);
POLYGON *p;
p = (POLYGON *) palloc(sizeof(POLYGON));
@@ -122,8 +123,8 @@ rt_poly_union(PG_FUNCTION_ARGS)
Datum
rt_poly_inter(PG_FUNCTION_ARGS)
{
- POLYGON *a = PG_GETARG_POLYGON_P(0);
- POLYGON *b = PG_GETARG_POLYGON_P(1);
+ POLYGON *a = PG_GETARG_POLYGON_P(0);
+ POLYGON *b = PG_GETARG_POLYGON_P(1);
POLYGON *p;
p = (POLYGON *) palloc(sizeof(POLYGON));
@@ -155,13 +156,15 @@ Datum
rt_poly_size(PG_FUNCTION_ARGS)
{
Pointer aptr = PG_GETARG_POINTER(0);
+
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
- POLYGON *a;
+ POLYGON *a;
double xdim,
ydim;
- /* Can't just use GETARG because of possibility that input is NULL;
+ /*
+ * Can't just use GETARG because of possibility that input is NULL;
* since POLYGON is toastable, GETARG will try to inspect its value
*/
if (aptr == NULL)
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 45382d5ef3c..3752a59e99a 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.60 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.61 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,12 +68,12 @@ static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
static void rttighten(Relation r, RTSTACK *stk, Datum datum, int att_size,
RTSTATE *rtstate);
static InsertIndexResult rtdosplit(Relation r, Buffer buffer, RTSTACK *stack,
- IndexTuple itup, RTSTATE *rtstate);
+ IndexTuple itup, RTSTATE *rtstate);
static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup,
IndexTuple rtup, RTSTATE *rtstate);
static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt);
static void rtpicksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup,
- RTSTATE *rtstate);
+ RTSTATE *rtstate);
static void RTInitBuffer(Buffer b, uint32 f);
static OffsetNumber choose(Relation r, Page p, IndexTuple it,
RTSTATE *rtstate);
@@ -84,12 +84,14 @@ static void initRtstate(RTSTATE *rtstate, Relation index);
Datum
rtbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -101,9 +103,11 @@ rtbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -171,6 +175,7 @@ rtbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -232,9 +237,7 @@ rtbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -278,12 +281,14 @@ rtbuild(PG_FUNCTION_ARGS)
Datum
rtinsert(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -412,7 +417,7 @@ rttighten(Relation r,
p = BufferGetPage(b);
oldud = IndexTupleGetDatum(PageGetItem(p,
- PageGetItemId(p, stk->rts_child)));
+ PageGetItemId(p, stk->rts_child)));
FunctionCall2(&rtstate->sizeFn, oldud,
PointerGetDatum(&old_size));
@@ -564,7 +569,7 @@ rtdosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */
- if (*spl_left == maxoff+1)
+ if (*spl_left == maxoff + 1)
{
if (PageAddItem(left, (Item) itup, IndexTupleSize(itup),
leftoff, LP_USED) == InvalidOffsetNumber)
@@ -576,7 +581,7 @@ rtdosplit(Relation r,
}
else
{
- Assert(*spl_right == maxoff+1);
+ Assert(*spl_right == maxoff + 1);
if (PageAddItem(right, (Item) itup, IndexTupleSize(itup),
rightoff, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "rtdosplit: failed to add index item to %s",
@@ -665,10 +670,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
- * This is a hack. Right now, we force rtree internal keys to be constant
- * size. To fix this, need delete the old key and add both left and
- * right for the two new pages. The insertion of left may force a
- * split if the new left key is bigger than the old key.
+ * This is a hack. Right now, we force rtree internal keys to be
+ * constant size. To fix this, need delete the old key and add both
+ * left and right for the two new pages. The insertion of left may
+ * force a split if the new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@@ -734,7 +739,7 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
* We return two vectors of index item numbers, one for the items to be
* put on the left page, one for the items to be put on the right page.
* In addition, the item to be added (itup) is listed in the appropriate
- * vector. It is represented by item number N+1 (N = # of items on page).
+ * vector. It is represented by item number N+1 (N = # of items on page).
*
* Both vectors appear in sequence order with a terminating sentinel value
* of InvalidOffsetNumber.
@@ -747,9 +752,9 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
*
* We must also deal with a consideration not found in Guttman's algorithm:
* variable-length data. In particular, the incoming item might be
- * large enough that not just any split will work. In the worst case,
+ * large enough that not just any split will work. In the worst case,
* our "split" may have to be the new item on one page and all the existing
- * items on the other. Short of that, we have to take care that we do not
+ * items on the other. Short of that, we have to take care that we do not
* make a split that leaves both pages too full for the new item.
*/
static void
@@ -794,9 +799,10 @@ rtpicksplit(Relation r,
right_avail_space;
/*
- * First, make sure the new item is not so large that we can't possibly
- * fit it on a page, even by itself. (It's sufficient to make this test
- * here, since any oversize tuple must lead to a page split attempt.)
+ * First, make sure the new item is not so large that we can't
+ * possibly fit it on a page, even by itself. (It's sufficient to
+ * make this test here, since any oversize tuple must lead to a page
+ * split attempt.)
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
@@ -804,7 +810,8 @@ rtpicksplit(Relation r,
(unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
maxoff = PageGetMaxOffsetNumber(page);
- newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */
+ newitemoff = OffsetNumberNext(maxoff); /* phony index for new
+ * item */
/* Make arrays big enough for worst case, including sentinel */
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@@ -827,8 +834,8 @@ rtpicksplit(Relation r,
item_2_sz = IndexTupleTotalSize(item_2);
/*
- * Ignore seed pairs that don't leave room for the new item
- * on either split page.
+ * Ignore seed pairs that don't leave room for the new item on
+ * either split page.
*/
if (newitemsz + item_1_sz > RTPageAvailSpace &&
newitemsz + item_2_sz > RTPageAvailSpace)
@@ -841,8 +848,10 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_union));
inter_d = FunctionCall2(&rtstate->interFn,
datum_alpha, datum_beta);
- /* The interFn may return a NULL pointer (not an SQL null!)
- * to indicate no intersection. sizeFn must cope with this.
+
+ /*
+ * The interFn may return a NULL pointer (not an SQL null!) to
+ * indicate no intersection. sizeFn must cope with this.
*/
FunctionCall2(&rtstate->sizeFn, inter_d,
PointerGetDatum(&size_inter));
@@ -869,6 +878,7 @@ rtpicksplit(Relation r,
if (firsttime)
{
+
/*
* There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles,
@@ -916,14 +926,14 @@ rtpicksplit(Relation r,
for (i = FirstOffsetNumber; i <= newitemoff; i = OffsetNumberNext(i))
{
- bool left_feasible,
- right_feasible,
- choose_left;
+ bool left_feasible,
+ right_feasible,
+ choose_left;
/*
* If we've already decided where to place this item, just put it
- * on the correct list. Otherwise, we need to figure out which page
- * needs the least enlargement in order to store the item.
+ * on the correct list. Otherwise, we need to figure out which
+ * page needs the least enlargement in order to store the item.
*/
if (i == seed_1)
@@ -961,12 +971,13 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta));
/*
- * We prefer the page that shows smaller enlargement of its union area
- * (Guttman's algorithm), but we must take care that at least one page
- * will still have room for the new item after this one is added.
+ * We prefer the page that shows smaller enlargement of its union
+ * area (Guttman's algorithm), but we must take care that at least
+ * one page will still have room for the new item after this one
+ * is added.
*
- * (We know that all the old items together can fit on one page,
- * so we need not worry about any other problem than failing to fit
+ * (We know that all the old items together can fit on one page, so
+ * we need not worry about any other problem than failing to fit
* the new item.)
*/
left_feasible = (left_avail_space >= item_1_sz &&
@@ -987,7 +998,7 @@ rtpicksplit(Relation r,
else
{
elog(ERROR, "rtpicksplit: failed to find a workable page split");
- choose_left = false; /* keep compiler quiet */
+ choose_left = false;/* keep compiler quiet */
}
if (choose_left)
@@ -1012,7 +1023,7 @@ rtpicksplit(Relation r,
}
}
- *left = *right = InvalidOffsetNumber; /* add ending sentinels */
+ *left = *right = InvalidOffsetNumber; /* add ending sentinels */
v->spl_ldatum = datum_l;
v->spl_rdatum = datum_r;
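The placement rule the reflowed comments above describe is Guttman's: each remaining item goes on the page whose bounding union grows least, but never on a page that would then be unable to hold the incoming new item. A sketch of that decision ladder; the enlargement comparison and the second halves of the feasibility tests are elided in the hunks, so they appear here only as comments:

    left_feasible = (left_avail_space >= item_1_sz
                     /* ...and the new item can still fit afterwards... */);
    right_feasible = (right_avail_space >= item_2_sz
                      /* ...likewise for the right page... */);

    if (left_feasible && right_feasible)
    {
        /* both pages work: pick the one whose union area grows less */
        choose_left = /* enlargement(left) <= enlargement(right) */ true;
    }
    else if (left_feasible)
        choose_left = true;
    else if (right_feasible)
        choose_left = false;
    else
    {
        elog(ERROR, "rtpicksplit: failed to find a workable page split");
        choose_left = false;            /* keep compiler quiet */
    }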
@@ -1096,8 +1107,8 @@ freestack(RTSTACK *s)
Datum
rtdelete(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
@@ -1203,14 +1214,14 @@ rtree_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "rtree_redo: unimplemented");
}
-
+
void
rtree_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "rtree_undo: unimplemented");
}
-
+
void
-rtree_desc(char *buf, uint8 xl_info, char* rec)
+rtree_desc(char *buf, uint8 xl_info, char *rec)
{
}
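A standalone sketch of the placement rule described in the rtpicksplit hunks above: prefer the page whose bounding-union area grows least (Guttman's heuristic), but never pick a side that could not also hold the still-unplaced new item. All names below are illustrative stand-ins, not the backend's code.

/* choose_left_page: minimal version of the feasibility-aware Guttman choice */
#include <stdbool.h>
#include <stdio.h>

static bool
choose_left_page(double left_growth, double right_growth,
				 bool left_feasible, bool right_feasible)
{
	if (left_feasible && right_feasible)
		return left_growth < right_growth;	/* smaller enlargement wins */
	if (left_feasible)
		return true;
	if (right_feasible)
		return false;
	/* caller treats this as an error, as rtpicksplit does */
	return false;
}

int
main(void)
{
	/* right page would grow less, and both sides still have room */
	printf("%s\n", choose_left_page(4.0, 1.5, true, true) ? "left" : "right");
	return 0;
}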
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 605d51b5d33..f3e6d52fe67 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.35 2001/01/24 19:42:50 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.36 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,9 +75,9 @@ rtbeginscan(PG_FUNCTION_ARGS)
Datum
rtrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- bool fromEnd = PG_GETARG_BOOL(1);
- ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ bool fromEnd = PG_GETARG_BOOL(1);
+ ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
RTreeScanOpaque p;
RegProcedure internal_proc;
int i;
@@ -162,7 +162,7 @@ rtrescan(PG_FUNCTION_ARGS)
Datum
rtmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
RTSTACK *o,
*n,
@@ -198,7 +198,7 @@ rtmarkpos(PG_FUNCTION_ARGS)
Datum
rtrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
RTSTACK *o,
*n,
@@ -234,7 +234,7 @@ rtrestrpos(PG_FUNCTION_ARGS)
Datum
rtendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
p = (RTreeScanOpaque) s->opaque;
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index b25db74da8c..625b0db3202 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -9,21 +9,21 @@
#include "storage/smgr.h"
#include "commands/sequence.h"
-RmgrData RmgrTable[] = {
-{"XLOG", xlog_redo, xlog_undo, xlog_desc},
-{"Transaction", xact_redo, xact_undo, xact_desc},
-{"Storage", smgr_redo, smgr_undo, smgr_desc},
-{"Reserved 3", NULL, NULL, NULL},
-{"Reserved 4", NULL, NULL, NULL},
-{"Reserved 5", NULL, NULL, NULL},
-{"Reserved 6", NULL, NULL, NULL},
-{"Reserved 7", NULL, NULL, NULL},
-{"Reserved 8", NULL, NULL, NULL},
-{"Reserved 9", NULL, NULL, NULL},
-{"Heap", heap_redo, heap_undo, heap_desc},
-{"Btree", btree_redo, btree_undo, btree_desc},
-{"Hash", hash_redo, hash_undo, hash_desc},
-{"Rtree", rtree_redo, rtree_undo, rtree_desc},
-{"Gist", gist_redo, gist_undo, gist_desc},
-{"Sequence", seq_redo, seq_undo, seq_desc}
+RmgrData RmgrTable[] = {
+ {"XLOG", xlog_redo, xlog_undo, xlog_desc},
+ {"Transaction", xact_redo, xact_undo, xact_desc},
+ {"Storage", smgr_redo, smgr_undo, smgr_desc},
+ {"Reserved 3", NULL, NULL, NULL},
+ {"Reserved 4", NULL, NULL, NULL},
+ {"Reserved 5", NULL, NULL, NULL},
+ {"Reserved 6", NULL, NULL, NULL},
+ {"Reserved 7", NULL, NULL, NULL},
+ {"Reserved 8", NULL, NULL, NULL},
+ {"Reserved 9", NULL, NULL, NULL},
+ {"Heap", heap_redo, heap_undo, heap_desc},
+ {"Btree", btree_redo, btree_undo, btree_desc},
+ {"Hash", hash_redo, hash_undo, hash_desc},
+ {"Rtree", rtree_redo, rtree_undo, rtree_desc},
+ {"Gist", gist_redo, gist_undo, gist_desc},
+ {"Sequence", seq_redo, seq_undo, seq_desc}
};
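A minimal sketch of how a redo loop can dispatch through a table shaped like RmgrTable above: the record's resource-manager id indexes the table and the matching redo callback is invoked. The struct and function names here are hypothetical, not the backend's actual declarations.

#include <stdio.h>

typedef struct DemoRecord
{
	unsigned char rm_id;		/* which resource manager wrote the record */
} DemoRecord;

typedef struct DemoRmgr
{
	const char *name;
	void		(*redo) (const DemoRecord *rec);
} DemoRmgr;

static void
demo_heap_redo(const DemoRecord *rec)
{
	(void) rec;
	printf("heap redo\n");
}

static void
demo_btree_redo(const DemoRecord *rec)
{
	(void) rec;
	printf("btree redo\n");
}

static const DemoRmgr demo_table[] = {
	{"Heap", demo_heap_redo},
	{"Btree", demo_btree_redo},
};

int
main(void)
{
	DemoRecord	rec = {1};		/* pretend this id came from the log */

	demo_table[rec.rm_id].redo(&rec);	/* dispatch by id, as the table layout allows */
	return 0;
}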
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 64289057926..29e72e84175 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.41 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -427,8 +427,8 @@ InitializeTransactionLog(void)
TransactionLogUpdate(AmiTransactionId, XID_COMMIT);
TransactionIdStore(AmiTransactionId, &cachedTestXid);
cachedTestXidStatus = XID_COMMIT;
- Assert(!IsUnderPostmaster &&
- ShmemVariableCache->nextXid <= FirstTransactionId);
+ Assert(!IsUnderPostmaster &&
+ ShmemVariableCache->nextXid <= FirstTransactionId);
ShmemVariableCache->nextXid = FirstTransactionId;
}
else if (RecoveryCheckingEnabled())
diff --git a/src/backend/access/transam/transsup.c b/src/backend/access/transam/transsup.c
index e4ff7979cf9..c433506eae6 100644
--- a/src/backend/access/transam/transsup.c
+++ b/src/backend/access/transam/transsup.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.28 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains support functions for the high
@@ -186,7 +186,7 @@ TransBlockGetXidStatus(Block tblock,
bits8 bit2;
BitIndex offset;
- tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
+ tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
@@ -229,7 +229,7 @@ TransBlockSetXidStatus(Block tblock,
Index index;
BitIndex offset;
- tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
+ tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index d6097b2567c..34c607eab9f 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.37 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.38 2001/03/22 03:59:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,8 +23,8 @@
#define VAR_OID_PREFETCH 8192
/* Spinlocks for serializing generation of XIDs and OIDs, respectively */
-SPINLOCK XidGenLockId;
-SPINLOCK OidGenLockId;
+SPINLOCK XidGenLockId;
+SPINLOCK OidGenLockId;
/* pointer to "variable cache" in shared memory (set up by shmem.c) */
VariableCache ShmemVariableCache = NULL;
@@ -32,9 +32,10 @@ VariableCache ShmemVariableCache = NULL;
void
GetNewTransactionId(TransactionId *xid)
{
+
/*
- * During bootstrap initialization, we return the special
- * bootstrap transaction id.
+ * During bootstrap initialization, we return the special bootstrap
+ * transaction id.
*/
if (AMI_OVERRIDE)
{
@@ -60,9 +61,10 @@ GetNewTransactionId(TransactionId *xid)
void
ReadNewTransactionId(TransactionId *xid)
{
+
/*
- * During bootstrap initialization, we return the special
- * bootstrap transaction id.
+ * During bootstrap initialization, we return the special bootstrap
+ * transaction id.
*/
if (AMI_OVERRIDE)
{
@@ -80,7 +82,7 @@ ReadNewTransactionId(TransactionId *xid)
* ----------------------------------------------------------------
*/
-static Oid lastSeenOid = InvalidOid;
+static Oid lastSeenOid = InvalidOid;
void
GetNewObjectId(Oid *oid_return)
@@ -119,10 +121,10 @@ CheckMaxObjectId(Oid assigned_oid)
}
/* If we are in the logged oid range, just bump nextOid up */
- if (assigned_oid <= ShmemVariableCache->nextOid +
- ShmemVariableCache->oidCount - 1)
+ if (assigned_oid <= ShmemVariableCache->nextOid +
+ ShmemVariableCache->oidCount - 1)
{
- ShmemVariableCache->oidCount -=
+ ShmemVariableCache->oidCount -=
assigned_oid - ShmemVariableCache->nextOid + 1;
ShmemVariableCache->nextOid = assigned_oid + 1;
SpinRelease(OidGenLockId);
@@ -130,10 +132,9 @@ CheckMaxObjectId(Oid assigned_oid)
}
/*
- * We have exceeded the logged oid range.
- * We should lock the database and kill all other backends
- * but we are loading oid's that we can not guarantee are unique
- * anyway, so we must rely on the user.
+ * We have exceeded the logged oid range. We should lock the database
+ * and kill all other backends but we are loading oid's that we can
+ * not guarantee are unique anyway, so we must rely on the user.
*/
XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH);
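The in-range branch of CheckMaxObjectId above advances the already-logged OID prefetch window past an explicitly assigned OID. The same arithmetic, written with plain local variables rather than the shared-memory cache:

#include <stdio.h>

typedef unsigned int DemoOid;

int
main(void)
{
	DemoOid		nextOid = 17000;	/* next OID the cache would hand out */
	DemoOid		oidCount = 8192;	/* OIDs still covered by the last XLOG record */
	DemoOid		assigned = 17500;	/* OID forced by a data load */

	if (assigned <= nextOid + oidCount - 1)
	{
		oidCount -= assigned - nextOid + 1;		/* consume the skipped-over OIDs */
		nextOid = assigned + 1;					/* continue just past the assigned one */
	}
	printf("nextOid=%u oidCount=%u\n", nextOid, oidCount);
	return 0;
}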
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 1331c8e9834..6a8e6c0639f 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.99 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -222,9 +222,10 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int CommitDelay = 0; /* precommit delay in microseconds */
-int CommitSiblings = 5; /* number of concurrent xacts needed to sleep */
+int CommitSiblings = 5; /* number of concurrent xacts needed to
+ * sleep */
-static void (*_RollbackFunc)(void*) = NULL;
+static void (*_RollbackFunc) (void *) = NULL;
static void *_RollbackData = NULL;
/* ----------------
@@ -666,39 +667,40 @@ RecordTransactionCommit()
if (MyLastRecPtr.xrecoff != 0)
{
- XLogRecData rdata;
- xl_xact_commit xlrec;
- XLogRecPtr recptr;
+ XLogRecData rdata;
+ xl_xact_commit xlrec;
+ XLogRecPtr recptr;
BufmgrCommit();
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&xlrec);
+ rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactCommit;
rdata.next = NULL;
START_CRIT_SECTION();
+
/*
* SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP
*/
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
- /*
- * Sleep before commit! So we can flush more than one
- * commit records per single fsync. (The idea is some other
- * backend may do the XLogFlush while we're sleeping. This
- * needs work still, because on most Unixen, the minimum
- * select() delay is 10msec or more, which is way too long.)
+ /*
+ * Sleep before commit! So we can flush more than one commit
+ * records per single fsync. (The idea is some other backend may
+ * do the XLogFlush while we're sleeping. This needs work still,
+ * because on most Unixen, the minimum select() delay is 10msec or
+ * more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there
- * are fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there are
+ * fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
CountActiveBackends() >= CommitSiblings)
{
- struct timeval delay;
+ struct timeval delay;
delay.tv_sec = 0;
delay.tv_usec = CommitDelay;
@@ -812,13 +814,13 @@ RecordTransactionAbort(void)
*/
if (MyLastRecPtr.xrecoff != 0 && !TransactionIdDidCommit(xid))
{
- XLogRecData rdata;
- xl_xact_abort xlrec;
- XLogRecPtr recptr;
+ XLogRecData rdata;
+ xl_xact_abort xlrec;
+ XLogRecPtr recptr;
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&xlrec);
+ rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactAbort;
rdata.next = NULL;
@@ -879,7 +881,7 @@ AtAbort_Memory(void)
{
/* ----------------
* Make sure we are in a valid context (not a child of
- * TransactionCommandContext...). Note that it is possible
+ * TransactionCommandContext...). Note that it is possible
* for this code to be called when we aren't in a transaction
* at all; go directly to TopMemoryContext in that case.
* ----------------
@@ -896,9 +898,7 @@ AtAbort_Memory(void)
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
else
- {
MemoryContextSwitchTo(TopMemoryContext);
- }
}
@@ -1021,6 +1021,7 @@ CurrentXactInProgress(void)
{
return CurrentTransactionState->state == TRANS_INPROGRESS;
}
+
#endif
/* --------------------------------
@@ -1106,7 +1107,7 @@ CommitTransaction(void)
AtCommit_Memory();
AtEOXact_Files();
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* done with commit processing, set current transaction
@@ -1143,15 +1144,16 @@ AbortTransaction(void)
/*
* Release any spinlocks or buffer context locks we might be holding
- * as quickly as possible. (Real locks, however, must be held till
- * we finish aborting.) Releasing spinlocks is critical since we
- * might try to grab them again while cleaning up!
+ * as quickly as possible. (Real locks, however, must be held till we
+ * finish aborting.) Releasing spinlocks is critical since we might
+ * try to grab them again while cleaning up!
*/
ProcReleaseSpins(NULL);
UnlockBuffers();
+
/*
- * Also clean up any open wait for lock, since the lock manager
- * will choke if we try to wait for another lock before doing this.
+ * Also clean up any open wait for lock, since the lock manager will
+ * choke if we try to wait for another lock before doing this.
*/
LockWaitCancel();
@@ -1203,7 +1205,7 @@ AbortTransaction(void)
AtEOXact_Files();
AtAbort_Locks();
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* State remains TRANS_ABORT until CleanupTransaction().
@@ -1327,8 +1329,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TransactionCommandContext before returning.
- * This is already done if we called StartTransaction, otherwise not.
+ * We must switch to TransactionCommandContext before returning. This
+ * is already done if we called StartTransaction, otherwise not.
*/
Assert(TransactionCommandContext != NULL);
MemoryContextSwitchTo(TransactionCommandContext);
@@ -1757,7 +1759,7 @@ IsTransactionBlock(void)
void
xact_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
@@ -1765,9 +1767,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
/* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */
}
else if (info == XLOG_XACT_ABORT)
- {
TransactionIdAbort(record->xl_xid);
- }
else
elog(STOP, "xact_redo: unknown op code %u", info);
}
@@ -1775,43 +1775,43 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
void
xact_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
- if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
+ if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
elog(STOP, "xact_undo: can't undo committed xaction");
else if (info != XLOG_XACT_ABORT)
elog(STOP, "xact_redo: unknown op code %u", info);
}
-
+
void
-xact_desc(char *buf, uint8 xl_info, char* rec)
+xact_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
- xl_xact_commit *xlrec = (xl_xact_commit*) rec;
- struct tm *tm = localtime(&xlrec->xtime);
+ xl_xact_commit *xlrec = (xl_xact_commit *) rec;
+ struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
}
else if (info == XLOG_XACT_ABORT)
{
- xl_xact_abort *xlrec = (xl_xact_abort*) rec;
- struct tm *tm = localtime(&xlrec->xtime);
+ xl_xact_abort *xlrec = (xl_xact_abort *) rec;
+ struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "abort: %04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
}
else
strcat(buf, "UNKNOWN");
}
void
-XactPushRollback(void (*func) (void *), void* data)
+ XactPushRollback(void (*func) (void *), void *data)
{
#ifdef XLOG_II
if (_RollbackFunc != NULL)
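A sketch of the "sleep before commit" idea in RecordTransactionCommit above: delay briefly before flushing so that other backends' commit records can ride along on one fsync. select() serves as a microsecond sleep, as the xact.c comment notes; the parameter names below are placeholders, not the backend's globals.

#include <stdio.h>
#include <sys/select.h>

static void
maybe_delay_before_flush(int commit_delay_usec, int active_siblings,
						 int commit_siblings, int fsync_enabled)
{
	if (commit_delay_usec > 0 && fsync_enabled &&
		active_siblings >= commit_siblings)
	{
		struct timeval delay;

		delay.tv_sec = 0;
		delay.tv_usec = commit_delay_usec;
		(void) select(0, NULL, NULL, NULL, &delay);		/* just sleep */
	}
}

int
main(void)
{
	maybe_delay_before_flush(100, 6, 5, 1);		/* 6 busy siblings: worth waiting */
	printf("flush commit record now\n");
	return 0;
}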
diff --git a/src/backend/access/transam/xid.c b/src/backend/access/transam/xid.c
index 6ee28d1a2b0..624d6da850c 100644
--- a/src/backend/access/transam/xid.c
+++ b/src/backend/access/transam/xid.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: xid.c,v 1.29 2001/01/24 19:42:51 momjian Exp $
+ * $Id: xid.c,v 1.30 2001/03/22 03:59:18 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
@@ -26,8 +26,8 @@
/*
* TransactionId is typedef'd as uint32, so...
*/
-#define PG_GETARG_TRANSACTIONID(n) PG_GETARG_UINT32(n)
-#define PG_RETURN_TRANSACTIONID(x) PG_RETURN_UINT32(x)
+#define PG_GETARG_TRANSACTIONID(n) PG_GETARG_UINT32(n)
+#define PG_RETURN_TRANSACTIONID(x) PG_RETURN_UINT32(x)
extern TransactionId NullTransactionId;
@@ -49,6 +49,7 @@ Datum
xidout(PG_FUNCTION_ARGS)
{
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
+
/* maximum 32 bit unsigned integer representation takes 10 chars */
char *representation = palloc(11);
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 9994025dd69..59d783264bb 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.62 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.63 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,57 +45,60 @@
/*
* This chunk of hackery attempts to determine which file sync methods
* are available on the current platform, and to choose an appropriate
- * default method. We assume that fsync() is always available, and that
+ * default method. We assume that fsync() is always available, and that
* configure determined whether fdatasync() is.
*/
#define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1
-#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and O_DSYNC */
+#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and
+ * O_DSYNC */
#if defined(O_SYNC)
-# define OPEN_SYNC_FLAG O_SYNC
+#define OPEN_SYNC_FLAG O_SYNC
#else
-# if defined(O_FSYNC)
-# define OPEN_SYNC_FLAG O_FSYNC
-# endif
+#if defined(O_FSYNC)
+#define OPEN_SYNC_FLAG O_FSYNC
+#endif
#endif
#if defined(OPEN_SYNC_FLAG)
-# if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
-# define OPEN_DATASYNC_FLAG O_DSYNC
-# endif
+#if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
+#define OPEN_DATASYNC_FLAG O_DSYNC
+#endif
#endif
#if defined(OPEN_DATASYNC_FLAG)
-# define DEFAULT_SYNC_METHOD_STR "open_datasync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
-# define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
+#define DEFAULT_SYNC_METHOD_STR "open_datasync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
+#define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#else
-# if defined(HAVE_FDATASYNC)
-# define DEFAULT_SYNC_METHOD_STR "fdatasync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
-# define DEFAULT_SYNC_FLAGBIT 0
-# else
-# define DEFAULT_SYNC_METHOD_STR "fsync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
-# define DEFAULT_SYNC_FLAGBIT 0
-# endif
+#if defined(HAVE_FDATASYNC)
+#define DEFAULT_SYNC_METHOD_STR "fdatasync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
+#define DEFAULT_SYNC_FLAGBIT 0
+#else
+#define DEFAULT_SYNC_METHOD_STR "fsync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
+#define DEFAULT_SYNC_FLAGBIT 0
+#endif
#endif
/* Max time to wait to acquire XLog activity locks */
-#define XLOG_LOCK_TIMEOUT (5*60*1000000) /* 5 minutes */
+#define XLOG_LOCK_TIMEOUT (5*60*1000000) /* 5 minutes */
/* Max time to wait to acquire checkpoint lock */
-#define CHECKPOINT_LOCK_TIMEOUT (20*60*1000000) /* 20 minutes */
+#define CHECKPOINT_LOCK_TIMEOUT (20*60*1000000) /* 20 minutes */
/* User-settable parameters */
int CheckPointSegments = 3;
int XLOGbuffers = 8;
-int XLOGfiles = 0; /* how many files to pre-allocate during ckpt */
+int XLOGfiles = 0; /* how many files to pre-allocate during
+ * ckpt */
int XLOG_DEBUG = 0;
char *XLOG_sync_method = NULL;
const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR;
-char XLOG_archive_dir[MAXPGPATH]; /* null string means delete 'em */
+char XLOG_archive_dir[MAXPGPATH]; /* null string means
+ * delete 'em */
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */
static int sync_method = DEFAULT_SYNC_METHOD;
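The #if cascade above simply picks the strongest available default: an O_DSYNC-style open flag if the platform has one, else fdatasync(), else plain fsync(). The same decision expressed as a tiny runtime check, with the feature macros below standing in for what configure and the O_* tests would provide:

#include <stdio.h>

#define DEMO_HAVE_O_DSYNC	0
#define DEMO_HAVE_FDATASYNC 1

int
main(void)
{
	const char *method;

	if (DEMO_HAVE_O_DSYNC)
		method = "open_datasync";
	else if (DEMO_HAVE_FDATASYNC)
		method = "fdatasync";
	else
		method = "fsync";
	printf("default XLOG sync method: %s\n", method);
	return 0;
}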
@@ -135,7 +138,7 @@ static XLogRecPtr ProcLastRecPtr = {0, 0};
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
* hold the Insert spinlock). See XLogInsert for details.
*/
@@ -164,12 +167,12 @@ SPINLOCK ControlFileLockId;
*
* XLogCtl->LogwrtResult and XLogCtl->Write.LogwrtResult are both "always
* right", since both are updated by a write or flush operation before
- * it releases logwrt_lck. The point of keeping XLogCtl->Write.LogwrtResult
+ * it releases logwrt_lck. The point of keeping XLogCtl->Write.LogwrtResult
* is that it can be examined/modified by code that already holds logwrt_lck
* without needing to grab info_lck as well.
*
* XLogCtl->Insert.LogwrtResult may lag behind the reality of the other two,
- * but is updated when convenient. Again, it exists for the convenience of
+ * but is updated when convenient. Again, it exists for the convenience of
* code that is already holding insert_lck but not the other locks.
*
* The unshared LogwrtResult may lag behind any or all of these, and again
@@ -187,25 +190,25 @@ typedef struct XLogwrtRqst
{
XLogRecPtr Write; /* last byte + 1 to write out */
XLogRecPtr Flush; /* last byte + 1 to flush */
-} XLogwrtRqst;
+} XLogwrtRqst;
typedef struct XLogwrtResult
{
XLogRecPtr Write; /* last byte + 1 written out */
XLogRecPtr Flush; /* last byte + 1 flushed */
-} XLogwrtResult;
+} XLogwrtResult;
/*
* Shared state data for XLogInsert.
*/
typedef struct XLogCtlInsert
{
- XLogwrtResult LogwrtResult; /* a recent value of LogwrtResult */
- XLogRecPtr PrevRecord; /* start of previously-inserted record */
- uint16 curridx; /* current block index in cache */
- XLogPageHeader currpage; /* points to header of block in cache */
- char *currpos; /* current insertion point in cache */
- XLogRecPtr RedoRecPtr; /* current redo point for insertions */
+ XLogwrtResult LogwrtResult; /* a recent value of LogwrtResult */
+ XLogRecPtr PrevRecord; /* start of previously-inserted record */
+ uint16 curridx; /* current block index in cache */
+ XLogPageHeader currpage; /* points to header of block in cache */
+ char *currpos; /* current insertion point in cache */
+ XLogRecPtr RedoRecPtr; /* current redo point for insertions */
} XLogCtlInsert;
/*
@@ -213,8 +216,8 @@ typedef struct XLogCtlInsert
*/
typedef struct XLogCtlWrite
{
- XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
- uint16 curridx; /* cache index of next block to write */
+ XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
+ uint16 curridx; /* cache index of next block to write */
} XLogCtlWrite;
/*
@@ -223,30 +226,31 @@ typedef struct XLogCtlWrite
typedef struct XLogCtlData
{
/* Protected by insert_lck: */
- XLogCtlInsert Insert;
+ XLogCtlInsert Insert;
/* Protected by info_lck: */
- XLogwrtRqst LogwrtRqst;
- XLogwrtResult LogwrtResult;
+ XLogwrtRqst LogwrtRqst;
+ XLogwrtResult LogwrtResult;
/* Protected by logwrt_lck: */
- XLogCtlWrite Write;
+ XLogCtlWrite Write;
+
/*
* These values do not change after startup, although the pointed-to
- * pages and xlblocks values certainly do. Permission to read/write
+ * pages and xlblocks values certainly do. Permission to read/write
* the pages and xlblocks values depends on insert_lck and logwrt_lck.
*/
- char *pages; /* buffers for unwritten XLOG pages */
- XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
- uint32 XLogCacheByte; /* # bytes in xlog buffers */
- uint32 XLogCacheBlck; /* highest allocated xlog buffer index */
- StartUpID ThisStartUpID;
+ char *pages; /* buffers for unwritten XLOG pages */
+ XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
+ uint32 XLogCacheByte; /* # bytes in xlog buffers */
+ uint32 XLogCacheBlck; /* highest allocated xlog buffer index */
+ StartUpID ThisStartUpID;
/* This value is not protected by *any* spinlock... */
- XLogRecPtr RedoRecPtr; /* see SetRedoRecPtr/GetRedoRecPtr */
+ XLogRecPtr RedoRecPtr; /* see SetRedoRecPtr/GetRedoRecPtr */
- slock_t insert_lck; /* XLogInsert lock */
- slock_t info_lck; /* locks shared LogwrtRqst/LogwrtResult */
- slock_t logwrt_lck; /* XLogWrite/XLogFlush lock */
- slock_t chkp_lck; /* checkpoint lock */
+ slock_t insert_lck; /* XLogInsert lock */
+ slock_t info_lck; /* locks shared LogwrtRqst/LogwrtResult */
+ slock_t logwrt_lck; /* XLogWrite/XLogFlush lock */
+ slock_t chkp_lck; /* checkpoint lock */
} XLogCtlData;
static XLogCtlData *XLogCtl = NULL;
@@ -271,7 +275,7 @@ static ControlFileData *ControlFile = NULL;
( \
(recptr).xlogid = XLogCtl->xlblocks[curridx].xlogid, \
(recptr).xrecoff = \
- XLogCtl->xlblocks[curridx].xrecoff - INSERT_FREESPACE(Insert) \
+ XLogCtl->xlblocks[curridx].xrecoff - INSERT_FREESPACE(Insert) \
)
@@ -303,7 +307,7 @@ static ControlFileData *ControlFile = NULL;
* Compute ID and segment from an XLogRecPtr.
*
* For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
- * a boundary byte is taken to be in the previous segment. This is suitable
+ * a boundary byte is taken to be in the previous segment. This is suitable
* for deciding which segment to write given a pointer to a record end,
* for example.
*/
@@ -354,8 +358,8 @@ static ControlFileData *ControlFile = NULL;
/* File path names */
-static char XLogDir[MAXPGPATH];
-static char ControlFilePath[MAXPGPATH];
+static char XLogDir[MAXPGPATH];
+static char ControlFilePath[MAXPGPATH];
/*
* Private, possibly out-of-date copy of shared LogwrtResult.
@@ -384,8 +388,10 @@ static int readFile = -1;
static uint32 readId = 0;
static uint32 readSeg = 0;
static uint32 readOff = 0;
+
/* Buffer for currently read page (BLCKSZ bytes) */
static char *readBuf = NULL;
+
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr;
static XLogRecPtr EndRecPtr;
@@ -397,16 +403,16 @@ static bool InRedo = false;
static bool AdvanceXLInsertBuffer(void);
static void XLogWrite(XLogwrtRqst WriteRqst);
-static int XLogFileInit(uint32 log, uint32 seg,
- bool *use_existent, bool use_lock);
+static int XLogFileInit(uint32 log, uint32 seg,
+ bool *use_existent, bool use_lock);
static int XLogFileOpen(uint32 log, uint32 seg, bool econt);
static void PreallocXlogFiles(XLogRecPtr endptr);
static void MoveOfflineLogs(uint32 log, uint32 seg);
static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer);
static bool ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI);
static XLogRecord *ReadCheckpointRecord(XLogRecPtr RecPtr,
- const char *whichChkpt,
- char *buffer);
+ const char *whichChkpt,
+ char *buffer);
static void WriteControlFile(void);
static void ReadControlFile(void);
static char *str_time(time_t tnow);
@@ -432,44 +438,44 @@ static void issue_xlog_fsync(void);
XLogRecPtr
XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
{
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecord *record;
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ XLogRecord *record;
XLogContRecord *contrecord;
- XLogRecPtr RecPtr;
- XLogRecPtr WriteRqst;
- uint32 freespace;
- uint16 curridx;
- XLogRecData *rdt;
- Buffer dtbuf[XLR_MAX_BKP_BLOCKS];
- bool dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
- BkpBlock dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
- XLogRecPtr dtbuf_lsn[XLR_MAX_BKP_BLOCKS];
- XLogRecData dtbuf_rdt[2 * XLR_MAX_BKP_BLOCKS];
- crc64 rdata_crc;
- uint32 len,
- write_len;
- unsigned i;
- bool do_logwrt;
- bool updrqst;
- bool no_tran = (rmid == RM_XLOG_ID) ? true : false;
+ XLogRecPtr RecPtr;
+ XLogRecPtr WriteRqst;
+ uint32 freespace;
+ uint16 curridx;
+ XLogRecData *rdt;
+ Buffer dtbuf[XLR_MAX_BKP_BLOCKS];
+ bool dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
+ BkpBlock dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
+ XLogRecPtr dtbuf_lsn[XLR_MAX_BKP_BLOCKS];
+ XLogRecData dtbuf_rdt[2 * XLR_MAX_BKP_BLOCKS];
+ crc64 rdata_crc;
+ uint32 len,
+ write_len;
+ unsigned i;
+ bool do_logwrt;
+ bool updrqst;
+ bool no_tran = (rmid == RM_XLOG_ID) ? true : false;
if (info & XLR_INFO_MASK)
{
if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
- elog(STOP, "XLogInsert: invalid info mask %02X",
+ elog(STOP, "XLogInsert: invalid info mask %02X",
(info & XLR_INFO_MASK));
no_tran = true;
info &= ~XLR_INFO_MASK;
}
/*
- * In bootstrap mode, we don't actually log anything but XLOG resources;
- * return a phony record pointer.
+ * In bootstrap mode, we don't actually log anything but XLOG
+ * resources; return a phony record pointer.
*/
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
RecPtr.xlogid = 0;
- RecPtr.xrecoff = SizeOfXLogPHD; /* start of 1st checkpoint record */
+ RecPtr.xrecoff = SizeOfXLogPHD; /* start of 1st checkpoint record */
return (RecPtr);
}
@@ -479,16 +485,17 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* header isn't added into the CRC yet since we don't know the final
* length or info bits quite yet.
*
- * We may have to loop back to here if a race condition is detected below.
- * We could prevent the race by doing all this work while holding the
- * insert spinlock, but it seems better to avoid doing CRC calculations
- * while holding the lock. This means we have to be careful about
- * modifying the rdata list until we know we aren't going to loop back
- * again. The only change we allow ourselves to make earlier is to set
- * rdt->data = NULL in list items we have decided we will have to back
- * up the whole buffer for. This is OK because we will certainly decide
- * the same thing again for those items if we do it over; doing it here
- * saves an extra pass over the list later.
+ * We may have to loop back to here if a race condition is detected
+ * below. We could prevent the race by doing all this work while
+ * holding the insert spinlock, but it seems better to avoid doing CRC
+ * calculations while holding the lock. This means we have to be
+ * careful about modifying the rdata list until we know we aren't
+ * going to loop back again. The only change we allow ourselves to
+ * make earlier is to set rdt->data = NULL in list items we have
+ * decided we will have to back up the whole buffer for. This is OK
+ * because we will certainly decide the same thing again for those
+ * items if we do it over; doing it here saves an extra pass over the
+ * list later.
*/
begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -499,7 +506,7 @@ begin:;
INIT_CRC64(rdata_crc);
len = 0;
- for (rdt = rdata; ; )
+ for (rdt = rdata;;)
{
if (rdt->buffer == InvalidBuffer)
{
@@ -528,13 +535,14 @@ begin:;
{
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
+
/*
* XXX We assume page LSN is first data on page
*/
- dtbuf_lsn[i] = *((XLogRecPtr*)BufferGetBlock(rdt->buffer));
+ dtbuf_lsn[i] = *((XLogRecPtr *) BufferGetBlock(rdt->buffer));
if (XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
- crc64 dtcrc;
+ crc64 dtcrc;
dtbuf_bkp[i] = true;
rdt->data = NULL;
@@ -545,7 +553,7 @@ begin:;
dtbuf_xlg[i].node = BufferGetFileNode(dtbuf[i]);
dtbuf_xlg[i].block = BufferGetBlockNumber(dtbuf[i]);
COMP_CRC64(dtcrc,
- (char*) &(dtbuf_xlg[i]) + sizeof(crc64),
+ (char *) &(dtbuf_xlg[i]) + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(dtcrc);
dtbuf_xlg[i].crc = dtcrc;
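The test just above backs up a whole data page into the WAL record whenever the page's LSN is not newer than the latest checkpoint's redo pointer, so a partially written page can still be reconstructed from the log. A stand-in comparison with simplified types:

#include <stdbool.h>
#include <stdio.h>

typedef struct DemoLSN
{
	unsigned int xlogid;
	unsigned int xrecoff;
} DemoLSN;

static bool
lsn_le(DemoLSN a, DemoLSN b)
{
	return (a.xlogid < b.xlogid) ||
		(a.xlogid == b.xlogid && a.xrecoff <= b.xrecoff);
}

int
main(void)
{
	DemoLSN		redo = {0, 5000};		/* redo pointer of the last checkpoint */
	DemoLSN		page_lsn = {0, 4096};	/* LSN stored at the start of the page */

	if (lsn_le(page_lsn, redo))
		printf("back up the whole page in this WAL record\n");
	else
		printf("page already backed up since the checkpoint; log the change only\n");
	return 0;
}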
@@ -571,7 +579,7 @@ begin:;
/*
* NOTE: the test for len == 0 here is somewhat fishy, since in theory
* all of the rmgr data might have been suppressed in favor of backup
- * blocks. Currently, all callers of XLogInsert provide at least some
+ * blocks. Currently, all callers of XLogInsert provide at least some
* not-in-a-buffer data and so len == 0 should never happen, but that
* may not be true forever. If you need to remove the len == 0 check,
* also remove the check for xl_len == 0 in ReadRecord, below.
@@ -589,16 +597,16 @@ begin:;
/* try to update LogwrtResult while waiting for insert lock */
if (!TAS(&(XLogCtl->info_lck)))
{
- XLogwrtRqst LogwrtRqst;
+ XLogwrtRqst LogwrtRqst;
LogwrtRqst = XLogCtl->LogwrtRqst;
LogwrtResult = XLogCtl->LogwrtResult;
S_UNLOCK(&(XLogCtl->info_lck));
/*
- * If cache is half filled then try to acquire logwrt lock
- * and do LOGWRT work, but only once per XLogInsert call.
- * Ignore any fractional blocks in performing this check.
+ * If cache is half filled then try to acquire logwrt lock and
+ * do LOGWRT work, but only once per XLogInsert call. Ignore
+ * any fractional blocks in performing this check.
*/
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (do_logwrt &&
@@ -625,8 +633,9 @@ begin:;
/*
* Check to see if my RedoRecPtr is out of date. If so, may have to
- * go back and recompute everything. This can only happen just after a
- * checkpoint, so it's better to be slow in this case and fast otherwise.
+ * go back and recompute everything. This can only happen just after
+ * a checkpoint, so it's better to be slow in this case and fast
+ * otherwise.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@@ -640,9 +649,10 @@ begin:;
if (dtbuf_bkp[i] == false &&
XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
+
/*
- * Oops, this buffer now needs to be backed up, but we didn't
- * think so above. Start over.
+ * Oops, this buffer now needs to be backed up, but we
+ * didn't think so above. Start over.
*/
S_UNLOCK(&(XLogCtl->insert_lck));
END_CRIT_SECTION();
@@ -658,8 +668,9 @@ begin:;
* this loop, write_len includes the backup block data.
*
* Also set the appropriate info bits to show which buffers were backed
- * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
- * buffer value (ignoring InvalidBuffer) appearing in the rdata list.
+ * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
+ * distinct buffer value (ignoring InvalidBuffer) appearing in the
+ * rdata list.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -671,13 +682,13 @@ begin:;
rdt->next = &(dtbuf_rdt[2 * i]);
- dtbuf_rdt[2 * i].data = (char*) &(dtbuf_xlg[i]);
+ dtbuf_rdt[2 * i].data = (char *) &(dtbuf_xlg[i]);
dtbuf_rdt[2 * i].len = sizeof(BkpBlock);
write_len += sizeof(BkpBlock);
rdt = dtbuf_rdt[2 * i].next = &(dtbuf_rdt[2 * i + 1]);
- dtbuf_rdt[2 * i + 1].data = (char*) BufferGetBlock(dtbuf[i]);
+ dtbuf_rdt[2 * i + 1].data = (char *) BufferGetBlock(dtbuf[i]);
dtbuf_rdt[2 * i + 1].len = BLCKSZ;
write_len += BLCKSZ;
dtbuf_rdt[2 * i + 1].next = NULL;
@@ -711,7 +722,7 @@ begin:;
record->xl_rmid = rmid;
/* Now we can finish computing the main CRC */
- COMP_CRC64(rdata_crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(rdata_crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(rdata_crc);
record->xl_crc = rdata_crc;
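The CRC calls above always start sizeof(crc64) bytes into the record so the stored checksum does not feed into itself. A small stand-in shows the pattern with a toy additive checksum; the real code uses a 64-bit CRC, not this sum.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct DemoRecord
{
	uint64_t	crc;			/* must be first, and excluded from the sum */
	uint32_t	len;
	uint8_t		info;
} DemoRecord;

static uint64_t
toy_checksum(const void *data, size_t n)
{
	const uint8_t *p = (const uint8_t *) data;
	uint64_t	sum = 0;

	while (n-- > 0)
		sum = sum * 131 + *p++;
	return sum;
}

int
main(void)
{
	DemoRecord	rec;

	memset(&rec, 0, sizeof(rec));
	rec.len = 42;
	rec.info = 7;
	/* checksum everything after the crc field, mirroring the xlog.c pattern */
	rec.crc = toy_checksum((const char *) &rec + sizeof(rec.crc),
						   sizeof(rec) - sizeof(rec.crc));
	printf("stored crc: %llu\n", (unsigned long long) rec.crc);
	return 0;
}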
@@ -729,7 +740,7 @@ begin:;
if (XLOG_DEBUG)
{
- char buf[8192];
+ char buf[8192];
sprintf(buf, "INSERT @ %u/%u: ", RecPtr.xlogid, RecPtr.xrecoff);
xlog_outrec(buf, record);
@@ -791,18 +802,19 @@ begin:;
/* Ensure next record will be properly aligned */
Insert->currpos = (char *) Insert->currpage +
- MAXALIGN(Insert->currpos - (char *) Insert->currpage);
+ MAXALIGN(Insert->currpos - (char *) Insert->currpage);
freespace = INSERT_FREESPACE(Insert);
/*
- * The recptr I return is the beginning of the *next* record.
- * This will be stored as LSN for changed data pages...
+ * The recptr I return is the beginning of the *next* record. This
+ * will be stored as LSN for changed data pages...
*/
INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord)
- updrqst = true; /* curridx is filled and available for writing out */
+ updrqst = true; /* curridx is filled and available for
+ * writing out */
else
curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx];
@@ -850,9 +862,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult;
/*
- * Get ending-offset of the buffer page we need to replace (this may be
- * zero if the buffer hasn't been used yet). Fall through if it's already
- * written out.
+ * Get ending-offset of the buffer page we need to replace (this may
+ * be zero if the buffer hasn't been used yet). Fall through if it's
+ * already written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@@ -870,7 +882,7 @@ AdvanceXLInsertBuffer(void)
{
if (XLByteLT(XLogCtl->LogwrtRqst.Write, FinishedPageRqstPtr))
XLogCtl->LogwrtRqst.Write = FinishedPageRqstPtr;
- update_needed = false; /* Did the shared-request update */
+ update_needed = false; /* Did the shared-request update */
LogwrtResult = XLogCtl->LogwrtResult;
S_UNLOCK(&(XLogCtl->info_lck));
@@ -883,8 +895,8 @@ AdvanceXLInsertBuffer(void)
}
/*
- * LogwrtResult lock is busy or we know the page is still dirty.
- * Try to acquire logwrt lock and write full blocks.
+ * LogwrtResult lock is busy or we know the page is still
+ * dirty. Try to acquire logwrt lock and write full blocks.
*/
if (!TAS(&(XLogCtl->logwrt_lck)))
{
@@ -896,9 +908,10 @@ AdvanceXLInsertBuffer(void)
Insert->LogwrtResult = LogwrtResult;
break;
}
+
/*
- * Have to write buffers while holding insert lock.
- * This is not good, so only write as much as we absolutely
+ * Have to write buffers while holding insert lock. This
+ * is not good, so only write as much as we absolutely
* must.
*/
WriteRqst.Write = OldPageRqstPtr;
@@ -933,14 +946,15 @@ AdvanceXLInsertBuffer(void)
}
Insert->curridx = nextidx;
Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
- Insert->currpos = ((char*) Insert->currpage) + SizeOfXLogPHD;
+ Insert->currpos = ((char *) Insert->currpage) + SizeOfXLogPHD;
+
/*
- * Be sure to re-zero the buffer so that bytes beyond what we've written
- * will look like zeroes and not valid XLOG records...
+ * Be sure to re-zero the buffer so that bytes beyond what we've
+ * written will look like zeroes and not valid XLOG records...
*/
- MemSet((char*) Insert->currpage, 0, BLCKSZ);
+ MemSet((char *) Insert->currpage, 0, BLCKSZ);
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
- /* Insert->currpage->xlp_info = 0; */ /* done by memset */
+ /* Insert->currpage->xlp_info = 0; *//* done by memset */
Insert->currpage->xlp_sui = ThisStartUpID;
return update_needed;
@@ -959,11 +973,15 @@ XLogWrite(XLogwrtRqst WriteRqst)
bool ispartialpage;
bool use_existent;
- /* Update local LogwrtResult (caller probably did this already, but...) */
+ /*
+ * Update local LogwrtResult (caller probably did this already,
+ * but...)
+ */
LogwrtResult = Write->LogwrtResult;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
+
/*
* Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the
@@ -979,6 +997,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
+
/*
* Switch to new logfile segment.
*/
@@ -1011,11 +1030,12 @@ XLogWrite(XLogwrtRqst WriteRqst)
ControlFile->logSeg = openLogSeg + 1;
ControlFile->time = time(NULL);
UpdateControlFile();
+
/*
- * Signal postmaster to start a checkpoint if it's been too
- * long since the last one. (We look at local copy of
- * RedoRecPtr which might be a little out of date, but should
- * be close enough for this purpose.)
+ * Signal postmaster to start a checkpoint if it's been
+ * too long since the last one. (We look at local copy of
+ * RedoRecPtr which might be a little out of date, but
+ * should be close enough for this purpose.)
*/
if (IsUnderPostmaster &&
(openLogId != RedoRecPtr.xlogid ||
@@ -1056,14 +1076,14 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* If we just wrote the whole last page of a logfile segment,
* fsync the segment immediately. This avoids having to go back
- * and re-open prior segments when an fsync request comes along later.
- * Doing it here ensures that one and only one backend will perform
- * this fsync.
+ * and re-open prior segments when an fsync request comes along
+ * later. Doing it here ensures that one and only one backend will
+ * perform this fsync.
*/
if (openLogOff >= XLogSegSize && !ispartialpage)
{
issue_xlog_fsync();
- LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
+ LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
}
if (ispartialpage)
@@ -1081,15 +1101,16 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
+
/*
- * Could get here without iterating above loop, in which case
- * we might have no open file or the wrong one. However, we do
- * not need to fsync more than one file.
+ * Could get here without iterating above loop, in which case we
+ * might have no open file or the wrong one. However, we do not
+ * need to fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN)
{
if (openLogFile >= 0 &&
- !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
+ !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
if (close(openLogFile) != 0)
elog(STOP, "close(logfile %u seg %u) failed: %m",
@@ -1110,8 +1131,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* Update shared-memory status
*
- * We make sure that the shared 'request' values do not fall behind
- * the 'result' values. This is not absolutely essential, but it saves
+ * We make sure that the shared 'request' values do not fall behind the
+ * 'result' values. This is not absolutely essential, but it saves
* some code in a couple of places.
*/
S_LOCK(&(XLogCtl->info_lck));
@@ -1163,8 +1184,9 @@ XLogFlush(XLogRecPtr record)
* Since fsync is usually a horribly expensive operation, we try to
* piggyback as much data as we can on each fsync: if we see any more
* data entered into the xlog buffer, we'll write and fsync that too,
- * so that the final value of LogwrtResult.Flush is as large as possible.
- * This gives us some chance of avoiding another fsync immediately after.
+ * so that the final value of LogwrtResult.Flush is as large as
+ * possible. This gives us some chance of avoiding another fsync
+ * immediately after.
*/
/* initialize to given target; may increase below */
@@ -1192,9 +1214,7 @@ XLogFlush(XLogRecPtr record)
uint32 freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord) /* buffer is full */
- {
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
- }
else
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
@@ -1232,7 +1252,7 @@ XLogFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock spinlock while moving file into
@@ -1257,7 +1277,8 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFileName(path, log, seg);
/*
- * Try to use existent file (checkpoint maker may have created it already)
+ * Try to use existent file (checkpoint maker may have created it
+ * already)
*/
if (*use_existent)
{
@@ -1270,14 +1291,14 @@ XLogFileInit(uint32 log, uint32 seg,
log, seg);
}
else
- return(fd);
+ return (fd);
}
/*
- * Initialize an empty (all zeroes) segment. NOTE: it is possible that
- * another process is doing the same thing. If so, we will end up
- * pre-creating an extra log segment. That seems OK, and better than
- * holding the spinlock throughout this lengthy process.
+ * Initialize an empty (all zeroes) segment. NOTE: it is possible
+ * that another process is doing the same thing. If so, we will end
+ * up pre-creating an extra log segment. That seems OK, and better
+ * than holding the spinlock throughout this lengthy process.
*/
snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d",
XLogDir, SEP_CHAR, (int) getpid());
@@ -1291,10 +1312,10 @@ XLogFileInit(uint32 log, uint32 seg,
elog(STOP, "InitCreate(%s) failed: %m", tmppath);
/*
- * Zero-fill the file. We have to do this the hard way to ensure that
+ * Zero-fill the file. We have to do this the hard way to ensure that
* all the file space has really been allocated --- on platforms that
* allow "holes" in files, just seeking to the end doesn't allocate
- * intermediate space. This way, we know that we have all the space
+ * intermediate space. This way, we know that we have all the space
* and (after the fsync below) that all the indirect blocks are down
* on disk. Therefore, fdatasync(2) or O_DSYNC will be sufficient to
* sync future writes to the log file.
@@ -1304,9 +1325,12 @@ XLogFileInit(uint32 log, uint32 seg,
{
if ((int) write(fd, zbuffer, sizeof(zbuffer)) != (int) sizeof(zbuffer))
{
- int save_errno = errno;
+ int save_errno = errno;
- /* If we fail to make the file, delete it to release disk space */
+ /*
+ * If we fail to make the file, delete it to release disk
+ * space
+ */
unlink(tmppath);
errno = save_errno;
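A minimal sketch of the zero-fill loop described above: writing real zero blocks, rather than seeking to the end, forces the filesystem to allocate every block now, so later fdatasync()/O_DSYNC writes need not touch metadata. The 16 MB size and file name are illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	char		zbuffer[8192];
	size_t		target = 16 * 1024 * 1024;
	size_t		written = 0;
	int			fd;

	fd = open("xlogtemp.demo", O_RDWR | O_CREAT | O_EXCL, 0600);
	if (fd < 0)
	{
		perror("open");
		return 1;
	}
	memset(zbuffer, 0, sizeof(zbuffer));
	while (written < target)
	{
		if (write(fd, zbuffer, sizeof(zbuffer)) != (ssize_t) sizeof(zbuffer))
		{
			perror("write");	/* on failure, the real code unlinks the file */
			close(fd);
			return 1;
		}
		written += sizeof(zbuffer);
	}
	if (fsync(fd) != 0)			/* make sure the blocks are really on disk */
		perror("fsync");
	close(fd);
	return 0;
}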
@@ -1336,10 +1360,8 @@ XLogFileInit(uint32 log, uint32 seg,
targseg = seg;
strcpy(targpath, path);
- if (! *use_existent)
- {
+ if (!*use_existent)
unlink(targpath);
- }
else
{
while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY,
@@ -1451,10 +1473,10 @@ PreallocXlogFiles(XLogRecPtr endptr)
static void
MoveOfflineLogs(uint32 log, uint32 seg)
{
- DIR *xldir;
- struct dirent *xlde;
- char lastoff[32];
- char path[MAXPGPATH];
+ DIR *xldir;
+ struct dirent *xlde;
+ char lastoff[32];
+ char path[MAXPGPATH];
Assert(XLOG_archive_dir[0] == 0); /* ! implemented yet */
@@ -1471,9 +1493,9 @@ MoveOfflineLogs(uint32 log, uint32 seg)
strspn(xlde->d_name, "0123456789ABCDEF") == 16 &&
strcmp(xlde->d_name, lastoff) <= 0)
{
- elog(LOG, "MoveOfflineLogs: %s %s", (XLOG_archive_dir[0]) ?
+ elog(LOG, "MoveOfflineLogs: %s %s", (XLOG_archive_dir[0]) ?
"archive" : "remove", xlde->d_name);
- sprintf(path, "%s%c%s", XLogDir, SEP_CHAR, xlde->d_name);
+ sprintf(path, "%s%c%s", XLogDir, SEP_CHAR, xlde->d_name);
if (XLOG_archive_dir[0] == 0)
unlink(path);
}
@@ -1499,13 +1521,13 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
char *blk;
int i;
- blk = (char*)XLogRecGetData(record) + record->xl_len;
+ blk = (char *) XLogRecGetData(record) + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
continue;
- memcpy((char*)&bkpb, blk, sizeof(BkpBlock));
+ memcpy((char *) &bkpb, blk, sizeof(BkpBlock));
blk += sizeof(BkpBlock);
reln = XLogOpenRelation(true, record->xl_rmid, bkpb.node);
@@ -1516,7 +1538,7 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
- memcpy((char*)page, blk, BLCKSZ);
+ memcpy((char *) page, blk, BLCKSZ);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@@ -1546,7 +1568,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
/* Check CRC of rmgr data and record header */
INIT_CRC64(crc);
COMP_CRC64(crc, XLogRecGetData(record), len);
- COMP_CRC64(crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
@@ -1554,11 +1576,11 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
{
elog(emode, "ReadRecord: bad rmgr data CRC in record at %u/%u",
recptr.xlogid, recptr.xrecoff);
- return(false);
+ return (false);
}
/* Check CRCs of backup blocks, if any */
- blk = (char*)XLogRecGetData(record) + len;
+ blk = (char *) XLogRecGetData(record) + len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
@@ -1569,18 +1591,19 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
COMP_CRC64(crc, blk + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(crc);
- memcpy((char*)&cbuf, blk, sizeof(crc64)); /* don't assume alignment */
+ memcpy((char *) &cbuf, blk, sizeof(crc64)); /* don't assume
+ * alignment */
if (!EQ_CRC64(cbuf, crc))
{
elog(emode, "ReadRecord: bad bkp block %d CRC in record at %u/%u",
i + 1, recptr.xlogid, recptr.xrecoff);
- return(false);
+ return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
}
- return(true);
+ return (true);
}
/*
@@ -1609,13 +1632,14 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL)
{
+
/*
* First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two
- * reasons: (1) no need to waste the storage in most instantiations
- * of the backend; (2) a static char array isn't guaranteed to
- * have any particular alignment, whereas malloc() will provide
- * MAXALIGN'd storage.
+ * reasons: (1) no need to waste the storage in most
+ * instantiations of the backend; (2) a static char array isn't
+ * guaranteed to have any particular alignment, whereas malloc()
+ * will provide MAXALIGN'd storage.
*/
readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL);
@@ -1656,7 +1680,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
readFile = XLogFileOpen(readId, readSeg, (emode == LOG));
if (readFile < 0)
goto next_record_is_invalid;
- readOff = (uint32) (-1); /* force read to occur below */
+ readOff = (uint32) (-1);/* force read to occur below */
}
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@@ -1688,9 +1712,10 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:;
+
/*
- * Currently, xl_len == 0 must be bad data, but that might not be
- * true forever. See note in XLogInsert.
+ * Currently, xl_len == 0 must be bad data, but that might not be true
+ * forever. See note in XLogInsert.
*/
if (record->xl_len == 0)
{
@@ -1698,8 +1723,10 @@ got_record:;
RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
+
/*
- * Compute total length of record including any appended backup blocks.
+ * Compute total length of record including any appended backup
+ * blocks.
*/
total_len = SizeOfXLogRecord + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -1708,6 +1735,7 @@ got_record:;
continue;
total_len += sizeof(BkpBlock) + BLCKSZ;
}
+
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@@ -1731,7 +1759,7 @@ got_record:;
{
/* Need to reassemble record */
XLogContRecord *contrecord;
- uint32 gotlen = len;
+ uint32 gotlen = len;
memcpy(buffer, record, len);
record = (XLogRecord *) buffer;
@@ -1764,7 +1792,7 @@ got_record:;
goto next_record_is_invalid;
}
contrecord = (XLogContRecord *) ((char *) readBuf + SizeOfXLogPHD);
- if (contrecord->xl_rem_len == 0 ||
+ if (contrecord->xl_rem_len == 0 ||
total_len != (contrecord->xl_rem_len + gotlen))
{
elog(emode, "ReadRecord: invalid cont-record len %u in logfile %u seg %u off %u",
@@ -1774,7 +1802,7 @@ got_record:;
len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
if (contrecord->xl_rem_len > len)
{
- memcpy(buffer, (char *)contrecord + SizeOfXLogContRecord, len);
+ memcpy(buffer, (char *) contrecord + SizeOfXLogContRecord, len);
gotlen += len;
buffer += len;
continue;
@@ -1788,12 +1816,12 @@ got_record:;
if (BLCKSZ - SizeOfXLogRecord >= SizeOfXLogPHD +
SizeOfXLogContRecord + MAXALIGN(contrecord->xl_rem_len))
{
- nextRecord = (XLogRecord *) ((char *) contrecord +
+ nextRecord = (XLogRecord *) ((char *) contrecord +
SizeOfXLogContRecord + MAXALIGN(contrecord->xl_rem_len));
}
EndRecPtr.xlogid = readId;
EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff +
- SizeOfXLogPHD + SizeOfXLogContRecord +
+ SizeOfXLogPHD + SizeOfXLogContRecord +
MAXALIGN(contrecord->xl_rem_len);
ReadRecPtr = *RecPtr;
return record;
@@ -1822,7 +1850,7 @@ next_record_is_invalid:;
* Check whether the xlog header of a page just read in looks valid.
*
* This is just a convenience subroutine to avoid duplicated code in
- * ReadRecord. It's not intended for use from anywhere else.
+ * ReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
@@ -1839,14 +1867,16 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_info, readId, readSeg, readOff);
return false;
}
+
/*
- * We disbelieve a SUI less than the previous page's SUI, or more
- * than a few counts greater. In theory as many as 512 shutdown
- * checkpoint records could appear on a 32K-sized xlog page, so
- * that's the most differential there could legitimately be.
+ * We disbelieve a SUI less than the previous page's SUI, or more than
+ * a few counts greater. In theory as many as 512 shutdown checkpoint
+ * records could appear on a 32K-sized xlog page, so that's the most
+ * differential there could legitimately be.
*
* Note this check can only be applied when we are reading the next page
- * in sequence, so ReadRecord passes a flag indicating whether to check.
+ * in sequence, so ReadRecord passes a flag indicating whether to
+ * check.
*/
if (checkSUI)
{
@@ -1866,7 +1896,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -1890,9 +1920,11 @@ static void
WriteControlFile(void)
{
int fd;
- char buffer[BLCKSZ]; /* need not be aligned */
+ char buffer[BLCKSZ]; /* need not be aligned */
+
#ifdef USE_LOCALE
char *localeptr;
+
#endif
/*
@@ -1911,16 +1943,17 @@ WriteControlFile(void)
if (!localeptr)
elog(STOP, "Invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
+
/*
* Issue warning notice if initdb'ing in a locale that will not permit
- * LIKE index optimization. This is not a clean place to do it, but
- * I don't see a better place either...
+ * LIKE index optimization. This is not a clean place to do it, but I
+ * don't see a better place either...
*/
if (!locale_is_like_safe())
elog(NOTICE, "Initializing database with %s collation order."
"\n\tThis locale setting will prevent use of index optimization for"
"\n\tLIKE and regexp searches. If you are concerned about speed of"
- "\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
+ "\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
"\n\tre-initdb. For more information see the Administrator's Guide.",
ControlFile->lc_collate);
#else
@@ -1930,17 +1963,17 @@ WriteControlFile(void)
/* Contents are protected with a CRC */
INIT_CRC64(ControlFile->crc);
- COMP_CRC64(ControlFile->crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(ControlFile->crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
/*
- * We write out BLCKSZ bytes into pg_control, zero-padding the
- * excess over sizeof(ControlFileData). This reduces the odds
- * of premature-EOF errors when reading pg_control. We'll still
- * fail when we check the contents of the file, but hopefully with
- * a more specific error than "couldn't read pg_control".
+ * We write out BLCKSZ bytes into pg_control, zero-padding the excess
+ * over sizeof(ControlFileData). This reduces the odds of
+ * premature-EOF errors when reading pg_control. We'll still fail
+ * when we check the contents of the file, but hopefully with a more
+ * specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c");
@@ -1993,8 +2026,8 @@ ReadControlFile(void)
/* Now check the CRC. */
INIT_CRC64(crc);
- COMP_CRC64(crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(crc);
@@ -2002,14 +2035,15 @@ ReadControlFile(void)
elog(STOP, "Invalid CRC in control file");
/*
- * Do compatibility checking immediately. We do this here for 2 reasons:
+ * Do compatibility checking immediately. We do this here for 2
+ * reasons:
*
- * (1) if the database isn't compatible with the backend executable,
- * we want to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we
+ * want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file
- * for themselves. (These locale settings are considered critical
+ * for themselves. (These locale settings are considered critical
* compatibility items because they can affect sort order of indexes.)
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
@@ -2042,8 +2076,8 @@ UpdateControlFile(void)
int fd;
INIT_CRC64(ControlFile->crc);
- COMP_CRC64(ControlFile->crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(ControlFile->crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
@@ -2096,6 +2130,7 @@ XLOGShmemInit(void)
Assert(!found);
memset(XLogCtl, 0, sizeof(XLogCtlData));
+
/*
* Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
* a multiple of the alignment for same, so no extra alignment padding
@@ -2104,9 +2139,10 @@ XLOGShmemInit(void)
XLogCtl->xlblocks = (XLogRecPtr *)
(((char *) XLogCtl) + sizeof(XLogCtlData));
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
+
/*
- * Here, on the other hand, we must MAXALIGN to ensure the page buffers
- * have worst-case alignment.
+ * Here, on the other hand, we must MAXALIGN to ensure the page
+ * buffers have worst-case alignment.
*/
XLogCtl->pages =
((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) +
@@ -2114,8 +2150,8 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers);
/*
- * Do basic initialization of XLogCtl shared data.
- * (StartupXLOG will fill in additional info.)
+ * Do basic initialization of XLogCtl shared data. (StartupXLOG will
+ * fill in additional info.)
*/
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
@@ -2145,7 +2181,7 @@ BootStrapXLOG(void)
char *buffer;
XLogPageHeader page;
XLogRecord *record;
- bool use_existent;
+ bool use_existent;
crc64 crc;
/* Use malloc() to ensure buffer is MAXALIGNED */
@@ -2180,7 +2216,7 @@ BootStrapXLOG(void)
INIT_CRC64(crc);
COMP_CRC64(crc, &checkPoint, sizeof(checkPoint));
- COMP_CRC64(crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
record->xl_crc = crc;
@@ -2246,8 +2282,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we
- * need not do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need
+ * not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -2297,9 +2333,7 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
- {
elog(STOP, "Unable to locate a valid CheckPoint record");
- }
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2320,7 +2354,7 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
ThisStartUpID = checkPoint.ThisStartUpID;
- RedoRecPtr = XLogCtl->Insert.RedoRecPtr =
+ RedoRecPtr = XLogCtl->Insert.RedoRecPtr =
XLogCtl->RedoRecPtr = checkPoint.redo;
if (XLByteLT(RecPtr, checkPoint.redo))
@@ -2328,7 +2362,7 @@ StartupXLOG(void)
if (checkPoint.undo.xrecoff == 0)
checkPoint.undo = RecPtr;
- if (XLByteLT(checkPoint.undo, RecPtr) ||
+ if (XLByteLT(checkPoint.undo, RecPtr) ||
XLByteLT(checkPoint.redo, RecPtr))
{
if (wasShutdown)
@@ -2336,9 +2370,7 @@ StartupXLOG(void)
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
- {
InRecovery = true;
- }
/* REDO */
if (InRecovery)
@@ -2355,7 +2387,8 @@ StartupXLOG(void)
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), STOP, buffer);
- else /* read past CheckPoint record */
+ else
+/* read past CheckPoint record */
record = ReadRecord(NULL, LOG, buffer);
if (record != NULL)
@@ -2369,15 +2402,15 @@ StartupXLOG(void)
ShmemVariableCache->nextXid = record->xl_xid + 1;
if (XLOG_DEBUG)
{
- char buf[8192];
+ char buf[8192];
- sprintf(buf, "REDO @ %u/%u; LSN %u/%u: ",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
- EndRecPtr.xlogid, EndRecPtr.xrecoff);
+ sprintf(buf, "REDO @ %u/%u; LSN %u/%u: ",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
+ EndRecPtr.xlogid, EndRecPtr.xrecoff);
xlog_outrec(buf, record);
strcat(buf, " - ");
- RmgrTable[record->xl_rmid].rm_desc(buf,
- record->xl_info, XLogRecGetData(record));
+ RmgrTable[record->xl_rmid].rm_desc(buf,
+ record->xl_info, XLogRecGetData(record));
fprintf(stderr, "%s\n", buf);
}
@@ -2411,8 +2444,11 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
- /* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in, which is what we want.
+
+ /*
+ * Tricky point here: readBuf contains the *last* block that the
+ * LastRec record spans, not the one it starts in, which is what we
+ * want.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2458,6 +2494,7 @@ StartupXLOG(void)
if (InRecovery)
{
+
/*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
@@ -2554,7 +2591,7 @@ SetThisStartUpID(void)
/*
* CheckPoint process called by postmaster saves copy of new RedoRecPtr
- * in shmem (using SetRedoRecPtr). When checkpointer completes, postmaster
+ * in shmem (using SetRedoRecPtr). When checkpointer completes, postmaster
* calls GetRedoRecPtr to update its own copy of RedoRecPtr, so that
* subsequently-spawned backends will start out with a reasonably up-to-date
* local RedoRecPtr. Since these operations are not protected by any spinlock
@@ -2605,7 +2642,7 @@ CreateCheckPoint(bool shutdown)
CheckPoint checkPoint;
XLogRecPtr recptr;
XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecData rdata;
+ XLogRecData rdata;
uint32 freespace;
uint32 _logId;
uint32 _logSeg;
@@ -2613,7 +2650,7 @@ CreateCheckPoint(bool shutdown)
if (MyLastRecPtr.xrecoff != 0)
elog(ERROR, "CreateCheckPoint: cannot be called inside transaction block");
-
+
START_CRIT_SECTION();
/* Grab lock, using larger than normal sleep between tries (1 sec) */
@@ -2639,17 +2676,17 @@ CreateCheckPoint(bool shutdown)
/*
* If this isn't a shutdown, and we have not inserted any XLOG records
* since the start of the last checkpoint, skip the checkpoint. The
- * idea here is to avoid inserting duplicate checkpoints when the system
- * is idle. That wastes log space, and more importantly it exposes us to
- * possible loss of both current and previous checkpoint records if the
- * machine crashes just as we're writing the update. (Perhaps it'd make
- * even more sense to checkpoint only when the previous checkpoint record
- * is in a different xlog page?)
+ * idea here is to avoid inserting duplicate checkpoints when the
+ * system is idle. That wastes log space, and more importantly it
+ * exposes us to possible loss of both current and previous checkpoint
+ * records if the machine crashes just as we're writing the update.
+ * (Perhaps it'd make even more sense to checkpoint only when the
+ * previous checkpoint record is in a different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
- * the start of the last checkpoint: current insertion point must match
- * the end of the last checkpoint record, and its redo pointer must point
- * to itself.
+ * the start of the last checkpoint: current insertion point must
+ * match the end of the last checkpoint record, and its redo pointer
+ * must point to itself.
*/
if (!shutdown)
{
@@ -2677,7 +2714,7 @@ CreateCheckPoint(bool shutdown)
* NB: this is NOT necessarily where the checkpoint record itself will
* be, since other backends may insert more XLOG records while we're
* off doing the buffer flush work. Those XLOG records are logically
- * after the checkpoint, even though physically before it. Got that?
+ * after the checkpoint, even though physically before it. Got that?
*/
freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord)
@@ -2687,16 +2724,18 @@ CreateCheckPoint(bool shutdown)
freespace = BLCKSZ - SizeOfXLogPHD;
}
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
+
/*
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock.
*/
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
+
/*
- * Get UNDO record ptr - this is oldest of PROC->logRec values.
- * We do this while holding insert lock to ensure that we won't miss
- * any about-to-commit transactions (UNDO must include all xacts that
- * have commits after REDO point).
+ * Get UNDO record ptr - this is oldest of PROC->logRec values. We do
+ * this while holding insert lock to ensure that we won't miss any
+ * about-to-commit transactions (UNDO must include all xacts that have
+ * commits after REDO point).
*/
checkPoint.undo = GetUndoRecPtr();
@@ -2720,8 +2759,8 @@ CreateCheckPoint(bool shutdown)
SpinRelease(OidGenLockId);
/*
- * Having constructed the checkpoint record, ensure all shmem disk buffers
- * are flushed to disk.
+ * Having constructed the checkpoint record, ensure all shmem disk
+ * buffers are flushed to disk.
*/
FlushBufferPool();
@@ -2729,7 +2768,7 @@ CreateCheckPoint(bool shutdown)
* Now insert the checkpoint record into XLOG.
*/
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&checkPoint);
+ rdata.data = (char *) (&checkPoint);
rdata.len = sizeof(checkPoint);
rdata.next = NULL;
@@ -2748,11 +2787,11 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog concurrent activity while data base is shutting down");
/*
- * Remember location of prior checkpoint's earliest info.
- * Oldest item is redo or undo, whichever is older; but watch out
- * for case that undo = 0.
+ * Remember location of prior checkpoint's earliest info. Oldest item
+ * is redo or undo, whichever is older; but watch out for case that
+ * undo = 0.
*/
- if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
+ if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
XLByteLT(ControlFile->checkPointCopy.undo,
ControlFile->checkPointCopy.redo))
XLByteToSeg(ControlFile->checkPointCopy.undo, _logId, _logSeg);
@@ -2801,10 +2840,10 @@ CreateCheckPoint(bool shutdown)
void
XLogPutNextOid(Oid nextOid)
{
- XLogRecData rdata;
+ XLogRecData rdata;
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&nextOid);
+ rdata.data = (char *) (&nextOid);
rdata.len = sizeof(Oid);
rdata.next = NULL;
(void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata);
@@ -2816,11 +2855,11 @@ XLogPutNextOid(Oid nextOid)
void
xlog_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_NEXTOID)
{
- Oid nextOid;
+ Oid nextOid;
memcpy(&nextOid, XLogRecGetData(record), sizeof(Oid));
if (ShmemVariableCache->nextOid < nextOid)
@@ -2846,9 +2885,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
/* In an ONLINE checkpoint, treat the counters like NEXTOID */
if (ShmemVariableCache->nextXid < checkPoint.nextXid)
- {
ShmemVariableCache->nextXid = checkPoint.nextXid;
- }
if (ShmemVariableCache->nextOid < checkPoint.nextOid)
{
ShmemVariableCache->nextOid = checkPoint.nextOid;
@@ -2856,32 +2893,33 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
}
}
}
-
+
void
xlog_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-
+
void
-xlog_desc(char *buf, uint8 xl_info, char* rec)
+xlog_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE)
{
- CheckPoint *checkpoint = (CheckPoint*) rec;
+ CheckPoint *checkpoint = (CheckPoint *) rec;
+
sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; "
- "sui %u; xid %u; oid %u; %s",
- checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
- checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
- checkpoint->ThisStartUpID, checkpoint->nextXid,
- checkpoint->nextOid,
- (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
+ "sui %u; xid %u; oid %u; %s",
+ checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
+ checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
+ checkpoint->ThisStartUpID, checkpoint->nextXid,
+ checkpoint->nextOid,
+ (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
}
else if (info == XLOG_NEXTOID)
{
- Oid nextOid;
+ Oid nextOid;
memcpy(&nextOid, rec, sizeof(Oid));
sprintf(buf + strlen(buf), "nextOid: %u", nextOid);
@@ -2893,13 +2931,13 @@ xlog_desc(char *buf, uint8 xl_info, char* rec)
static void
xlog_outrec(char *buf, XLogRecord *record)
{
- int bkpb;
- int i;
+ int bkpb;
+ int i;
sprintf(buf + strlen(buf), "prev %u/%u; xprev %u/%u; xid %u",
- record->xl_prev.xlogid, record->xl_prev.xrecoff,
- record->xl_xact_prev.xlogid, record->xl_xact_prev.xrecoff,
- record->xl_xid);
+ record->xl_prev.xlogid, record->xl_prev.xrecoff,
+ record->xl_xact_prev.xlogid, record->xl_xact_prev.xrecoff,
+ record->xl_xid);
for (i = 0, bkpb = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
@@ -2912,7 +2950,7 @@ xlog_outrec(char *buf, XLogRecord *record)
sprintf(buf + strlen(buf), "; bkpb %d", bkpb);
sprintf(buf + strlen(buf), ": %s",
- RmgrTable[record->xl_rmid].rm_name);
+ RmgrTable[record->xl_rmid].rm_name);
}
@@ -2923,15 +2961,19 @@ xlog_outrec(char *buf, XLogRecord *record)
bool
check_xlog_sync_method(const char *method)
{
- if (strcasecmp(method, "fsync") == 0) return true;
+ if (strcasecmp(method, "fsync") == 0)
+ return true;
#ifdef HAVE_FDATASYNC
- if (strcasecmp(method, "fdatasync") == 0) return true;
+ if (strcasecmp(method, "fdatasync") == 0)
+ return true;
#endif
#ifdef OPEN_SYNC_FLAG
- if (strcasecmp(method, "open_sync") == 0) return true;
+ if (strcasecmp(method, "open_sync") == 0)
+ return true;
#endif
#ifdef OPEN_DATASYNC_FLAG
- if (strcasecmp(method, "open_datasync") == 0) return true;
+ if (strcasecmp(method, "open_datasync") == 0)
+ return true;
#endif
return false;
}
@@ -2939,8 +2981,8 @@ check_xlog_sync_method(const char *method)
void
assign_xlog_sync_method(const char *method)
{
- int new_sync_method;
- int new_sync_bit;
+ int new_sync_method;
+ int new_sync_bit;
if (strcasecmp(method, "fsync") == 0)
{
@@ -2978,11 +3020,12 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{
+
/*
- * To ensure that no blocks escape unsynced, force an fsync on
- * the currently open log segment (if any). Also, if the open
- * flag is changing, close the log file so it will be reopened
- * (with new flag bit) at next use.
+ * To ensure that no blocks escape unsynced, force an fsync on the
+ * currently open log segment (if any). Also, if the open flag is
+ * changing, close the log file so it will be reopened (with new
+ * flag bit) at next use.
*/
if (openLogFile >= 0)
{
@@ -3011,7 +3054,7 @@ issue_xlog_fsync(void)
{
switch (sync_method)
{
- case SYNC_METHOD_FSYNC:
+ case SYNC_METHOD_FSYNC:
if (pg_fsync(openLogFile) != 0)
elog(STOP, "fsync(logfile %u seg %u) failed: %m",
openLogId, openLogSeg);
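
The xlog.c hunks above keep restating one pattern: pg_control stores a CRC as its first field, the CRC is computed over everything after that field, and the file is written as a full BLCKSZ block with the excess zero-padded so a truncated file is caught by the later content check rather than by a bare short-read error. Below is a minimal stand-alone sketch of that idea; MyControlData, BLOCK_SIZE and the toy checksum are assumptions for the example, standing in for ControlFileData, BLCKSZ and the CRC64 macros seen in the diff.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 8192

typedef struct MyControlData
{
    uint64_t    crc;            /* kept first, like pg_control's crc field */
    uint32_t    catalog_version;
    uint32_t    blcksz;
    char        lc_collate[64];
} MyControlData;

/* toy checksum standing in for INIT/COMP/FIN_CRC64 in the diff */
static uint64_t
simple_sum64(const unsigned char *p, size_t len)
{
    uint64_t    sum = 0;

    while (len-- > 0)
        sum = sum * 131 + *p++;
    return sum;
}

int
main(void)
{
    MyControlData ctl;
    MyControlData check;
    char        buffer[BLOCK_SIZE];     /* need not be aligned */

    memset(&ctl, 0, sizeof(ctl));
    ctl.catalog_version = 200101;
    ctl.blcksz = BLOCK_SIZE;
    strcpy(ctl.lc_collate, "C");

    /* checksum covers everything after the checksum field itself */
    ctl.crc = simple_sum64((unsigned char *) &ctl + sizeof(uint64_t),
                           sizeof(ctl) - sizeof(uint64_t));

    /* write out a full block, zero-padding the excess over sizeof(ctl) */
    memset(buffer, 0, BLOCK_SIZE);
    memcpy(buffer, &ctl, sizeof(ctl));

    /* a reader recomputes the checksum the same way before trusting it */
    memcpy(&check, buffer, sizeof(check));
    if (check.crc != simple_sum64((unsigned char *) &check + sizeof(uint64_t),
                                  sizeof(check) - sizeof(uint64_t)))
        fprintf(stderr, "invalid checksum in control data\n");
    else
        printf("control data OK\n");
    return 0;
}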
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 8b80c326cab..a3f440ca5f9 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.14 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.15 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,26 +37,26 @@
* xaction/command and return
*
* - -1 if not
- * - 0 if there is no tuple at all
- * - 1 if yes
+ * - 0 if there is no tuple at all
+ * - 1 if yes
*/
int
-XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
- TransactionId xid, CommandId cid)
+XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
+ TransactionId xid, CommandId cid)
{
- Relation reln;
- Buffer buffer;
- Page page;
- ItemId lp;
- HeapTupleHeader htup;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ ItemId lp;
+ HeapTupleHeader htup;
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
- return(0);
+ return (0);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
- return(0);
+ return (0);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -64,13 +64,13 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
- return(0);
+ return (0);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
- return(0);
+ return (0);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -79,11 +79,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
if (htup->t_xmin != xid || htup->t_cmin != cid)
{
UnlockAndReleaseBuffer(buffer);
- return(-1);
+ return (-1);
}
UnlockAndReleaseBuffer(buffer);
- return(1);
+ return (1);
}
/*
@@ -95,19 +95,19 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
bool
XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
- Relation reln;
- Buffer buffer;
- Page page;
- ItemId lp;
- HeapTupleHeader htup;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ ItemId lp;
+ HeapTupleHeader htup;
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
- return(false);
+ return (false);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
- return(false);
+ return (false);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -115,21 +115,21 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
if (PageGetSUI(page) != ThisStartUpID)
{
Assert(PageGetSUI(page) < ThisStartUpID);
UnlockAndReleaseBuffer(buffer);
- return(true);
+ return (true);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -140,22 +140,22 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
if (htup->t_infomask & HEAP_XMIN_INVALID ||
(htup->t_infomask & HEAP_MOVED_IN &&
- TransactionIdDidAbort((TransactionId)htup->t_cmin)) ||
+ TransactionIdDidAbort((TransactionId) htup->t_cmin)) ||
TransactionIdDidAbort(htup->t_xmin))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
}
UnlockAndReleaseBuffer(buffer);
- return(true);
+ return (true);
}
/*
* Open pg_log in recovery
*/
-extern Relation LogRelation; /* pg_log relation */
+extern Relation LogRelation; /* pg_log relation */
void
XLogOpenLogRelation(void)
@@ -189,32 +189,32 @@ XLogOpenLogRelation(void)
Buffer
XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
{
- BlockNumber lastblock = RelationGetNumberOfBlocks(reln);
+ BlockNumber lastblock = RelationGetNumberOfBlocks(reln);
Buffer buffer;
if (blkno >= lastblock)
{
buffer = InvalidBuffer;
- if (extend) /* we do this in recovery only - no locks */
+ if (extend) /* we do this in recovery only - no locks */
{
Assert(InRecovery);
while (lastblock <= blkno)
{
if (buffer != InvalidBuffer)
- ReleaseBuffer(buffer); /* must be WriteBuffer()? */
+ ReleaseBuffer(buffer); /* must be WriteBuffer()? */
buffer = ReadBuffer(reln, P_NEW);
lastblock++;
}
}
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- return(buffer);
+ return (buffer);
}
buffer = ReadBuffer(reln, blkno);
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- return(buffer);
+ return (buffer);
}
/*
@@ -223,32 +223,33 @@ XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
typedef struct XLogRelDesc
{
- RelationData reldata;
- struct XLogRelDesc *lessRecently;
- struct XLogRelDesc *moreRecently;
+ RelationData reldata;
+ struct XLogRelDesc *lessRecently;
+ struct XLogRelDesc *moreRecently;
} XLogRelDesc;
typedef struct XLogRelCacheEntry
{
- RelFileNode rnode;
- XLogRelDesc *rdesc;
+ RelFileNode rnode;
+ XLogRelDesc *rdesc;
} XLogRelCacheEntry;
-static HTAB *_xlrelcache;
-static XLogRelDesc *_xlrelarr = NULL;
-static Form_pg_class _xlpgcarr = NULL;
-static int _xlast = 0;
-static int _xlcnt = 0;
-#define _XLOG_RELCACHESIZE 512
+static HTAB *_xlrelcache;
+static XLogRelDesc *_xlrelarr = NULL;
+static Form_pg_class _xlpgcarr = NULL;
+static int _xlast = 0;
+static int _xlcnt = 0;
+
+#define _XLOG_RELCACHESIZE 512
static void
_xl_init_rel_cache(void)
{
- HASHCTL ctl;
+ HASHCTL ctl;
_xlcnt = _XLOG_RELCACHESIZE;
_xlast = 0;
- _xlrelarr = (XLogRelDesc*) malloc(sizeof(XLogRelDesc) * _xlcnt);
+ _xlrelarr = (XLogRelDesc *) malloc(sizeof(XLogRelDesc) * _xlcnt);
memset(_xlrelarr, 0, sizeof(XLogRelDesc) * _xlcnt);
_xlpgcarr = (Form_pg_class) malloc(sizeof(FormData_pg_class) * _xlcnt);
memset(_xlpgcarr, 0, sizeof(FormData_pg_class) * _xlcnt);
@@ -258,26 +259,26 @@ _xl_init_rel_cache(void)
memset(&ctl, 0, (int) sizeof(ctl));
ctl.keysize = sizeof(RelFileNode);
- ctl.datasize = sizeof(XLogRelDesc*);
+ ctl.datasize = sizeof(XLogRelDesc *);
ctl.hash = tag_hash;
_xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl,
- HASH_ELEM | HASH_FUNCTION);
+ HASH_ELEM | HASH_FUNCTION);
}
static void
_xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
{
- XLogRelCacheEntry *hentry;
- bool found;
- XLogRelDesc *rdesc = *edata;
- Form_pg_class tpgc = rdesc->reldata.rd_rel;
+ XLogRelCacheEntry *hentry;
+ bool found;
+ XLogRelDesc *rdesc = *edata;
+ Form_pg_class tpgc = rdesc->reldata.rd_rel;
rdesc->lessRecently->moreRecently = rdesc->moreRecently;
rdesc->moreRecently->lessRecently = rdesc->lessRecently;
- hentry = (XLogRelCacheEntry*) hash_search(_xlrelcache,
- (char*)&(rdesc->reldata.rd_node), HASH_REMOVE, &found);
+ hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
+ (char *) &(rdesc->reldata.rd_node), HASH_REMOVE, &found);
if (hentry == NULL)
elog(STOP, "_xl_remove_hash_entry: can't delete from cache");
@@ -294,16 +295,16 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
return;
}
-static XLogRelDesc*
+static XLogRelDesc *
_xl_new_reldesc(void)
{
- XLogRelDesc *res;
+ XLogRelDesc *res;
_xlast++;
if (_xlast < _xlcnt)
{
_xlrelarr[_xlast].reldata.rd_rel = &(_xlpgcarr[_xlast]);
- return(&(_xlrelarr[_xlast]));
+ return (&(_xlrelarr[_xlast]));
}
/* reuse */
@@ -312,7 +313,7 @@ _xl_new_reldesc(void)
_xl_remove_hash_entry(&res, 0);
_xlast--;
- return(res);
+ return (res);
}
@@ -344,12 +345,12 @@ XLogCloseRelationCache(void)
Relation
XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
{
- XLogRelDesc *res;
- XLogRelCacheEntry *hentry;
- bool found;
+ XLogRelDesc *res;
+ XLogRelCacheEntry *hentry;
+ bool found;
- hentry = (XLogRelCacheEntry*)
- hash_search(_xlrelcache, (char*)&rnode, HASH_FIND, &found);
+ hentry = (XLogRelCacheEntry *)
+ hash_search(_xlrelcache, (char *) &rnode, HASH_FIND, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: error in cache");
@@ -372,8 +373,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
res->reldata.rd_node = rnode;
- hentry = (XLogRelCacheEntry*)
- hash_search(_xlrelcache, (char*)&rnode, HASH_ENTER, &found);
+ hentry = (XLogRelCacheEntry *)
+ hash_search(_xlrelcache, (char *) &rnode, HASH_ENTER, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: can't insert into cache");
@@ -385,7 +386,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_fd = -1;
res->reldata.rd_fd = smgropen(DEFAULT_SMGR, &(res->reldata),
- true /* allow failure */);
+ true /* allow failure */ );
}
res->moreRecently = &(_xlrelarr[0]);
@@ -393,8 +394,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
_xlrelarr[0].lessRecently = res;
res->lessRecently->moreRecently = res;
- if (res->reldata.rd_fd < 0) /* file doesn't exist */
- return(NULL);
+ if (res->reldata.rd_fd < 0) /* file doesn't exist */
+ return (NULL);
- return(&(res->reldata));
+ return (&(res->reldata));
}
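
Most of the xlogutils.c reformatting above touches the recovery-time relation cache: a fixed array of descriptors threaded onto a doubly linked recency list (lessRecently/moreRecently), looked up through a hash table keyed by RelFileNode, with the least recently used slot recycled once the array is full. The sketch below reproduces just that eviction mechanic under assumed names; a linear scan replaces the hash lookup and the capacity is shrunk so the recycling is visible.

#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 4            /* the real cache uses 512 entries */

typedef struct Entry
{
    char        key[32];        /* stands in for RelFileNode */
    int         valid;
    struct Entry *lessRecently;
    struct Entry *moreRecently;
} Entry;

static Entry slots[CACHE_SIZE];
static Entry lru_head;          /* sentinel: lessRecently side = MRU, moreRecently side = LRU */

static void
lru_init(void)
{
    lru_head.lessRecently = lru_head.moreRecently = &lru_head;
}

static void
lru_unlink(Entry *e)
{
    e->lessRecently->moreRecently = e->moreRecently;
    e->moreRecently->lessRecently = e->lessRecently;
}

static void
lru_push_mru(Entry *e)
{
    e->moreRecently = &lru_head;
    e->lessRecently = lru_head.lessRecently;
    lru_head.lessRecently->moreRecently = e;
    lru_head.lessRecently = e;
}

static Entry *
cache_open(const char *key)
{
    Entry      *e = NULL;
    int         i;

    /* linear scan stands in for the RelFileNode hash table */
    for (i = 0; i < CACHE_SIZE; i++)
    {
        if (slots[i].valid && strcmp(slots[i].key, key) == 0)
        {
            lru_unlink(&slots[i]);      /* touch: move to MRU position */
            lru_push_mru(&slots[i]);
            return &slots[i];
        }
    }

    /* miss: take a free slot, or recycle the least recently used one */
    for (i = 0; i < CACHE_SIZE; i++)
        if (!slots[i].valid)
            e = &slots[i];
    if (e == NULL)
    {
        e = lru_head.moreRecently;      /* the LRU victim */
        lru_unlink(e);
    }
    strncpy(e->key, key, sizeof(e->key) - 1);
    e->key[sizeof(e->key) - 1] = '\0';
    e->valid = 1;
    lru_push_mru(e);
    return e;
}

int
main(void)
{
    lru_init();
    cache_open("rel_a");
    cache_open("rel_b");
    cache_open("rel_c");
    cache_open("rel_d");
    cache_open("rel_a");        /* touch rel_a, so rel_b becomes the LRU */
    cache_open("rel_e");        /* no free slot: rel_b's descriptor is recycled */
    printf("least recently used is now %s\n", lru_head.moreRecently->key);
    return 0;
}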
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index b9bb36bc0ca..bfc4cc2a454 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.46 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.47 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* See acl.h.
@@ -34,7 +34,7 @@
#include "utils/syscache.h"
static int32 aclcheck(char *relname, Acl *acl, AclId id,
- AclIdType idtype, AclMode mode);
+ AclIdType idtype, AclMode mode);
/* warning messages, now more explicit. */
/* MUST correspond to the order of the ACLCHK_* result codes in acl.h. */
@@ -59,7 +59,7 @@ dumpacl(Acl *acl)
for (i = 0; i < ACL_NUM(acl); ++i)
elog(DEBUG, " acl[%d]: %s", i,
DatumGetCString(DirectFunctionCall1(aclitemout,
- PointerGetDatum(aip + i))));
+ PointerGetDatum(aip + i))));
}
#endif
@@ -250,8 +250,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
num;
/*
- * If ACL is null, default to "OK" --- this should not happen,
- * since caller should have inserted appropriate default
+ * If ACL is null, default to "OK" --- this should not happen, since
+ * caller should have inserted appropriate default
*/
if (!acl)
{
@@ -265,8 +265,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
/*
* We'll treat the empty ACL like that, too, although this is more
* like an error (i.e., you manually blew away your ACL array) -- the
- * system never creates an empty ACL, since there must always be
- * a "world" entry in the first slot.
+ * system never creates an empty ACL, since there must always be a
+ * "world" entry in the first slot.
*/
if (num < 1)
{
@@ -352,7 +352,7 @@ pg_aclcheck(char *relname, Oid userid, AclMode mode)
{
int32 result;
HeapTuple tuple;
- char *usename;
+ char *usename;
Datum aclDatum;
bool isNull;
Acl *acl;
@@ -439,7 +439,7 @@ pg_ownercheck(Oid userid,
{
HeapTuple tuple;
AclId owner_id;
- char *usename;
+ char *usename;
tuple = SearchSysCache(SHADOWSYSID,
ObjectIdGetDatum(userid),
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 0952a079f77..7d10bdea7a7 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.39 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.40 2001/03/22 03:59:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -105,7 +105,7 @@ relpath_blind(const char *dbname, const char *relname,
return path;
}
-#else /* ! OLD_FILE_NAMING */
+#else /* ! OLD_FILE_NAMING */
/*
* relpath - construct path to a relation's file
@@ -118,7 +118,7 @@ relpath(RelFileNode rnode)
{
char *path;
- if (rnode.tblNode == (Oid) 0) /* "global tablespace" */
+ if (rnode.tblNode == (Oid) 0) /* "global tablespace" */
{
/* Shared system relations live in {datadir}/global */
path = (char *) palloc(strlen(DataDir) + 8 + sizeof(NameData) + 1);
@@ -127,8 +127,8 @@ relpath(RelFileNode rnode)
else
{
path = (char *) palloc(strlen(DataDir) + 6 + 2 * sizeof(NameData) + 3);
- sprintf(path, "%s%cbase%c%u%c%u", DataDir, SEP_CHAR, SEP_CHAR,
- rnode.tblNode, SEP_CHAR, rnode.relNode);
+ sprintf(path, "%s%cbase%c%u%c%u", DataDir, SEP_CHAR, SEP_CHAR,
+ rnode.tblNode, SEP_CHAR, rnode.relNode);
}
return path;
}
@@ -144,7 +144,7 @@ GetDatabasePath(Oid tblNode)
{
char *path;
- if (tblNode == (Oid) 0) /* "global tablespace" */
+ if (tblNode == (Oid) 0) /* "global tablespace" */
{
/* Shared system relations live in {datadir}/global */
path = (char *) palloc(strlen(DataDir) + 8);
@@ -158,7 +158,7 @@ GetDatabasePath(Oid tblNode)
return path;
}
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
/*
* IsSystemRelationName
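
For context on the catalog.c hunks: with the new file naming, a relation's path is derived from DataDir plus the database and relation OIDs, with database OID 0 denoting the shared "global tablespace". A simplified illustration follows; the directory layout matches the formats in the diff (with '/' standing in for SEP_CHAR), while the DataDir value and OIDs are made up for the example.

#include <stdio.h>

int
main(void)
{
    const char *DataDir = "/usr/local/pgsql/data";  /* example value */
    unsigned int tblNode = 18721;   /* database OID; 0 would mean shared */
    unsigned int relNode = 16384;   /* relfilenode */
    char        path[256];

    if (tblNode == 0)
        snprintf(path, sizeof(path), "%s/global/%u", DataDir, relNode);
    else
        snprintf(path, sizeof(path), "%s/base/%u/%u",
                 DataDir, tblNode, relNode);
    printf("%s\n", path);
    return 0;
}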
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index f0aa9042e02..34a22412c39 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.160 2001/02/14 21:34:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -68,7 +68,7 @@
static void AddNewRelationTuple(Relation pg_class_desc,
- Relation new_rel_desc, Oid new_rel_oid, Oid new_type_oid,
+ Relation new_rel_desc, Oid new_rel_oid, Oid new_type_oid,
int natts, char relkind, char *temp_relname);
static void DeleteAttributeTuples(Relation rel);
static void DeleteRelationTuple(Relation rel);
@@ -76,7 +76,7 @@ static void DeleteTypeTuple(Relation rel);
static void RelationRemoveIndexes(Relation relation);
static void RelationRemoveInheritance(Relation relation);
static void AddNewRelationType(char *typeName, Oid new_rel_oid,
- Oid new_type_oid);
+ Oid new_type_oid);
static void StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
bool updatePgAttribute);
static void StoreRelCheck(Relation rel, char *ccname, char *ccbin);
@@ -178,13 +178,13 @@ heap_create(char *relname,
{
static unsigned int uniqueId = 0;
- Oid relid;
- Relation rel;
- bool nailme = false;
- int natts = tupDesc->natts;
- int i;
- MemoryContext oldcxt;
- Oid tblNode = MyDatabaseId;
+ Oid relid;
+ Relation rel;
+ bool nailme = false;
+ int natts = tupDesc->natts;
+ int i;
+ MemoryContext oldcxt;
+ Oid tblNode = MyDatabaseId;
/* ----------------
* sanity checks
@@ -270,7 +270,11 @@ heap_create(char *relname,
if (istemp)
{
- /* replace relname of caller with a unique name for a temp relation */
+
+ /*
+ * replace relname of caller with a unique name for a temp
+ * relation
+ */
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++);
}
@@ -738,6 +742,7 @@ AddNewRelationTuple(Relation pg_class_desc,
static void
AddNewRelationType(char *typeName, Oid new_rel_oid, Oid new_type_oid)
{
+
/*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
@@ -1025,9 +1030,7 @@ RelationRemoveInheritance(Relation relation)
&entry);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
simple_heap_delete(catalogRelation, &tuple->t_self);
- }
heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
@@ -1152,8 +1155,8 @@ RelationTruncateIndexes(Oid heapId)
/*
* We have to re-open the heap rel each time through this loop
* because index_build will close it again. We need grab no lock,
- * however, because we assume heap_truncate is holding an exclusive
- * lock on the heap rel.
+ * however, because we assume heap_truncate is holding an
+ * exclusive lock on the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
@@ -1164,8 +1167,8 @@ RelationTruncateIndexes(Oid heapId)
LockRelation(currentIndex, AccessExclusiveLock);
/*
- * Drop any buffers associated with this index. If they're
- * dirty, they're just dropped without bothering to flush to disk.
+ * Drop any buffers associated with this index. If they're dirty,
+ * they're just dropped without bothering to flush to disk.
*/
DropRelationBuffers(currentIndex);
@@ -1177,6 +1180,7 @@ RelationTruncateIndexes(Oid heapId)
InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
currentIndex, accessMethodId);
index_build(heapRelation, currentIndex, indexInfo, NULL);
+
/*
* index_build will close both the heap and index relations (but
* not give up the locks we hold on them).
@@ -1514,7 +1518,7 @@ heap_drop_with_catalog(const char *relname,
if (has_toasttable)
{
- char toast_relname[NAMEDATALEN];
+ char toast_relname[NAMEDATALEN];
sprintf(toast_relname, "pg_toast_%u", rid);
heap_drop_with_catalog(toast_relname, true);
@@ -1553,16 +1557,16 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
* deparse it
*/
adsrc = deparse_expression(expr,
- deparse_context_for(RelationGetRelationName(rel),
- RelationGetRelid(rel)),
+ deparse_context_for(RelationGetRelationName(rel),
+ RelationGetRelid(rel)),
false);
values[Anum_pg_attrdef_adrelid - 1] = RelationGetRelid(rel);
values[Anum_pg_attrdef_adnum - 1] = attnum;
values[Anum_pg_attrdef_adbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(adbin));
+ CStringGetDatum(adbin));
values[Anum_pg_attrdef_adsrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(adsrc));
+ CStringGetDatum(adsrc));
adrel = heap_openr(AttrDefaultRelationName, RowExclusiveLock);
tuple = heap_formtuple(adrel->rd_att, values, nulls);
heap_insert(adrel, tuple);
@@ -1631,17 +1635,17 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
* deparse it
*/
ccsrc = deparse_expression(expr,
- deparse_context_for(RelationGetRelationName(rel),
- RelationGetRelid(rel)),
+ deparse_context_for(RelationGetRelationName(rel),
+ RelationGetRelid(rel)),
false);
values[Anum_pg_relcheck_rcrelid - 1] = RelationGetRelid(rel);
values[Anum_pg_relcheck_rcname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(ccname));
+ CStringGetDatum(ccname));
values[Anum_pg_relcheck_rcbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(ccbin));
+ CStringGetDatum(ccbin));
values[Anum_pg_relcheck_rcsrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(ccsrc));
+ CStringGetDatum(ccsrc));
rcrel = heap_openr(RelCheckRelationName, RowExclusiveLock);
tuple = heap_formtuple(rcrel->rd_att, values, nulls);
heap_insert(rcrel, tuple);
@@ -1981,9 +1985,7 @@ RemoveAttrDefault(Relation rel)
adscan = heap_beginscan(adrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(adscan, 0)))
- {
simple_heap_delete(adrel, &tup->t_self);
- }
heap_endscan(adscan);
heap_close(adrel, RowExclusiveLock);
@@ -2005,9 +2007,7 @@ RemoveRelCheck(Relation rel)
rcscan = heap_beginscan(rcrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(rcscan, 0)))
- {
simple_heap_delete(rcrel, &tup->t_self);
- }
heap_endscan(rcscan);
heap_close(rcrel, RowExclusiveLock);
@@ -2044,9 +2044,7 @@ RemoveStatistics(Relation rel)
scan = heap_beginscan(pgstatistic, false, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
simple_heap_delete(pgstatistic, &tuple->t_self);
- }
heap_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock);
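
One small behavioral detail in the heap.c hunk above: heap_create() gives a temp relation a backend-unique name built from the process PID and a per-backend counter before anything is catalogued under it. A stand-alone sketch of that naming scheme (NAMEDATALEN here is just a stand-in value):

#include <stdio.h>
#include <unistd.h>

#define NAMEDATALEN 32          /* stand-in for the real constant */

int
main(void)
{
    static unsigned int uniqueId = 0;
    char        relname[NAMEDATALEN];

    /* pg_temp.<backend pid>.<per-backend counter>, as in heap_create() */
    snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
             (int) getpid(), uniqueId++);
    printf("temp relation renamed to %s\n", relname);
    return 0;
}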
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 6dd28ed02cb..103c4ccc016 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.142 2001/02/23 09:31:52 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -63,19 +63,19 @@ static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
bool istemp);
static TupleDesc BuildFuncTupleDesc(Oid funcOid);
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
- int numatts, AttrNumber *attNums);
+ int numatts, AttrNumber *attNums);
static void ConstructIndexReldesc(Relation indexRelation, Oid amoid);
static Oid UpdateRelationRelation(Relation indexRelation, char *temp_relname);
static void InitializeAttributeOids(Relation indexRelation,
int numatts, Oid indexoid);
static void AppendAttributeTuples(Relation indexRelation, int numatts);
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
- IndexInfo *indexInfo,
- Oid *classOids,
- bool islossy, bool primary);
+ IndexInfo *indexInfo,
+ Oid *classOids,
+ bool islossy, bool primary);
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
- IndexInfo *indexInfo, Node *oldPred,
- IndexStrategy indexStrategy);
+ IndexInfo *indexInfo, Node *oldPred,
+ IndexStrategy indexStrategy);
static Oid IndexGetRelation(Oid indexId);
static bool activate_index(Oid indexId, bool activate, bool inplace);
@@ -301,7 +301,8 @@ ConstructTupleDescriptor(Relation heapRelation,
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
/*
- * Fix the stuff that should not be the same as the underlying attr
+ * Fix the stuff that should not be the same as the underlying
+ * attr
*/
to->attnum = i + 1;
@@ -311,9 +312,9 @@ ConstructTupleDescriptor(Relation heapRelation,
to->attcacheoff = -1;
/*
- * We do not yet have the correct relation OID for the index,
- * so just set it invalid for now. InitializeAttributeOids()
- * will fix it later.
+ * We do not yet have the correct relation OID for the index, so
+ * just set it invalid for now. InitializeAttributeOids() will
+ * fix it later.
*/
to->attrelid = InvalidOid;
}
@@ -331,7 +332,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* typically CacheMemoryContext).
*
* There was a note here about adding indexing, but I don't see a need
- * for it. There are so few tuples in pg_am that an indexscan would
+ * for it. There are so few tuples in pg_am that an indexscan would
* surely be slower.
* ----------------------------------------------------------------
*/
@@ -394,7 +395,7 @@ static void
ConstructIndexReldesc(Relation indexRelation, Oid amoid)
{
indexRelation->rd_am = AccessMethodObjectIdGetForm(amoid,
- CacheMemoryContext);
+ CacheMemoryContext);
/* ----------------
* XXX missing the initialization of some other fields
@@ -625,12 +626,12 @@ UpdateIndexRelation(Oid indexoid,
{
predString = nodeToString(indexInfo->ii_Predicate);
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(predString)));
+ CStringGetDatum(predString)));
pfree(predString);
}
else
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum("")));
+ CStringGetDatum("")));
predLen = VARSIZE(predText);
itupLen = predLen + sizeof(FormData_pg_index);
@@ -646,7 +647,7 @@ UpdateIndexRelation(Oid indexoid,
indexForm->indproc = indexInfo->ii_FuncOid;
indexForm->indisclustered = false;
indexForm->indislossy = islossy;
- indexForm->indhaskeytype = true; /* not actually used anymore */
+ indexForm->indhaskeytype = true; /* not actually used anymore */
indexForm->indisunique = indexInfo->ii_Unique;
indexForm->indisprimary = primary;
memcpy((char *) &indexForm->indpred, (char *) predText, predLen);
@@ -747,12 +748,12 @@ UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate)
{
predString = nodeToString(newPred);
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(predString)));
+ CStringGetDatum(predString)));
pfree(predString);
}
else
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum("")));
+ CStringGetDatum("")));
/* open the index system catalog relation */
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
@@ -911,15 +912,15 @@ index_create(char *heapRelationName,
else
indexTupDesc = ConstructTupleDescriptor(heapRelation,
indexInfo->ii_NumKeyAttrs,
- indexInfo->ii_KeyAttrNumbers);
+ indexInfo->ii_KeyAttrNumbers);
if (istemp)
{
/* save user relation name because heap_create changes it */
- temp_relname = pstrdup(indexRelationName); /* save original value */
+ temp_relname = pstrdup(indexRelationName); /* save original value */
indexRelationName = palloc(NAMEDATALEN);
- strcpy(indexRelationName, temp_relname); /* heap_create will
- * change this */
+ strcpy(indexRelationName, temp_relname); /* heap_create will
+ * change this */
}
/* ----------------
@@ -1008,9 +1009,7 @@ index_create(char *heapRelationName,
/* XXX shouldn't we close the heap and index rels here? */
}
else
- {
index_build(heapRelation, indexRelation, indexInfo, NULL);
- }
}
/* ----------------------------------------------------------------
@@ -1081,12 +1080,12 @@ index_drop(Oid indexId)
heap_freetuple(tuple);
/*
- * Update the pg_class tuple for the owning relation. We are presently
- * too lazy to attempt to compute the new correct value of relhasindex
- * (the next VACUUM will fix it if necessary). But we must send out a
- * shared-cache-inval notice on the owning relation to ensure other
- * backends update their relcache lists of indexes. So, unconditionally
- * do setRelhasindex(true).
+ * Update the pg_class tuple for the owning relation. We are
+ * presently too lazy to attempt to compute the new correct value of
+ * relhasindex (the next VACUUM will fix it if necessary). But we
+ * must send out a shared-cache-inval notice on the owning relation to
+ * ensure other backends update their relcache lists of indexes. So,
+ * unconditionally do setRelhasindex(true).
*/
setRelhasindex(heapId, true);
@@ -1160,11 +1159,11 @@ index_drop(Oid indexId)
*
* IndexInfo stores the information about the index that's needed by
* FormIndexDatum, which is used for both index_build() and later insertion
- * of individual index tuples. Normally we build an IndexInfo for an index
+ * of individual index tuples. Normally we build an IndexInfo for an index
* just once per command, and then use it for (potentially) many tuples.
* ----------------
*/
-IndexInfo *
+IndexInfo *
BuildIndexInfo(HeapTuple indexTuple)
{
Form_pg_index indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
@@ -1199,7 +1198,7 @@ BuildIndexInfo(HeapTuple indexTuple)
{
ii->ii_NumIndexAttrs = 1;
/* Do a lookup on the function, too */
- fmgr_info(indexStruct->indproc, & ii->ii_FuncInfo);
+ fmgr_info(indexStruct->indproc, &ii->ii_FuncInfo);
}
else
ii->ii_NumIndexAttrs = numKeys;
@@ -1213,7 +1212,7 @@ BuildIndexInfo(HeapTuple indexTuple)
char *predString;
predString = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&indexStruct->indpred)));
+ PointerGetDatum(&indexStruct->indpred)));
ii->ii_Predicate = stringToNode(predString);
pfree(predString);
}
@@ -1262,8 +1261,8 @@ FormIndexDatum(IndexInfo *indexInfo,
* Functional index --- compute the single index attribute
* ----------------
*/
- FunctionCallInfoData fcinfo;
- bool anynull = false;
+ FunctionCallInfoData fcinfo;
+ bool anynull = false;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &indexInfo->ii_FuncInfo;
@@ -1326,8 +1325,8 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
Relation relationRelation;
/*
- * NOTE: get and hold RowExclusiveLock on pg_class, because caller will
- * probably modify the rel's pg_class tuple later on.
+ * NOTE: get and hold RowExclusiveLock on pg_class, because caller
+ * will probably modify the rel's pg_class tuple later on.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
classTuple = SearchSysCache(RELOID, PointerGetDatum(relid),
@@ -1342,7 +1341,7 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
while (1)
{
- ItemPointerData tidsave;
+ ItemPointerData tidsave;
ItemPointerCopy(&(rtup->t_self), &tidsave);
test = heap_mark4update(relationRelation, rtup, buffer);
@@ -1393,7 +1392,7 @@ IndexesAreActive(Oid relid, bool confirmCommitted)
if (!LockClassinfoForUpdate(relid, &tuple, &buffer, confirmCommitted))
elog(ERROR, "IndexesAreActive couldn't lock %u", relid);
if (((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_RELATION &&
- ((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_TOASTVALUE)
+ ((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_TOASTVALUE)
elog(ERROR, "relation %u isn't an indexable relation", relid);
isactive = ((Form_pg_class) GETSTRUCT(&tuple))->relhasindex;
ReleaseBuffer(buffer);
@@ -1438,7 +1437,7 @@ setRelhasindex(Oid relid, bool hasindex)
if (!IsIgnoringSystemIndexes())
#else
if (!IsIgnoringSystemIndexes() && (!IsReindexProcessing() || pg_class->rd_rel->relhasindex))
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
{
tuple = SearchSysCacheCopy(RELOID,
ObjectIdGetDatum(relid),
@@ -1513,18 +1512,19 @@ setRelhasindex(Oid relid, bool hasindex)
void
setNewRelfilenode(Relation relation)
{
- Relation pg_class, idescs[Num_pg_class_indices];
- Oid newrelfilenode;
+ Relation pg_class,
+ idescs[Num_pg_class_indices];
+ Oid newrelfilenode;
bool in_place_update = false;
- HeapTupleData lockTupleData;
- HeapTuple classTuple = NULL;
+ HeapTupleData lockTupleData;
+ HeapTuple classTuple = NULL;
Buffer buffer;
- RelationData workrel;
-
+ RelationData workrel;
+
Assert(!IsSystemRelationName(NameStr(relation->rd_rel->relname)) || relation->rd_rel->relkind == RELKIND_INDEX);
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
- /* Fetch and lock the classTuple associated with this relation */
+ /* Fetch and lock the classTuple associated with this relation */
if (!LockClassinfoForUpdate(relation->rd_id, &lockTupleData, &buffer, true))
elog(ERROR, "setNewRelfilenode impossible to lock class tuple");
if (IsIgnoringSystemIndexes())
@@ -1567,7 +1567,7 @@ setNewRelfilenode(Relation relation)
if (!in_place_update && pg_class->rd_rel->relhasindex)
{
CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices,
- idescs);
+ idescs);
CatalogIndexInsert(idescs, Num_pg_class_indices, pg_class, classTuple);
CatalogCloseIndices(Num_pg_class_indices, idescs);
}
@@ -1577,7 +1577,8 @@ setNewRelfilenode(Relation relation)
/* Make sure the relfilenode change */
CommandCounterIncrement();
}
-#endif /* OLD_FILE_NAMING */
+
+#endif /* OLD_FILE_NAMING */
/* ----------------
* UpdateStats
@@ -1639,7 +1640,7 @@ UpdateStats(Oid relid, long reltuples)
in_place_upd = (IsReindexProcessing() || IsBootstrapProcessingMode());
#else
in_place_upd = (IsIgnoringSystemIndexes() || IsReindexProcessing());
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
if (!in_place_upd)
{
@@ -1713,9 +1714,10 @@ UpdateStats(Oid relid, long reltuples)
*/
if (in_place_upd)
{
+
/*
* At bootstrap time, we don't need to worry about concurrency or
- * visibility of changes, so we cheat. Also cheat if REINDEX.
+ * visibility of changes, so we cheat. Also cheat if REINDEX.
*/
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
@@ -1777,7 +1779,7 @@ DefaultBuild(Relation heapRelation,
Relation indexRelation,
IndexInfo *indexInfo,
Node *oldPred,
- IndexStrategy indexStrategy) /* not used */
+ IndexStrategy indexStrategy) /* not used */
{
HeapScanDesc scan;
HeapTuple heapTuple;
@@ -1787,9 +1789,11 @@ DefaultBuild(Relation heapRelation,
long reltuples,
indtuples;
Node *predicate = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult insertResult;
@@ -1855,6 +1859,7 @@ DefaultBuild(Relation heapRelation,
reltuples++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -1906,9 +1911,7 @@ DefaultBuild(Relation heapRelation,
#ifndef OMIT_PARTIAL_INDEX
if (predicate != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -1972,7 +1975,7 @@ index_build(Relation heapRelation,
PointerGetDatum(indexRelation),
PointerGetDatum(indexInfo),
PointerGetDatum(oldPred),
- PointerGetDatum(RelationGetIndexStrategy(indexRelation)));
+ PointerGetDatum(RelationGetIndexStrategy(indexRelation)));
else
DefaultBuild(heapRelation,
indexRelation,
@@ -2087,21 +2090,22 @@ reindex_index(Oid indexId, bool force, bool inplace)
#ifndef OLD_FILE_NAMING
if (!inplace)
- {
- inplace = IsSharedSystemRelationName(NameStr(iRel->rd_rel->relname));
+ {
+ inplace = IsSharedSystemRelationName(NameStr(iRel->rd_rel->relname));
if (!inplace)
setNewRelfilenode(iRel);
}
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
/* Obtain exclusive lock on it, just to be sure */
LockRelation(iRel, AccessExclusiveLock);
if (inplace)
{
+
/*
- * Release any buffers associated with this index. If they're dirty,
- * they're just dropped without bothering to flush to disk.
- */
+ * Release any buffers associated with this index. If they're
+ * dirty, they're just dropped without bothering to flush to disk.
+ */
DropRelationBuffers(iRel);
/* Now truncate the actual data and set blocks to zero */
@@ -2115,7 +2119,7 @@ reindex_index(Oid indexId, bool force, bool inplace)
/*
* index_build will close both the heap and index relations (but not
- * give up the locks we hold on them). So we're done.
+ * give up the locks we hold on them). So we're done.
*/
SetReindexProcessing(old);
@@ -2164,31 +2168,37 @@ reindex_relation(Oid relid, bool force)
bool old,
reindexed;
- bool deactivate_needed, overwrite, upd_pg_class_inplace;
+ bool deactivate_needed,
+ overwrite,
+ upd_pg_class_inplace;
+
#ifdef OLD_FILE_NAMING
- overwrite = upd_pg_class_inplace = deactivate_needed = true;
+ overwrite = upd_pg_class_inplace = deactivate_needed = true;
#else
- Relation rel;
- overwrite = upd_pg_class_inplace = deactivate_needed = false;
+ Relation rel;
+
+ overwrite = upd_pg_class_inplace = deactivate_needed = false;
+
/*
- * avoid heap_update() pg_class tuples while processing
- * reindex for pg_class.
- */
+ * avoid heap_update() pg_class tuples while processing reindex for
+ * pg_class.
+ */
if (IsIgnoringSystemIndexes())
upd_pg_class_inplace = true;
+
/*
* ignore the indexes of the target system relation while processing
* reindex.
- */
+ */
rel = RelationIdGetRelation(relid);
if (!IsIgnoringSystemIndexes() && IsSystemRelationName(NameStr(rel->rd_rel->relname)))
deactivate_needed = true;
-#ifndef ENABLE_REINDEX_NAILED_RELATIONS
- /*
- * nailed relations are never updated.
- * We couldn't keep the consistency between the relation
- * descriptors and pg_class tuples.
- */
+#ifndef ENABLE_REINDEX_NAILED_RELATIONS
+
+ /*
+ * nailed relations are never updated. We couldn't keep the
+ * consistency between the relation descriptors and pg_class tuples.
+ */
if (rel->rd_isnailed)
{
if (IsIgnoringSystemIndexes())
@@ -2199,10 +2209,11 @@ reindex_relation(Oid relid, bool force)
else
elog(ERROR, "the target relation %u is nailed", relid);
}
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+
/*
- * Shared system indexes must be overwritten because it's
- * impossible to update pg_class tuples of all databases.
+ * Shared system indexes must be overwritten because it's impossible
+ * to update pg_class tuples of all databases.
*/
if (IsSharedSystemRelationName(NameStr(rel->rd_rel->relname)))
{
@@ -2215,7 +2226,7 @@ reindex_relation(Oid relid, bool force)
elog(ERROR, "the target relation %u is shared", relid);
}
RelationClose(rel);
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
old = SetReindexProcessing(true);
if (deactivate_needed)
{
@@ -2252,24 +2263,27 @@ reindex_relation(Oid relid, bool force)
heap_endscan(scan);
heap_close(indexRelation, AccessShareLock);
if (reindexed)
- /*
- * Ok,we could use the reindexed indexes of the target
- * system relation now.
- */
- {
+
+ /*
+ * Ok,we could use the reindexed indexes of the target system
+ * relation now.
+ */
+ {
if (deactivate_needed)
{
if (!overwrite && relid == RelOid_pg_class)
{
- /*
- * For pg_class, relhasindex should be set
- * to true here in place.
+
+ /*
+ * For pg_class, relhasindex should be set to true here in
+ * place.
*/
setRelhasindex(relid, true);
CommandCounterIncrement();
- /*
- * However the following setRelhasindex()
- * is needed to keep consistency with WAL.
+
+ /*
+ * However the following setRelhasindex() is needed to
+ * keep consistency with WAL.
*/
}
setRelhasindex(relid, true);
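
The FormIndexDatum() hunk above distinguishes two cases: an ordinary index stores one datum per indexed column, while a functional index stores a single key produced by calling the catalogued function (pg_index.indproc) on the heap columns. A toy illustration of that split, with a lowercasing function standing in for the index function; the values are made up for the example.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the catalogued function of a functional index */
static void
lower_func(const char *in, char *out, size_t outlen)
{
    size_t      i;

    for (i = 0; i + 1 < outlen && in[i] != '\0'; i++)
        out[i] = (char) tolower((unsigned char) in[i]);
    out[i] = '\0';
}

int
main(void)
{
    const char *heap_value = "Momjian"; /* column value from the heap tuple */
    char        index_key[64];
    int         is_functional = 1;      /* pretend indproc is set */

    if (is_functional)
    {
        /* one key attribute, computed by the function */
        lower_func(heap_value, index_key, sizeof(index_key));
    }
    else
    {
        /* ordinary index: the key is the column value itself */
        strncpy(index_key, heap_value, sizeof(index_key) - 1);
        index_key[sizeof(index_key) - 1] = '\0';
    }
    printf("index key: %s\n", index_key);
    return 0;
}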
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 51337004cbe..8261e9dcfcb 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.76 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.77 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ CatalogCloseIndices(int nIndices, Relation *idescs)
* NOTE: since this routine looks up all the pg_index data on each call,
* it's relatively inefficient for inserting a large number of tuples into
* the same catalog. We use it only for inserting one or a few tuples
- * in a given command. See ExecOpenIndices() and related routines if you
+ * in a given command. See ExecOpenIndices() and related routines if you
* are inserting tuples in bulk.
*
* NOTE: we do not bother to handle partial indices. Nor do we try to
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index e9a0450a7a1..7157ffb2c98 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.37 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.38 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@ AggregateCreate(char *aggName,
Datum values[Natts_pg_aggregate];
Form_pg_proc proc;
Oid transfn;
- Oid finalfn = InvalidOid; /* can be omitted */
+ Oid finalfn = InvalidOid; /* can be omitted */
Oid basetype;
Oid transtype;
Oid finaltype;
@@ -79,8 +79,8 @@ AggregateCreate(char *aggName,
/*
* Handle the aggregate's base type (input data type). This can be
- * specified as 'ANY' for a data-independent transition function,
- * such as COUNT(*).
+ * specified as 'ANY' for a data-independent transition function, such
+ * as COUNT(*).
*/
basetype = GetSysCacheOid(TYPENAME,
PointerGetDatum(aggbasetypeName),
@@ -118,9 +118,7 @@ AggregateCreate(char *aggName,
nargs = 2;
}
else
- {
nargs = 1;
- }
tup = SearchSysCache(PROCNAME,
PointerGetDatum(aggtransfnName),
Int32GetDatum(nargs),
@@ -134,16 +132,17 @@ AggregateCreate(char *aggName,
if (proc->prorettype != transtype)
elog(ERROR, "AggregateCreate: return type of '%s' is not '%s'",
aggtransfnName, aggtranstypeName);
+
/*
- * If the transfn is strict and the initval is NULL, make sure
- * input type and transtype are the same (or at least binary-
- * compatible), so that it's OK to use the first input value
- * as the initial transValue.
+ * If the transfn is strict and the initval is NULL, make sure input
+ * type and transtype are the same (or at least binary- compatible),
+ * so that it's OK to use the first input value as the initial
+ * transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
if (basetype != transtype &&
- ! IS_BINARY_COMPATIBLE(basetype, transtype))
+ !IS_BINARY_COMPATIBLE(basetype, transtype))
elog(ERROR, "AggregateCreate: must not omit initval when transfn is strict and transtype is not compatible with input type");
}
ReleaseSysCache(tup);
@@ -168,6 +167,7 @@ AggregateCreate(char *aggName,
}
else
{
+
/*
* If no finalfn, aggregate result type is type of the state value
*/
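
The AggregateCreate() hunk above tightens the wording of one rule: if the transition function is strict and no initial value is supplied, the aggregate's input type must match (or be binary-compatible with) the transition type, because the first input value itself becomes the initial state. A condensed restatement of that check, with types modelled as plain ints and a trivial stand-in for IS_BINARY_COMPATIBLE:

#include <stdio.h>

static int
binary_compatible(int a, int b)
{
    /* assumption for the sketch: only identical types are compatible */
    return a == b;
}

static void
check_aggregate(int basetype, int transtype,
                int transfn_is_strict, int has_initval)
{
    if (transfn_is_strict && !has_initval &&
        basetype != transtype && !binary_compatible(basetype, transtype))
        printf("error: must not omit initval when transfn is strict "
               "and transtype is not compatible with input type\n");
    else
        printf("aggregate definition accepted\n");
}

int
main(void)
{
    check_aggregate(23, 23, 1, 0);      /* same types: first input can seed the state */
    check_aggregate(23, 701, 1, 0);     /* strict, no initval, types differ: rejected */
    return 0;
}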
diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c
index 688b96ed84b..2becb34929f 100644
--- a/src/backend/catalog/pg_largeobject.c
+++ b/src/backend/catalog/pg_largeobject.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.7 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.8 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ LargeObjectCreate(Oid loid)
*/
for (i = 0; i < Natts_pg_largeobject; i++)
{
- values[i] = (Datum)NULL;
+ values[i] = (Datum) NULL;
nulls[i] = ' ';
}
@@ -60,7 +60,7 @@ LargeObjectCreate(Oid loid)
values[i++] = Int32GetDatum(0);
values[i++] = DirectFunctionCall1(byteain,
CStringGetDatum(""));
-
+
ntup = heap_formtuple(pg_largeobject->rd_att, values, nulls);
/*
@@ -77,7 +77,7 @@ LargeObjectCreate(Oid loid)
CatalogIndexInsert(idescs, Num_pg_largeobject_indices, pg_largeobject, ntup);
CatalogCloseIndices(Num_pg_largeobject_indices, idescs);
}
-
+
heap_close(pg_largeobject, RowExclusiveLock);
heap_freetuple(ntup);
@@ -91,9 +91,9 @@ LargeObjectDrop(Oid loid)
bool found = false;
Relation pg_largeobject;
Relation pg_lo_idx;
- ScanKeyData skey[1];
+ ScanKeyData skey[1];
IndexScanDesc sd;
- RetrieveIndexResult indexRes;
+ RetrieveIndexResult indexRes;
HeapTupleData tuple;
Buffer buffer;
@@ -139,9 +139,9 @@ LargeObjectExists(Oid loid)
bool retval = false;
Relation pg_largeobject;
Relation pg_lo_idx;
- ScanKeyData skey[1];
+ ScanKeyData skey[1];
IndexScanDesc sd;
- RetrieveIndexResult indexRes;
+ RetrieveIndexResult indexRes;
HeapTupleData tuple;
Buffer buffer;
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index d9834783414..25ecf12f3b6 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -263,7 +263,7 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
values[i++] = NameGetDatum(&oname);
values[i++] = Int32GetDatum(GetUserId());
values[i++] = UInt16GetDatum(0);
- values[i++] = CharGetDatum('b'); /* assume it's binary */
+ values[i++] = CharGetDatum('b'); /* assume it's binary */
values[i++] = BoolGetDatum(false);
values[i++] = BoolGetDatum(false);
values[i++] = ObjectIdGetDatum(leftObjectId); /* <-- left oid */
@@ -595,7 +595,7 @@ OperatorDef(char *operatorName,
*/
if (restrictionName)
{ /* optional */
- Oid restOid;
+ Oid restOid;
MemSet(typeId, 0, FUNC_MAX_ARGS * sizeof(Oid));
typeId[0] = OIDOID; /* operator OID */
@@ -623,7 +623,7 @@ OperatorDef(char *operatorName,
*/
if (joinName)
{ /* optional */
- Oid joinOid;
+ Oid joinOid;
MemSet(typeId, 0, FUNC_MAX_ARGS * sizeof(Oid));
typeId[0] = OIDOID; /* operator OID */
@@ -745,7 +745,7 @@ OperatorDef(char *operatorName,
otherRightTypeName);
if (!OidIsValid(other_oid))
elog(ERROR,
- "OperatorDef: can't create operator shell \"%s\"",
+ "OperatorDef: can't create operator shell \"%s\"",
name[j]);
values[i++] = ObjectIdGetDatum(other_oid);
}
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index e9918baebcd..0872eb6e977 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.53 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -156,7 +156,7 @@ ProcedureCreate(char *procedureName,
text *prosrctext;
prosrctext = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(prosrc)));
+ CStringGetDatum(prosrc)));
retval = GetSysCacheOid(PROSRC,
PointerGetDatum(prosrctext),
0, 0, 0);
@@ -237,18 +237,18 @@ ProcedureCreate(char *procedureName,
prosrc = procedureName;
if (fmgr_internal_function(prosrc) == InvalidOid)
elog(ERROR,
- "ProcedureCreate: there is no builtin function named \"%s\"",
+ "ProcedureCreate: there is no builtin function named \"%s\"",
prosrc);
}
/*
* If this is a dynamically loadable procedure, make sure that the
* library file exists, is loadable, and contains the specified link
- * symbol. Also check for a valid function information record.
+ * symbol. Also check for a valid function information record.
*
* We used to perform these checks only when the function was first
- * called, but it seems friendlier to verify the library's validity
- * at CREATE FUNCTION time.
+ * called, but it seems friendlier to verify the library's validity at
+ * CREATE FUNCTION time.
*/
if (languageObjectId == ClanguageId)
@@ -355,7 +355,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlist = parse->targetList;
/*
- * The last query must be a SELECT if and only if there is a return type.
+ * The last query must be a SELECT if and only if there is a return
+ * type.
*/
if (rettype == InvalidOid)
{
@@ -375,8 +376,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlistlen = ExecCleanTargetListLength(tlist);
/*
- * For base-type returns, the target list should have exactly one entry,
- * and its type should agree with what the user declared.
+ * For base-type returns, the target list should have exactly one
+ * entry, and its type should agree with what the user declared.
*/
typerelid = typeidTypeRelid(rettype);
if (typerelid == InvalidOid)
@@ -388,7 +389,7 @@ checkretval(Oid rettype, List *queryTreeList)
resnode = (Resdom *) ((TargetEntry *) lfirst(tlist))->resdom;
if (resnode->restype != rettype)
elog(ERROR, "return type mismatch in function: declared to return %s, returns %s",
- typeidTypeName(rettype), typeidTypeName(resnode->restype));
+ typeidTypeName(rettype), typeidTypeName(resnode->restype));
return;
}
@@ -397,8 +398,8 @@ checkretval(Oid rettype, List *queryTreeList)
* If the target list is of length 1, and the type of the varnode in
* the target list is the same as the declared return type, this is
* okay. This can happen, for example, where the body of the function
- * is 'SELECT (x = func2())', where func2 has the same return type
- * as the function that's calling it.
+ * is 'SELECT (x = func2())', where func2 has the same return type as
+ * the function that's calling it.
*/
if (tlistlen == 1)
{
@@ -408,10 +409,10 @@ checkretval(Oid rettype, List *queryTreeList)
}
/*
- * By here, the procedure returns a tuple or set of tuples. This part of
- * the typechecking is a hack. We look up the relation that is the
- * declared return type, and be sure that attributes 1 .. n in the target
- * list match the declared types.
+ * By here, the procedure returns a tuple or set of tuples. This part
+ * of the typechecking is a hack. We look up the relation that is the
+ * declared return type, and be sure that attributes 1 .. n in the
+ * target list match the declared types.
*/
reln = heap_open(typerelid, AccessShareLock);
relid = reln->rd_id;
@@ -436,7 +437,7 @@ checkretval(Oid rettype, List *queryTreeList)
typeidTypeName(rettype),
typeidTypeName(tletype),
typeidTypeName(reln->rd_att->attrs[i]->atttypid),
- i+1);
+ i + 1);
i++;
}
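
The checkretval() hunks above cover the base-type case, where the function's final SELECT must yield exactly one column whose type matches the declared return type; the composite case walks attributes 1..n of the declared relation instead. A simplified sketch of just the base-type check, with plain arrays standing in for the target list (illustrative only, not the backend's code):

    #include <stdio.h>

    typedef unsigned int Oid;    /* stand-in for the backend's Oid */

    /* Simplified model of checkretval(), base-type case only.
     * Returns 0 on success, -1 on a return-type mismatch. */
    static int check_base_return(Oid declared, const Oid *tlist_types, int tlist_len)
    {
        if (tlist_len != 1)
        {
            fprintf(stderr, "function declared to return a base type "
                            "must end with a one-column SELECT\n");
            return -1;
        }
        if (tlist_types[0] != declared)
        {
            fprintf(stderr, "return type mismatch: declared %u, returns %u\n",
                    declared, tlist_types[0]);
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        Oid ok[]  = {23};
        Oid bad[] = {25};
        printf("%d\n", check_base_return(23, ok, 1));   /* 0  */
        printf("%d\n", check_base_return(23, bad, 1));  /* -1 */
        return 0;
    }
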
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 714ea737aec..0fbadb55b2d 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.59 2001/02/12 20:07:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -171,24 +171,24 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
*/
i = 0;
namestrcpy(&name, typeName);
- values[i++] = NameGetDatum(&name); /* 1 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 2 */
- values[i++] = Int16GetDatum(0); /* 3 */
- values[i++] = Int16GetDatum(0); /* 4 */
- values[i++] = BoolGetDatum(false); /* 5 */
- values[i++] = CharGetDatum(0); /* 6 */
- values[i++] = BoolGetDatum(false); /* 7 */
- values[i++] = CharGetDatum(0); /* 8 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 9 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 10 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 11 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 12 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 13 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 14 */
- values[i++] = CharGetDatum('i'); /* 15 */
- values[i++] = CharGetDatum('p'); /* 16 */
+ values[i++] = NameGetDatum(&name); /* 1 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 2 */
+ values[i++] = Int16GetDatum(0); /* 3 */
+ values[i++] = Int16GetDatum(0); /* 4 */
+ values[i++] = BoolGetDatum(false); /* 5 */
+ values[i++] = CharGetDatum(0); /* 6 */
+ values[i++] = BoolGetDatum(false); /* 7 */
+ values[i++] = CharGetDatum(0); /* 8 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 9 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 10 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 11 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 12 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 13 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 14 */
+ values[i++] = CharGetDatum('i'); /* 15 */
+ values[i++] = CharGetDatum('p'); /* 16 */
values[i++] = DirectFunctionCall1(textin,
- CStringGetDatum(typeName)); /* 17 */
+ CStringGetDatum(typeName)); /* 17 */
/* ----------------
* create a new type tuple with FormHeapTuple
@@ -368,16 +368,16 @@ TypeCreate(char *typeName,
*/
i = 0;
namestrcpy(&name, typeName);
- values[i++] = NameGetDatum(&name); /* 1 */
+ values[i++] = NameGetDatum(&name); /* 1 */
values[i++] = Int32GetDatum(GetUserId()); /* 2 */
values[i++] = Int16GetDatum(internalSize); /* 3 */
values[i++] = Int16GetDatum(externalSize); /* 4 */
values[i++] = BoolGetDatum(passedByValue); /* 5 */
values[i++] = CharGetDatum(typeType); /* 6 */
- values[i++] = BoolGetDatum(true); /* 7 */
+ values[i++] = BoolGetDatum(true); /* 7 */
values[i++] = CharGetDatum(typDelim); /* 8 */
values[i++] = ObjectIdGetDatum(typeType == 'c' ? relationOid : InvalidOid); /* 9 */
- values[i++] = ObjectIdGetDatum(elementObjectId); /* 10 */
+ values[i++] = ObjectIdGetDatum(elementObjectId); /* 10 */
procs[0] = inputProcedure;
procs[1] = outputProcedure;
@@ -386,7 +386,7 @@ TypeCreate(char *typeName,
for (j = 0; j < 4; ++j)
{
- Oid procOid;
+ Oid procOid;
procname = procs[j];
@@ -438,27 +438,27 @@ TypeCreate(char *typeName,
func_error("TypeCreate", procname, 1, argList, NULL);
}
- values[i++] = ObjectIdGetDatum(procOid); /* 11 - 14 */
+ values[i++] = ObjectIdGetDatum(procOid); /* 11 - 14 */
}
/* ----------------
* set default alignment
* ----------------
*/
- values[i++] = CharGetDatum(alignment); /* 15 */
+ values[i++] = CharGetDatum(alignment); /* 15 */
/* ----------------
* set default storage for TOAST
* ----------------
*/
- values[i++] = CharGetDatum(storage); /* 16 */
+ values[i++] = CharGetDatum(storage); /* 16 */
/* ----------------
* initialize the default value for this type.
* ----------------
*/
- values[i] = DirectFunctionCall1(textin, /* 17 */
- CStringGetDatum(defaultTypeValue ? defaultTypeValue : "-"));
+ values[i] = DirectFunctionCall1(textin, /* 17 */
+ CStringGetDatum(defaultTypeValue ? defaultTypeValue : "-"));
/* ----------------
* open pg_type and begin a scan for the type name.
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 4db0068da82..f4e056bd0a7 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.14 2001/02/16 03:16:58 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,9 +86,10 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
CommitTransactionCommand();
return;
}
+
/*
- * We can VACUUM ANALYZE any table except pg_statistic.
- * see update_relstats
+ * We can VACUUM ANALYZE any table except pg_statistic. see
+ * update_relstats
*/
if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname),
StatisticRelationName) == 0)
@@ -104,10 +105,12 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel),
RELNAME))
{
- /* we already did an elog during vacuum
- elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it",
- RelationGetRelationName(onerel));
- */
+
+ /*
+ * we already did an elog during vacuum elog(NOTICE, "Skipping
+ * \"%s\" --- only table owner can VACUUM it",
+ * RelationGetRelationName(onerel));
+ */
heap_close(onerel, NoLock);
CommitTransactionCommand();
return;
@@ -136,7 +139,7 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (namestrcmp(&(attr[i]->attname), col) == 0)
break;
}
- if (i < attr_cnt) /* found */
+ if (i < attr_cnt) /* found */
attnums[tcnt++] = i;
else
{
@@ -295,15 +298,16 @@ attr_stats(Relation onerel, int attr_cnt, VacAttrStats *vacattrstats, HeapTuple
stats->nonnull_cnt++;
/*
- * If the value is toasted, detoast it to avoid repeated detoastings
- * and resultant memory leakage inside the comparison routines.
+ * If the value is toasted, detoast it to avoid repeated
+ * detoastings and resultant memory leakage inside the comparison
+ * routines.
*/
if (!stats->attr->attbyval && stats->attr->attlen == -1)
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
else
value = origvalue;
- if (! stats->initialized)
+ if (!stats->initialized)
{
bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
/* best_cnt gets incremented below */
@@ -433,7 +437,7 @@ bucketcpy(Form_pg_attribute attr, Datum value, Datum *bucket, int *bucket_len)
* Of course, this only works for fixed-size never-null columns, but
* dispersion is.
*
- * pg_statistic rows are just added normally. This means that
+ * pg_statistic rows are just added normally. This means that
* pg_statistic will probably contain some deleted rows at the
* completion of a vacuum cycle, unless it happens to get vacuumed last.
*
@@ -467,7 +471,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
VacAttrStats *stats;
attp = (Form_pg_attribute) GETSTRUCT(atup);
- if (attp->attnum <= 0) /* skip system attributes for now */
+ if (attp->attnum <= 0) /* skip system attributes for now */
continue;
for (i = 0; i < natts; i++)
@@ -476,47 +480,45 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
break;
}
if (i >= natts)
- continue; /* skip attr if no stats collected */
+ continue; /* skip attr if no stats collected */
stats = &(vacattrstats[i]);
if (VacAttrStatsEqValid(stats))
{
- float4 selratio; /* average ratio of rows selected
- * for a random constant */
+ float4 selratio; /* average ratio of rows selected
+ * for a random constant */
/* Compute dispersion */
if (stats->nonnull_cnt == 0 && stats->null_cnt == 0)
{
/*
- * empty relation, so put a dummy value in
- * attdispersion
+ * empty relation, so put a dummy value in attdispersion
*/
selratio = 0;
}
else if (stats->null_cnt <= 1 && stats->best_cnt == 1)
{
+
/*
- * looks like we have a unique-key attribute --- flag
- * this with special -1.0 flag value.
+ * looks like we have a unique-key attribute --- flag this
+ * with special -1.0 flag value.
*
- * The correct dispersion is 1.0/numberOfRows, but since
- * the relation row count can get updated without
- * recomputing dispersion, we want to store a
- * "symbolic" value and figure 1.0/numberOfRows on the
- * fly.
+ * The correct dispersion is 1.0/numberOfRows, but since the
+ * relation row count can get updated without recomputing
+ * dispersion, we want to store a "symbolic" value and
+ * figure 1.0/numberOfRows on the fly.
*/
selratio = -1;
}
else
{
if (VacAttrStatsLtGtValid(stats) &&
- stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
+ stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
{
/*
- * exact result when there are just 1 or 2
- * values...
+ * exact result when there are just 1 or 2 values...
*/
double min_cnt_d = stats->min_cnt,
max_cnt_d = stats->max_cnt,
@@ -552,12 +554,12 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
/*
* Create pg_statistic tuples for the relation, if we have
- * gathered the right data. del_stats() previously
- * deleted all the pg_statistic tuples for the rel, so we
- * just have to insert new ones here.
+ * gathered the right data. del_stats() previously deleted
+ * all the pg_statistic tuples for the rel, so we just have to
+ * insert new ones here.
*
- * Note analyze_rel() has seen to it that we won't come here
- * when vacuuming pg_statistic itself.
+ * Note analyze_rel() has seen to it that we won't come here when
+ * vacuuming pg_statistic itself.
*/
if (VacAttrStatsLtGtValid(stats) && stats->initialized)
{
@@ -567,7 +569,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
char *out_string;
double best_cnt_d = stats->best_cnt,
null_cnt_d = stats->null_cnt,
- nonnull_cnt_d = stats->nonnull_cnt; /* prevent overflow */
+ nonnull_cnt_d = stats->nonnull_cnt; /* prevent overflow */
Datum values[Natts_pg_statistic];
char nulls[Natts_pg_statistic];
Relation irelations[Num_pg_statistic_indices];
@@ -585,31 +587,31 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
* ----------------
*/
i = 0;
- values[i++] = ObjectIdGetDatum(relid); /* starelid */
- values[i++] = Int16GetDatum(attp->attnum); /* staattnum */
- values[i++] = ObjectIdGetDatum(stats->op_cmplt); /* staop */
- values[i++] = Float4GetDatum(nullratio); /* stanullfrac */
- values[i++] = Float4GetDatum(bestratio); /* stacommonfrac */
+ values[i++] = ObjectIdGetDatum(relid); /* starelid */
+ values[i++] = Int16GetDatum(attp->attnum); /* staattnum */
+ values[i++] = ObjectIdGetDatum(stats->op_cmplt); /* staop */
+ values[i++] = Float4GetDatum(nullratio); /* stanullfrac */
+ values[i++] = Float4GetDatum(bestratio); /* stacommonfrac */
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->best,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* stacommonval */
- CStringGetDatum(out_string));
+ stats->best,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* stacommonval */
+ CStringGetDatum(out_string));
pfree(out_string);
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->min,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* staloval */
- CStringGetDatum(out_string));
+ stats->min,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* staloval */
+ CStringGetDatum(out_string));
pfree(out_string);
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->max,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* stahival */
- CStringGetDatum(out_string));
+ stats->max,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* stahival */
+ CStringGetDatum(out_string));
pfree(out_string);
stup = heap_formtuple(sd->rd_att, values, nulls);
@@ -682,6 +684,3 @@ del_stats(Oid relid, int attcnt, int *attnums)
*/
heap_close(pgstatistic, NoLock);
}
-
-
-
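
The reflowed comments in update_attstats() spell out how the dispersion value selratio is picked: 0 for an empty relation, the symbolic flag -1.0 for an apparently unique column (later read back as 1.0/numberOfRows), an exact figure when only one or two distinct values were seen, and a derived estimate otherwise. A simplified standalone model of that branch follows; the two estimates whose formulas are not visible in the hunk are left as stubs:

    #include <stdio.h>

    /* Simplified model of how update_attstats() picks attdispersion.
     * Field names mirror the stats counters used in the hunk above. */
    static double compute_selratio(long nonnull_cnt, long null_cnt,
                                   long best_cnt, long min_cnt, long max_cnt)
    {
        if (nonnull_cnt == 0 && null_cnt == 0)
            return 0.0;          /* empty relation: dummy value              */
        if (null_cnt <= 1 && best_cnt == 1)
            return -1.0;         /* looks like a unique key: symbolic flag,
                                  * later read back as 1.0/numberOfRows      */
        if (min_cnt + max_cnt == nonnull_cnt)
            return 0.5;          /* stub: exact result for 1 or 2 values     */
        return 0.1;              /* stub: general estimate from the counters */
    }

    int main(void)
    {
        printf("%g\n", compute_selratio(1000, 0, 1, 0, 0));  /* unique -> -1 */
        printf("%g\n", compute_selratio(0, 0, 0, 0, 0));     /* empty  ->  0 */
        return 0;
    }
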
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 134f3b7af0e..1eb29dcc99a 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.76 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.77 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,7 +130,7 @@ static void NotifyMyFrontEnd(char *relname, int32 listenerPID);
static int AsyncExistsPendingNotify(char *relname);
static void ClearPendingNotifies(void);
-bool Trace_notify = false;
+bool Trace_notify = false;
/*
@@ -161,6 +161,7 @@ Async_Notify(char *relname)
/* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname))
{
+
/*
* We allocate list memory from the global malloc pool to ensure
* that it will live until we want to use it. This is probably
@@ -349,9 +350,7 @@ Async_UnlistenAll()
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
- {
simple_heap_delete(lRel, &lTuple->t_self);
- }
heap_endscan(sRel);
heap_close(lRel, AccessExclusiveLock);
@@ -499,6 +498,7 @@ AtCommit_Notify()
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
+
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
@@ -794,7 +794,7 @@ ProcessIncomingNotify(void)
if (Trace_notify)
elog(DEBUG, "ProcessIncomingNotify: received %s from %d",
- relname, (int) sourcePID);
+ relname, (int) sourcePID);
NotifyMyFrontEnd(relname, sourcePID);
/* Rewrite the tuple with 0 in notification column */
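
The AtCommit_Notify() hunk relies on kill(listenerPID, SIGUSR2) to wake listeners, treating a failed kill() as evidence that the backend is gone and its pg_listener row can be dropped. A small POSIX sketch of that signal-and-clean-up pattern; here the catalog deletion is only reported, not performed:

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <unistd.h>

    static void notify_listener(pid_t listener_pid)
    {
        if (kill(listener_pid, SIGUSR2) < 0)
        {
            if (errno == ESRCH)
                printf("listener %ld is gone, would remove its pg_listener entry\n",
                       (long) listener_pid);
            else
                printf("kill(%ld) failed: %s\n",
                       (long) listener_pid, strerror(errno));
        }
    }

    int main(void)
    {
        signal(SIGUSR2, SIG_IGN);    /* ignore so the self-signal is harmless */
        notify_listener(getpid());   /* delivered: nothing printed            */
        notify_listener(999999);     /* almost certainly no such PID          */
        return 0;
    }
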
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 76f805ca86c..826407c8eb6 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.64 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.65 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
#include "utils/temprel.h"
-static Oid copy_heap(Oid OIDOldHeap, char *NewName, bool istemp);
+static Oid copy_heap(Oid OIDOldHeap, char *NewName, bool istemp);
static void copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName);
static void rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
@@ -75,8 +75,8 @@ cluster(char *oldrelname, char *oldindexname)
StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN);
/*
- * We grab exclusive access to the target rel and index for the duration
- * of the transaction.
+ * We grab exclusive access to the target rel and index for the
+ * duration of the transaction.
*/
OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock);
OIDOldHeap = RelationGetRelid(OldHeap);
@@ -154,8 +154,8 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
- * Need to make a copy of the tuple descriptor,
- * since heap_create_with_catalog modifies it.
+ * Need to make a copy of the tuple descriptor, since
+ * heap_create_with_catalog modifies it.
*/
tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
@@ -164,16 +164,15 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
allowSystemTableMods);
/*
- * Advance command counter so that the newly-created
- * relation's catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's
+ * catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the new relation.
- * Note that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will
- * be visible for insertion.
+ * If necessary, create a TOAST table for the new relation. Note that
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(NewName, true);
@@ -198,12 +197,12 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
/*
* Create a new index like the old one. To do this I get the info
- * from pg_index, and add a new index with a temporary name (that
- * will be changed later).
+ * from pg_index, and add a new index with a temporary name (that will
+ * be changed later).
*
- * NOTE: index_create will cause the new index to be a temp relation
- * if its parent table is, so we don't need to do anything special
- * for the temp-table case here.
+ * NOTE: index_create will cause the new index to be a temp relation if
+ * its parent table is, so we don't need to do anything special for
+ * the temp-table case here.
*/
Old_pg_index_Tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(OIDOldIndex),
@@ -214,7 +213,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
indexInfo = BuildIndexInfo(Old_pg_index_Tuple);
Old_pg_index_relation_Tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(OIDOldIndex),
+ ObjectIdGetDatum(OIDOldIndex),
0, 0, 0);
Assert(Old_pg_index_relation_Tuple);
Old_pg_index_relation_Form = (Form_pg_class) GETSTRUCT(Old_pg_index_relation_Tuple);
@@ -266,13 +265,15 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
LocalHeapTuple.t_datamcxt = NULL;
LocalHeapTuple.t_data = NULL;
heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer);
- if (LocalHeapTuple.t_data != NULL) {
+ if (LocalHeapTuple.t_data != NULL)
+ {
+
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
- * the source relation would get trashed, which is bad news
- * if we abort later on. (This was a bug in releases thru 7.0)
+ * the source relation would get trashed, which is bad news if
+ * we abort later on. (This was a bug in releases thru 7.0)
*/
HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple);
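
The rebuildheap() comment reindented above explains why every fetched tuple is copied before re-insertion: heap_insert() overwrites the commit-status fields of the tuple it is handed, and the fetched tuple still points into a shared disk buffer, so inserting it directly would trash the source relation. A generic rendering of that copy-before-mutate rule, with plain structs standing in for HeapTuple and the heap access routines:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int status; char payload[32]; } Record;

    static Record *copy_record(const Record *src)      /* ~ heap_copytuple */
    {
        Record *dst = malloc(sizeof *dst);
        memcpy(dst, src, sizeof *dst);
        return dst;
    }

    static void insert_record(Record *rec)             /* ~ heap_insert */
    {
        rec->status = 1;            /* mutates its argument, like heap_insert
                                     * overwriting commit-status fields */
    }

    int main(void)
    {
        Record shared_buffer_tuple = {0, "row from the old heap"};
        Record *copied = copy_record(&shared_buffer_tuple);

        insert_record(copied);      /* the buffer-resident original is untouched */
        printf("original status=%d copy status=%d\n",
               shared_buffer_tuple.status, copied->status);
        free(copied);
        return 0;
    }
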
diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c
index 8a3be15a052..49d1edf4c4b 100644
--- a/src/backend/commands/command.c
+++ b/src/backend/commands/command.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.122 2001/02/27 22:07:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
@@ -173,29 +173,29 @@ PerformPortalFetch(char *name,
* at the end of the available tuples in that direction. If so, do
* nothing. (This check exists because not all plan node types are
* robust about being called again if they've already returned NULL
- * once.) If it's OK to do the fetch, call the executor. Then,
+ * once.) If it's OK to do the fetch, call the executor. Then,
* update the atStart/atEnd state depending on the number of tuples
* that were retrieved.
* ----------------
*/
if (forward)
{
- if (! portal->atEnd)
+ if (!portal->atEnd)
{
ExecutorRun(queryDesc, estate, EXEC_FOR, (long) count);
if (estate->es_processed > 0)
- portal->atStart = false; /* OK to back up now */
+ portal->atStart = false; /* OK to back up now */
if (count <= 0 || (int) estate->es_processed < count)
- portal->atEnd = true; /* we retrieved 'em all */
+ portal->atEnd = true; /* we retrieved 'em all */
}
}
else
{
- if (! portal->atStart)
+ if (!portal->atStart)
{
ExecutorRun(queryDesc, estate, EXEC_BACK, (long) count);
if (estate->es_processed > 0)
- portal->atEnd = false; /* OK to go forward now */
+ portal->atEnd = false; /* OK to go forward now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atStart = true; /* we retrieved 'em all */
}
@@ -502,8 +502,8 @@ AlterTableAddColumn(const char *relationName,
heap_close(rel, NoLock);
/*
- * Automatically create the secondary relation for TOAST
- * if it formerly had no such but now has toastable attributes.
+ * Automatically create the secondary relation for TOAST if it
+ * formerly had no such but now has toastable attributes.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relationName, true);
@@ -842,7 +842,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
relcheck = (Form_pg_relcheck) GETSTRUCT(htup);
ccbin = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&relcheck->rcbin)));
+ PointerGetDatum(&relcheck->rcbin)));
node = stringToNode(ccbin);
pfree(ccbin);
if (find_attribute_in_node(node, attnum))
@@ -890,7 +890,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
else
{
htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(index->indexrelid),
+ ObjectIdGetDatum(index->indexrelid),
0, 0, 0);
RemoveIndex(NameStr(((Form_pg_class) GETSTRUCT(htup))->relname));
ReleaseSysCache(htup);
@@ -1106,339 +1106,361 @@ AlterTableAddConstraint(char *relationName,
#endif
/* Disallow ADD CONSTRAINT on views, indexes, sequences, etc */
- if (! is_relation(relationName))
+ if (!is_relation(relationName))
elog(ERROR, "ALTER TABLE ADD CONSTRAINT: %s is not a table",
relationName);
switch (nodeTag(newConstraint))
{
case T_Constraint:
- {
- Constraint *constr = (Constraint *) newConstraint;
-
- switch (constr->contype)
{
- case CONSTR_CHECK:
+ Constraint *constr = (Constraint *) newConstraint;
+
+ switch (constr->contype)
{
- ParseState *pstate;
- bool successful = true;
- HeapScanDesc scan;
- ExprContext *econtext;
- TupleTableSlot *slot;
- HeapTuple tuple;
- RangeTblEntry *rte;
- List *qual;
- List *constlist;
- Relation rel;
- Node *expr;
- char *name;
-
- if (constr->name)
- name = constr->name;
- else
- name = "<unnamed>";
-
- constlist = makeList1(constr);
-
- rel = heap_openr(relationName, AccessExclusiveLock);
-
- /* make sure it is not a view */
- if (rel->rd_rel->relkind == RELKIND_VIEW)
- elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
-
- /*
- * Scan all of the rows, looking for a false match
- */
- scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
- AssertState(scan != NULL);
-
- /*
- * We need to make a parse state and range table to allow
- * us to transformExpr and fix_opids to get a version of
- * the expression we can pass to ExecQual
- */
- pstate = make_parsestate(NULL);
- rte = addRangeTableEntry(pstate, relationName, NULL,
- false, true);
- addRTEtoQuery(pstate, rte, true, true);
-
- /* Convert the A_EXPR in raw_expr into an EXPR */
- expr = transformExpr(pstate, constr->raw_expr,
- EXPR_COLUMN_FIRST);
-
- /*
- * Make sure it yields a boolean result.
- */
- if (exprType(expr) != BOOLOID)
- elog(ERROR, "CHECK '%s' does not yield boolean result",
- name);
-
- /*
- * Make sure no outside relations are referred to.
- */
- if (length(pstate->p_rtable) != 1)
- elog(ERROR, "Only relation '%s' can be referenced in CHECK",
- relationName);
-
- /*
- * Might as well try to reduce any constant expressions.
- */
- expr = eval_const_expressions(expr);
-
- /* And fix the opids */
- fix_opids(expr);
-
- qual = makeList1(expr);
-
- /* Make tuple slot to hold tuples */
- slot = MakeTupleTableSlot();
- ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
- /* Make an expression context for ExecQual */
- econtext = MakeExprContext(slot, CurrentMemoryContext);
-
- /*
- * Scan through the rows now, checking the expression
- * at each row.
- */
- while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- if (!ExecQual(qual, econtext, true))
+ case CONSTR_CHECK:
{
- successful=false;
- break;
- }
- ResetExprContext(econtext);
- }
+ ParseState *pstate;
+ bool successful = true;
+ HeapScanDesc scan;
+ ExprContext *econtext;
+ TupleTableSlot *slot;
+ HeapTuple tuple;
+ RangeTblEntry *rte;
+ List *qual;
+ List *constlist;
+ Relation rel;
+ Node *expr;
+ char *name;
+
+ if (constr->name)
+ name = constr->name;
+ else
+ name = "<unnamed>";
+
+ constlist = makeList1(constr);
+
+ rel = heap_openr(relationName, AccessExclusiveLock);
+
+ /* make sure it is not a view */
+ if (rel->rd_rel->relkind == RELKIND_VIEW)
+ elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
+
+ /*
+ * Scan all of the rows, looking for a false
+ * match
+ */
+ scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
+ AssertState(scan != NULL);
+
+ /*
+ * We need to make a parse state and range
+ * table to allow us to transformExpr and
+ * fix_opids to get a version of the
+ * expression we can pass to ExecQual
+ */
+ pstate = make_parsestate(NULL);
+ rte = addRangeTableEntry(pstate, relationName, NULL,
+ false, true);
+ addRTEtoQuery(pstate, rte, true, true);
+
+ /* Convert the A_EXPR in raw_expr into an EXPR */
+ expr = transformExpr(pstate, constr->raw_expr,
+ EXPR_COLUMN_FIRST);
+
+ /*
+ * Make sure it yields a boolean result.
+ */
+ if (exprType(expr) != BOOLOID)
+ elog(ERROR, "CHECK '%s' does not yield boolean result",
+ name);
+
+ /*
+ * Make sure no outside relations are referred
+ * to.
+ */
+ if (length(pstate->p_rtable) != 1)
+ elog(ERROR, "Only relation '%s' can be referenced in CHECK",
+ relationName);
+
+ /*
+ * Might as well try to reduce any constant
+ * expressions.
+ */
+ expr = eval_const_expressions(expr);
+
+ /* And fix the opids */
+ fix_opids(expr);
+
+ qual = makeList1(expr);
+
+ /* Make tuple slot to hold tuples */
+ slot = MakeTupleTableSlot();
+ ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
+ /* Make an expression context for ExecQual */
+ econtext = MakeExprContext(slot, CurrentMemoryContext);
+
+ /*
+ * Scan through the rows now, checking the
+ * expression at each row.
+ */
+ while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ {
+ ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ if (!ExecQual(qual, econtext, true))
+ {
+ successful = false;
+ break;
+ }
+ ResetExprContext(econtext);
+ }
- FreeExprContext(econtext);
- pfree(slot);
+ FreeExprContext(econtext);
+ pfree(slot);
- heap_endscan(scan);
+ heap_endscan(scan);
- if (!successful)
- {
- heap_close(rel, NoLock);
- elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
- }
- /*
- * Call AddRelationRawConstraints to do the real adding --
- * It duplicates some of the above, but does not check the
- * validity of the constraint against tuples already in
- * the table.
- */
- AddRelationRawConstraints(rel, NIL, constlist);
- heap_close(rel, NoLock);
- pfree(constlist);
-
- break;
+ if (!successful)
+ {
+ heap_close(rel, NoLock);
+ elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
+ }
+
+ /*
+ * Call AddRelationRawConstraints to do the
+ * real adding -- It duplicates some of the
+ * above, but does not check the validity of
+ * the constraint against tuples already in
+ * the table.
+ */
+ AddRelationRawConstraints(rel, NIL, constlist);
+ heap_close(rel, NoLock);
+ pfree(constlist);
+
+ break;
+ }
+ default:
+ elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
}
- default:
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
+ break;
}
- break;
- }
case T_FkConstraint:
- {
- FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
- Relation rel, pkrel;
- HeapScanDesc scan;
- HeapTuple tuple;
- Trigger trig;
- List *list;
- int count;
- List *indexoidlist,
- *indexoidscan;
- Form_pg_attribute *rel_attrs = NULL;
- int i;
- bool found = false;
-
- if (is_temp_rel_name(fkconstraint->pktable_name) &&
- !is_temp_rel_name(relationName))
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
-
- /*
- * Grab an exclusive lock on the pk table, so that someone
- * doesn't delete rows out from under us.
- */
-
- pkrel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
- if (pkrel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referenced table \"%s\" not a relation",
- fkconstraint->pktable_name);
-
- /*
- * Grab an exclusive lock on the fk table, and then scan
- * through each tuple, calling the RI_FKey_Match_Ins
- * (insert trigger) as if that tuple had just been
- * inserted. If any of those fail, it should elog(ERROR)
- * and that's that.
- */
- rel = heap_openr(relationName, AccessExclusiveLock);
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referencing table \"%s\" not a relation",
- relationName);
-
- /* First we check for limited correctness of the constraint */
-
- rel_attrs = pkrel->rd_att->attrs;
- indexoidlist = RelationGetIndexList(pkrel);
-
- foreach(indexoidscan, indexoidlist)
{
- Oid indexoid = lfirsti(indexoidscan);
- HeapTuple indexTuple;
- Form_pg_index indexStruct;
-
- indexTuple = SearchSysCache(INDEXRELID,
- ObjectIdGetDatum(indexoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(indexTuple))
- elog(ERROR, "transformFkeyGetPrimaryKey: index %u not found",
- indexoid);
- indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
-
- if (indexStruct->indisunique)
+ FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
+ Relation rel,
+ pkrel;
+ HeapScanDesc scan;
+ HeapTuple tuple;
+ Trigger trig;
+ List *list;
+ int count;
+ List *indexoidlist,
+ *indexoidscan;
+ Form_pg_attribute *rel_attrs = NULL;
+ int i;
+ bool found = false;
+
+ if (is_temp_rel_name(fkconstraint->pktable_name) &&
+ !is_temp_rel_name(relationName))
+ elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
+
+ /*
+ * Grab an exclusive lock on the pk table, so that someone
+ * doesn't delete rows out from under us.
+ */
+
+ pkrel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
+ if (pkrel->rd_rel->relkind != RELKIND_RELATION)
+ elog(ERROR, "referenced table \"%s\" not a relation",
+ fkconstraint->pktable_name);
+
+ /*
+ * Grab an exclusive lock on the fk table, and then scan
+ * through each tuple, calling the RI_FKey_Match_Ins
+ * (insert trigger) as if that tuple had just been
+ * inserted. If any of those fail, it should elog(ERROR)
+ * and that's that.
+ */
+ rel = heap_openr(relationName, AccessExclusiveLock);
+ if (rel->rd_rel->relkind != RELKIND_RELATION)
+ elog(ERROR, "referencing table \"%s\" not a relation",
+ relationName);
+
+ /*
+ * First we check for limited correctness of the
+ * constraint
+ */
+
+ rel_attrs = pkrel->rd_att->attrs;
+ indexoidlist = RelationGetIndexList(pkrel);
+
+ foreach(indexoidscan, indexoidlist)
{
- List *attrl;
-
- /* Make sure this index has the same number of keys -- It obviously
- * won't match otherwise. */
- for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
- if (i!=length(fkconstraint->pk_attrs))
- found=false;
- else {
- /* go through the fkconstraint->pk_attrs list */
- foreach(attrl, fkconstraint->pk_attrs)
- {
- Ident *attr=lfirst(attrl);
+ Oid indexoid = lfirsti(indexoidscan);
+ HeapTuple indexTuple;
+ Form_pg_index indexStruct;
+
+ indexTuple = SearchSysCache(INDEXRELID,
+ ObjectIdGetDatum(indexoid),
+ 0, 0, 0);
+ if (!HeapTupleIsValid(indexTuple))
+ elog(ERROR, "transformFkeyGetPrimaryKey: index %u not found",
+ indexoid);
+ indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
+
+ if (indexStruct->indisunique)
+ {
+ List *attrl;
+
+ /*
+ * Make sure this index has the same number of
+ * keys -- It obviously won't match otherwise.
+ */
+ for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
+ if (i != length(fkconstraint->pk_attrs))
found = false;
- for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
+ else
+ {
+ /* go through the fkconstraint->pk_attrs list */
+ foreach(attrl, fkconstraint->pk_attrs)
{
- int pkattno = indexStruct->indkey[i];
- if (pkattno>0)
+ Ident *attr = lfirst(attrl);
+
+ found = false;
+ for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
- char *name = NameStr(rel_attrs[pkattno-1]->attname);
- if (strcmp(name, attr->name)==0)
+ int pkattno = indexStruct->indkey[i];
+
+ if (pkattno > 0)
{
- found = true;
- break;
+ char *name = NameStr(rel_attrs[pkattno - 1]->attname);
+
+ if (strcmp(name, attr->name) == 0)
+ {
+ found = true;
+ break;
+ }
}
}
+ if (!found)
+ break;
}
- if (!found)
- break;
}
}
+ ReleaseSysCache(indexTuple);
+ if (found)
+ break;
}
- ReleaseSysCache(indexTuple);
- if (found)
- break;
- }
- if (!found)
- elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
- fkconstraint->pktable_name);
+ if (!found)
+ elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
+ fkconstraint->pktable_name);
- freeList(indexoidlist);
- heap_close(pkrel, NoLock);
+ freeList(indexoidlist);
+ heap_close(pkrel, NoLock);
- rel_attrs = rel->rd_att->attrs;
- if (fkconstraint->fk_attrs!=NIL) {
- List *fkattrs;
- Ident *fkattr;
+ rel_attrs = rel->rd_att->attrs;
+ if (fkconstraint->fk_attrs != NIL)
+ {
+ List *fkattrs;
+ Ident *fkattr;
- found = false;
- foreach(fkattrs, fkconstraint->fk_attrs) {
- int count;
found = false;
- fkattr=lfirst(fkattrs);
- for (count = 0; count < rel->rd_att->natts; count++) {
- char *name = NameStr(rel->rd_att->attrs[count]->attname);
- if (strcmp(name, fkattr->name)==0) {
- found = true;
- break;
+ foreach(fkattrs, fkconstraint->fk_attrs)
+ {
+ int count;
+
+ found = false;
+ fkattr = lfirst(fkattrs);
+ for (count = 0; count < rel->rd_att->natts; count++)
+ {
+ char *name = NameStr(rel->rd_att->attrs[count]->attname);
+
+ if (strcmp(name, fkattr->name) == 0)
+ {
+ found = true;
+ break;
+ }
}
+ if (!found)
+ break;
}
if (!found)
- break;
+ elog(ERROR, "columns referenced in foreign key constraint not found.");
}
- if (!found)
- elog(ERROR, "columns referenced in foreign key constraint not found.");
- }
- trig.tgoid = 0;
- if (fkconstraint->constr_name)
- trig.tgname = fkconstraint->constr_name;
- else
- trig.tgname = "<unknown>";
- trig.tgfoid = 0;
- trig.tgtype = 0;
- trig.tgenabled = TRUE;
- trig.tgisconstraint = TRUE;
- trig.tginitdeferred = FALSE;
- trig.tgdeferrable = FALSE;
-
- trig.tgargs = (char **) palloc(
- sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
- + length(fkconstraint->pk_attrs)));
-
- if (fkconstraint->constr_name)
- trig.tgargs[0] = fkconstraint->constr_name;
- else
- trig.tgargs[0] = "<unknown>";
- trig.tgargs[1] = (char *) relationName;
- trig.tgargs[2] = fkconstraint->pktable_name;
- trig.tgargs[3] = fkconstraint->match_type;
- count = 4;
- foreach(list, fkconstraint->fk_attrs)
+ trig.tgoid = 0;
+ if (fkconstraint->constr_name)
+ trig.tgname = fkconstraint->constr_name;
+ else
+ trig.tgname = "<unknown>";
+ trig.tgfoid = 0;
+ trig.tgtype = 0;
+ trig.tgenabled = TRUE;
+ trig.tgisconstraint = TRUE;
+ trig.tginitdeferred = FALSE;
+ trig.tgdeferrable = FALSE;
+
+ trig.tgargs = (char **) palloc(
+ sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
+ + length(fkconstraint->pk_attrs)));
+
+ if (fkconstraint->constr_name)
+ trig.tgargs[0] = fkconstraint->constr_name;
+ else
+ trig.tgargs[0] = "<unknown>";
+ trig.tgargs[1] = (char *) relationName;
+ trig.tgargs[2] = fkconstraint->pktable_name;
+ trig.tgargs[3] = fkconstraint->match_type;
+ count = 4;
+ foreach(list, fkconstraint->fk_attrs)
{
Ident *fk_at = lfirst(list);
trig.tgargs[count] = fk_at->name;
- count+=2;
+ count += 2;
}
- count = 5;
- foreach(list, fkconstraint->pk_attrs)
+ count = 5;
+ foreach(list, fkconstraint->pk_attrs)
{
Ident *pk_at = lfirst(list);
trig.tgargs[count] = pk_at->name;
- count+=2;
+ count += 2;
}
- trig.tgnargs = count-1;
+ trig.tgnargs = count - 1;
- scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
- AssertState(scan != NULL);
+ scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
+ AssertState(scan != NULL);
- while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
- /* Make a call to the check function */
- /* No parameters are passed, but we do set a context */
- FunctionCallInfoData fcinfo;
- TriggerData trigdata;
+ while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ {
+ /* Make a call to the check function */
+ /* No parameters are passed, but we do set a context */
+ FunctionCallInfoData fcinfo;
+ TriggerData trigdata;
- MemSet(&fcinfo, 0, sizeof(fcinfo));
- /* We assume RI_FKey_check_ins won't look at flinfo... */
+ MemSet(&fcinfo, 0, sizeof(fcinfo));
+ /* We assume RI_FKey_check_ins won't look at flinfo... */
- trigdata.type = T_TriggerData;
- trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
- trigdata.tg_relation = rel;
- trigdata.tg_trigtuple = tuple;
- trigdata.tg_newtuple = NULL;
- trigdata.tg_trigger = &trig;
+ trigdata.type = T_TriggerData;
+ trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
+ trigdata.tg_relation = rel;
+ trigdata.tg_trigtuple = tuple;
+ trigdata.tg_newtuple = NULL;
+ trigdata.tg_trigger = &trig;
- fcinfo.context = (Node *) &trigdata;
+ fcinfo.context = (Node *) &trigdata;
- RI_FKey_check_ins(&fcinfo);
- }
- heap_endscan(scan);
- heap_close(rel, NoLock); /* close rel but keep
- * lock! */
+ RI_FKey_check_ins(&fcinfo);
+ }
+ heap_endscan(scan);
+ heap_close(rel, NoLock); /* close rel but keep
+ * lock! */
- pfree(trig.tgargs);
- break;
- }
+ pfree(trig.tgargs);
+ break;
+ }
default:
elog(ERROR, "ALTER TABLE / ADD CONSTRAINT unable to determine type of constraint passed");
}
@@ -1464,15 +1486,15 @@ AlterTableDropConstraint(const char *relationName,
void
AlterTableOwner(const char *relationName, const char *newOwnerName)
{
- Relation class_rel;
- HeapTuple tuple;
+ Relation class_rel;
+ HeapTuple tuple;
int32 newOwnerSysid;
Relation idescs[Num_pg_class_indices];
/*
* first check that we are a superuser
*/
- if (! superuser())
+ if (!superuser())
elog(ERROR, "ALTER TABLE: permission denied");
/*
@@ -1537,21 +1559,21 @@ AlterTableOwner(const char *relationName, const char *newOwnerName)
void
AlterTableCreateToastTable(const char *relationName, bool silent)
{
- Relation rel;
- Oid myrelid;
- HeapTuple reltup;
- HeapTupleData classtuple;
- TupleDesc tupdesc;
- Relation class_rel;
- Buffer buffer;
- Relation ridescs[Num_pg_class_indices];
- Oid toast_relid;
- Oid toast_idxid;
- char toast_relname[NAMEDATALEN + 1];
- char toast_idxname[NAMEDATALEN + 1];
- Relation toast_idxrel;
- IndexInfo *indexInfo;
- Oid classObjectId[1];
+ Relation rel;
+ Oid myrelid;
+ HeapTuple reltup;
+ HeapTupleData classtuple;
+ TupleDesc tupdesc;
+ Relation class_rel;
+ Buffer buffer;
+ Relation ridescs[Num_pg_class_indices];
+ Oid toast_relid;
+ Oid toast_idxid;
+ char toast_relname[NAMEDATALEN + 1];
+ char toast_idxname[NAMEDATALEN + 1];
+ Relation toast_idxrel;
+ IndexInfo *indexInfo;
+ Oid classObjectId[1];
/*
* permissions checking. XXX exactly what is appropriate here?
@@ -1618,7 +1640,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
/*
* Check to see whether the table actually needs a TOAST table.
*/
- if (! needs_toast_table(rel))
+ if (!needs_toast_table(rel))
{
if (silent)
{
@@ -1652,10 +1674,11 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
"chunk_data",
BYTEAOID,
-1, 0, false);
+
/*
- * Ensure that the toast table doesn't itself get toasted,
- * or we'll be toast :-(. This is essential for chunk_data because
- * type bytea is toastable; hit the other two just to be sure.
+ * Ensure that the toast table doesn't itself get toasted, or we'll be
+ * toast :-(. This is essential for chunk_data because type bytea is
+ * toastable; hit the other two just to be sure.
*/
tupdesc->attrs[0]->attstorage = 'p';
tupdesc->attrs[1]->attstorage = 'p';
@@ -1733,7 +1756,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
@@ -1745,7 +1768,7 @@ needs_toast_table(Relation rel)
bool maxlength_unknown = false;
bool has_toastable_attrs = false;
TupleDesc tupdesc;
- Form_pg_attribute *att;
+ Form_pg_attribute *att;
int32 tuple_length;
int i;
@@ -1762,8 +1785,8 @@ needs_toast_table(Relation rel)
}
else
{
- int32 maxlen = type_maximum_size(att[i]->atttypid,
- att[i]->atttypmod);
+ int32 maxlen = type_maximum_size(att[i]->atttypid,
+ att[i]->atttypmod);
if (maxlen < 0)
maxlength_unknown = true;
@@ -1798,7 +1821,7 @@ LockTableCommand(LockStmt *lockstmt)
rel = heap_openr(lockstmt->relname, NoLock);
if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "LOCK TABLE: %s is not a table", lockstmt->relname);
+ elog(ERROR, "LOCK TABLE: %s is not a table", lockstmt->relname);
if (lockstmt->mode == AccessShareLock)
aclresult = pg_aclcheck(lockstmt->relname, GetUserId(), ACL_RD);
@@ -1817,9 +1840,9 @@ LockTableCommand(LockStmt *lockstmt)
static bool
is_relation(char *name)
{
- Relation rel = heap_openr(name, NoLock);
+ Relation rel = heap_openr(name, NoLock);
- bool retval = (rel->rd_rel->relkind == RELKIND_RELATION);
+ bool retval = (rel->rd_rel->relkind == RELKIND_RELATION);
heap_close(rel, NoLock);
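
The comment ahead of needs_toast_table() gives the two conditions for creating a TOAST table: at least one toastable attribute, and a maximum tuple length that could exceed TOAST_TUPLE_THRESHOLD. A cut-down standalone version of that test over a plain attribute array; the threshold constant and per-attribute fields below are illustrative, not the catalog's:

    #include <stdbool.h>
    #include <stdio.h>

    #define TOAST_TUPLE_THRESHOLD 2000   /* illustrative value only */

    typedef struct
    {
        bool toastable;    /* roughly: attstorage != 'p'              */
        int  maxlen;       /* maximum on-disk length, -1 if unbounded */
    } Attr;

    /* Simplified needs_toast_table(): the real code also accounts for
     * per-tuple overhead when summing the lengths. */
    static bool needs_toast_table(const Attr *att, int natts)
    {
        bool has_toastable = false;
        bool unknown = false;
        long tuple_length = 0;
        int  i;

        for (i = 0; i < natts; i++)
        {
            if (att[i].maxlen < 0)
                unknown = true;
            else
                tuple_length += att[i].maxlen;
            if (att[i].toastable)
                has_toastable = true;
        }
        if (!has_toastable)
            return false;                         /* nothing to shrink       */
        if (unknown)
            return true;                          /* could grow arbitrarily  */
        return tuple_length > TOAST_TUPLE_THRESHOLD;
    }

    int main(void)
    {
        Attr small[] = {{true, 24}};              /* e.g. "f1 varchar(20)" */
        Attr big[]   = {{true, -1}};              /* e.g. a text column    */
        printf("%d %d\n", needs_toast_table(small, 1), needs_toast_table(big, 1));
        return 0;
    }
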
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 46e8b8057ec..06397ab323f 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1999, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.26 2001/01/23 04:32:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.27 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ static void CommentAggregate(char *aggregate, List *arguments, char *comment);
static void CommentProc(char *function, List *arguments, char *comment);
static void CommentOperator(char *opname, List *arguments, char *comment);
static void CommentTrigger(char *trigger, char *relation, char *comments);
-static void CreateComments(Oid oid, char *comment);
+static void CreateComments(Oid oid, char *comment);
/*------------------------------------------------------------------
* CommentObject --
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index b518ef572e1..f586869b078 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.134 2001/03/14 21:47:50 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,6 +76,7 @@ static StringInfoData attribute_buf;
#ifdef MULTIBYTE
static int client_encoding;
static int server_encoding;
+
#endif
@@ -285,6 +286,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
"directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone.");
+
/*
* This restriction is unfortunate, but necessary until the frontend
* COPY protocol is redesigned to be binary-safe...
@@ -344,8 +346,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
mode_t oumask; /* Pre-existing umask value */
/*
- * Prevent write to relative path ... too easy to shoot oneself
- * in the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot
+ * oneself in the foot by overwriting a database file ...
*/
if (filename[0] != '/')
elog(ERROR, "Relative path not allowed for server side"
@@ -408,7 +410,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
attr_count = rel->rd_att->natts;
attr = rel->rd_att->attrs;
- /* For binary copy we really only need isvarlena, but compute it all... */
+ /*
+ * For binary copy we really only need isvarlena, but compute it
+ * all...
+ */
out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
elements = (Oid *) palloc(attr_count * sizeof(Oid));
isvarlena = (bool *) palloc(attr_count * sizeof(bool));
@@ -417,7 +422,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Oid out_func_oid;
if (!getTypeOutputInfo(attr[i]->atttypid,
- &out_func_oid, &elements[i], &isvarlena[i]))
+ &out_func_oid, &elements[i], &isvarlena[i]))
elog(ERROR, "COPY: couldn't lookup info for type %u",
attr[i]->atttypid);
fmgr_info(out_func_oid, &out_functions[i]);
@@ -454,7 +459,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (binary)
{
/* Binary per-tuple header */
- int16 fld_count = attr_count;
+ int16 fld_count = attr_count;
CopySendData(&fld_count, sizeof(int16), fp);
/* Send OID if wanted --- note fld_count doesn't include it */
@@ -471,7 +476,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (oids)
{
string = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(tuple->t_data->t_oid)));
+ ObjectIdGetDatum(tuple->t_data->t_oid)));
CopySendString(string, fp);
pfree(string);
need_delim = true;
@@ -497,20 +502,22 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
{
if (!binary)
{
- CopySendString(null_print, fp); /* null indicator */
+ CopySendString(null_print, fp); /* null indicator */
}
else
{
- fld_size = 0; /* null marker */
+ fld_size = 0; /* null marker */
CopySendData(&fld_size, sizeof(int16), fp);
}
}
else
{
+
/*
- * If we have a toasted datum, forcibly detoast it to avoid
- * memory leakage inside the type's output routine (or
- * for binary case, becase we must output untoasted value).
+ * If we have a toasted datum, forcibly detoast it to
+ * avoid memory leakage inside the type's output routine
+ * (or for binary case, becase we must output untoasted
+ * value).
*/
if (isvarlena[i])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@@ -520,9 +527,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (!binary)
{
string = DatumGetCString(FunctionCall3(&out_functions[i],
- value,
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod)));
+ value,
+ ObjectIdGetDatum(elements[i]),
+ Int32GetDatum(attr[i]->atttypmod)));
CopyAttributeOut(fp, string, delim);
pfree(string);
}
@@ -552,8 +559,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
- * We need this horsing around because we don't know
- * how shorter data values are aligned within a Datum.
+ * We need this horsing around because we don't
+ * know how shorter data values are aligned within
+ * a Datum.
*/
store_att_byval(&datumBuf, value, fld_size);
CopySendData(&datumBuf,
@@ -577,7 +585,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (binary)
{
/* Generate trailer for a binary copy */
- int16 fld_count = -1;
+ int16 fld_count = -1;
CopySendData(&fld_count, sizeof(int16), fp);
}
@@ -609,7 +617,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
int done = 0;
char *string;
ResultRelInfo *resultRelInfo;
- EState *estate = CreateExecutorState(); /* for ExecConstraints() */
+ EState *estate = CreateExecutorState(); /* for ExecConstraints() */
TupleTable tupleTable;
TupleTableSlot *slot;
Oid loaded_oid = InvalidOid;
@@ -622,11 +630,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
/*
* We need a ResultRelInfo so we can use the regular executor's
- * index-entry-making machinery. (There used to be a huge amount
- * of code here that basically duplicated execUtils.c ...)
+ * index-entry-making machinery. (There used to be a huge amount of
+ * code here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
- resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
+ resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
resultRelInfo->ri_RelationDesc = rel;
ExecOpenIndices(resultRelInfo);
@@ -673,7 +681,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: bogus file header (missing flags)");
file_has_oids = (tmp & (1 << 16)) != 0;
- tmp &= ~ (1 << 16);
+ tmp &= ~(1 << 16);
if ((tmp >> 16) != 0)
elog(ERROR, "COPY BINARY: unrecognized critical flags in header");
/* Header extension length */
@@ -727,7 +735,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
else
{
loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(string)));
+ CStringGetDatum(string)));
if (loaded_oid == InvalidOid)
elog(ERROR, "COPY TEXT: Invalid Oid");
}
@@ -747,8 +755,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
{
values[i] = FunctionCall3(&in_functions[i],
CStringGetDatum(string),
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod));
+ ObjectIdGetDatum(elements[i]),
+ Int32GetDatum(attr[i]->atttypmod));
nulls[i] = ' ';
}
}
@@ -757,8 +765,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
}
else
{ /* binary */
- int16 fld_count,
- fld_size;
+ int16 fld_count,
+ fld_size;
CopyGetData(&fld_count, sizeof(int16), fp);
if (CopyGetEof(fp) ||
@@ -791,15 +799,15 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: unexpected EOF");
if (fld_size == 0)
- continue; /* it's NULL; nulls[i] already set */
+ continue; /* it's NULL; nulls[i] already set */
if (fld_size != attr[i]->attlen)
elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
- i+1, (int) fld_size, (int) attr[i]->attlen);
+ i + 1, (int) fld_size, (int) attr[i]->attlen);
if (fld_size == -1)
{
/* varlena field */
- int32 varlena_size;
- Pointer varlena_ptr;
+ int32 varlena_size;
+ Pointer varlena_ptr;
CopyGetData(&varlena_size, sizeof(int32), fp);
if (CopyGetEof(fp))
@@ -818,7 +826,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
else if (!attr[i]->attbyval)
{
/* fixed-length pass-by-reference */
- Pointer refval_ptr;
+ Pointer refval_ptr;
Assert(fld_size > 0);
refval_ptr = (Pointer) palloc(fld_size);
@@ -833,8 +841,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
- * We need this horsing around because we don't know
- * how shorter data values are aligned within a Datum.
+ * We need this horsing around because we don't
+ * know how shorter data values are aligned within
+ * a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
@@ -1163,6 +1172,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
char *string_start;
int mblen;
int i;
+
#endif
#ifdef MULTIBYTE
@@ -1182,7 +1192,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
#endif
#ifdef MULTIBYTE
- for (; (mblen = (server_encoding == client_encoding? 1 : pg_encoding_mblen(client_encoding, string))) &&
+ for (; (mblen = (server_encoding == client_encoding ? 1 : pg_encoding_mblen(client_encoding, string))) &&
((c = *string) != '\0'); string += mblen)
#else
for (; (c = *string) != '\0'; string++)
@@ -1199,7 +1209,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
}
#ifdef MULTIBYTE
- if (client_encoding != server_encoding)
+ if (client_encoding != server_encoding)
pfree(string_start); /* pfree pg_server_to_client result */
#endif
}
diff --git a/src/backend/commands/creatinh.c b/src/backend/commands/creatinh.c
index a043cf0b8e0..c4a5eaa00e9 100644
--- a/src/backend/commands/creatinh.c
+++ b/src/backend/commands/creatinh.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.72 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
static int checkAttrExists(const char *attributeName,
const char *attributeType, List *schema);
static List *MergeAttributes(List *schema, List *supers, bool istemp,
- List **supOids, List **supconstr);
+ List **supOids, List **supconstr);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
@@ -150,10 +150,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't
- * see the new rel anyway until we commit), but it keeps the lock manager
- * from complaining about deadlock risks.
+ * see the new rel anyway until we commit), but it keeps the lock
+ * manager from complaining about deadlock risks.
*/
rel = heap_openr(relname, AccessExclusiveLock);
@@ -242,7 +242,7 @@ TruncateRelation(char *name)
* Varattnos of pg_relcheck.rcbin should be rewritten when
* subclasses inherit the constraints from the super class.
* Note that these functions rewrite varattnos while walking
- * through a node tree.
+ * through a node tree.
*/
static bool
change_varattnos_walker(Node *node, const AttrNumber *newattno)
@@ -251,15 +251,15 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
return false;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
if (var->varlevelsup == 0 && var->varno == 1)
{
+
/*
- * ??? the following may be a problem when the
- * node is multiply referenced though
- * stringToNode() doesn't create such a node
- * currently.
+ * ??? the following may be a problem when the node is
+ * multiply referenced though stringToNode() doesn't create
+ * such a node currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
@@ -373,9 +373,12 @@ MergeAttributes(List *schema, List *supers, bool istemp,
AttrNumber attrno;
TupleDesc tupleDesc;
TupleConstr *constr;
- AttrNumber *newattno, *partialAttidx;
- Node *expr;
- int i, attidx, attno_exist;
+ AttrNumber *newattno,
+ *partialAttidx;
+ Node *expr;
+ int i,
+ attidx,
+ attno_exist;
relation = heap_openr(name, AccessShareLock);
@@ -385,7 +388,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && is_temp_rel_name(name))
elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name);
- /* We should have an UNDER permission flag for this, but for now,
+ /*
+ * We should have an UNDER permission flag for this, but for now,
* demand that creator of a child table own the parent.
*/
if (!pg_ownercheck(GetUserId(), name, RELNAME))
@@ -397,14 +401,15 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/* allocate a new attribute number table and initialize */
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
- newattno [i] = 0;
+ newattno[i] = 0;
+
/*
- * searching and storing order are different.
- * another table is needed.
- */
+ * searching and storing order are different. another table is
+ * needed.
+ */
partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
- partialAttidx [i] = 0;
+ partialAttidx[i] = 0;
constr = tupleDesc->constr;
attidx = 0;
@@ -577,9 +582,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
Datum datum[Natts_pg_inherits];
char nullarr[Natts_pg_inherits];
- datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
- datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
- datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
+ datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
+ datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
+ datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
nullarr[1] = ' ';
@@ -730,7 +735,7 @@ checkAttrExists(const char *attributeName, const char *attributeType,
List *schema)
{
List *s;
- int i = 0;
+ int i = 0;
foreach(s, schema)
{
@@ -756,9 +761,9 @@ checkAttrExists(const char *attributeName, const char *attributeType,
static void
setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
{
- Relation relationRelation;
- HeapTuple tuple;
- Relation idescs[Num_pg_class_indices];
+ Relation relationRelation;
+ HeapTuple tuple;
+ Relation idescs[Num_pg_class_indices];
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index c450f1b400a..cd409781b2b 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.73 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.74 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@
/* non-export function prototypes */
static bool get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
- int *encodingP, bool *dbIsTemplateP,
- Oid *dbLastSysOidP, char *dbpath);
+ int *encodingP, bool *dbIsTemplateP,
+ Oid *dbLastSysOidP, char *dbpath);
static bool get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb);
static char *resolve_alt_dbpath(const char *dbpath, Oid dboid);
static bool remove_dbdirs(const char *real_loc, const char *altloc);
@@ -82,12 +82,12 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: may not be called in a transaction block");
/*
- * Check for db name conflict. There is a race condition here, since
+ * Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
- * However, holding an exclusive lock on pg_database for the whole time
- * we are copying the source database doesn't seem like a good idea,
- * so accept possibility of race to create. We will check again after
- * we grab the exclusive lock.
+ * However, holding an exclusive lock on pg_database for the whole
+ * time we are copying the source database doesn't seem like a good
+ * idea, so accept possibility of race to create. We will check again
+ * after we grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL))
elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
@@ -96,15 +96,16 @@ createdb(const char *dbname, const char *dbpath,
* Lookup database (template) to be cloned.
*/
if (!dbtemplate)
- dbtemplate = "template1"; /* Default template database name */
+ dbtemplate = "template1"; /* Default template database name */
if (!get_db_info(dbtemplate, &src_dboid, &src_owner, &src_encoding,
&src_istemplate, &src_lastsysoid, src_dbpath))
elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
dbtemplate);
+
/*
- * Permission check: to copy a DB that's not marked datistemplate,
- * you must be superuser or the owner thereof.
+ * Permission check: to copy a DB that's not marked datistemplate, you
+ * must be superuser or the owner thereof.
*/
if (!src_istemplate)
{
@@ -112,6 +113,7 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate);
}
+
/*
* Determine physical path of source database
*/
@@ -133,14 +135,16 @@ createdb(const char *dbname, const char *dbpath,
if (encoding < 0)
encoding = src_encoding;
- /*
- * Preassign OID for pg_database tuple, so that we can compute db path.
+ /*
+ * Preassign OID for pg_database tuple, so that we can compute db
+ * path.
*/
dboid = newoid();
/*
- * Compute nominal location (where we will try to access the database),
- * and resolve alternate physical location if one is specified.
+ * Compute nominal location (where we will try to access the
+ * database), and resolve alternate physical location if one is
+ * specified.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@@ -155,8 +159,8 @@ createdb(const char *dbname, const char *dbpath,
/*
* Force dirty buffers out to disk, to ensure source database is
- * up-to-date for the copy. (We really only need to flush buffers
- * for the source database...)
+ * up-to-date for the copy. (We really only need to flush buffers for
+ * the source database...)
*/
BufferSync();
@@ -231,7 +235,8 @@ createdb(const char *dbname, const char *dbpath,
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
- tuple->t_data->t_oid = dboid; /* override heap_insert's OID selection */
+ tuple->t_data->t_oid = dboid; /* override heap_insert's OID
+ * selection */
heap_insert(pg_database_rel, tuple);
@@ -273,9 +278,9 @@ dropdb(const char *dbname)
bool db_istemplate;
bool use_super;
Oid db_id;
- char *alt_loc;
- char *nominal_loc;
- char dbpath[MAXPGPATH];
+ char *alt_loc;
+ char *nominal_loc;
+ char dbpath[MAXPGPATH];
Relation pgdbrel;
HeapScanDesc pgdbscan;
ScanKeyData key;
@@ -311,8 +316,8 @@ dropdb(const char *dbname)
elog(ERROR, "DROP DATABASE: permission denied");
/*
- * Disallow dropping a DB that is marked istemplate. This is just
- * to prevent people from accidentally dropping template0 or template1;
+ * Disallow dropping a DB that is marked istemplate. This is just to
+ * prevent people from accidentally dropping template0 or template1;
* they can do so if they're really determined ...
*/
if (db_istemplate)
@@ -338,6 +343,7 @@ dropdb(const char *dbname)
tup = heap_getnext(pgdbscan, 0);
if (!HeapTupleIsValid(tup))
{
+
/*
* This error should never come up since the existence of the
* database is checked earlier
@@ -437,7 +443,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
tmptext = DatumGetTextP(heap_getattr(tuple,
Anum_pg_database_datpath,
- RelationGetDescr(relation),
+ RelationGetDescr(relation),
&isnull));
if (!isnull)
{
@@ -481,11 +487,11 @@ get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb)
static char *
-resolve_alt_dbpath(const char * dbpath, Oid dboid)
+resolve_alt_dbpath(const char *dbpath, Oid dboid)
{
- const char * prefix;
- char * ret;
- size_t len;
+ const char *prefix;
+ char *ret;
+ size_t len;
if (dbpath == NULL || dbpath[0] == '\0')
return NULL;
@@ -502,7 +508,8 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
else
{
/* must be environment variable */
- char * var = getenv(dbpath);
+ char *var = getenv(dbpath);
+
if (!var)
elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
if (var[0] != '/')
@@ -519,11 +526,11 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
static bool
-remove_dbdirs(const char * nominal_loc, const char * alt_loc)
+remove_dbdirs(const char *nominal_loc, const char *alt_loc)
{
- const char *target_dir;
- char buf[MAXPGPATH + 100];
- bool success = true;
+ const char *target_dir;
+ char buf[MAXPGPATH + 100];
+ bool success = true;
target_dir = alt_loc ? alt_loc : nominal_loc;
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 4f5f8a47f64..c8a2726a8f7 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.52 2001/02/12 20:07:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -70,7 +70,7 @@ case_translate_language_name(const char *input, char *output)
--------------------------------------------------------------------------*/
int i;
- for (i = 0; i < NAMEDATALEN-1 && input[i]; ++i)
+ for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
output[i] = '\0';
@@ -110,12 +110,12 @@ compute_full_attributes(List *parameters,
Note: currently, only two of these parameters actually do anything:
* canCache means the optimizer's constant-folder is allowed to
- pre-evaluate the function when all its inputs are constants.
+ pre-evaluate the function when all its inputs are constants.
* isStrict means the function should not be called when any NULL
- inputs are present; instead a NULL result value should be assumed.
+ inputs are present; instead a NULL result value should be assumed.
- The other four parameters are not used anywhere. They used to be
+ The other four parameters are not used anywhere. They used to be
used in the "expensive functions" optimizer, but that's been dead code
for a long time.
@@ -217,21 +217,26 @@ void
CreateFunction(ProcedureStmt *stmt, CommandDest dest)
{
char *probin_str;
+
/* pathname of executable file that executes this function, if any */
char *prosrc_str;
+
/* SQL that executes this function, if any */
char *prorettype;
+
/* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN];
+
/*
- * name of language of function, with case adjusted: "C",
- * "internal", "sql", etc.
+ * name of language of function, with case adjusted: "C", "internal",
+ * "sql", etc.
*/
bool returnsSet;
+
/* The function returns a set of values, as opposed to a singleton. */
/*
@@ -257,7 +262,7 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
if (!superuser())
elog(ERROR,
"Only users with Postgres superuser privilege are "
- "permitted to create a function in the '%s' language.\n\t"
+ "permitted to create a function in the '%s' language.\n\t"
"Others may use the 'sql' language "
"or the created procedural languages.",
languageName);
@@ -380,14 +385,14 @@ DefineOperator(char *oprName,
{
typeName1 = defGetString(defel);
if (IsA(defel->arg, TypeName)
- && ((TypeName *) defel->arg)->setof)
+ &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for leftarg");
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
typeName2 = defGetString(defel);
if (IsA(defel->arg, TypeName)
- && ((TypeName *) defel->arg)->setof)
+ &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for rightarg");
}
else if (strcasecmp(defel->defname, "procedure") == 0)
@@ -478,8 +483,8 @@ DefineAggregate(char *aggName, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl);
/*
- * sfunc1, stype1, and initcond1 are accepted as obsolete spellings
- * for sfunc, stype, initcond.
+ * sfunc1, stype1, and initcond1 are accepted as obsolete
+ * spellings for sfunc, stype, initcond.
*/
if (strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetString(defel);
@@ -515,12 +520,12 @@ DefineAggregate(char *aggName, List *parameters)
/*
* Most of the argument-checking is done inside of AggregateCreate
*/
- AggregateCreate(aggName, /* aggregate name */
- transfuncName, /* step function name */
- finalfuncName, /* final function name */
- baseType, /* type of data being aggregated */
- transType, /* transition data type */
- initval); /* initial condition */
+ AggregateCreate(aggName, /* aggregate name */
+ transfuncName, /* step function name */
+ finalfuncName, /* final function name */
+ baseType, /* type of data being aggregated */
+ transType, /* transition data type */
+ initval); /* initial condition */
}
/*
@@ -543,13 +548,13 @@ DefineType(char *typeName, List *parameters)
char delimiter = DEFAULT_TYPDELIM;
char *shadow_type;
List *pl;
- char alignment = 'i'; /* default alignment */
+ char alignment = 'i';/* default alignment */
char storage = 'p'; /* default storage in TOAST */
/*
- * Type names must be one character shorter than other names,
- * allowing room to create the corresponding array type name with
- * prepended "_".
+ * Type names must be one character shorter than other names, allowing
+ * room to create the corresponding array type name with prepended
+ * "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
{
@@ -692,14 +697,16 @@ defGetString(DefElem *def)
switch (nodeTag(def->arg))
{
case T_Integer:
- {
- char *str = palloc(32);
+ {
+ char *str = palloc(32);
- snprintf(str, 32, "%ld", (long) intVal(def->arg));
- return str;
- }
+ snprintf(str, 32, "%ld", (long) intVal(def->arg));
+ return str;
+ }
case T_Float:
- /* T_Float values are kept in string form, so this type cheat
+
+ /*
+ * T_Float values are kept in string form, so this type cheat
* works (and doesn't risk losing precision)
*/
return strVal(def->arg);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 31f24d88a6f..672ec54cb02 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.64 2001/01/27 01:41:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.65 2001/03/22 03:59:22 momjian Exp $
*
*/
@@ -271,7 +271,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
stringStringInfo(rte->relname));
if (strcmp(rte->eref->relname, rte->relname) != 0)
appendStringInfo(str, " %s",
- stringStringInfo(rte->eref->relname));
+ stringStringInfo(rte->eref->relname));
}
break;
case T_SubqueryScan:
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 6497da615b8..2d3e70c427b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.45 2001/02/23 09:26:14 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,15 +49,15 @@ static void CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid);
static void CheckPredExpr(Node *predicate, List *rangeTable, Oid baseRelOid);
static void CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid);
static void FuncIndexArgs(IndexInfo *indexInfo, Oid *classOidP,
- IndexElem *funcIndex,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ IndexElem *funcIndex,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static void NormIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
-static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
+static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
+ char *accessMethodName, Oid accessMethodId);
static char *GetDefaultOpClass(Oid atttypid);
/*
@@ -118,9 +118,9 @@ DefineIndex(char *heapRelationName,
accessMethodName);
/*
- * XXX Hardwired hacks to check for limitations on supported index types.
- * We really ought to be learning this info from entries in the pg_am
- * table, instead of having it wired in here!
+ * XXX Hardwired hacks to check for limitations on supported index
+ * types. We really ought to be learning this info from entries in the
+ * pg_am table, instead of having it wired in here!
*/
if (unique && accessMethodId != BTREE_AM_OID)
elog(ERROR, "DefineIndex: unique indices are only available with the btree access method");
@@ -161,7 +161,8 @@ DefineIndex(char *heapRelationName,
elog(ERROR, "Existing indexes are inactive. REINDEX first");
/*
- * Prepare arguments for index_create, primarily an IndexInfo structure
+ * Prepare arguments for index_create, primarily an IndexInfo
+ * structure
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_Predicate = (Node *) cnfPred;
@@ -207,7 +208,7 @@ DefineIndex(char *heapRelationName,
/*
* We update the relation's pg_class tuple even if it already has
- * relhasindex = true. This is needed to cause a shared-cache-inval
+ * relhasindex = true. This is needed to cause a shared-cache-inval
* message to be sent for the pg_class tuple, which will cause other
* backends to flush their relcache entries and in particular their
* cached lists of the indexes for this relation.
@@ -415,8 +416,8 @@ FuncIndexArgs(IndexInfo *indexInfo,
* has exact-match or binary-compatible input types.
* ----------------
*/
- if (! func_get_detail(funcIndex->name, nargs, argTypes,
- &funcid, &rettype, &retset, &true_typeids))
+ if (!func_get_detail(funcIndex->name, nargs, argTypes,
+ &funcid, &rettype, &retset, &true_typeids))
func_error("DefineIndex", funcIndex->name, nargs, argTypes, NULL);
if (retset)
@@ -425,7 +426,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
for (i = 0; i < nargs; i++)
{
if (argTypes[i] != true_typeids[i] &&
- ! IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
+ !IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
func_error("DefineIndex", funcIndex->name, nargs, argTypes,
"Index function must be binary-compatible with table datatype");
}
@@ -439,7 +440,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
indexInfo->ii_FuncOid = funcid;
/* Need to do the fmgr function lookup now, too */
- fmgr_info(funcid, & indexInfo->ii_FuncInfo);
+ fmgr_info(funcid, &indexInfo->ii_FuncInfo);
}
static void
@@ -477,7 +478,7 @@ NormIndexAttrs(IndexInfo *indexInfo,
indexInfo->ii_KeyAttrNumbers[attn] = attform->attnum;
classOidP[attn] = GetAttrOpClass(attribute, attform->atttypid,
- accessMethodName, accessMethodId);
+ accessMethodName, accessMethodId);
ReleaseSysCache(atttuple);
attn++;
@@ -515,8 +516,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
attribute->class);
/*
- * Assume the opclass is supported by this index access method
- * if we can find at least one relevant entry in pg_amop.
+ * Assume the opclass is supported by this index access method if we
+ * can find at least one relevant entry in pg_amop.
*/
ScanKeyEntryInitialize(&entry[0], 0,
Anum_pg_amop_amopid,
@@ -530,7 +531,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
relation = heap_openr(AccessMethodOperatorRelationName, AccessShareLock);
scan = heap_beginscan(relation, false, SnapshotNow, 2, entry);
- if (! HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ if (!HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
elog(ERROR, "DefineIndex: opclass \"%s\" not supported by access method \"%s\"",
attribute->class, accessMethodName);
@@ -540,17 +541,18 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
heap_close(relation, AccessShareLock);
/*
- * Make sure the operators associated with this opclass actually accept
- * the column data type. This prevents possible coredumps caused by
- * user errors like applying text_ops to an int4 column. We will accept
- * an opclass as OK if the operator's input datatype is binary-compatible
- * with the actual column datatype. Note we assume that all the operators
- * associated with an opclass accept the same datatypes, so checking the
- * first one we happened to find in the table is sufficient.
+ * Make sure the operators associated with this opclass actually
+ * accept the column data type. This prevents possible coredumps
+ * caused by user errors like applying text_ops to an int4 column. We
+ * will accept an opclass as OK if the operator's input datatype is
+ * binary-compatible with the actual column datatype. Note we assume
+ * that all the operators associated with an opclass accept the same
+ * datatypes, so checking the first one we happened to find in the
+ * table is sufficient.
*
* If the opclass was the default for the datatype, assume we can skip
- * this check --- that saves a few cycles in the most common case.
- * If pg_opclass is wrong then we're probably screwed anyway...
+ * this check --- that saves a few cycles in the most common case. If
+ * pg_opclass is wrong then we're probably screwed anyway...
*/
if (doTypeCheck)
{
@@ -560,11 +562,11 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
if (HeapTupleIsValid(tuple))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tuple);
- Oid opInputType = (optup->oprkind == 'l') ?
- optup->oprright : optup->oprleft;
+ Oid opInputType = (optup->oprkind == 'l') ?
+ optup->oprright : optup->oprleft;
if (attrType != opInputType &&
- ! IS_BINARY_COMPATIBLE(attrType, opInputType))
+ !IS_BINARY_COMPATIBLE(attrType, opInputType))
elog(ERROR, "DefineIndex: opclass \"%s\" does not accept datatype \"%s\"",
attribute->class, typeidTypeName(attrType));
ReleaseSysCache(tuple);
@@ -660,7 +662,7 @@ ReindexIndex(const char *name, bool force /* currently unused */ )
if (IsIgnoringSystemIndexes())
overwrite = true;
if (!reindex_index(tuple->t_data->t_oid, force, overwrite))
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
elog(NOTICE, "index \"%s\" wasn't reindexed", name);
ReleaseSysCache(tuple);
@@ -752,18 +754,18 @@ ReindexDatabase(const char *dbname, bool force, bool all)
elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
/*
- * We cannot run inside a user transaction block; if we were
- * inside a transaction, then our commit- and
- * start-transaction-command calls would not have the intended effect!
+ * We cannot run inside a user transaction block; if we were inside a
+ * transaction, then our commit- and start-transaction-command calls
+ * would not have the intended effect!
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
/*
- * Create a memory context that will survive forced transaction commits
- * we do below. Since it is a child of QueryContext, it will go away
- * eventually even if we suffer an error; there's no need for special
- * abort cleanup logic.
+ * Create a memory context that will survive forced transaction
+ * commits we do below. Since it is a child of QueryContext, it will
+ * go away eventually even if we suffer an error; there's no need for
+ * special abort cleanup logic.
*/
private_context = AllocSetContextCreate(QueryContext,
"ReindexDatabase",
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index bbf008c918e..ca1dbf3cbe4 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -111,7 +111,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
i = 0;
values[i++] = PointerGetDatum(languageName);
- values[i++] = BoolGetDatum(true); /* lanispl */
+ values[i++] = BoolGetDatum(true); /* lanispl */
values[i++] = BoolGetDatum(stmt->pltrusted);
values[i++] = ObjectIdGetDatum(procTup->t_data->t_oid);
values[i++] = DirectFunctionCall1(textin,
diff --git a/src/backend/commands/remove.c b/src/backend/commands/remove.c
index 2c271758e08..da5ad74d8ba 100644
--- a/src/backend/commands/remove.c
+++ b/src/backend/commands/remove.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.59 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.60 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,8 +40,8 @@
*/
void
RemoveOperator(char *operatorName, /* operator name */
- char *typeName1, /* left argument type name */
- char *typeName2) /* right argument type name */
+ char *typeName1, /* left argument type name */
+ char *typeName2) /* right argument type name */
{
Relation relation;
HeapTuple tup;
diff --git a/src/backend/commands/rename.c b/src/backend/commands/rename.c
index 3630cdd0d19..52568f29f5f 100644
--- a/src/backend/commands/rename.c
+++ b/src/backend/commands/rename.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.56 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -189,15 +189,15 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname);
/*
- * Check for renaming a temp table, which only requires altering
- * the temp-table mapping, not the underlying table.
+ * Check for renaming a temp table, which only requires altering the
+ * temp-table mapping, not the underlying table.
*/
if (rename_temp_relation(oldrelname, newrelname))
return; /* all done... */
/*
- * Instead of using heap_openr(), do it the hard way, so that we
- * can rename indexes as well as regular relations.
+ * Instead of using heap_openr(), do it the hard way, so that we can
+ * rename indexes as well as regular relations.
*/
targetrelation = RelationNameGetRelation(oldrelname);
@@ -219,8 +219,9 @@ renamerel(const char *oldrelname, const char *newrelname)
heap_close(targetrelation, NoLock);
/*
- * Flush the relcache entry (easier than trying to change it at exactly
- * the right instant). It'll get rebuilt on next access to relation.
+ * Flush the relcache entry (easier than trying to change it at
+ * exactly the right instant). It'll get rebuilt on next access to
+ * relation.
*
* XXX What if relation is myxactonly?
*
@@ -244,8 +245,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup
- * is OK because it's a copy...)
+ * Update pg_class tuple with new relname. (Scribbling on reltup is
+ * OK because it's a copy...)
*/
StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname),
newrelname, NAMEDATALEN);
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 04398423b67..85a8b740048 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.51 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.52 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
* so we pre-log a few fetches in advance. In the event of
* crash we can lose as much as we pre-logged.
*/
-#define SEQ_LOG_VALS 32
+#define SEQ_LOG_VALS 32
typedef struct sequence_magic
{
@@ -140,7 +140,7 @@ DefineSequence(CreateSeqStmt *seq)
case SEQ_COL_LOG:
typnam->name = "int4";
coldef->colname = "log_cnt";
- value[i - 1] = Int32GetDatum((int32)1);
+ value[i - 1] = Int32GetDatum((int32) 1);
break;
case SEQ_COL_CYCLE:
typnam->name = "char";
@@ -247,7 +247,7 @@ nextval(PG_FUNCTION_ARGS)
logit = true;
}
- while (fetch) /* try to fetch cache [+ log ] numbers */
+ while (fetch) /* try to fetch cache [+ log ] numbers */
{
/*
@@ -292,8 +292,8 @@ nextval(PG_FUNCTION_ARGS)
log--;
rescnt++;
last = next;
- if (rescnt == 1) /* if it's first result - */
- result = next; /* it's what to return */
+ if (rescnt == 1) /* if it's first result - */
+ result = next; /* it's what to return */
}
}
@@ -306,12 +306,12 @@ nextval(PG_FUNCTION_ARGS)
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@@ -319,17 +319,17 @@ nextval(PG_FUNCTION_ARGS)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader)page)->pd_special -
- ((PageHeader)page)->pd_upper;
+ rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
+ rdata[1].len = ((PageHeader) page)->pd_special -
+ ((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
+ recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
- if (fetch) /* not all numbers were fetched */
+ if (fetch) /* not all numbers were fetched */
log -= fetch;
}
@@ -374,15 +374,15 @@ currval(PG_FUNCTION_ARGS)
PG_RETURN_INT32(result);
}
-/*
+/*
* Main internal procedure that handles 2 & 3 arg forms of SETVAL.
*
* Note that the 3 arg version (which sets the is_called flag) is
* only for use in pg_dump, and setting the is_called flag may not
- * work if multiple users are attached to the database and referencing
+ * work if multiple users are attached to the database and referencing
* the sequence (unlikely if pg_dump is restoring it).
*
- * It is necessary to have the 3 arg version so that pg_dump can
+ * It is necessary to have the 3 arg version so that pg_dump can
* restore the state of a sequence exactly during data-only restores -
* it is the only way to clear the is_called flag in an existing
* sequence.
@@ -409,18 +409,19 @@ do_setval(char *seqname, int32 next, bool iscalled)
/* save info in local cache */
elm->last = next; /* last returned number */
- elm->cached = next; /* last cached number (forget cached values) */
+ elm->cached = next; /* last cached number (forget cached
+ * values) */
START_CRIT_SECTION();
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@@ -428,12 +429,12 @@ do_setval(char *seqname, int32 next, bool iscalled)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader)page)->pd_special -
- ((PageHeader)page)->pd_upper;
+ rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
+ rdata[1].len = ((PageHeader) page)->pd_special -
+ ((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
+ recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@@ -496,7 +497,7 @@ static char *
get_seq_name(text *seqin)
{
char *rawname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(seqin)));
+ PointerGetDatum(seqin)));
int rawlen = strlen(rawname);
char *seqname;
@@ -511,6 +512,7 @@ get_seq_name(text *seqin)
else
{
seqname = rawname;
+
/*
* It's important that this match the identifier downcasing code
* used by backend/parser/scan.l.
@@ -752,15 +754,16 @@ get_param(DefElem *def)
return -1;
}
-void seq_redo(XLogRecPtr lsn, XLogRecord *record)
+void
+seq_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
- Relation reln;
- Buffer buffer;
- Page page;
- char *item;
- Size itemsz;
- xl_seq_rec *xlrec = (xl_seq_rec*) XLogRecGetData(record);
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ char *item;
+ Size itemsz;
+ xl_seq_rec *xlrec = (xl_seq_rec *) XLogRecGetData(record);
sequence_magic *sm;
if (info != XLOG_SEQ_LOG)
@@ -772,8 +775,8 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
- elog(STOP, "seq_redo: can't read block of %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
+ elog(STOP, "seq_redo: can't read block of %u/%u",
+ xlrec->node.tblNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
@@ -781,10 +784,10 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
sm = (sequence_magic *) PageGetSpecialPointer(page);
sm->magic = SEQ_MAGIC;
- item = (char*)xlrec + sizeof(xl_seq_rec);
+ item = (char *) xlrec + sizeof(xl_seq_rec);
itemsz = record->xl_len - sizeof(xl_seq_rec);
itemsz = MAXALIGN(itemsz);
- if (PageAddItem(page, (Item)item, itemsz,
+ if (PageAddItem(page, (Item) item, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "seq_redo: failed to add item to page");
@@ -795,14 +798,16 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
return;
}
-void seq_undo(XLogRecPtr lsn, XLogRecord *record)
+void
+seq_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-void seq_desc(char *buf, uint8 xl_info, char* rec)
+void
+seq_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
- xl_seq_rec *xlrec = (xl_seq_rec*) rec;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
+ xl_seq_rec *xlrec = (xl_seq_rec *) rec;
if (info == XLOG_SEQ_LOG)
strcat(buf, "log: ");
@@ -813,5 +818,5 @@ void seq_desc(char *buf, uint8 xl_info, char* rec)
}
sprintf(buf + strlen(buf), "node %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
+ xlrec->node.tblNode, xlrec->node.relNode);
}
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 4a6ddef9283..034b49887e7 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.88 2001/03/14 21:50:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@ static void DescribeTrigger(TriggerDesc *trigdesc, Trigger *trigger);
static HeapTuple GetTupleForTrigger(EState *estate, ItemPointer tid,
TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(Trigger *trigger,
- TriggerData *trigdata,
- MemoryContext per_tuple_context);
+ TriggerData *trigdata,
+ MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(Relation rel, int event,
HeapTuple oldtup, HeapTuple newtup);
@@ -87,7 +87,9 @@ CreateTrigger(CreateTrigStmt *stmt)
constrrelid = InvalidOid;
else
{
- /* NoLock is probably sufficient here, since we're only
+
+ /*
+ * NoLock is probably sufficient here, since we're only
* interested in getting the relation's OID...
*/
rel = heap_openr(stmt->constrrelname, NoLock);
@@ -192,7 +194,7 @@ CreateTrigger(CreateTrigStmt *stmt)
values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->trigname));
+ CStringGetDatum(stmt->trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
@@ -211,7 +213,7 @@ CreateTrigger(CreateTrigStmt *stmt)
foreach(le, stmt->args)
{
- char *ar = ((Value*) lfirst(le))->val.str;
+ char *ar = ((Value *) lfirst(le))->val.str;
len += strlen(ar) + 4;
for (; *ar; ar++)
@@ -224,7 +226,7 @@ CreateTrigger(CreateTrigStmt *stmt)
args[0] = '\0';
foreach(le, stmt->args)
{
- char *s = ((Value*) lfirst(le))->val.str;
+ char *s = ((Value *) lfirst(le))->val.str;
char *d = args + strlen(args);
while (*s)
@@ -237,7 +239,7 @@ CreateTrigger(CreateTrigStmt *stmt)
}
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(args));
+ CStringGetDatum(args));
}
else
{
@@ -569,15 +571,16 @@ RelationBuildTriggers(Relation relation)
sizeof(Trigger));
else
triggers = (Trigger *) repalloc(triggers,
- (found + 1) * sizeof(Trigger));
+ (found + 1) * sizeof(Trigger));
build = &(triggers[found]);
build->tgoid = htup->t_data->t_oid;
build->tgname = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&pg_trigger->tgname))));
build->tgfoid = pg_trigger->tgfoid;
- build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as uninitialized */
+ build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as
+ * uninitialized */
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
build->tgisconstraint = pg_trigger->tgisconstraint;
@@ -836,22 +839,22 @@ ExecCallTriggerFunc(Trigger *trigger,
TriggerData *trigdata,
MemoryContext per_tuple_context)
{
- FunctionCallInfoData fcinfo;
- Datum result;
- MemoryContext oldContext;
+ FunctionCallInfoData fcinfo;
+ Datum result;
+ MemoryContext oldContext;
/*
- * Fmgr lookup info is cached in the Trigger structure,
- * so that we need not repeat the lookup on every call.
+ * Fmgr lookup info is cached in the Trigger structure, so that we
+ * need not repeat the lookup on every call.
*/
if (trigger->tgfunc.fn_oid == InvalidOid)
fmgr_info(trigger->tgfoid, &trigger->tgfunc);
/*
- * Do the function evaluation in the per-tuple memory context,
- * so that leaked memory will be reclaimed once per tuple.
- * Note in particular that any new tuple created by the trigger function
- * will live till the end of the tuple cycle.
+ * Do the function evaluation in the per-tuple memory context, so that
+ * leaked memory will be reclaimed once per tuple. Note in particular
+ * that any new tuple created by the trigger function will live till
+ * the end of the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
@@ -868,8 +871,8 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContextSwitchTo(oldContext);
/*
- * Trigger protocol allows function to return a null pointer,
- * but NOT to set the isnull result flag.
+ * Trigger protocol allows function to return a null pointer, but NOT
+ * to set the isnull result flag.
*/
if (fcinfo.isnull)
elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
@@ -885,7 +888,7 @@ ExecBRInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_INSERT];
HeapTuple newtuple = trigtuple;
HeapTuple oldtuple;
- TriggerData LocTriggerData;
+ TriggerData LocTriggerData;
int i;
LocTriggerData.type = T_TriggerData;
@@ -915,9 +918,7 @@ ExecARInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
- {
DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple);
- }
}
bool
@@ -1240,10 +1241,11 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
static void
deferredTriggerAddEvent(DeferredTriggerEvent event)
{
+
/*
* Since the event list could grow quite long, we keep track of the
- * list tail and append there, rather than just doing a stupid "lappend".
- * This avoids O(N^2) behavior for large numbers of events.
+ * list tail and append there, rather than just doing a stupid
+ * "lappend". This avoids O(N^2) behavior for large numbers of events.
*/
event->dte_next = NULL;
if (deftrig_event_tail == NULL)
@@ -1291,7 +1293,7 @@ deferredTriggerGetPreviousEvent(Oid relid, ItemPointer ctid)
if (previous == NULL)
elog(ERROR,
- "deferredTriggerGetPreviousEvent: event for tuple %s not found",
+ "deferredTriggerGetPreviousEvent: event for tuple %s not found",
DatumGetCString(DirectFunctionCall1(tidout,
PointerGetDatum(ctid))));
return previous;
@@ -1528,7 +1530,7 @@ DeferredTriggerBeginXact(void)
if (deftrig_cxt != NULL)
elog(ERROR,
- "DeferredTriggerBeginXact() called while inside transaction");
+ "DeferredTriggerBeginXact() called while inside transaction");
/* ----------
* Create the per transaction memory context and copy all states
@@ -1671,7 +1673,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
l = deftrig_dfl_trigstates;
while (l != NIL)
{
- List *next = lnext(l);
+ List *next = lnext(l);
pfree(lfirst(l));
pfree(l);
@@ -1700,7 +1702,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
l = deftrig_trigstates;
while (l != NIL)
{
- List *next = lnext(l);
+ List *next = lnext(l);
pfree(lfirst(l));
pfree(l);
@@ -1912,7 +1914,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* Called by ExecAR...Triggers() to add the event to the queue.
*
* NOTE: should be called only if we've determined that an event must
- * be added to the queue. We must save *all* events if there is either
+ * be added to the queue. We must save *all* events if there is either
* an UPDATE or a DELETE deferred trigger; see uses of
* deferredTriggerGetPreviousEvent.
* ----------
@@ -2099,15 +2101,15 @@ DeferredTriggerSaveEvent(Relation rel, int event,
TRIGGER_DEFERRED_ROW_INSERTED)
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&(rel->rd_rel->relname)))));
if (prev_event->dte_item[i].dti_state &
TRIGGER_DEFERRED_KEY_CHANGED)
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&(rel->rd_rel->relname)))));
}
/* ----------
@@ -2142,7 +2144,7 @@ DeferredTriggerSaveEvent(Relation rel, int event,
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ NameGetDatum(&(rel->rd_rel->relname)))));
break;
}
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 378620cb3b7..ede41b64cc8 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.73 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -122,7 +122,7 @@ write_password_file(Relation rel)
CRYPT_PWD_FILE_SEPSTR
"%s\n",
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(DatumGetName(datum_n)))),
+ NameGetDatum(DatumGetName(datum_n)))),
null_p ? "" :
DatumGetCString(DirectFunctionCall1(textout, datum_p)),
null_v ? "\\N" :
@@ -248,7 +248,7 @@ CreateUser(CreateUserStmt *stmt)
* Build a tuple to insert
*/
new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
+ CStringGetDatum(stmt->user));
new_record[Anum_pg_shadow_usesysid - 1] = Int32GetDatum(havesysid ? stmt->sysid : max_id + 1);
AssertState(BoolIsValid(stmt->createdb));
@@ -312,7 +312,7 @@ CreateUser(CreateUserStmt *stmt)
* this in */
ags.action = +1;
ags.listUsers = makeList1(makeInteger(havesysid ?
- stmt->sysid : max_id + 1));
+ stmt->sysid : max_id + 1));
AlterGroup(&ags, "CREATE USER");
}
@@ -377,7 +377,7 @@ AlterUser(AlterUserStmt *stmt)
* Build a tuple to update, perusing the information just obtained
*/
new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
+ CStringGetDatum(stmt->user));
new_record_nulls[Anum_pg_shadow_usename - 1] = ' ';
/* sysid - leave as is */
@@ -561,7 +561,7 @@ DropUser(DropUserStmt *stmt)
elog(ERROR, "DROP USER: user \"%s\" owns database \"%s\", cannot be removed%s",
user,
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(DatumGetName(datum)))),
+ NameGetDatum(DatumGetName(datum)))),
(length(stmt->users) > 1) ? " (no users removed)" : ""
);
}
@@ -603,6 +603,7 @@ DropUser(DropUserStmt *stmt)
}
heap_endscan(scan);
heap_close(pg_rel, AccessExclusiveLock);
+
/*
* Advance command counter so that later iterations of this loop
* will see the changes already made. This is essential if, for
@@ -873,7 +874,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
{
/* Get the uid of the proposed user to add. */
tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(strVal(lfirst(item))),
+ PointerGetDatum(strVal(lfirst(item))),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "%s: user \"%s\" does not exist",
@@ -995,7 +996,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
{
/* Get the uid of the proposed user to drop. */
tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(strVal(lfirst(item))),
+ PointerGetDatum(strVal(lfirst(item))),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "ALTER GROUP: user \"%s\" does not exist", strVal(lfirst(item)));
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 3606d05f741..078c9b53475 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.187 2001/03/14 08:40:57 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.188 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,11 +47,11 @@
#include "utils/syscache.h"
#include "utils/temprel.h"
-extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
- char *unused, int unlen);
-extern XLogRecPtr log_heap_move(Relation reln,
- Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup);
+extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
+ char *unused, int unlen);
+extern XLogRecPtr log_heap_move(Relation reln,
+ Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup);
static MemoryContext vac_context = NULL;
@@ -78,9 +78,9 @@ static void vpage_insert(VacPageList vacpagelist, VacPage vpnew);
static void get_indices(Relation relation, int *nindices, Relation **Irel);
static void close_indices(int nindices, Relation *Irel);
static IndexInfo **get_index_desc(Relation onerel, int nindices,
- Relation *Irel);
+ Relation *Irel);
static void *vac_find_eq(void *bot, int nelem, int size, void *elm,
- int (*compar) (const void *, const void *));
+ int (*compar) (const void *, const void *));
static int vac_cmp_blk(const void *left, const void *right);
static int vac_cmp_offno(const void *left, const void *right);
static int vac_cmp_vtlinks(const void *left, const void *right);
@@ -120,9 +120,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *anal_cols)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of QueryContext, it will go away eventually
- * even if we suffer an error; there's no need for special abort
- * cleanup logic.
+ * Since it is a child of QueryContext, it will go away eventually even
+ * if we suffer an error; there's no need for special abort cleanup
+ * logic.
*/
vac_context = AllocSetContextCreate(QueryContext,
"Vacuum",
@@ -215,8 +215,8 @@ vacuum_shutdown()
/*
* Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete
- * the active context!
+ * StartTransactionCommand, else we might be trying to delete the
+ * active context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@@ -360,10 +360,10 @@ vacuum_rel(Oid relid)
{
Relation onerel;
LockRelId onerelid;
- VacPageListData vacuum_pages; /* List of pages to vacuum and/or clean
- * indices */
- VacPageListData fraged_pages; /* List of pages with space enough for
- * re-using */
+ VacPageListData vacuum_pages; /* List of pages to vacuum and/or
+ * clean indices */
+ VacPageListData fraged_pages; /* List of pages with space enough
+ * for re-using */
Relation *Irel;
int32 nindices,
i;
@@ -411,10 +411,10 @@ vacuum_rel(Oid relid)
}
/*
- * Get a session-level exclusive lock too. This will protect our
- * exclusive access to the relation across multiple transactions,
- * so that we can vacuum the relation's TOAST table (if any) secure
- * in the knowledge that no one is diddling the parent relation.
+ * Get a session-level exclusive lock too. This will protect our
+ * exclusive access to the relation across multiple transactions, so
+ * that we can vacuum the relation's TOAST table (if any) secure in
+ * the knowledge that no one is diddling the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@@ -458,10 +458,11 @@ vacuum_rel(Oid relid)
vacrelstats->hasindex = true;
else
vacrelstats->hasindex = false;
-#ifdef NOT_USED
+#ifdef NOT_USED
+
/*
- * reindex in VACUUM is dangerous under WAL.
- * ifdef out until it becomes safe.
+ * reindex in VACUUM is dangerous under WAL. ifdef out until it
+ * becomes safe.
*/
if (reindex)
{
@@ -470,7 +471,7 @@ vacuum_rel(Oid relid)
Irel = (Relation *) NULL;
activate_indexes_of_a_table(relid, false);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* Clean/scan index relation(s) */
if (Irel != (Relation *) NULL)
@@ -506,6 +507,7 @@ vacuum_rel(Oid relid)
}
else
{
+
/*
* Flush dirty pages out to disk. We must do this even if we
* didn't do anything else, because we want to ensure that all
@@ -518,10 +520,10 @@ vacuum_rel(Oid relid)
i);
}
}
-#ifdef NOT_USED
+#ifdef NOT_USED
if (reindex)
activate_indexes_of_a_table(relid, true);
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* all done with this class, but hold lock until commit */
heap_close(onerel, NoLock);
@@ -537,11 +539,11 @@ vacuum_rel(Oid relid)
CommitTransactionCommand();
/*
- * If the relation has a secondary toast one, vacuum that too
- * while we still hold the session lock on the master table.
- * We don't need to propagate "analyze" to it, because the toaster
- * always uses hardcoded index access and statistics are
- * totally unimportant for toast relations
+ * If the relation has a secondary toast one, vacuum that too while we
+ * still hold the session lock on the master table. We don't need to
+ * propagate "analyze" to it, because the toaster always uses
+ * hardcoded index access and statistics are totally unimportant for
+ * toast relations
*/
if (toast_relid != InvalidOid)
vacuum_rel(toast_relid);
@@ -563,7 +565,7 @@ vacuum_rel(Oid relid)
*/
static void
scan_heap(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages)
+ VacPageList vacuum_pages, VacPageList fraged_pages)
{
BlockNumber nblocks,
blkno;
@@ -845,7 +847,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* dead tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
* determine how much space will be available after
- * removal of dead tuples. But note we are NOT changing
+ * removal of dead tuples. But note we are NOT changing
* the real page yet...
*/
if (tempPage == (Page) NULL)
@@ -964,8 +966,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
- nunused, (unsigned long)min_tlen, (unsigned long)max_tlen,
- (unsigned long)free_size, (unsigned long)usable_free_size,
+ nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
+ (unsigned long) free_size, (unsigned long) usable_free_size,
empty_end_pages, fraged_pages->num_pages,
show_rusage(&ru0));
@@ -984,8 +986,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
*/
static void
repair_frag(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages,
- int nindices, Relation *Irel)
+ VacPageList vacuum_pages, VacPageList fraged_pages,
+ int nindices, Relation *Irel)
{
TransactionId myXID;
CommandId myCID;
@@ -1077,7 +1079,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
dowrite = false;
if (blkno == last_vacuum_block) /* it's reaped page */
{
- if (last_vacuum_page->offsets_free > 0) /* there are dead tuples */
+ if (last_vacuum_page->offsets_free > 0) /* there are dead tuples */
{ /* on this page - clean */
Assert(!isempty);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1100,7 +1102,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_vacuum_block = -1;
}
if (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno ==
+ fraged_pages->pagedesc[num_fraged_pages - 1]->blkno ==
(BlockNumber) blkno)
{
/* page is in fraged_pages too; remove it */
@@ -1142,8 +1144,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* If this (chain) tuple is moved by me already then I
- * have to check is it in vacpage or not - i.e. is it moved
- * while cleaning this page or some previous one.
+ * have to check is it in vacpage or not - i.e. is it
+ * moved while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@@ -1232,8 +1234,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be
* removed and seems that this should be handled
- * in scan_heap(), but it's not implemented at
- * the moment and so we just stop shrinking here.
+ * in scan_heap(), but it's not implemented at the
+ * moment and so we just stop shrinking here.
*/
ReleaseBuffer(Cbuf);
pfree(vtmove);
@@ -1256,15 +1258,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
/*
- * if to_vacpage no longer has enough free space to be
- * useful, remove it from fraged_pages list
+ * if to_vacpage no longer has enough free space
+ * to be useful, remove it from fraged_pages list
*/
if (to_vacpage != NULL &&
- !enough_space(to_vacpage, vacrelstats->min_tlen))
+ !enough_space(to_vacpage, vacrelstats->min_tlen))
{
Assert(num_fraged_pages > to_item);
memmove(fraged_pages->pagedesc + to_item,
- fraged_pages->pagedesc + to_item + 1,
+ fraged_pages->pagedesc + to_item + 1,
sizeof(VacPage) * (num_fraged_pages - to_item - 1));
num_fraged_pages--;
}
@@ -1326,10 +1328,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vtld.new_tid = tp.t_self;
vtlp = (VTupleLink)
vac_find_eq((void *) (vacrelstats->vtlinks),
- vacrelstats->num_vtlinks,
- sizeof(VTupleLinkData),
- (void *) &vtld,
- vac_cmp_vtlinks);
+ vacrelstats->num_vtlinks,
+ sizeof(VTupleLinkData),
+ (void *) &vtld,
+ vac_cmp_vtlinks);
if (vtlp == NULL)
elog(ERROR, "Parent tuple was not found");
tp.t_self = vtlp->this_tid;
@@ -1416,7 +1418,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++)
{
- VacPage destvacpage = vtmove[ti].vacpage;
+ VacPage destvacpage = vtmove[ti].vacpage;
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
@@ -1460,21 +1462,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
- * (since this tuple-chain member can be on a page lower
- * than the one we're currently processing in the outer
- * loop). If that's true, then after vacuum_page() the
- * source tuple will have been moved, and tuple.t_data
- * will be pointing at garbage. Therefore we must do
- * everything that uses tuple.t_data BEFORE this step!!
+ * (since this tuple-chain member can be on a page
+ * lower than the one we're currently processing in
+ * the outer loop). If that's true, then after
+ * vacuum_page() the source tuple will have been
+ * moved, and tuple.t_data will be pointing at
+ * garbage. Therefore we must do everything that uses
+ * tuple.t_data BEFORE this step!!
*
* This path is different from the other callers of
- * vacuum_page, because we have already incremented the
- * vacpage's offsets_used field to account for the
+ * vacuum_page, because we have already incremented
+ * the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is
- * wrong. But since that's a good debugging check for
- * all other callers, we work around it here rather
- * than remove it.
+ * vacuum_page's check for offsets_used == 0 is wrong.
+ * But since that's a good debugging check for all
+ * other callers, we work around it here rather than
+ * remove it.
*/
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{
@@ -1498,7 +1501,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (newoff == InvalidOffsetNumber)
{
elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
- (unsigned long)tuple_len, destvacpage->blkno);
+ (unsigned long) tuple_len, destvacpage->blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
@@ -1507,9 +1510,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSet(&(newtup.t_self), destvacpage->blkno, newoff);
{
- XLogRecPtr recptr =
- log_heap_move(onerel, Cbuf, tuple.t_self,
- cur_buffer, &newtup);
+ XLogRecPtr recptr =
+ log_heap_move(onerel, Cbuf, tuple.t_self,
+ cur_buffer, &newtup);
if (Cbuf != cur_buffer)
{
@@ -1526,7 +1529,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain otherwise.
+ * tuple in chain, and to next tuple in chain
+ * otherwise.
*/
if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self;
@@ -1552,13 +1556,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (Irel != (Relation *) NULL)
{
+
/*
* XXX using CurrentMemoryContext here means
- * intra-vacuum memory leak for functional indexes.
- * Should fix someday.
+ * intra-vacuum memory leak for functional
+ * indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
- * Probably should change it to use ExecOpenIndices.
+ * Probably should change it to use
+ * ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@@ -1653,8 +1659,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
elog(STOP, "\
failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
- (unsigned long)tuple_len, cur_page->blkno, (unsigned long)cur_page->free,
- cur_page->offsets_used, cur_page->offsets_free);
+ (unsigned long) tuple_len, cur_page->blkno, (unsigned long) cur_page->free,
+ cur_page->offsets_used, cur_page->offsets_free);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
@@ -1673,9 +1679,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
{
- XLogRecPtr recptr =
- log_heap_move(onerel, buf, tuple.t_self,
- cur_buffer, &newtup);
+ XLogRecPtr recptr =
+ log_heap_move(onerel, buf, tuple.t_self,
+ cur_buffer, &newtup);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@@ -1698,13 +1704,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* insert index' tuples if needed */
if (Irel != (Relation *) NULL)
{
+
/*
- * XXX using CurrentMemoryContext here means
- * intra-vacuum memory leak for functional indexes.
- * Should fix someday.
+ * XXX using CurrentMemoryContext here means intra-vacuum
+ * memory leak for functional indexes. Should fix someday.
*
- * XXX This code fails to handle partial indexes!
- * Probably should change it to use ExecOpenIndices.
+ * XXX This code fails to handle partial indexes! Probably
+ * should change it to use ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@@ -1803,14 +1809,15 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (num_moved > 0)
{
+
/*
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indices,
- * etc. For now, a quick hack: record status of current transaction
- * as committed, and continue.
+ * etc. For now, a quick hack: record status of current
+ * transaction as committed, and continue.
*/
RecordTransactionCommit();
}
@@ -1873,7 +1880,7 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
nblocks, blkno, num_moved,
show_rusage(&ru0));
- /*
+ /*
* Reflect the motion of system tuples to catalog cache here.
*/
CommandCounterIncrement();
@@ -1883,13 +1890,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* vacuum indices again if needed */
if (Irel != (Relation *) NULL)
{
- VacPage *vpleft,
+ VacPage *vpleft,
*vpright,
vpsave;
/* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
+ vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft;
@@ -1906,9 +1913,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (vacpage->blkno == (BlockNumber) (blkno - 1) &&
vacpage->offsets_free > 0)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- int uncnt;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ int uncnt;
buf = ReadBuffer(onerel, vacpage->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1943,8 +1950,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buf, (char*)unused,
- (char*)(&(unused[uncnt])) - (char*)unused);
+
+ recptr = log_heap_clean(onerel, buf, (char *) unused,
+ (char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@@ -1962,9 +1970,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all tuples
- * have correct on-row commit status on disk (see bufmgr.c's comments
- * for FlushRelationBuffers()).
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
i = FlushRelationBuffers(onerel, blkno);
if (i < 0)
@@ -2005,8 +2013,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
int i;
nblocks = vacuum_pages->num_pages;
- nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with
- * them */
+ nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
{
@@ -2022,9 +2029,9 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all tuples
- * have correct on-row commit status on disk (see bufmgr.c's comments
- * for FlushRelationBuffers()).
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages);
nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages;
@@ -2042,7 +2049,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
vacrelstats->num_pages, nblocks);
nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
Assert(nblocks >= 0);
- vacrelstats->num_pages = nblocks; /* set new number of blocks */
+ vacrelstats->num_pages = nblocks; /* set new number of
+ * blocks */
}
}
@@ -2053,12 +2061,12 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
static void
vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- int uncnt;
- Page page = BufferGetPage(buffer);
- ItemId itemid;
- int i;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ int uncnt;
+ Page page = BufferGetPage(buffer);
+ ItemId itemid;
+ int i;
/* There shouldn't be any tuples moved onto the page yet! */
Assert(vacpage->offsets_used == 0);
@@ -2072,8 +2080,9 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buffer, (char*)unused,
- (char*)(&(unused[uncnt])) - (char*)unused);
+
+ recptr = log_heap_clean(onerel, buffer, (char *) unused,
+ (char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@@ -2220,8 +2229,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
vp = &vacpage;
vpp = (VacPage *) vac_find_eq((void *) (vacpagelist->pagedesc),
- vacpagelist->num_pages, sizeof(VacPage), (void *) &vp,
- vac_cmp_blk);
+ vacpagelist->num_pages, sizeof(VacPage), (void *) &vp,
+ vac_cmp_blk);
if (vpp == (VacPage *) NULL)
return (VacPage) NULL;
@@ -2235,8 +2244,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
}
voff = (OffsetNumber *) vac_find_eq((void *) (vp->offsets),
- vp->offsets_free, sizeof(OffsetNumber), (void *) &ioffno,
- vac_cmp_offno);
+ vp->offsets_free, sizeof(OffsetNumber), (void *) &ioffno,
+ vac_cmp_offno);
if (voff == (OffsetNumber *) NULL)
return (VacPage) NULL;
@@ -2265,7 +2274,7 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
*/
static void
update_relstats(Oid relid, int num_pages, int num_tuples, bool hasindex,
- VRelStats *vacrelstats)
+ VRelStats *vacrelstats)
{
Relation rd;
HeapTupleData rtup;
@@ -2313,7 +2322,7 @@ update_relstats(Oid relid, int num_pages, int num_tuples, bool hasindex,
static void
reap_page(VacPageList vacpagelist, VacPage vacpage)
{
- VacPage newvacpage;
+ VacPage newvacpage;
/* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) + vacpage->offsets_free * sizeof(OffsetNumber));
@@ -2354,7 +2363,7 @@ vpage_insert(VacPageList vacpagelist, VacPage vpnew)
static void *
vac_find_eq(void *bot, int nelem, int size, void *elm,
- int (*compar) (const void *, const void *))
+ int (*compar) (const void *, const void *))
{
int res;
int last = nelem - 1;
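For reference, the tid_reaped() and vac_find_eq() hunks above hinge on a comparator-driven binary search over the sorted pagedesc and offsets arrays. The standalone sketch below is not backend code; the DemoPage type and demo_cmp_blk name are illustrative only, but the (base, nelem, size, key, comparator) shape matches vac_find_eq's interface, here expressed with standard C bsearch().

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a reaped-page descriptor (hypothetical type). */
typedef struct DemoPage
{
	unsigned int blkno;			/* block number, kept sorted ascending */
	int			offsets_free;	/* dead line pointers found on the page */
} DemoPage;

/* Comparator in the style of vac_cmp_blk(): order by block number. */
static int
demo_cmp_blk(const void *left, const void *right)
{
	const DemoPage *l = (const DemoPage *) left;
	const DemoPage *r = (const DemoPage *) right;

	if (l->blkno < r->blkno)
		return -1;
	if (l->blkno > r->blkno)
		return 1;
	return 0;
}

int
main(void)
{
	/* pagedesc must already be sorted by blkno for binary search to work */
	DemoPage	pagedesc[] = {{2, 1}, {7, 3}, {11, 0}, {40, 5}};
	DemoPage	key = {11, 0};
	DemoPage   *hit;

	hit = bsearch(&key, pagedesc,
				  sizeof(pagedesc) / sizeof(pagedesc[0]),
				  sizeof(DemoPage), demo_cmp_blk);

	if (hit != NULL)
		printf("block %u found, offsets_free = %d\n",
			   hit->blkno, hit->offsets_free);
	else
		printf("block %u not in the reaped-page list\n", key.blkno);
	return 0;
}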
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 6f07bff095d..cc5f64f41a0 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.45 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.46 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -453,6 +453,7 @@ parse_DefaultXactIsoLevel(char *value)
{
#if 0
TransactionState s = CurrentTransactionState;
+
#endif
if (value == NULL)
@@ -632,7 +633,7 @@ parse_client_encoding(char *value)
}
#else
if (value &&
- strcasecmp(value, pg_encoding_to_char(pg_get_client_encoding())) != 0)
+ strcasecmp(value, pg_encoding_to_char(pg_get_client_encoding())) != 0)
elog(ERROR, "Client encoding %s is not supported", value);
#endif
return TRUE;
@@ -701,28 +702,27 @@ reset_server_encoding(void)
void
SetPGVariable(const char *name, const char *value)
{
- char *mvalue = value ? pstrdup(value) : ((char*) NULL);
-
- /*
- * Special cases ought to be removed and handled separately
- * by TCOP
- */
- if (strcasecmp(name, "datestyle")==0)
- parse_date(mvalue);
- else if (strcasecmp(name, "timezone")==0)
- parse_timezone(mvalue);
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- parse_DefaultXactIsoLevel(mvalue);
- else if (strcasecmp(name, "XactIsoLevel")==0)
- parse_XactIsoLevel(mvalue);
- else if (strcasecmp(name, "client_encoding")==0)
- parse_client_encoding(mvalue);
- else if (strcasecmp(name, "server_encoding")==0)
- parse_server_encoding(mvalue);
- else if (strcasecmp(name, "random_seed")==0)
- parse_random_seed(mvalue);
- else
- SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
+ char *mvalue = value ? pstrdup(value) : ((char *) NULL);
+
+ /*
+ * Special cases ought to be removed and handled separately by TCOP
+ */
+ if (strcasecmp(name, "datestyle") == 0)
+ parse_date(mvalue);
+ else if (strcasecmp(name, "timezone") == 0)
+ parse_timezone(mvalue);
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ parse_DefaultXactIsoLevel(mvalue);
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ parse_XactIsoLevel(mvalue);
+ else if (strcasecmp(name, "client_encoding") == 0)
+ parse_client_encoding(mvalue);
+ else if (strcasecmp(name, "server_encoding") == 0)
+ parse_server_encoding(mvalue);
+ else if (strcasecmp(name, "random_seed") == 0)
+ parse_random_seed(mvalue);
+ else
+ SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
if (mvalue)
pfree(mvalue);
@@ -732,44 +732,45 @@ SetPGVariable(const char *name, const char *value)
void
GetPGVariable(const char *name)
{
- if (strcasecmp(name, "datestyle")==0)
- show_date();
- else if (strcasecmp(name, "timezone")==0)
- show_timezone();
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- show_DefaultXactIsoLevel();
- else if (strcasecmp(name, "XactIsoLevel")==0)
- show_XactIsoLevel();
- else if (strcasecmp(name, "client_encoding")==0)
- show_client_encoding();
- else if (strcasecmp(name, "server_encoding")==0)
- show_server_encoding();
- else if (strcasecmp(name, "random_seed")==0)
- show_random_seed();
- else
- {
- const char * val = GetConfigOption(name);
- elog(NOTICE, "%s is %s", name, val);
- }
-}
+ if (strcasecmp(name, "datestyle") == 0)
+ show_date();
+ else if (strcasecmp(name, "timezone") == 0)
+ show_timezone();
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ show_DefaultXactIsoLevel();
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ show_XactIsoLevel();
+ else if (strcasecmp(name, "client_encoding") == 0)
+ show_client_encoding();
+ else if (strcasecmp(name, "server_encoding") == 0)
+ show_server_encoding();
+ else if (strcasecmp(name, "random_seed") == 0)
+ show_random_seed();
+ else
+ {
+ const char *val = GetConfigOption(name);
+
+ elog(NOTICE, "%s is %s", name, val);
+ }
+}
void
ResetPGVariable(const char *name)
{
- if (strcasecmp(name, "datestyle")==0)
- reset_date();
- else if (strcasecmp(name, "timezone")==0)
- reset_timezone();
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- reset_DefaultXactIsoLevel();
- else if (strcasecmp(name, "XactIsoLevel")==0)
- reset_XactIsoLevel();
- else if (strcasecmp(name, "client_encoding")==0)
- reset_client_encoding();
- else if (strcasecmp(name, "server_encoding")==0)
- reset_server_encoding();
- else if (strcasecmp(name, "random_seed")==0)
- reset_random_seed();
- else
- SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
-}
+ if (strcasecmp(name, "datestyle") == 0)
+ reset_date();
+ else if (strcasecmp(name, "timezone") == 0)
+ reset_timezone();
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ reset_DefaultXactIsoLevel();
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ reset_XactIsoLevel();
+ else if (strcasecmp(name, "client_encoding") == 0)
+ reset_client_encoding();
+ else if (strcasecmp(name, "server_encoding") == 0)
+ reset_server_encoding();
+ else if (strcasecmp(name, "random_seed") == 0)
+ reset_random_seed();
+ else
+ SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
+}
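The SetPGVariable/GetPGVariable/ResetPGVariable hunks above all follow the same pattern: a strcasecmp() dispatch over a handful of special-cased names, with everything else falling through to the generic configuration path. A minimal standalone sketch of that dispatch follows; the table, handler names, and fallback message are illustrative, not the backend's actual code.

#include <stdio.h>
#include <strings.h>			/* strcasecmp() is POSIX */

static void show_datestyle(void) { puts("DateStyle is ISO"); }
static void show_timezone(void)  { puts("Time zone is UTC"); }

typedef struct VarHandler
{
	const char *name;
	void		(*show) (void);
} VarHandler;

/* Special-cased variables; anything else takes the generic path. */
static const VarHandler handlers[] = {
	{"datestyle", show_datestyle},
	{"timezone", show_timezone},
};

static void
get_variable(const char *name)
{
	size_t		i;

	for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
	{
		if (strcasecmp(name, handlers[i].name) == 0)
		{
			handlers[i].show();
			return;
		}
	}
	/* generic fallback, standing in for the GetConfigOption() branch */
	printf("%s: handled by the generic configuration lookup\n", name);
}

int
main(void)
{
	get_variable("DateStyle");		/* matches despite different case */
	get_variable("random_page_cost");
	return 0;
}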
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 99481d4d54b..320f2c08e92 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: view.c,v 1.53 2001/01/24 19:42:53 momjian Exp $
+ * $Id: view.c,v 1.54 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ DefineVirtualRelation(char *relname, List *tlist)
TargetEntry *entry = lfirst(t);
Resdom *res = entry->resdom;
- if (! res->resjunk)
+ if (!res->resjunk)
{
char *resname = res->resname;
char *restypename = typeidTypeName(res->restype);
@@ -118,9 +118,9 @@ MakeRetrieveViewRuleName(char *viewName)
snprintf(buf, buflen, "_RET%s", viewName);
/* clip to less than NAMEDATALEN bytes, if necessary */
#ifdef MULTIBYTE
- maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN-1);
+ maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN - 1);
#else
- maxlen = NAMEDATALEN-1;
+ maxlen = NAMEDATALEN - 1;
#endif
if (maxlen < buflen)
buf[maxlen] = '\0';
@@ -211,12 +211,12 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we
- * don't want to scribble on our input, it's that the parser has
- * a bad habit of outputting multiple links to the same subtree
- * for constructs like BETWEEN, and we mustn't have OffsetVarNodes
- * increment the varno of a Var node twice. copyObject will expand
- * any multiply-referenced subtree into multiple copies.
+ * Make a copy of the given parsetree. It's not so much that we don't
+ * want to scribble on our input, it's that the parser has a bad habit
+ * of outputting multiple links to the same subtree for constructs
+ * like BETWEEN, and we mustn't have OffsetVarNodes increment the
+ * varno of a Var node twice. copyObject will expand any
+ * multiply-referenced subtree into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -261,6 +261,7 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
void
DefineView(char *viewName, Query *viewParse)
{
+
/*
* Create the "view" relation NOTE: if it already exists, the xact
* will be aborted.
@@ -295,9 +296,10 @@ DefineView(char *viewName, Query *viewParse)
void
RemoveView(char *viewName)
{
+
/*
- * We just have to drop the relation; the associated rules will
- * be cleaned up automatically.
+ * We just have to drop the relation; the associated rules will be
+ * cleaned up automatically.
*/
heap_drop_with_catalog(viewName, allowSystemTableMods);
}
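The MakeRetrieveViewRuleName() hunk above clips the generated "_RET<viewname>" string to one byte short of the name-length limit, with a multibyte-aware clip when MULTIBYTE is defined. The sketch below approximates only the single-byte branch under an assumed limit; DEMO_NAMEDATALEN and make_view_rule_name are stand-ins, not the backend's definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_NAMEDATALEN 32		/* stand-in for NAMEDATALEN */

/*
 * Build "_RET<viewname>" and clip it to DEMO_NAMEDATALEN - 1 bytes.
 * (A MULTIBYTE build would clip on a character boundary instead.)
 */
static char *
make_view_rule_name(const char *view_name)
{
	size_t		buflen = strlen(view_name) + 5;	/* "_RET" + name + '\0' */
	size_t		maxlen = DEMO_NAMEDATALEN - 1;
	char	   *buf = malloc(buflen);

	if (buf == NULL)
		return NULL;
	snprintf(buf, buflen, "_RET%s", view_name);
	if (maxlen < buflen)
		buf[maxlen] = '\0';		/* truncate to the name-length limit */
	return buf;
}

int
main(void)
{
	char	   *rule = make_view_rule_name("a_rather_long_view_name_indeed");

	printf("%s (%zu bytes)\n", rule, strlen(rule));
	free(rule);
	return 0;
}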
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 0888e2638b2..a9c5bd40372 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: execAmi.c,v 1.56 2001/01/24 19:42:53 momjian Exp $
+ * $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
* ExecInsert \ executor interface / aminsert
* ExecReScanR / to access methods \ amrescan
* ExecMarkPos / \ ammarkpos
- * ExecRestrPos / \ amrestpos
+ * ExecRestrPos / \ amrestpos
*/
#include "postgres.h"
@@ -91,7 +91,7 @@ ExecOpenScanR(Oid relOid,
* on whether this is a heap relation or an index relation.
*
* For a table, acquire AccessShareLock for the duration of the query
- * execution. For indexes, acquire no lock here; the index machinery
+ * execution. For indexes, acquire no lock here; the index machinery
* does its own locks and unlocks. (We rely on having some kind of
* lock on the parent table to ensure the index won't go away!)
* ----------------
@@ -413,7 +413,7 @@ ExecMarkPos(Plan *node)
{
switch (nodeTag(node))
{
- case T_SeqScan:
+ case T_SeqScan:
ExecSeqMarkPos((SeqScan *) node);
break;
@@ -455,7 +455,7 @@ ExecRestrPos(Plan *node)
{
switch (nodeTag(node))
{
- case T_SeqScan:
+ case T_SeqScan:
ExecSeqRestrPos((SeqScan *) node);
break;
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index f23ba273462..d288a8de735 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.25 2001/01/29 00:39:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -265,6 +265,7 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
void
ExecFreeJunkFilter(JunkFilter *junkfilter)
{
+
/*
* Since the junkfilter is inside its own context, we just have to
* delete the context and we're set.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 929134209ba..fc1dccd0467 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -27,7 +27,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.138 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.139 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,15 +51,15 @@ static TupleDesc InitPlan(CmdType operation,
Plan *plan,
EState *estate);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
- Index resultRelationIndex,
- List *rangeTable,
- CmdType operation);
+ Index resultRelationIndex,
+ List *rangeTable,
+ CmdType operation);
static void EndPlan(Plan *plan, EState *estate);
static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
- CmdType operation,
- long numberTuples,
- ScanDirection direction,
- DestReceiver *destfunc);
+ CmdType operation,
+ long numberTuples,
+ ScanDirection direction,
+ DestReceiver *destfunc);
static void ExecRetrieve(TupleTableSlot *slot,
DestReceiver *destfunc,
EState *estate);
@@ -72,9 +72,9 @@ static void ExecReplace(TupleTableSlot *slot, ItemPointer tupleid,
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckQueryPerms(CmdType operation, Query *parseTree,
- Plan *plan);
+ Plan *plan);
static void ExecCheckPlanPerms(Plan *plan, List *rangeTable,
- CmdType operation);
+ CmdType operation);
static void ExecCheckRTPerms(List *rangeTable, CmdType operation);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
@@ -91,7 +91,7 @@ static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
* be returned by the query.
*
* NB: the CurrentMemoryContext when this is called must be the context
- * to be used as the per-query context for the query plan. ExecutorRun()
+ * to be used as the per-query context for the query plan. ExecutorRun()
* and ExecutorEnd() must be called in this same memory context.
* ----------------------------------------------------------------
*/
@@ -287,6 +287,7 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
static void
ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan)
{
+
/*
* Check RTEs in the query's primary rangetable.
*/
@@ -339,7 +340,7 @@ ExecCheckPlanPerms(Plan *plan, List *rangeTable, CmdType operation)
{
case T_SubqueryScan:
{
- SubqueryScan *scan = (SubqueryScan *) plan;
+ SubqueryScan *scan = (SubqueryScan *) plan;
RangeTblEntry *rte;
/* Recursively check the subquery */
@@ -405,12 +406,13 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
relName = rte->relname;
/*
- * userid to check as: current user unless we have a setuid indication.
+ * userid to check as: current user unless we have a setuid
+ * indication.
*
- * Note: GetUserId() is presently fast enough that there's no harm
- * in calling it separately for each RTE. If that stops being true,
- * we could call it once in ExecCheckQueryPerms and pass the userid
- * down from there. But for now, no need for the extra clutter.
+ * Note: GetUserId() is presently fast enough that there's no harm in
+ * calling it separately for each RTE. If that stops being true, we
+ * could call it once in ExecCheckQueryPerms and pass the userid down
+ * from there. But for now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@@ -426,6 +428,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
if (rte->checkForWrite)
{
+
/*
* Note: write access in a SELECT context means SELECT FOR UPDATE.
* Right now we don't distinguish that from true update as far as
@@ -519,6 +522,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (resultRelations != NIL)
{
+
/*
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
@@ -541,8 +545,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
+
/*
- * Single result relation identified by parseTree->resultRelation
+ * Single result relation identified by
+ * parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@@ -559,6 +565,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
+
/*
* if no result relation, then set state appropriately
*/
@@ -616,10 +623,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
tupType = ExecGetTupType(plan); /* tuple descriptor */
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries need
- * a filter if there are any junk attrs in the tlist. UPDATE and
- * DELETE always need one, since there's always a junk 'ctid' attribute
- * present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries
+ * need a filter if there are any junk attrs in the tlist. UPDATE and
+ * DELETE always need one, since there's always a junk 'ctid'
+ * attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -650,11 +657,12 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (junk_filter_needed)
{
+
/*
- * If there are multiple result relations, each one needs
- * its own junk filter. Note this is only possible for
- * UPDATE/DELETE, so we can't be fooled by some needing
- * a filter and some not.
+ * If there are multiple result relations, each one needs its
+ * own junk filter. Note this is only possible for
+ * UPDATE/DELETE, so we can't be fooled by some needing a
+ * filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
@@ -678,6 +686,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
resultRelInfo++;
subplans = lnext(subplans);
}
+
/*
* Set active junkfilter too; at this point ExecInitAppend
* has already selected an active result relation...
@@ -750,10 +759,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into relation.
- * Note that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will
- * be visible for insertion.
+ * If necessary, create a TOAST table for the into
+ * relation. Note that AlterTableCreateToastTable ends
+ * with CommandCounterIncrement(), so that the TOAST table
+ * will be visible for insertion.
*/
AlterTableCreateToastTable(intoName, true);
@@ -817,9 +826,8 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do
- * this for a DELETE, however, since deletion doesn't affect
- * indexes.
+ * index entries for the tuples we add/update. We need not do this
+ * for a DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
@@ -857,8 +865,8 @@ EndPlan(Plan *plan, EState *estate)
estate->es_tupleTable = NULL;
/*
- * close the result relation(s) if any, but hold locks
- * until xact commit. Also clean up junkfilters if present.
+ * close the result relation(s) if any, but hold locks until xact
+ * commit. Also clean up junkfilters if present.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
@@ -1033,7 +1041,7 @@ lnext: ;
/*
* Unlike the UPDATE/DELETE case, a null result is
* possible here, when the referenced table is on the
- * nullable side of an outer join. Ignore nulls.
+ * nullable side of an outer join. Ignore nulls.
*/
if (isNull)
continue;
@@ -1216,7 +1224,7 @@ ExecAppend(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelationDesc->trigdesc &&
- resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1227,11 +1235,12 @@ ExecAppend(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
+
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself.
- * The tuple table slot should not try to clear it.
+ * memory context, and therefore will go away by itself. The
+ * tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@@ -1294,7 +1303,7 @@ ExecDelete(TupleTableSlot *slot,
/* BEFORE ROW DELETE Triggers */
if (resultRelationDesc->trigdesc &&
- resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
+ resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
{
bool dodelete;
@@ -1323,7 +1332,7 @@ ldelete:;
else if (!(ItemPointerEquals(tupleid, &ctid)))
{
TupleTableSlot *epqslot = EvalPlanQual(estate,
- resultRelInfo->ri_RangeTableIndex, &ctid);
+ resultRelInfo->ri_RangeTableIndex, &ctid);
if (!TupIsNull(epqslot))
{
@@ -1400,7 +1409,7 @@ ExecReplace(TupleTableSlot *slot,
/* BEFORE ROW UPDATE Triggers */
if (resultRelationDesc->trigdesc &&
- resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
+ resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
{
HeapTuple newtuple;
@@ -1411,11 +1420,12 @@ ExecReplace(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
+
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself.
- * The tuple table slot should not try to clear it.
+ * memory context, and therefore will go away by itself. The
+ * tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@@ -1447,7 +1457,7 @@ lreplace:;
else if (!(ItemPointerEquals(tupleid, &ctid)))
{
TupleTableSlot *epqslot = EvalPlanQual(estate,
- resultRelInfo->ri_RangeTableIndex, &ctid);
+ resultRelInfo->ri_RangeTableIndex, &ctid);
if (!TupIsNull(epqslot))
{
@@ -1469,10 +1479,10 @@ lreplace:;
/*
* Note: instead of having to update the old index tuples associated
- * with the heap tuple, all we do is form and insert new index
- * tuples. This is because replaces are actually deletes and inserts
- * and index tuple deletion is done automagically by the vacuum
- * daemon. All we do is insert new index tuples. -cim 9/27/89
+ * with the heap tuple, all we do is form and insert new index tuples.
+ * This is because replaces are actually deletes and inserts and index
+ * tuple deletion is done automagically by the vacuum daemon. All we
+ * do is insert new index tuples. -cim 9/27/89
*/
/*
@@ -1525,8 +1535,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
}
/*
- * We will use the EState's per-tuple context for evaluating constraint
- * expressions (creating it if it's not already there).
+ * We will use the EState's per-tuple context for evaluating
+ * constraint expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@@ -1568,10 +1578,10 @@ ExecConstraints(char *caller, ResultRelInfo *resultRelInfo,
for (attrChk = 1; attrChk <= natts; attrChk++)
{
- if (rel->rd_att->attrs[attrChk-1]->attnotnull &&
+ if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
heap_attisnull(tuple, attrChk))
elog(ERROR, "%s: Fail to add null value in not null attribute %s",
- caller, NameStr(rel->rd_att->attrs[attrChk-1]->attname));
+ caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
}
}
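The final execMain.c hunk reflows ExecConstraints()'s NOT NULL loop: every attribute flagged attnotnull is tested against the new tuple before the insert or update proceeds. A standalone sketch of that check follows, using toy DemoAttr/is_null types rather than the backend's TupleDesc and heap_attisnull().

#include <stdbool.h>
#include <stdio.h>

typedef struct DemoAttr
{
	const char *attname;
	bool		attnotnull;		/* column declared NOT NULL? */
} DemoAttr;

/*
 * Reject a row if any NOT NULL column holds a null, in the spirit of
 * ExecConstraints(); is_null[] marks which attribute values are null.
 */
static bool
check_not_null(const DemoAttr *attrs, const bool *is_null, int natts)
{
	int			attno;

	for (attno = 0; attno < natts; attno++)
	{
		if (attrs[attno].attnotnull && is_null[attno])
		{
			fprintf(stderr,
					"Fail to add null value in not null attribute %s\n",
					attrs[attno].attname);
			return false;
		}
	}
	return true;
}

int
main(void)
{
	DemoAttr	attrs[] = {{"id", true}, {"note", false}};
	bool		nulls[] = {false, true};	/* "note" is null: allowed */

	printf("row ok: %d\n", check_not_null(attrs, nulls, 2));
	nulls[0] = true;						/* "id" null: rejected */
	printf("row ok: %d\n", check_not_null(attrs, nulls, 2));
	return 0;
}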
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index bab2851df9d..79873073b7a 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.83 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.84 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,22 +46,22 @@
/* static function decls */
static Datum ExecEvalAggref(Aggref *aggref, ExprContext *econtext,
- bool *isNull);
+ bool *isNull);
static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalOper(Expr *opClause, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFunc(Expr *funcClause, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static ExprDoneCond ExecEvalFuncArgs(FunctionCachePtr fcache,
- List *argList,
- ExprContext *econtext);
+ List *argList,
+ ExprContext *econtext);
static Datum ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull);
static Datum ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull);
static Datum ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull);
static Datum ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
/*----------
@@ -77,7 +77,7 @@ static Datum ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext,
* done in versions up through 7.0) then an assignment like
* UPDATE table SET arrayfield[4] = NULL
* will result in setting the whole array to NULL, which is certainly not
- * very desirable. By returning the source array we make the assignment
+ * very desirable. By returning the source array we make the assignment
* into a no-op, instead. (Eventually we need to redesign arrays so that
* individual elements can be NULL, but for now, let's try to protect users
* from shooting themselves in the foot.)
@@ -112,10 +112,11 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
isDone));
+
/*
* If refexpr yields NULL, result is always NULL, for now anyway.
- * (This means you cannot assign to an element or slice of an array
- * that's NULL; it'll just stay NULL.)
+ * (This means you cannot assign to an element or slice of an
+ * array that's NULL; it'll just stay NULL.)
*/
if (*isNull)
return (Datum) NULL;
@@ -147,7 +148,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
/* If any index expr yields NULL, result is NULL or source array */
if (*isNull)
{
- if (! isAssignment || array_source == NULL)
+ if (!isAssignment || array_source == NULL)
return (Datum) NULL;
*isNull = false;
return PointerGetDatum(array_source);
@@ -166,10 +167,14 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL));
- /* If any index expr yields NULL, result is NULL or source array */
+
+ /*
+ * If any index expr yields NULL, result is NULL or source
+ * array
+ */
if (*isNull)
{
- if (! isAssignment || array_source == NULL)
+ if (!isAssignment || array_source == NULL)
return (Datum) NULL;
*isNull = false;
return PointerGetDatum(array_source);
@@ -189,9 +194,10 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL);
+
/*
- * For now, can't cope with inserting NULL into an array,
- * so make it a no-op per discussion above...
+ * For now, can't cope with inserting NULL into an array, so make
+ * it a no-op per discussion above...
*/
if (*isNull)
{
@@ -202,7 +208,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
}
if (array_source == NULL)
- return sourceData; /* XXX do something else? */
+ return sourceData; /* XXX do something else? */
if (lIndex == NULL)
resultArray = array_set(array_source, i,
@@ -215,7 +221,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
else
resultArray = array_set_slice(array_source, i,
upper.indx, lower.indx,
- (ArrayType *) DatumGetPointer(sourceData),
+ (ArrayType *) DatumGetPointer(sourceData),
arrayRef->refelembyval,
arrayRef->refelemlength,
arrayRef->refattrlength,
@@ -587,12 +593,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
int i;
List *arg;
- argIsDone = ExprSingleResult; /* default assumption */
+ argIsDone = ExprSingleResult; /* default assumption */
i = 0;
foreach(arg, argList)
{
- ExprDoneCond thisArgIsDone;
+ ExprDoneCond thisArgIsDone;
fcache->fcinfo.arg[i] = ExecEvalExpr((Node *) lfirst(arg),
econtext,
@@ -601,10 +607,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
if (thisArgIsDone != ExprSingleResult)
{
+
/*
* We allow only one argument to have a set value; we'd need
- * much more complexity to keep track of multiple set arguments
- * (cf. ExecTargetList) and it doesn't seem worth it.
+ * much more complexity to keep track of multiple set
+ * arguments (cf. ExecTargetList) and it doesn't seem worth
+ * it.
*/
if (argIsDone != ExprSingleResult)
elog(ERROR, "Functions and operators can take only one set argument");
@@ -632,15 +640,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
bool *isNull,
ExprDoneCond *isDone)
{
- Datum result;
- ExprDoneCond argDone;
- int i;
+ Datum result;
+ ExprDoneCond argDone;
+ int i;
/*
* arguments is a list of expressions to evaluate before passing to
* the function manager. We skip the evaluation if it was already
- * done in the previous call (ie, we are continuing the evaluation
- * of a set-valued function). Otherwise, collect the current argument
+ * done in the previous call (ie, we are continuing the evaluation of
+ * a set-valued function). Otherwise, collect the current argument
* values into fcache->fcinfo.
*/
if (fcache->fcinfo.nargs > 0 && !fcache->argsValid)
@@ -664,28 +672,30 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
*/
if (fcache->func.fn_retset || fcache->hasSetArg)
{
+
/*
- * We need to return a set result. Complain if caller not ready
+ * We need to return a set result. Complain if caller not ready
* to accept one.
*/
if (isDone == NULL)
elog(ERROR, "Set-valued function called in context that cannot accept a set");
/*
- * This loop handles the situation where we have both a set argument
- * and a set-valued function. Once we have exhausted the function's
- * value(s) for a particular argument value, we have to get the next
- * argument value and start the function over again. We might have
- * to do it more than once, if the function produces an empty result
- * set for a particular input value.
+ * This loop handles the situation where we have both a set
+ * argument and a set-valued function. Once we have exhausted the
+ * function's value(s) for a particular argument value, we have to
+ * get the next argument value and start the function over again.
+ * We might have to do it more than once, if the function produces
+ * an empty result set for a particular input value.
*/
for (;;)
{
+
/*
* If function is strict, and there are any NULL arguments,
* skip calling the function (at least for this set of args).
*/
- bool callit = true;
+ bool callit = true;
if (fcache->func.fn_strict)
{
@@ -716,13 +726,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
if (*isDone != ExprEndResult)
{
+
/*
- * Got a result from current argument. If function itself
- * returns set, flag that we want to reuse current argument
- * values on next call.
+ * Got a result from current argument. If function itself
+ * returns set, flag that we want to reuse current
+ * argument values on next call.
*/
if (fcache->func.fn_retset)
fcache->argsValid = true;
+
/*
* Make sure we say we are returning a set, even if the
* function itself doesn't return sets.
@@ -762,11 +774,12 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
}
else
{
+
/*
* Non-set case: much easier.
*
- * If function is strict, and there are any NULL arguments,
- * skip calling the function and return NULL.
+ * If function is strict, and there are any NULL arguments, skip
+ * calling the function and return NULL.
*/
if (fcache->func.fn_strict)
{
@@ -852,9 +865,9 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache;
/*
- * we extract the oid of the function associated with the func node and
- * then pass the work onto ExecMakeFunctionResult which evaluates the
- * arguments and returns the result of calling the function on the
+ * we extract the oid of the function associated with the func node
+ * and then pass the work onto ExecMakeFunctionResult which evaluates
+ * the arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
@@ -915,7 +928,7 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
*/
- return BoolGetDatum(! DatumGetBool(expr_value));
+ return BoolGetDatum(!DatumGetBool(expr_value));
}
/* ----------------------------------------------------------------
@@ -999,7 +1012,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
*/
if (*isNull)
AnyNull = true; /* remember we got a null */
- else if (! DatumGetBool(clause_value))
+ else if (!DatumGetBool(clause_value))
return clause_value;
}
@@ -1079,7 +1092,7 @@ ExecEvalFieldSelect(FieldSelect *fselect,
bool *isNull,
ExprDoneCond *isDone)
{
- Datum result;
+ Datum result;
TupleTableSlot *resSlot;
result = ExecEvalExpr(fselect->arg, econtext, isNull, isDone);
@@ -1111,7 +1124,7 @@ ExecEvalFieldSelect(FieldSelect *fselect,
*
* A caller that can only accept a singleton (non-set) result should pass
* NULL for isDone; if the expression computes a set result then an elog()
- * error will be reported. If the caller does pass an isDone pointer then
+ * error will be reported. If the caller does pass an isDone pointer then
* *isDone is set to one of these three states:
* ExprSingleResult singleton result (not a set)
* ExprMultipleResult return value is one element of a set
@@ -1127,7 +1140,7 @@ ExecEvalFieldSelect(FieldSelect *fselect,
* The caller should already have switched into the temporary memory
* context econtext->ecxt_per_tuple_memory. The convenience entry point
* ExecEvalExprSwitchContext() is provided for callers who don't prefer to
- * do the switch in an outer loop. We do not do the switch here because
+ * do the switch in an outer loop. We do not do the switch here because
* it'd be a waste of cycles during recursive entries to ExecEvalExpr().
*
* This routine is an inner loop routine and must be as fast as possible.
@@ -1353,15 +1366,15 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
{
if (resultForNull == false)
{
- result = false; /* treat NULL as FALSE */
+ result = false; /* treat NULL as FALSE */
break;
}
}
else
{
- if (! DatumGetBool(expr_value))
+ if (!DatumGetBool(expr_value))
{
- result = false; /* definitely FALSE */
+ result = false; /* definitely FALSE */
break;
}
}
@@ -1383,7 +1396,7 @@ ExecTargetListLength(List *targetlist)
foreach(tl, targetlist)
{
- TargetEntry *curTle = (TargetEntry *) lfirst(tl);
+ TargetEntry *curTle = (TargetEntry *) lfirst(tl);
if (curTle->resdom != NULL)
len++;
@@ -1404,17 +1417,15 @@ ExecCleanTargetListLength(List *targetlist)
foreach(tl, targetlist)
{
- TargetEntry *curTle = (TargetEntry *) lfirst(tl);
+ TargetEntry *curTle = (TargetEntry *) lfirst(tl);
if (curTle->resdom != NULL)
{
- if (! curTle->resdom->resjunk)
+ if (!curTle->resdom->resjunk)
len++;
}
else
- {
len += curTle->fjoin->fj_nNodes;
- }
}
return len;
}
@@ -1440,6 +1451,7 @@ ExecTargetList(List *targetlist,
ExprDoneCond *isDone)
{
MemoryContext oldContext;
+
#define NPREALLOCDOMAINS 64
char nullsArray[NPREALLOCDOMAINS];
bool fjIsNullArray[NPREALLOCDOMAINS];
@@ -1484,10 +1496,11 @@ ExecTargetList(List *targetlist,
* we have a really large targetlist. otherwise we use the stack.
*
* We also allocate a bool array that is used to hold fjoin result state,
- * and another array that holds the isDone status for each targetlist item.
- * The isDone status is needed so that we can iterate, generating multiple
- * tuples, when one or more tlist items return sets. (We expect the caller
- * to call us again if we return *isDone = ExprMultipleResult.)
+ * and another array that holds the isDone status for each targetlist
+ * item. The isDone status is needed so that we can iterate,
+ * generating multiple tuples, when one or more tlist items return
+ * sets. (We expect the caller to call us again if we return *isDone
+ * = ExprMultipleResult.)
*/
if (nodomains > NPREALLOCDOMAINS)
{
@@ -1507,7 +1520,7 @@ ExecTargetList(List *targetlist,
*/
if (isDone)
- *isDone = ExprSingleResult; /* until proven otherwise */
+ *isDone = ExprSingleResult; /* until proven otherwise */
haveDoneSets = false; /* any exhausted set exprs in tlist? */
@@ -1554,8 +1567,10 @@ ExecTargetList(List *targetlist,
ExecEvalFjoin(tle, econtext, fjIsNull, isDone);
- /* XXX this is wrong, but since fjoin code is completely broken
- * anyway, I'm not going to worry about it now --- tgl 8/23/00
+ /*
+ * XXX this is wrong, but since fjoin code is completely
+ * broken anyway, I'm not going to worry about it now --- tgl
+ * 8/23/00
*/
if (isDone && *isDone == ExprEndResult)
{
@@ -1594,6 +1609,7 @@ ExecTargetList(List *targetlist,
if (haveDoneSets)
{
+
/*
* note: can't get here unless we verified isDone != NULL
*/
@@ -1601,7 +1617,8 @@ ExecTargetList(List *targetlist,
{
/*
- * all sets are done, so report that tlist expansion is complete.
+ * all sets are done, so report that tlist expansion is
+ * complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
@@ -1612,7 +1629,7 @@ ExecTargetList(List *targetlist,
{
/*
- * We have some done and some undone sets. Restart the done
+ * We have some done and some undone sets. Restart the done
* ones so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
@@ -1628,7 +1645,7 @@ ExecTargetList(List *targetlist,
values[resind] = ExecEvalExpr(tle->expr,
econtext,
&isNull,
- &itemIsDone[resind]);
+ &itemIsDone[resind]);
nulls[resind] = isNull ? 'n' : ' ';
if (itemIsDone[resind] == ExprEndResult)
@@ -1644,10 +1661,11 @@ ExecTargetList(List *targetlist,
}
}
}
+
/*
- * If we cannot make a tuple because some sets are empty,
- * we still have to cycle the nonempty sets to completion,
- * else resources will not be released from subplans etc.
+ * If we cannot make a tuple because some sets are empty, we
+ * still have to cycle the nonempty sets to completion, else
+ * resources will not be released from subplans etc.
*/
if (*isDone == ExprEndResult)
{
@@ -1752,8 +1770,8 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
/*
* store the tuple in the projection slot and return the slot.
*/
- return ExecStoreTuple(newTuple, /* tuple to store */
- slot, /* slot to store in */
- InvalidBuffer, /* tuple has no buffer */
+ return ExecStoreTuple(newTuple, /* tuple to store */
+ slot, /* slot to store in */
+ InvalidBuffer, /* tuple has no buffer */
true);
}
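The ExecEvalAnd and ExecQual hunks above encode SQL's three-valued AND: a definite FALSE short-circuits, a NULL is only remembered, and the overall result is NULL rather than TRUE whenever any input was NULL. A small standalone sketch of that rule follows; the TriBool enum and tri_and name are illustrative, not the executor's Datum/isNull convention.

#include <stdio.h>

typedef enum TriBool
{
	TRI_FALSE,
	TRI_TRUE,
	TRI_NULL
} TriBool;

/*
 * Three-valued AND over a clause list, mirroring ExecEvalAnd(): a
 * definite FALSE wins immediately; otherwise any NULL makes the whole
 * expression NULL; only all-TRUE yields TRUE.
 */
static TriBool
tri_and(const TriBool *clauses, int n)
{
	int			i;
	int			saw_null = 0;

	for (i = 0; i < n; i++)
	{
		if (clauses[i] == TRI_FALSE)
			return TRI_FALSE;	/* short-circuit, like the executor */
		if (clauses[i] == TRI_NULL)
			saw_null = 1;		/* remember it, keep scanning */
	}
	return saw_null ? TRI_NULL : TRI_TRUE;
}

int
main(void)
{
	TriBool		a[] = {TRI_TRUE, TRI_NULL, TRI_TRUE};
	TriBool		b[] = {TRI_TRUE, TRI_NULL, TRI_FALSE};

	printf("TRUE AND NULL AND TRUE  -> %d (2 = NULL)\n", tri_and(a, 3));
	printf("TRUE AND NULL AND FALSE -> %d (0 = FALSE)\n", tri_and(b, 3));
	return 0;
}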
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 3a2e79dbdd4..58a3b5edea4 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.15 2001/01/24 19:42:54 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,7 +46,7 @@
*/
TupleTableSlot *
ExecScan(Scan *node,
- ExecScanAccessMtd accessMtd) /* function returning a tuple */
+ ExecScanAccessMtd accessMtd) /* function returning a tuple */
{
CommonScanState *scanstate;
EState *estate;
@@ -81,7 +81,7 @@ ExecScan(Scan *node,
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a scan tuple.
* ----------------
*/
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index e5f1a269d81..3e75aef337c 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.46 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@
* TupIsNull - true when slot contains no tuple(Macro)
*
* CONVENIENCE INITIALIZATION ROUTINES
- * ExecInitResultTupleSlot \ convenience routines to initialize
+ * ExecInitResultTupleSlot \ convenience routines to initialize
* ExecInitScanTupleSlot \ the various tuple slots for nodes
* ExecInitExtraTupleSlot / which store copies of tuples.
* ExecInitNullTupleSlot /
@@ -422,7 +422,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
slot->val = (HeapTuple) NULL;
- slot->ttc_shouldFree = true; /* probably useless code... */
+ slot->ttc_shouldFree = true;/* probably useless code... */
/* ----------------
* Drop the pin on the referenced buffer, if there is one.
@@ -446,7 +446,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
void
ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
TupleDesc tupdesc, /* new tuple descriptor */
- bool shouldFree) /* is desc owned by slot? */
+ bool shouldFree) /* is desc owned by slot? */
{
if (slot->ttc_shouldFreeDesc &&
slot->ttc_tupleDescriptor != NULL)
@@ -482,7 +482,7 @@ ExecSetSlotDescriptorIsNew(TupleTableSlot *slot, /* slot to change */
* ExecInit{Result,Scan,Extra}TupleSlot
*
* These are convenience routines to initialize the specified slot
- * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
+ * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
* is used for initializing special-purpose slots.
* --------------------------------
*/
@@ -541,11 +541,13 @@ ExecInitExtraTupleSlot(EState *estate)
TupleTableSlot *
ExecInitNullTupleSlot(EState *estate, TupleDesc tupType)
{
- TupleTableSlot* slot = ExecInitExtraTupleSlot(estate);
+ TupleTableSlot *slot = ExecInitExtraTupleSlot(estate);
+
/*
* Since heap_getattr() will treat attributes beyond a tuple's t_natts
- * as being NULL, we can make an all-nulls tuple just by making it be of
- * zero length. However, the slot descriptor must match the real tupType.
+ * as being NULL, we can make an all-nulls tuple just by making it be
+ * of zero length. However, the slot descriptor must match the real
+ * tupType.
*/
HeapTuple nullTuple;
Datum values[1];
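The ExecInitNullTupleSlot() hunk above rests on one property: attributes past a tuple's t_natts read back as NULL, so a zero-length tuple behaves as an all-nulls row of any width. The sketch below illustrates that property with a toy DemoTuple and demo_getattr, which are hypothetical stand-ins rather than heap_getattr() itself.

#include <stdbool.h>
#include <stdio.h>

typedef struct DemoTuple
{
	int			natts;			/* attributes physically stored */
	int			values[8];		/* stored attribute values */
} DemoTuple;

/*
 * Read attribute attno (1-based); anything past natts reads as NULL,
 * which is why a tuple built with natts = 0 serves as an all-nulls row.
 */
static int
demo_getattr(const DemoTuple *tup, int attno, bool *is_null)
{
	if (attno > tup->natts)
	{
		*is_null = true;
		return 0;
	}
	*is_null = false;
	return tup->values[attno - 1];
}

int
main(void)
{
	DemoTuple	null_tuple = {0, {0}};	/* zero-length: every column null */
	bool		isnull;
	int			val = demo_getattr(&null_tuple, 3, &isnull);

	printf("attr 3: value=%d null=%d\n", val, isnull);
	return 0;
}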
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 6b030b64a0e..6ee0d2e26ed 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.73 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,6 +148,7 @@ ExecAssignExprContext(EState *estate, CommonState *commonstate)
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = CurrentMemoryContext;
+
/*
* Create working memory for expression evaluation in this context.
*/
@@ -184,14 +185,16 @@ MakeExprContext(TupleTableSlot *slot,
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = queryContext;
+
/*
* We make the temporary context a child of current working context,
* not of the specified queryContext. This seems reasonable but I'm
* not totally sure about it...
*
* Expression contexts made via this routine typically don't live long
- * enough to get reset, so specify a minsize of 0. That avoids alloc'ing
- * any memory in the common case where expr eval doesn't use any.
+ * enough to get reset, so specify a minsize of 0. That avoids
+ * alloc'ing any memory in the common case where expr eval doesn't use
+ * any.
*/
econtext->ecxt_per_tuple_memory =
AllocSetContextCreate(CurrentMemoryContext,
@@ -209,7 +212,7 @@ MakeExprContext(TupleTableSlot *slot,
/*
* Free an ExprContext made by MakeExprContext, including the temporary
- * context used for expression evaluation. Note this will cause any
+ * context used for expression evaluation. Note this will cause any
* pass-by-reference expression result to go away!
*/
void
@@ -447,7 +450,7 @@ ExecAssignScanTypeFromOuterPlan(Plan *node, CommonScanState *csstate)
* resultRelInfo->ri_RelationDesc.
*
* This used to be horribly ugly code, and slow too because it
- * did a sequential scan of pg_index. Now we rely on the relcache
+ * did a sequential scan of pg_index. Now we rely on the relcache
* to cache a list of the OIDs of the indices associated with any
* specific relation, and we use the pg_index syscache to get the
* entries we need from pg_index.
@@ -467,7 +470,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
resultRelInfo->ri_NumIndices = 0;
/* checks for disabled indexes */
- if (! RelationGetForm(resultRelation)->relhasindex)
+ if (!RelationGetForm(resultRelation)->relhasindex)
return;
if (IsIgnoringSystemIndexes() &&
IsSystemRelationName(RelationGetRelationName(resultRelation)))
@@ -635,8 +638,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapDescriptor = RelationGetDescr(heapRelation);
/*
- * We will use the EState's per-tuple context for evaluating predicates
- * and functional-index functions (creating it if it's not already there).
+ * We will use the EState's per-tuple context for evaluating
+ * predicates and functional-index functions (creating it if it's not
+ * already there).
*/
econtext = GetPerTupleExprContext(estate);
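
The execUtils.c hunks above lean on a per-tuple memory context that gets reset between tuples, with a minsize of 0 so a context that is never used allocates nothing. A rough allocate-then-reset sketch of that pattern, not part of the patch; the arena here is hypothetical and much simpler than the real AllocSet contexts:

    #include <stdlib.h>
    #include <string.h>

    /* Hypothetical per-tuple arena: one block, grown lazily on first use. */
    typedef struct {
        char  *block;
        size_t size;
        size_t used;
    } Arena;

    static void *arena_alloc(Arena *a, size_t n)
    {
        if (a->block == NULL) {            /* minsize 0: nothing allocated until needed */
            a->size = (n > 1024) ? n : 1024;
            a->block = malloc(a->size);
            a->used = 0;
        }
        if (a->block == NULL || a->used + n > a->size)
            return NULL;                   /* a real allocator would grow here */
        void *p = a->block + a->used;
        a->used += n;
        return p;
    }

    /* Reset between tuples: memory is reused, not freed and re-malloc'd. */
    static void arena_reset(Arena *a) { a->used = 0; }

    int main(void)
    {
        Arena per_tuple = { NULL, 0, 0 };
        for (int tuple = 0; tuple < 3; tuple++) {
            char *scratch = arena_alloc(&per_tuple, 64);
            if (scratch)
                strcpy(scratch, "expression evaluation scratch");
            arena_reset(&per_tuple);       /* drop everything from this tuple cycle */
        }
        free(per_tuple.block);
        return 0;
    }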
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 575f33d84b6..4cc1dc27926 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.43 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,15 +70,15 @@ typedef SQLFunctionCache *SQLFunctionCachePtr;
/* non-export function prototypes */
static execution_state *init_execution_state(char *src,
- Oid *argOidVect, int nargs);
+ Oid *argOidVect, int nargs);
static void init_sql_fcache(FmgrInfo *finfo);
static void postquel_start(execution_state *es);
static TupleTableSlot *postquel_getnext(execution_state *es);
static void postquel_end(execution_state *es);
static void postquel_sub_params(execution_state *es, FunctionCallInfo fcinfo);
static Datum postquel_execute(execution_state *es,
- FunctionCallInfo fcinfo,
- SQLFunctionCachePtr fcache);
+ FunctionCallInfo fcinfo,
+ SQLFunctionCachePtr fcache);
static execution_state *
@@ -180,7 +180,7 @@ init_sql_fcache(FmgrInfo *finfo)
* ----------------
*/
typeTuple = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procedureStruct->prorettype),
+ ObjectIdGetDatum(procedureStruct->prorettype),
0, 0, 0);
if (!HeapTupleIsValid(typeTuple))
elog(ERROR, "init_sql_fcache: Cache lookup failed for type %u",
@@ -235,9 +235,7 @@ init_sql_fcache(FmgrInfo *finfo)
nargs * sizeof(Oid));
}
else
- {
argOidVect = (Oid *) NULL;
- }
tmp = SysCacheGetAttr(PROCOID,
procedureTuple,
@@ -346,8 +344,8 @@ copy_function_result(SQLFunctionCachePtr fcache,
return resultSlot; /* no need to copy result */
/*
- * If first time through, we have to initialize the funcSlot's
- * tuple descriptor.
+ * If first time through, we have to initialize the funcSlot's tuple
+ * descriptor.
*/
if (funcSlot->ttc_tupleDescriptor == NULL)
{
@@ -415,12 +413,14 @@ postquel_execute(execution_state *es,
/*
* If we are supposed to return a tuple, we return the tuple slot
- * pointer converted to Datum. If we are supposed to return a simple
- * value, then project out the first attribute of the result tuple
- * (ie, take the first result column of the final SELECT).
+ * pointer converted to Datum. If we are supposed to return a
+ * simple value, then project out the first attribute of the
+ * result tuple (ie, take the first result column of the final
+ * SELECT).
*/
if (fcache->returnsTuple)
{
+
/*
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
@@ -434,6 +434,7 @@ postquel_execute(execution_state *es,
1,
resSlot->ttc_tupleDescriptor,
&(fcinfo->isnull));
+
/*
* Note: if result type is pass-by-reference then we are
* returning a pointer into the tuple copied by
@@ -546,7 +547,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
*/
if (fcinfo->flinfo->fn_retset)
{
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = ExprEndResult;
@@ -572,7 +573,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
*/
if (fcinfo->flinfo->fn_retset)
{
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = ExprMultipleResult;
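
fmgr_sql above reports ExprMultipleResult while a set-returning function still has rows and ExprEndResult once it is exhausted. A simplified model of that call protocol, not part of the patch and using invented names rather than the fmgr interface:

    #include <stdio.h>

    typedef enum { EXPR_SINGLE, EXPR_MULTIPLE, EXPR_END } DoneCond;

    /* A toy "SQL function" that returns the set {10, 20, 30}. */
    static int toy_srf(int *state, DoneCond *isDone)
    {
        static const int rows[] = { 10, 20, 30 };
        if (*state >= 3) {
            *isDone = EXPR_END;        /* no row this call; caller stops */
            return 0;
        }
        *isDone = EXPR_MULTIPLE;       /* a row was produced; call again */
        return rows[(*state)++];
    }

    int main(void)
    {
        int state = 0;
        DoneCond isDone;
        for (;;) {
            int val = toy_srf(&state, &isDone);
            if (isDone == EXPR_END)
                break;
            printf("row: %d\n", val);
        }
        return 0;
    }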
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index f60f499e6be..e0f50bd66d1 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -19,7 +19,7 @@
* The agg's input type and transtype must be the same in this case!
*
* If transfunc is marked "strict" then NULL input_values are skipped,
- * keeping the previous transvalue. If transfunc is not strict then it
+ * keeping the previous transvalue. If transfunc is not strict then it
* is called for every input tuple and must deal with NULL initcond
* or NULL input_value for itself.
*
@@ -34,7 +34,7 @@
* are not allowed to accumulate until end of query. We do this by
* "ping-ponging" between two memory contexts; successive calls to the
* transfunc are executed in alternate contexts, passing the previous
- * transvalue that is in the other context. At the beginning of each
+ * transvalue that is in the other context. At the beginning of each
* tuple cycle we can reset the current output context to avoid memory
* usage growth. Note: we must use MemoryContextContains() to check
* whether the transfunc has perhaps handed us back one of its input
@@ -46,7 +46,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.75 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.76 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,8 +130,8 @@ typedef struct AggStatePerAggData
* an input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT) aggregate, we just feed the input values
- * straight to the transition function. If it's DISTINCT, we pass
- * the input values into a Tuplesort object; then at completion of the
+ * straight to the transition function. If it's DISTINCT, we pass the
+ * input values into a Tuplesort object; then at completion of the
* input tuple group, we scan the sorted values, eliminate duplicates,
* and run the transition function on the rest.
*/
@@ -144,20 +144,21 @@ typedef struct AggStatePerAggData
bool noTransValue; /* true if transValue not set yet */
/*
- * Note: noTransValue initially has the same value as transValueIsNull,
- * and if true both are cleared to false at the same time. They are
- * not the same though: if transfn later returns a NULL, we want to
- * keep that NULL and not auto-replace it with a later input value.
- * Only the first non-NULL input will be auto-substituted.
+ * Note: noTransValue initially has the same value as
+ * transValueIsNull, and if true both are cleared to false at the same
+ * time. They are not the same though: if transfn later returns a
+ * NULL, we want to keep that NULL and not auto-replace it with a
+ * later input value. Only the first non-NULL input will be
+ * auto-substituted.
*/
} AggStatePerAggData;
static void initialize_aggregate(AggStatePerAgg peraggstate);
static void advance_transition_function(AggStatePerAgg peraggstate,
- Datum newVal, bool isNull);
+ Datum newVal, bool isNull);
static void process_sorted_aggregate(AggState *aggstate,
- AggStatePerAgg peraggstate);
+ AggStatePerAgg peraggstate);
static void finalize_aggregate(AggStatePerAgg peraggstate,
Datum *resultVal, bool *resultIsNull);
@@ -195,8 +196,8 @@ initialize_aggregate(AggStatePerAgg peraggstate)
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we just reuse it
- * without copying for each group. Hence, transition function
- * had better not scribble on its input, or it will fail for GROUP BY!
+ * without copying for each group. Hence, transition function had
+ * better not scribble on its input, or it will fail for GROUP BY!
*/
peraggstate->transValue = peraggstate->initValue;
peraggstate->transValueIsNull = peraggstate->initValueIsNull;
@@ -222,50 +223,55 @@ static void
advance_transition_function(AggStatePerAgg peraggstate,
Datum newVal, bool isNull)
{
- FunctionCallInfoData fcinfo;
+ FunctionCallInfoData fcinfo;
if (peraggstate->transfn.fn_strict)
{
if (isNull)
{
+
/*
- * For a strict transfn, nothing happens at a NULL input tuple;
- * we just keep the prior transValue. However, if the transtype
- * is pass-by-ref, we have to copy it into the new context
- * because the old one is going to get reset.
+ * For a strict transfn, nothing happens at a NULL input
+ * tuple; we just keep the prior transValue. However, if the
+ * transtype is pass-by-ref, we have to copy it into the new
+ * context because the old one is going to get reset.
*/
if (!peraggstate->transValueIsNull)
peraggstate->transValue = datumCopy(peraggstate->transValue,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
return;
}
if (peraggstate->noTransValue)
{
+
/*
- * transValue has not been initialized. This is the first non-NULL
- * input value. We use it as the initial value for transValue.
- * (We already checked that the agg's input type is binary-
- * compatible with its transtype, so straight copy here is OK.)
+ * transValue has not been initialized. This is the first
+ * non-NULL input value. We use it as the initial value for
+ * transValue. (We already checked that the agg's input type
+ * is binary- compatible with its transtype, so straight copy
+ * here is OK.)
*
- * We had better copy the datum if it is pass-by-ref, since
- * the given pointer may be pointing into a scan tuple that
- * will be freed on the next iteration of the scan.
+ * We had better copy the datum if it is pass-by-ref, since the
+ * given pointer may be pointing into a scan tuple that will
+ * be freed on the next iteration of the scan.
*/
peraggstate->transValue = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
peraggstate->transValueIsNull = false;
peraggstate->noTransValue = false;
return;
}
if (peraggstate->transValueIsNull)
{
+
/*
* Don't call a strict function with NULL inputs. Note it is
- * possible to get here despite the above tests, if the transfn
- * is strict *and* returned a NULL on a prior cycle. If that
- * happens we will propagate the NULL all the way to the end.
+ * possible to get here despite the above tests, if the
+ * transfn is strict *and* returned a NULL on a prior cycle.
+ * If that happens we will propagate the NULL all the way to
+ * the end.
*/
return;
}
@@ -283,14 +289,14 @@ advance_transition_function(AggStatePerAgg peraggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
- * If the transition function was uncooperative, it may have
- * given us a pass-by-ref result that points at the scan tuple
- * or the prior-cycle working memory. Copy it into the active
- * context if it doesn't look right.
+ * If the transition function was uncooperative, it may have given us
+ * a pass-by-ref result that points at the scan tuple or the
+ * prior-cycle working memory. Copy it into the active context if it
+ * doesn't look right.
*/
if (!peraggstate->transtypeByVal && !fcinfo.isnull &&
- ! MemoryContextContains(CurrentMemoryContext,
- DatumGetPointer(newVal)))
+ !MemoryContextContains(CurrentMemoryContext,
+ DatumGetPointer(newVal)))
newVal = datumCopy(newVal,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
@@ -302,7 +308,7 @@ advance_transition_function(AggStatePerAgg peraggstate,
/*
* Run the transition function for a DISTINCT aggregate. This is called
* after we have completed entering all the input values into the sort
- * object. We complete the sort, read out the values in sorted order,
+ * object. We complete the sort, read out the values in sorted order,
* and run the transition function on each non-duplicate value.
*
* When called, CurrentMemoryContext should be the per-query context.
@@ -321,19 +327,21 @@ process_sorted_aggregate(AggState *aggstate,
/*
* Note: if input type is pass-by-ref, the datums returned by the sort
- * are freshly palloc'd in the per-query context, so we must be careful
- * to pfree them when they are no longer needed.
+ * are freshly palloc'd in the per-query context, so we must be
+ * careful to pfree them when they are no longer needed.
*/
while (tuplesort_getdatum(peraggstate->sortstate, true,
&newVal, &isNull))
{
+
/*
* DISTINCT always suppresses nulls, per SQL spec, regardless of
* the transition function's strictness.
*/
if (isNull)
continue;
+
/*
* Clear and select the current working context for evaluation of
* the equality function and transition function.
@@ -349,6 +357,7 @@ process_sorted_aggregate(AggState *aggstate,
/* equal to prior, so forget this one */
if (!peraggstate->inputtypeByVal)
pfree(DatumGetPointer(newVal));
+
/*
* note we do NOT flip contexts in this case, so no need to
* copy prior transValue to other context.
@@ -357,6 +366,7 @@ process_sorted_aggregate(AggState *aggstate,
else
{
advance_transition_function(peraggstate, newVal, false);
+
/*
* Make the other context current so that this transition
* result is preserved.
@@ -389,12 +399,13 @@ static void
finalize_aggregate(AggStatePerAgg peraggstate,
Datum *resultVal, bool *resultIsNull)
{
+
/*
* Apply the agg's finalfn if one is provided, else return transValue.
*/
if (OidIsValid(peraggstate->finalfn_oid))
{
- FunctionCallInfoData fcinfo;
+ FunctionCallInfoData fcinfo;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &peraggstate->finalfn;
@@ -422,9 +433,9 @@ finalize_aggregate(AggStatePerAgg peraggstate,
/*
* If result is pass-by-ref, make sure it is in the right context.
*/
- if (!peraggstate->resulttypeByVal && ! *resultIsNull &&
- ! MemoryContextContains(CurrentMemoryContext,
- DatumGetPointer(*resultVal)))
+ if (!peraggstate->resulttypeByVal && !*resultIsNull &&
+ !MemoryContextContains(CurrentMemoryContext,
+ DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal,
peraggstate->resulttypeByVal,
peraggstate->resulttypeLen);
@@ -480,7 +491,8 @@ ExecAgg(Agg *node)
peragg = aggstate->peragg;
/*
- * We loop retrieving groups until we find one matching node->plan.qual
+ * We loop retrieving groups until we find one matching
+ * node->plan.qual
*/
do
{
@@ -578,19 +590,19 @@ ExecAgg(Agg *node)
* calculation, and stash results in the per-output-tuple context.
*
* This is a bit tricky when there are both DISTINCT and plain
- * aggregates: we must first finalize all the plain aggs and then all
- * the DISTINCT ones. This is needed because the last transition
- * values for the plain aggs are stored in the not-current working
- * context, and we have to evaluate those aggs (and stash the results
- * in the output tup_cxt!) before we start flipping contexts again
- * in process_sorted_aggregate.
+ * aggregates: we must first finalize all the plain aggs and then
+ * all the DISTINCT ones. This is needed because the last
+ * transition values for the plain aggs are stored in the
+ * not-current working context, and we have to evaluate those aggs
+ * (and stash the results in the output tup_cxt!) before we start
+ * flipping contexts again in process_sorted_aggregate.
*/
oldContext = MemoryContextSwitchTo(aggstate->tup_cxt);
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
{
AggStatePerAgg peraggstate = &peragg[aggno];
- if (! peraggstate->aggref->aggdistinct)
+ if (!peraggstate->aggref->aggdistinct)
finalize_aggregate(peraggstate,
&aggvalues[aggno], &aggnulls[aggno]);
}
@@ -766,21 +778,22 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecAssignExprContext(estate, &aggstate->csstate.cstate);
/*
- * We actually need three separate expression memory contexts: one
- * for calculating per-output-tuple values (ie, the finished aggregate
+ * We actually need three separate expression memory contexts: one for
+ * calculating per-output-tuple values (ie, the finished aggregate
* results), and two that we ping-pong between for per-input-tuple
* evaluation of input expressions and transition functions. The
- * context made by ExecAssignExprContext() is used as the output context.
+ * context made by ExecAssignExprContext() is used as the output
+ * context.
*/
aggstate->tup_cxt =
aggstate->csstate.cstate.cs_ExprContext->ecxt_per_tuple_memory;
- aggstate->agg_cxt[0] =
+ aggstate->agg_cxt[0] =
AllocSetContextCreate(CurrentMemoryContext,
"AggExprContext1",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
- aggstate->agg_cxt[1] =
+ aggstate->agg_cxt[1] =
AllocSetContextCreate(CurrentMemoryContext,
"AggExprContext2",
ALLOCSET_DEFAULT_MINSIZE,
@@ -882,30 +895,32 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
/*
* If the transfn is strict and the initval is NULL, make sure
* input type and transtype are the same (or at least binary-
- * compatible), so that it's OK to use the first input value
- * as the initial transValue. This should have been checked at
- * agg definition time, but just in case...
+ * compatible), so that it's OK to use the first input value as
+ * the initial transValue. This should have been checked at agg
+ * definition time, but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
+
/*
- * Note: use the type from the input expression here,
- * not aggform->aggbasetype, because the latter might be 0.
+ * Note: use the type from the input expression here, not
+ * aggform->aggbasetype, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
if (inputType != aggform->aggtranstype &&
- ! IS_BINARY_COMPATIBLE(inputType, aggform->aggtranstype))
+ !IS_BINARY_COMPATIBLE(inputType, aggform->aggtranstype))
elog(ERROR, "Aggregate %s needs to have compatible input type and transition type",
aggname);
}
if (aggref->aggdistinct)
{
+
/*
- * Note: use the type from the input expression here,
- * not aggform->aggbasetype, because the latter might be 0.
+ * Note: use the type from the input expression here, not
+ * aggform->aggbasetype, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
@@ -947,12 +962,14 @@ ExecEndAgg(Agg *node)
Plan *outerPlan;
ExecFreeProjectionInfo(&aggstate->csstate.cstate);
+
/*
* Make sure ExecFreeExprContext() frees the right expr context...
*/
aggstate->csstate.cstate.cs_ExprContext->ecxt_per_tuple_memory =
aggstate->tup_cxt;
ExecFreeExprContext(&aggstate->csstate.cstate);
+
/*
* ... and I free the others.
*/
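
The nodeAgg.c comments above spell out the strict-transition-function rules: NULL inputs are skipped, the first non-NULL input seeds the transition value, and a NULL returned by the transition function itself sticks from then on. A compact standalone model of that decision sequence, not part of the patch; it deliberately omits the memory-context ping-pong and datumCopy concerns:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct {
        long transValue;
        bool transValueIsNull;
        bool noTransValue;       /* true until the first non-NULL input is seen */
    } AggState;

    /* A strict transition function: here, simple summation. */
    static long sum_transfn(long trans, long newval) { return trans + newval; }

    static void advance(AggState *st, long newval, bool isnull)
    {
        if (isnull)
            return;                       /* strict: NULL input keeps prior transValue */
        if (st->noTransValue) {           /* first non-NULL input seeds the state */
            st->transValue = newval;
            st->transValueIsNull = false;
            st->noTransValue = false;
            return;
        }
        if (st->transValueIsNull)
            return;                       /* transfn returned NULL earlier: propagate it */
        st->transValue = sum_transfn(st->transValue, newval);
    }

    int main(void)
    {
        AggState st = { 0, true, true };
        long inputs[] = { 0, 5, 0, 7 };
        bool nulls[]  = { true, false, true, false };
        for (int i = 0; i < 4; i++)
            advance(&st, inputs[i], nulls[i]);
        printf("result: %s%ld\n", st.transValueIsNull ? "NULL " : "", st.transValue);
        return 0;
    }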
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index e4ede51852a..500e9c07c43 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.41 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.42 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ ExecGroupEveryTuple(Group *node)
tupdesc = ExecGetScanType(&grpstate->csstate);
/*
- * We need not call ResetExprContext here because execTuplesMatch
- * will reset the per-tuple memory context once per input tuple.
+ * We need not call ResetExprContext here because execTuplesMatch will
+ * reset the per-tuple memory context once per input tuple.
*/
/* if we haven't returned first tuple of a new group yet ... */
@@ -199,8 +199,8 @@ ExecGroupOneTuple(Group *node)
tupdesc = ExecGetScanType(&grpstate->csstate);
/*
- * We need not call ResetExprContext here because execTuplesMatch
- * will reset the per-tuple memory context once per input tuple.
+ * We need not call ResetExprContext here because execTuplesMatch will
+ * reset the per-tuple memory context once per input tuple.
*/
firsttuple = grpstate->grp_firstTuple;
@@ -465,8 +465,8 @@ execTuplesMatch(HeapTuple tuple1,
/* Apply the type-specific equality function */
- if (! DatumGetBool(FunctionCall2(&eqfunctions[i],
- attr1, attr2)))
+ if (!DatumGetBool(FunctionCall2(&eqfunctions[i],
+ attr1, attr2)))
{
result = false; /* they aren't equal */
break;
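
execTuplesMatch, used above to find group boundaries, compares the grouping columns one by one with a type-specific equality function; for grouping purposes two NULLs count as a match while NULL versus non-NULL does not. A minimal sketch of that loop over plain C arrays, not part of the patch and not the HeapTuple layout:

    #include <stdbool.h>
    #include <stdio.h>

    typedef bool (*EqFunc)(long a, long b);

    static bool int_eq(long a, long b) { return a == b; }

    /* Compare two rows column by column. */
    static bool tuples_match(const long *v1, const bool *null1,
                             const long *v2, const bool *null2,
                             int ncols, const EqFunc *eqfn)
    {
        for (int i = 0; i < ncols; i++) {
            if (null1[i] && null2[i])
                continue;                       /* both NULL: treated as matching */
            if (null1[i] || null2[i])
                return false;                   /* NULL vs non-NULL: no match */
            if (!eqfn[i](v1[i], v2[i]))
                return false;                   /* type-specific equality failed */
        }
        return true;
    }

    int main(void)
    {
        long a[] = { 1, 42 }, b[] = { 1, 0 };
        bool an[] = { false, true }, bn[] = { false, true };
        EqFunc fns[] = { int_eq, int_eq };
        printf("match: %d\n", tuples_match(a, an, b, bn, 2, fns));
        return 0;
    }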
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 070354ace7c..7b5e3d4cced 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $Id: nodeHash.c,v 1.54 2001/01/24 19:42:54 momjian Exp $
+ * $Id: nodeHash.c,v 1.55 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -540,9 +540,7 @@ ExecHashGetBucket(HashJoinTable hashtable,
* ------------------
*/
if (isNull)
- {
bucketno = 0;
- }
else
{
bucketno = hashFunc(keyval,
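
The ExecHashGetBucket hunk above sends NULL keys to bucket 0 and hashes everything else modulo the bucket count. A one-function sketch of the same mapping, not part of the patch; the real hashFunc is chosen per datatype:

    #include <stdio.h>
    #include <stdbool.h>

    static unsigned get_bucket(long key, bool isnull, unsigned nbuckets)
    {
        if (isnull)
            return 0;                                  /* all NULL keys land in bucket 0 */
        unsigned h = (unsigned) (key * 2654435761u);   /* stand-in hash function */
        return h % nbuckets;
    }

    int main(void)
    {
        printf("%u %u\n", get_bucket(12345, false, 1024), get_bucket(0, true, 1024));
        return 0;
    }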
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index a3fc2f545cb..dae06d2c937 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.36 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.37 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -105,7 +105,7 @@ ExecHashJoin(HashJoin *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* ----------------
*/
@@ -155,6 +155,7 @@ ExecHashJoin(HashJoin *node)
for (;;)
{
+
/*
* If we don't have an outer tuple, get the next one
*/
@@ -276,14 +277,15 @@ ExecHashJoin(HashJoin *node)
*/
hjstate->hj_NeedNewOuter = true;
- if (! hjstate->hj_MatchedOuter &&
+ if (!hjstate->hj_MatchedOuter &&
node->join.jointype == JOIN_LEFT)
{
+
/*
* We are doing an outer join and there were no join matches
* for this outer tuple. Generate a fake join tuple with
- * nulls for the inner tuple, and return it if it passes
- * the non-join quals.
+ * nulls for the inner tuple, and return it if it passes the
+ * non-join quals.
*/
econtext->ecxt_innertuple = hjstate->hj_NullInnerTupleSlot;
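
This nodeHashjoin.c hunk covers the LEFT JOIN case: if an outer tuple matched nothing, the executor emits it once joined against an all-nulls inner tuple, provided the remaining non-join quals pass. A skeletal loop showing just that control flow, on toy data rather than the HashJoinState machinery, and not part of the patch:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy LEFT JOIN of outer[] against inner[] on equality. */
    int main(void)
    {
        int outer[] = { 1, 2, 3 };
        int inner[] = { 2, 2, 4 };
        for (int i = 0; i < 3; i++) {
            bool matched_outer = false;
            for (int j = 0; j < 3; j++) {
                if (outer[i] == inner[j]) {       /* join qual */
                    matched_outer = true;
                    printf("(%d, %d)\n", outer[i], inner[j]);
                }
            }
            if (!matched_outer) {
                /* No match: emit a fake join row with NULLs for the inner side.
                 * Any extra non-join quals would still be checked here. */
                printf("(%d, NULL)\n", outer[i]);
            }
        }
        return 0;
    }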
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index c0369e8f4cd..a6e6e45e9dc 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.57 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.58 2001/03/22 03:59:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -224,7 +224,7 @@ IndexNext(IndexScan *node)
qual = lnext(qual);
}
if (!prev_matches)
- return slot; /* OK to return tuple */
+ return slot;/* OK to return tuple */
/* Duplicate tuple, so drop it and loop back for another */
ExecClearTuple(slot);
}
@@ -326,7 +326,8 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
estate = node->scan.plan.state;
indexstate = node->indxstate;
- econtext = indexstate->iss_RuntimeContext; /* context for runtime keys */
+ econtext = indexstate->iss_RuntimeContext; /* context for runtime
+ * keys */
direction = estate->es_direction;
numIndices = indexstate->iss_NumIndices;
scanDescs = indexstate->iss_ScanDescs;
@@ -340,16 +341,18 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
if (econtext)
{
+
/*
- * If we are being passed an outer tuple,
- * save it for runtime key calc
+ * If we are being passed an outer tuple, save it for runtime key
+ * calc
*/
if (exprCtxt != NULL)
econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
+
/*
- * Reset the runtime-key context so we don't leak memory as
- * each outer tuple is scanned. Note this assumes that we
- * will recalculate *all* runtime keys on each call.
+ * Reset the runtime-key context so we don't leak memory as each
+ * outer tuple is scanned. Note this assumes that we will
+ * recalculate *all* runtime keys on each call.
*/
ResetExprContext(econtext);
}
@@ -385,8 +388,8 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
* outer tuple. We then stick the result into the scan
* key.
*
- * Note: the result of the eval could be a pass-by-ref
- * value that's stored in the outer scan's tuple, not in
+ * Note: the result of the eval could be a pass-by-ref value
+ * that's stored in the outer scan's tuple, not in
* econtext->ecxt_per_tuple_memory. We assume that the
* outer tuple will stay put throughout our scan. If this
* is wrong, we could copy the result into our context
@@ -790,7 +793,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
Assert(leftop != NULL);
- if (IsA(leftop, Var) && var_is_rel((Var *) leftop))
+ if (IsA(leftop, Var) &&var_is_rel((Var *) leftop))
{
/* ----------------
* if the leftop is a "rel-var", then it means
@@ -862,7 +865,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
Assert(rightop != NULL);
- if (IsA(rightop, Var) && var_is_rel((Var *) rightop))
+ if (IsA(rightop, Var) &&var_is_rel((Var *) rightop))
{
/* ----------------
* here we make sure only one op identifies the
@@ -986,7 +989,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
*/
if (have_runtime_keys)
{
- ExprContext *stdecontext = scanstate->cstate.cs_ExprContext;
+ ExprContext *stdecontext = scanstate->cstate.cs_ExprContext;
ExecAssignExprContext(estate, &scanstate->cstate);
indexstate->iss_RuntimeKeyInfo = runtimeKeyInfo;
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index c7cc76f0a70..534c3a419d1 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.3 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.4 2001/03/22 03:59:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,7 +59,7 @@ ExecLimit(Limit *node)
* may not be set until now.)
* ----------------
*/
- if (! limitstate->parmsSet)
+ if (!limitstate->parmsSet)
recompute_limits(node);
netlimit = limitstate->offset + limitstate->count;
@@ -89,7 +89,7 @@ ExecLimit(Limit *node)
{
if (limitstate->atEnd)
return NULL;
- if (! limitstate->noCount && limitstate->position > netlimit)
+ if (!limitstate->noCount && limitstate->position > netlimit)
return NULL;
}
else
@@ -104,13 +104,14 @@ ExecLimit(Limit *node)
slot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(slot))
{
+
/*
* We are at start or end of the subplan. Update local state
* appropriately, but always return NULL.
*/
if (ScanDirectionIsForward(direction))
{
- Assert(! limitstate->atEnd);
+ Assert(!limitstate->atEnd);
/* must bump position to stay in sync for backwards fetch */
limitstate->position++;
limitstate->atEnd = true;
@@ -122,6 +123,7 @@ ExecLimit(Limit *node)
}
return NULL;
}
+
/*
* We got the next subplan tuple successfully, so adjust state.
*/
@@ -135,7 +137,7 @@ ExecLimit(Limit *node)
limitstate->atEnd = false;
/* ----------------
- * Now, is this a tuple we want? If not, loop around to fetch
+ * Now, is this a tuple we want? If not, loop around to fetch
* another tuple from the subplan.
* ----------------
*/
@@ -185,9 +187,9 @@ recompute_limits(Limit *node)
if (node->limitCount)
{
limitstate->count = DatumGetInt32(ExecEvalExpr(node->limitCount,
- econtext,
- &isNull,
- NULL));
+ econtext,
+ &isNull,
+ NULL));
/* Interpret NULL count as no count (LIMIT ALL) */
if (isNull)
limitstate->noCount = true;
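
nodeLimit above computes netlimit = offset + count, treats a NULL count as LIMIT ALL (noCount), and tracks its position so it can also step backwards. A forward-only sketch of the filtering rule, not part of the patch, ignoring the backward-scan bookkeeping:

    #include <stdio.h>
    #include <stdbool.h>

    int main(void)
    {
        int rows[] = { 10, 11, 12, 13, 14, 15, 16 };
        int nrows = 7;

        int offset = 2;          /* OFFSET 2 */
        int count = 3;           /* LIMIT 3  */
        bool noCount = false;    /* true would mean LIMIT ALL (NULL count) */
        int netlimit = offset + count;

        int position = 0;        /* number of subplan rows fetched so far */
        for (int i = 0; i < nrows; i++) {
            position++;
            if (position <= offset)
                continue;                        /* still skipping OFFSET rows */
            if (!noCount && position > netlimit)
                break;                           /* past OFFSET+COUNT: done */
            printf("emit %d\n", rows[i]);
        }
        return 0;
    }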
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index dd09c6ac1c1..1f55f852f0e 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.33 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.34 2001/03/22 03:59:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
*
* The first time this is called, ExecMaterial retrieves tuples
* from this node's outer subplan and inserts them into a tuplestore
- * (a temporary tuple storage structure). The first tuple is then
+ * (a temporary tuple storage structure). The first tuple is then
* returned. Successive calls to ExecMaterial return successive
* tuples from the tuplestore.
*
@@ -85,7 +85,7 @@ ExecMaterial(Material *node)
* Initialize tuplestore module.
* ----------------
*/
- tuplestorestate = tuplestore_begin_heap(true, /* randomAccess */
+ tuplestorestate = tuplestore_begin_heap(true, /* randomAccess */
SortMem);
matstate->tuplestorestate = (void *) tuplestorestate;
@@ -250,7 +250,7 @@ ExecEndMaterial(Material *node)
void
ExecMaterialMarkPos(Material *node)
{
- MaterialState *matstate = node->matstate;
+ MaterialState *matstate = node->matstate;
/* ----------------
* if we haven't materialized yet, just return.
@@ -271,7 +271,7 @@ ExecMaterialMarkPos(Material *node)
void
ExecMaterialRestrPos(Material *node)
{
- MaterialState *matstate = node->matstate;
+ MaterialState *matstate = node->matstate;
/* ----------------
* if we haven't materialized yet, just return.
@@ -299,8 +299,8 @@ ExecMaterialReScan(Material *node, ExprContext *exprCtxt, Plan *parent)
MaterialState *matstate = node->matstate;
/*
- * If we haven't materialized yet, just return. If outerplan' chgParam is
- * not NULL then it will be re-scanned by ExecProcNode, else - no
+ * If we haven't materialized yet, just return. If outerplan' chgParam
+ * is not NULL then it will be re-scanned by ExecProcNode, else - no
* reason to re-scan it at all.
*/
if (!matstate->tuplestorestate)
@@ -309,8 +309,8 @@ ExecMaterialReScan(Material *node, ExprContext *exprCtxt, Plan *parent)
ExecClearTuple(matstate->csstate.cstate.cs_ResultTupleSlot);
/*
- * If subnode is to be rescanned then we forget previous stored results;
- * we have to re-read the subplan and re-store.
+ * If subnode is to be rescanned then we forget previous stored
+ * results; we have to re-read the subplan and re-store.
*
* Otherwise we can just rewind and rescan the stored output.
*/
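
ExecMaterial above drains the outer subplan into a tuplestore on the first call and afterwards serves tuples from that store, so mark/restore and rescans need not re-run the subplan unless its parameters changed. A small cache-on-first-use sketch of that idea, not part of the patch; a fixed array stands in for the tuplestore:

    #include <stdio.h>
    #include <stdbool.h>

    #define NROWS 4

    typedef struct {
        bool filled;
        int  store[NROWS];   /* stands in for the tuplestore */
        int  readpos;
    } MatState;

    /* "Subplan": pretend each call is expensive to recompute. */
    static int subplan_next(int i) { return i * i; }

    static int material_next(MatState *m, bool *done)
    {
        if (!m->filled) {                 /* first call: drain the subplan into the store */
            for (int i = 0; i < NROWS; i++)
                m->store[i] = subplan_next(i);
            m->filled = true;
            m->readpos = 0;
        }
        if (m->readpos >= NROWS) {
            *done = true;
            return 0;
        }
        *done = false;
        return m->store[m->readpos++];    /* later calls just read the stored copy */
    }

    int main(void)
    {
        MatState m = { false, {0}, 0 };
        bool done;
        for (;;) {
            int v = material_next(&m, &done);
            if (done) break;
            printf("%d\n", v);
        }
        m.readpos = 0;                    /* a rescan can simply rewind the store */
        return 0;
    }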
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index fd8868a4a54..e3617c032b0 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.42 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.43 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,7 +169,7 @@ MJFormSkipQual(List *qualList, char *replaceopname)
CharGetDatum('b'));
if (!HeapTupleIsValid(optup))
elog(ERROR,
- "MJFormSkipQual: mergejoin operator %u has no matching %s op",
+ "MJFormSkipQual: mergejoin operator %u has no matching %s op",
op->opno, replaceopname);
opform = (Form_pg_operator) GETSTRUCT(optup);
@@ -258,7 +258,7 @@ MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
&isNull,
NULL);
- if (! DatumGetBool(const_value) || isNull)
+ if (!DatumGetBool(const_value) || isNull)
break; /* return false */
eqclause = lnext(eqclause);
@@ -439,7 +439,7 @@ ExecMergeJoin(MergeJoin *node)
default:
elog(ERROR, "ExecMergeJoin: unsupported join type %d",
(int) node->join.jointype);
- doFillOuter = false; /* keep compiler quiet */
+ doFillOuter = false;/* keep compiler quiet */
doFillInner = false;
break;
}
@@ -464,7 +464,7 @@ ExecMergeJoin(MergeJoin *node)
if (mergestate->jstate.cs_TupFromTlist)
{
TupleTableSlot *result;
- ExprDoneCond isDone;
+ ExprDoneCond isDone;
result = ExecProject(mergestate->jstate.cs_ProjInfo, &isDone);
if (isDone == ExprMultipleResult)
@@ -475,7 +475,7 @@ ExecMergeJoin(MergeJoin *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* ----------------
*/
@@ -500,9 +500,9 @@ ExecMergeJoin(MergeJoin *node)
/*
* EXEC_MJ_INITIALIZE means that this is the first time
- * ExecMergeJoin() has been called and so we have to
- * fetch the first tuple for both outer and inner subplans.
- * If we fail to get a tuple here, then that subplan is
+ * ExecMergeJoin() has been called and so we have to fetch
+ * the first tuple for both outer and inner subplans. If
+ * we fail to get a tuple here, then that subplan is
* empty, and we either end the join or go to one of the
* fill-remaining-tuples states.
*/
@@ -516,6 +516,7 @@ ExecMergeJoin(MergeJoin *node)
MJ_printf("ExecMergeJoin: outer subplan is empty\n");
if (doFillInner)
{
+
/*
* Need to emit right-join tuples for remaining
* inner tuples. We set MatchedInner = true to
@@ -536,11 +537,13 @@ ExecMergeJoin(MergeJoin *node)
MJ_printf("ExecMergeJoin: inner subplan is empty\n");
if (doFillOuter)
{
+
/*
- * Need to emit left-join tuples for all outer tuples,
- * including the one we just fetched. We set
- * MatchedOuter = false to force the ENDINNER state
- * to emit this tuple before advancing outer.
+ * Need to emit left-join tuples for all outer
+ * tuples, including the one we just fetched. We
+ * set MatchedOuter = false to force the ENDINNER
+ * state to emit this tuple before advancing
+ * outer.
*/
mergestate->mj_JoinState = EXEC_MJ_ENDINNER;
mergestate->mj_MatchedOuter = false;
@@ -614,17 +617,17 @@ ExecMergeJoin(MergeJoin *node)
/*
* Check the extra qual conditions to see if we actually
- * want to return this join tuple. If not, can proceed with
- * merge. We must distinguish the additional joinquals
- * (which must pass to consider the tuples "matched" for
- * outer-join logic) from the otherquals (which must pass
- * before we actually return the tuple).
+ * want to return this join tuple. If not, can proceed
+ * with merge. We must distinguish the additional
+ * joinquals (which must pass to consider the tuples
+ * "matched" for outer-join logic) from the otherquals
+ * (which must pass before we actually return the tuple).
*
* We don't bother with a ResetExprContext here, on the
- * assumption that we just did one before checking the merge
- * qual. One per tuple should be sufficient. Also, the
- * econtext's tuple pointers were set up before checking
- * the merge qual, so we needn't do it again.
+ * assumption that we just did one before checking the
+ * merge qual. One per tuple should be sufficient. Also,
+ * the econtext's tuple pointers were set up before
+ * checking the merge qual, so we needn't do it again.
*/
qualResult = (joinqual == NIL ||
ExecQual(joinqual, econtext, false));
@@ -677,11 +680,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillInner && !mergestate->mj_MatchedInner)
{
+
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedInner = true; /* do it only once */
+ mergestate->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -753,11 +758,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillOuter && !mergestate->mj_MatchedOuter)
{
+
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedOuter = true; /* do it only once */
+ mergestate->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -810,6 +817,7 @@ ExecMergeJoin(MergeJoin *node)
innerTupleSlot = mergestate->mj_InnerTupleSlot;
if (doFillInner && !TupIsNull(innerTupleSlot))
{
+
/*
* Need to emit right-join tuples for remaining
* inner tuples.
@@ -879,19 +887,20 @@ ExecMergeJoin(MergeJoin *node)
{
/*
- * the merge clause matched so now we restore the inner
- * scan position to the first mark, and loop back to
- * JOINTEST. Actually, since we know the mergeclause
- * matches, we can skip JOINTEST and go straight to
- * JOINTUPLES.
+ * the merge clause matched so now we restore the
+ * inner scan position to the first mark, and loop
+ * back to JOINTEST. Actually, since we know the
+ * mergeclause matches, we can skip JOINTEST and go
+ * straight to JOINTUPLES.
*
* NOTE: we do not need to worry about the MatchedInner
* state for the rescanned inner tuples. We know all
- * of them will match this new outer tuple and therefore
- * won't be emitted as fill tuples. This works *only*
- * because we require the extra joinquals to be nil when
- * doing a right or full join --- otherwise some of the
- * rescanned tuples might fail the extra joinquals.
+ * of them will match this new outer tuple and
+ * therefore won't be emitted as fill tuples. This
+ * works *only* because we require the extra joinquals
+ * to be nil when doing a right or full join ---
+ * otherwise some of the rescanned tuples might fail
+ * the extra joinquals.
*/
ExecRestrPos(innerPlan);
mergestate->mj_JoinState = EXEC_MJ_JOINTUPLES;
@@ -918,6 +927,7 @@ ExecMergeJoin(MergeJoin *node)
{
if (doFillOuter)
{
+
/*
* Need to emit left-join tuples for remaining
* outer tuples.
@@ -1044,11 +1054,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillOuter && !mergestate->mj_MatchedOuter)
{
+
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedOuter = true; /* do it only once */
+ mergestate->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1101,6 +1113,7 @@ ExecMergeJoin(MergeJoin *node)
innerTupleSlot = mergestate->mj_InnerTupleSlot;
if (doFillInner && !TupIsNull(innerTupleSlot))
{
+
/*
* Need to emit right-join tuples for remaining
* inner tuples.
@@ -1229,11 +1242,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillInner && !mergestate->mj_MatchedInner)
{
+
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedInner = true; /* do it only once */
+ mergestate->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1286,6 +1301,7 @@ ExecMergeJoin(MergeJoin *node)
outerTupleSlot = mergestate->mj_OuterTupleSlot;
if (doFillOuter && !TupIsNull(outerTupleSlot))
{
+
/*
* Need to emit left-join tuples for remaining
* outer tuples.
@@ -1306,8 +1322,8 @@ ExecMergeJoin(MergeJoin *node)
/*
* EXEC_MJ_ENDOUTER means we have run out of outer tuples,
- * but are doing a right/full join and therefore must null-
- * fill any remaing unmatched inner tuples.
+ * but are doing a right/full join and therefore must
+ * null- fill any remaing unmatched inner tuples.
*/
case EXEC_MJ_ENDOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDOUTER\n");
@@ -1316,11 +1332,13 @@ ExecMergeJoin(MergeJoin *node)
if (!mergestate->mj_MatchedInner)
{
+
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedInner = true; /* do it only once */
+ mergestate->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1383,11 +1401,13 @@ ExecMergeJoin(MergeJoin *node)
if (!mergestate->mj_MatchedOuter)
{
+
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedOuter = true; /* do it only once */
+ mergestate->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1515,14 +1535,16 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
case JOIN_LEFT:
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(innerPlan((Plan*) node)));
+ ExecGetTupType(innerPlan((Plan *) node)));
break;
case JOIN_RIGHT:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(outerPlan((Plan*) node)));
+ ExecGetTupType(outerPlan((Plan *) node)));
+
/*
- * Can't handle right or full join with non-nil extra joinclauses.
+ * Can't handle right or full join with non-nil extra
+ * joinclauses.
*/
if (node->join.joinqual != NIL)
elog(ERROR, "RIGHT JOIN is only supported with mergejoinable join conditions");
@@ -1530,12 +1552,14 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
case JOIN_FULL:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(outerPlan((Plan*) node)));
+ ExecGetTupType(outerPlan((Plan *) node)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(innerPlan((Plan*) node)));
+ ExecGetTupType(innerPlan((Plan *) node)));
+
/*
- * Can't handle right or full join with non-nil extra joinclauses.
+ * Can't handle right or full join with non-nil extra
+ * joinclauses.
*/
if (node->join.joinqual != NIL)
elog(ERROR, "FULL JOIN is only supported with mergejoinable join conditions");
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index f514b03851f..9c01ee4a1fb 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.22 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.23 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,7 +97,7 @@ ExecNestLoop(NestLoop *node)
if (nlstate->jstate.cs_TupFromTlist)
{
TupleTableSlot *result;
- ExprDoneCond isDone;
+ ExprDoneCond isDone;
result = ExecProject(nlstate->jstate.cs_ProjInfo, &isDone);
if (isDone == ExprMultipleResult)
@@ -108,7 +108,7 @@ ExecNestLoop(NestLoop *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* ----------------
*/
@@ -179,14 +179,15 @@ ExecNestLoop(NestLoop *node)
nlstate->nl_NeedNewOuter = true;
- if (! nlstate->nl_MatchedOuter &&
+ if (!nlstate->nl_MatchedOuter &&
node->join.jointype == JOIN_LEFT)
{
+
/*
- * We are doing an outer join and there were no join matches
- * for this outer tuple. Generate a fake join tuple with
- * nulls for the inner tuple, and return it if it passes
- * the non-join quals.
+ * We are doing an outer join and there were no join
+ * matches for this outer tuple. Generate a fake join
+ * tuple with nulls for the inner tuple, and return it if
+ * it passes the non-join quals.
*/
econtext->ecxt_innertuple = nlstate->nl_NullInnerTupleSlot;
@@ -215,6 +216,7 @@ ExecNestLoop(NestLoop *node)
}
}
}
+
/*
* Otherwise just return to top of loop for a new outer tuple.
*/
@@ -328,7 +330,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate, Plan *parent)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(innerPlan((Plan*) node)));
+ ExecGetTupType(innerPlan((Plan *) node)));
break;
default:
elog(ERROR, "ExecInitNestLoop: unsupported join type %d",
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index fa401e20b24..863d4a4a56e 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.17 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.18 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,7 +111,7 @@ ExecResult(Result *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a scan tuple.
* ----------------
*/
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index a39128ff2f0..d879cee7a75 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.27 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.28 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
#include "parser/parsetree.h"
static Oid InitScanRelation(SeqScan *node, EState *estate,
- CommonScanState *scanstate);
+ CommonScanState *scanstate);
static TupleTableSlot *SeqNext(SeqScan *node);
/* ----------------------------------------------------------------
@@ -174,8 +174,8 @@ InitScanRelation(SeqScan *node, EState *estate,
0, /* is index */
direction, /* scan direction */
estate->es_snapshot,
- &currentRelation, /* return: rel desc */
- (Pointer *) &currentScanDesc); /* return: scan desc */
+ &currentRelation, /* return: rel desc */
+ (Pointer *) &currentScanDesc); /* return: scan desc */
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = currentScanDesc;
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index ad35a46d636..00c79992039 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -5,7 +5,7 @@
*
* The input of a SetOp node consists of tuples from two relations,
* which have been combined into one dataset and sorted on all the nonjunk
- * attributes. In addition there is a junk attribute that shows which
+ * attributes. In addition there is a junk attribute that shows which
* relation each tuple came from. The SetOp node scans each group of
* identical tuples to determine how many came from each input relation.
* Then it is a simple matter to emit the output demanded by the SQL spec
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSetOp.c,v 1.2 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSetOp.c,v 1.3 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ ExecSetOp(SetOp *node)
* ----------------
*/
if (setopstate->cstate.cs_OuterTupleSlot == NULL &&
- ! setopstate->subplan_done)
+ !setopstate->subplan_done)
{
setopstate->cstate.cs_OuterTupleSlot =
ExecProcNode(outerPlan, (Plan *) node);
@@ -104,6 +104,7 @@ ExecSetOp(SetOp *node)
if (TupIsNull(resultTupleSlot))
{
+
/*
* First of group: save a copy in result slot, and reset
* duplicate-counters for new group.
@@ -113,13 +114,15 @@ ExecSetOp(SetOp *node)
ExecStoreTuple(heap_copytuple(inputTupleSlot->val),
resultTupleSlot,
InvalidBuffer,
- true); /* free copied tuple at ExecClearTuple */
+ true); /* free copied tuple at
+ * ExecClearTuple */
setopstate->numLeft = 0;
setopstate->numRight = 0;
endOfGroup = false;
}
else if (setopstate->subplan_done)
{
+
/*
* Reached end of input, so finish processing final group
*/
@@ -127,8 +130,10 @@ ExecSetOp(SetOp *node)
}
else
{
+
/*
- * Else test if the new tuple and the previously saved tuple match.
+ * Else test if the new tuple and the previously saved tuple
+ * match.
*/
if (execTuplesMatch(inputTupleSlot->val,
resultTupleSlot->val,
@@ -143,6 +148,7 @@ ExecSetOp(SetOp *node)
if (endOfGroup)
{
+
/*
* We've reached the end of the group containing resultTuple.
* Decide how many copies (if any) to emit. This logic is
@@ -185,12 +191,13 @@ ExecSetOp(SetOp *node)
}
else
{
+
/*
- * Current tuple is member of same group as resultTuple.
- * Count it in the appropriate counter.
+ * Current tuple is member of same group as resultTuple. Count
+ * it in the appropriate counter.
*/
- int flag;
- bool isNull;
+ int flag;
+ bool isNull;
flag = DatumGetInt32(heap_getattr(inputTupleSlot->val,
node->flagColIdx,
@@ -207,8 +214,8 @@ ExecSetOp(SetOp *node)
}
/*
- * If we fall out of loop, then we need to emit at least one copy
- * of resultTuple.
+ * If we fall out of loop, then we need to emit at least one copy of
+ * resultTuple.
*/
Assert(setopstate->numOutput > 0);
setopstate->numOutput--;
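
The SetOp node described above scans each group of identical sorted tuples, counts how many came from each input (numLeft/numRight) via the junk flag column, and then emits the number of copies the set operation demands. A sketch of just the per-group output rule, not part of the patch and based on my reading of the SQL spec semantics for INTERSECT/EXCEPT [ALL]:

    #include <stdio.h>

    typedef enum { SETOP_INTERSECT, SETOP_INTERSECT_ALL,
                   SETOP_EXCEPT, SETOP_EXCEPT_ALL } SetOpCmd;

    /* How many copies of a group to emit, given how many identical tuples
     * came from the left (numLeft) and right (numRight) inputs. */
    static int setop_output_count(SetOpCmd cmd, int numLeft, int numRight)
    {
        switch (cmd) {
            case SETOP_INTERSECT:     return (numLeft > 0 && numRight > 0) ? 1 : 0;
            case SETOP_INTERSECT_ALL: return numLeft < numRight ? numLeft : numRight;
            case SETOP_EXCEPT:        return (numLeft > 0 && numRight == 0) ? 1 : 0;
            case SETOP_EXCEPT_ALL:    return numLeft > numRight ? numLeft - numRight : 0;
        }
        return 0;
    }

    int main(void)
    {
        printf("INTERSECT ALL(3,2) -> %d\n",
               setop_output_count(SETOP_INTERSECT_ALL, 3, 2));
        printf("EXCEPT ALL(3,2)    -> %d\n",
               setop_output_count(SETOP_EXCEPT_ALL, 3, 2));
        return 0;
    }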
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index d75d9a6f052..a8df4940ae4 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.29 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.30 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
{
HeapTuple tup = slot->val;
TupleDesc tdesc = slot->ttc_tupleDescriptor;
- Datum rowresult = BoolGetDatum(! useor);
+ Datum rowresult = BoolGetDatum(!useor);
bool rownull = false;
int col = 1;
@@ -213,7 +213,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
/* combine within row per AND semantics */
if (expnull)
rownull = true;
- else if (! DatumGetBool(expresult))
+ else if (!DatumGetBool(expresult))
{
rowresult = BoolGetDatum(false);
rownull = false;
@@ -240,7 +240,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
/* combine across rows per AND semantics */
if (rownull)
*isNull = true;
- else if (! DatumGetBool(rowresult))
+ else if (!DatumGetBool(rowresult))
{
result = BoolGetDatum(false);
*isNull = false;
@@ -332,7 +332,7 @@ ExecInitSubPlan(SubPlan *node, EState *estate, Plan *parent)
*
* This is called from ExecEvalParam() when the value of a PARAM_EXEC
* parameter is requested and the param's execPlan field is set (indicating
- * that the param has not yet been evaluated). This allows lazy evaluation
+ * that the param has not yet been evaluated). This allows lazy evaluation
* of initplans: we don't run the subplan until/unless we need its output.
* Note that this routine MUST clear the execPlan fields of the plan's
* output parameters after evaluating them!
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 4c9144bc3a8..9b8711c9914 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.4 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.5 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,8 +67,8 @@ SubqueryNext(SubqueryScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now. We could
- * introduce new nodes for this case and handle SubqueryScan --> NewNode
- * switching in Init/ReScan plan...
+ * introduce new nodes for this case and handle SubqueryScan -->
+ * NewNode switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
@@ -202,6 +202,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, Plan *parent)
int
ExecCountSlotsSubqueryScan(SubqueryScan *node)
{
+
/*
* The subplan has its own tuple table and must not be counted here!
*/
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index a5c0299d289..04c9efc4b0a 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.14 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.15 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -371,6 +371,7 @@ ExecTidRestrPos(TidScan *node)
tidstate = node->tidstate;
tidstate->tss_TidPtr = tidstate->tss_MarkTidPtr;
}
+
#endif
/* ----------------------------------------------------------------
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 4c4da6c3034..4aa8c475c30 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -3,17 +3,17 @@
* spi.c
* Server Programming Interface
*
- * $Id: spi.c,v 1.52 2001/02/19 19:49:52 tgl Exp $
+ * $Id: spi.c,v 1.53 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "executor/spi_priv.h"
#include "access/printtup.h"
-uint32 SPI_processed = 0;
-Oid SPI_lastoid = InvalidOid;
+uint32 SPI_processed = 0;
+Oid SPI_lastoid = InvalidOid;
SPITupleTable *SPI_tuptable = NULL;
-int SPI_result;
+int SPI_result;
static _SPI_connection *_SPI_stack = NULL;
static _SPI_connection *_SPI_current = NULL;
@@ -46,6 +46,7 @@ extern void ShowUsage(void);
int
SPI_connect()
{
+
/*
* When procedure called by Executor _SPI_curid expected to be equal
* to _SPI_connected
@@ -80,14 +81,14 @@ SPI_connect()
/* Create memory contexts for this procedure */
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Proc",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
_SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Exec",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
/* ... and switch to procedure's context */
_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);
@@ -146,6 +147,7 @@ SPI_finish()
void
AtEOXact_SPI(void)
{
+
/*
* Note that memory contexts belonging to SPI stack entries will be
* freed automatically, so we can ignore them here. We just need to
@@ -425,8 +427,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
}
/*
- * If we have a toasted datum, forcibly detoast it here to avoid memory
- * leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (typisvarlena)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
@@ -436,7 +438,7 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
result = OidFunctionCall3(foutoid,
val,
ObjectIdGetDatum(typelem),
- Int32GetDatum(tupdesc->attrs[fnumber - 1]->atttypmod));
+ Int32GetDatum(tupdesc->attrs[fnumber - 1]->atttypmod));
/* Clean up detoasted copy, if any */
if (val != origval)
@@ -833,14 +835,13 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
#endif
tupdesc = ExecutorStart(queryDesc, state);
- /* Don't work currently --- need to rearrange callers so that
- * we prepare the portal before doing CreateExecutorState() etc.
- * See pquery.c for the correct order of operations.
+ /*
+ * Don't work currently --- need to rearrange callers so that we
+ * prepare the portal before doing CreateExecutorState() etc. See
+ * pquery.c for the correct order of operations.
*/
if (isRetrieveIntoPortal)
- {
elog(FATAL, "SPI_select: retrieve into portal not implemented");
- }
ExecutorRun(queryDesc, state, EXEC_FOR, (long) tcount);
@@ -901,9 +902,7 @@ _SPI_begin_call(bool execmem)
elog(FATAL, "SPI: stack corrupted");
if (execmem) /* switch to the Executor memory context */
- {
_SPI_execmem();
- }
return 0;
}
diff --git a/src/backend/lib/bit.c b/src/backend/lib/bit.c
index 1b54292a464..57da3522c46 100644
--- a/src/backend/lib/bit.c
+++ b/src/backend/lib/bit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/lib/Attic/bit.c,v 1.12 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/lib/Attic/bit.c,v 1.13 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,14 +22,14 @@ void
BitArraySetBit(BitArray bitArray, BitIndex bitIndex)
{
bitArray[bitIndex / BITS_PER_BYTE] |=
- (1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
+ (1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
}
void
BitArrayClearBit(BitArray bitArray, BitIndex bitIndex)
{
bitArray[bitIndex / BITS_PER_BYTE] &=
- ~(1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
+ ~(1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
}
bool
@@ -37,5 +37,5 @@ BitArrayBitIsSet(BitArray bitArray, BitIndex bitIndex)
{
return ((bitArray[bitIndex / BITS_PER_BYTE] &
(1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)))
- ) != 0);
+ ) != 0);
}
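
The bit.c changes above only re-wrap the shift expressions; the indexing scheme itself numbers bits from the most-significant bit of byte 0. A self-contained sketch of the same arithmetic, without the backend's BitArray/BitIndex typedefs, assuming plain unsigned char storage:

#include <stdio.h>

#define BITS_PER_BYTE 8

/* Set, clear and test bit i, numbering bits from the MSB of byte 0. */
static void
set_bit(unsigned char *a, int i)
{
	a[i / BITS_PER_BYTE] |= (unsigned char) (1 << (BITS_PER_BYTE - 1 - i % BITS_PER_BYTE));
}

static void
clear_bit(unsigned char *a, int i)
{
	a[i / BITS_PER_BYTE] &= (unsigned char) ~(1 << (BITS_PER_BYTE - 1 - i % BITS_PER_BYTE));
}

static int
bit_is_set(const unsigned char *a, int i)
{
	return (a[i / BITS_PER_BYTE] & (1 << (BITS_PER_BYTE - 1 - i % BITS_PER_BYTE))) != 0;
}

int
main(void)
{
	unsigned char arr[2] = {0, 0};

	set_bit(arr, 0);			/* MSB of byte 0: arr[0] becomes 0x80 */
	set_bit(arr, 9);			/* second bit of byte 1: arr[1] becomes 0x40 */
	clear_bit(arr, 0);
	printf("%d %d %02x %02x\n", bit_is_set(arr, 0), bit_is_set(arr, 9), arr[0], arr[1]);
	return 0;
}
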
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index d91fa9a8220..dcb702e9596 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.51 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.52 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ static int map_old_to_new(Port *port, UserAuth old, int status);
static void auth_failed(Port *port);
-char * pg_krb_server_keyfile;
+char *pg_krb_server_keyfile;
#ifdef KRB4
@@ -177,7 +177,7 @@ pg_an_to_ln(char *aname)
 * Various krb5 state which is not connection specific, and a flag to
* indicate whether we have initialised it yet.
*/
-static int pg_krb5_initialised;
+static int pg_krb5_initialised;
static krb5_context pg_krb5_context;
static krb5_keytab pg_krb5_keytab;
static krb5_principal pg_krb5_server;
@@ -192,7 +192,8 @@ pg_krb5_init(void)
return STATUS_OK;
retval = krb5_init_context(&pg_krb5_context);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_init: krb5_init_context returned"
" Kerberos error %d\n", retval);
@@ -201,23 +202,25 @@ pg_krb5_init(void)
}
retval = krb5_kt_resolve(pg_krb5_context, pg_krb_server_keyfile, &pg_krb5_keytab);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_init: krb5_kt_resolve returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval, "while resolving keytab file %s",
+ com_err("postgres", retval, "while resolving keytab file %s",
pg_krb_server_keyfile);
krb5_free_context(pg_krb5_context);
return STATUS_ERROR;
}
- retval = krb5_sname_to_principal(pg_krb5_context, NULL, PG_KRB_SRVNAM,
+ retval = krb5_sname_to_principal(pg_krb5_context, NULL, PG_KRB_SRVNAM,
KRB5_NT_SRV_HST, &pg_krb5_server);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_init: krb5_sname_to_principal returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval,
+ com_err("postgres", retval,
"while getting server principal for service %s",
pg_krb_server_keyfile);
krb5_kt_close(pg_krb5_context, pg_krb5_keytab);
@@ -245,25 +248,26 @@ static int
pg_krb5_recvauth(Port *port)
{
krb5_error_code retval;
- int ret;
+ int ret;
krb5_auth_context auth_context = NULL;
krb5_ticket *ticket;
- char *kusername;
+ char *kusername;
ret = pg_krb5_init();
if (ret != STATUS_OK)
return ret;
retval = krb5_recvauth(pg_krb5_context, &auth_context,
- (krb5_pointer)&port->sock, PG_KRB_SRVNAM,
+ (krb5_pointer) & port->sock, PG_KRB_SRVNAM,
pg_krb5_server, 0, pg_krb5_keytab, &ticket);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_recvauth: krb5_recvauth returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval, "from krb5_recvauth");
+ com_err("postgres", retval, "from krb5_recvauth");
return STATUS_ERROR;
- }
+ }
/*
* The "client" structure comes out of the ticket and is therefore
@@ -272,13 +276,14 @@ pg_krb5_recvauth(Port *port)
*
* I have no idea why this is considered necessary.
*/
- retval = krb5_unparse_name(pg_krb5_context,
+ retval = krb5_unparse_name(pg_krb5_context,
ticket->enc_part2->client, &kusername);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_recvauth: krb5_unparse_name returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval, "while unparsing client name");
+ com_err("postgres", retval, "while unparsing client name");
krb5_free_ticket(pg_krb5_context, ticket);
krb5_auth_con_free(pg_krb5_context, auth_context);
return STATUS_ERROR;
@@ -288,13 +293,13 @@ pg_krb5_recvauth(Port *port)
if (strncmp(port->user, kusername, SM_USER))
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "pg_krb5_recvauth: user name \"%s\" != krb5 name \"%s\"\n",
+ "pg_krb5_recvauth: user name \"%s\" != krb5 name \"%s\"\n",
port->user, kusername);
ret = STATUS_ERROR;
}
else
ret = STATUS_OK;
-
+
krb5_free_ticket(pg_krb5_context, ticket);
krb5_auth_con_free(pg_krb5_context, auth_context);
free(kusername);
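
pg_krb5_init() above follows a lazy, once-per-process initialisation pattern guarded by pg_krb5_initialised, failing softly with STATUS_ERROR so a later call can retry. A stripped-down sketch of that shape, where acquire_resource() is a hypothetical stand-in for the krb5_init_context()/krb5_kt_resolve() sequence:

#include <stdio.h>

#define STATUS_OK		0
#define STATUS_ERROR	(-1)

static int	resource_initialised = 0;

/* Hypothetical stand-in for the krb5 setup calls. */
static int
acquire_resource(void)
{
	return 0;
}

static int
lazy_init(void)
{
	if (resource_initialised)
		return STATUS_OK;		/* already done in this process */

	if (acquire_resource() != 0)
	{
		fprintf(stderr, "lazy_init: acquisition failed\n");
		return STATUS_ERROR;	/* flag stays unset, so a later call retries */
	}

	resource_initialised = 1;
	return STATUS_OK;
}

int
main(void)
{
	return lazy_init() == STATUS_OK ? 0 : 1;
}
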
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 1d5870e93f4..4d50ee1ae12 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.57 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.58 2001/03/22 03:59:30 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -60,7 +60,7 @@
* entries, of which any unused entries will be NULL.
*/
static LargeObjectDesc **cookies = NULL;
-static int cookies_size = 0;
+static int cookies_size = 0;
static MemoryContext fscxt = NULL;
@@ -329,10 +329,10 @@ loread(PG_FUNCTION_ARGS)
Datum
lowrite(PG_FUNCTION_ARGS)
{
- int32 fd = PG_GETARG_INT32(0);
+ int32 fd = PG_GETARG_INT32(0);
struct varlena *wbuf = PG_GETARG_VARLENA_P(1);
- int bytestowrite;
- int totalwritten;
+ int bytestowrite;
+ int totalwritten;
bytestowrite = VARSIZE(wbuf) - VARHDRSZ;
totalwritten = lo_write(fd, VARDATA(wbuf), bytestowrite);
@@ -371,7 +371,7 @@ lo_import(PG_FUNCTION_ARGS)
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
- nbytes = MAXPGPATH-1;
+ nbytes = MAXPGPATH - 1;
memcpy(fnamebuf, VARDATA(filename), nbytes);
fnamebuf[nbytes] = '\0';
fd = PathNameOpenFile(fnamebuf, O_RDONLY | PG_BINARY, 0666);
@@ -445,7 +445,7 @@ lo_export(PG_FUNCTION_ARGS)
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
- nbytes = MAXPGPATH-1;
+ nbytes = MAXPGPATH - 1;
memcpy(fnamebuf, VARDATA(filename), nbytes);
fnamebuf[nbytes] = '\0';
oumask = umask((mode_t) 0022);
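
lo_import() and lo_export() above copy a counted varlena filename into a MAXPGPATH-sized buffer, truncating and NUL-terminating by hand. The same move in a standalone sketch (the MAXPGPATH value here is an assumption; the backend defines its own):

#include <stdio.h>
#include <string.h>

#define MAXPGPATH 1024			/* assumption; the backend defines its own value */

/*
 * Copy a counted (not NUL-terminated) byte range into a bounded,
 * NUL-terminated path buffer, truncating if necessary.
 */
static void
copy_bounded_path(char *dst, const char *src, size_t srclen)
{
	if (srclen >= MAXPGPATH)
		srclen = MAXPGPATH - 1;
	memcpy(dst, src, srclen);
	dst[srclen] = '\0';
}

int
main(void)
{
	char		fnamebuf[MAXPGPATH];
	const char	name[] = {'/', 't', 'm', 'p', '/', 'x'};	/* no trailing NUL */

	copy_bounded_path(fnamebuf, name, sizeof(name));
	printf("%s\n", fnamebuf);	/* prints /tmp/x */
	return 0;
}
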
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index 325056ab1ef..59aef0d514d 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -9,7 +9,7 @@
* Dec 17, 1997 - Todd A. Brandys
 * Original Version Completed.
*
- * $Id: crypt.c,v 1.30 2001/02/07 23:31:38 tgl Exp $
+ * $Id: crypt.c,v 1.31 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -295,7 +295,7 @@ crypt_verify(const Port *port, const char *user, const char *pgpass)
vuntil = INVALID_ABSTIME;
else
vuntil = DatumGetAbsoluteTime(DirectFunctionCall1(nabstimein,
- CStringGetDatum(valuntil)));
+ CStringGetDatum(valuntil)));
current = GetCurrentAbsoluteTime();
if (vuntil != INVALID_ABSTIME && vuntil < current)
retval = STATUS_ERROR;
diff --git a/src/backend/libpq/password.c b/src/backend/libpq/password.c
index 856c3028800..77b09be18a4 100644
--- a/src/backend/libpq/password.c
+++ b/src/backend/libpq/password.c
@@ -2,7 +2,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: password.c,v 1.35 2001/01/24 19:42:56 momjian Exp $
+ * $Id: password.c,v 1.36 2001/03/22 03:59:30 momjian Exp $
*
*/
@@ -37,7 +37,7 @@ verify_password(const Port *port, const char *user, const char *password)
if (!pw_file)
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "verify_password: Unable to open password file \"%s\": %s\n",
+ "verify_password: Unable to open password file \"%s\": %s\n",
pw_file_fullname, strerror(errno));
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@@ -77,12 +77,12 @@ verify_password(const Port *port, const char *user, const char *password)
/*
	 * If the password is empty or "+" then we use the regular
- * pg_shadow passwords. If we use crypt then we have to
- * use pg_shadow passwords no matter what.
+ * pg_shadow passwords. If we use crypt then we have to use
+ * pg_shadow passwords no matter what.
*/
if (port->auth_method == uaCrypt
|| test_pw == NULL || test_pw[0] == '\0'
- || strcmp(test_pw, "+")==0)
+ || strcmp(test_pw, "+") == 0)
return crypt_verify(port, user, password);
if (strcmp(crypt(password, test_pw), test_pw) == 0)
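
The final check above relies on crypt(3): the stored hash doubles as the salt, and a match means the recomputed hash equals what is stored. A standalone sketch of that comparison; note that some platforms declare crypt() in <crypt.h> and need -lcrypt, and DES-style salts may be unavailable, in which case crypt() returns NULL:

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <string.h>
#include <unistd.h>				/* crypt() on many platforms; others use <crypt.h> */

/* The stored hash doubles as the salt: recompute and compare. */
static int
password_matches(const char *cleartext, const char *stored_hash)
{
	const char *computed = crypt(cleartext, stored_hash);

	return computed != NULL && strcmp(computed, stored_hash) == 0;
}

int
main(void)
{
	const char *hash = crypt("secret", "ab");	/* DES-style salt; may be NULL if disabled */

	printf("%d\n", hash ? password_matches("secret", hash) : -1);
	return 0;
}
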
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 7a20d66f7e1..e3250862363 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -29,7 +29,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: pqcomm.c,v 1.116 2001/01/24 19:42:56 momjian Exp $
+ * $Id: pqcomm.c,v 1.117 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,7 +71,7 @@
#include <netdb.h>
#include <netinet/in.h>
#ifdef HAVE_NETINET_TCP_H
-# include <netinet/tcp.h>
+#include <netinet/tcp.h>
#endif
#include <arpa/inet.h>
#include <sys/file.h>
@@ -91,8 +91,8 @@ static void pq_close(void);
/*
* Configuration options
*/
-int Unix_socket_permissions;
-char * Unix_socket_group;
+int Unix_socket_permissions;
+char *Unix_socket_group;
/*
@@ -223,47 +223,49 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
UNIXSOCK_PATH(saddr.un, portNumber, unixSocketName);
len = UNIXSOCK_LEN(saddr.un);
strcpy(sock_path, saddr.un.sun_path);
+
/*
* Grab an interlock file associated with the socket file.
*/
- if (! CreateSocketLockFile(sock_path, true))
+ if (!CreateSocketLockFile(sock_path, true))
return STATUS_ERROR;
+
/*
- * Once we have the interlock, we can safely delete any pre-existing
- * socket file to avoid failure at bind() time.
+ * Once we have the interlock, we can safely delete any
+ * pre-existing socket file to avoid failure at bind() time.
*/
unlink(sock_path);
}
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
- if (family == AF_INET)
- {
+ if (family == AF_INET)
+ {
/* TCP/IP socket */
if (hostName[0] == '\0')
- saddr.in.sin_addr.s_addr = htonl(INADDR_ANY);
+ saddr.in.sin_addr.s_addr = htonl(INADDR_ANY);
else
- {
+ {
struct hostent *hp;
-
+
hp = gethostbyname(hostName);
if ((hp == NULL) || (hp->h_addrtype != AF_INET))
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "FATAL: StreamServerPort: gethostbyname(%s) failed\n",
- hostName);
- fputs(PQerrormsg, stderr);
- pqdebug("%s", PQerrormsg);
+ "FATAL: StreamServerPort: gethostbyname(%s) failed\n",
+ hostName);
+ fputs(PQerrormsg, stderr);
+ pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
}
memmove((char *) &(saddr.in.sin_addr), (char *) hp->h_addr,
hp->h_length);
}
-
+
saddr.in.sin_port = htons(portNumber);
len = sizeof(struct sockaddr_in);
}
- err = bind(fd, (struct sockaddr *)&saddr.sa, len);
+ err = bind(fd, (struct sockaddr *) & saddr.sa, len);
if (err < 0)
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
@@ -291,16 +293,16 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
on_proc_exit(StreamDoUnlink, 0);
/*
- * Fix socket ownership/permission if requested. Note we must
- * do this before we listen() to avoid a window where unwanted
+ * Fix socket ownership/permission if requested. Note we must do
+ * this before we listen() to avoid a window where unwanted
* connections could get accepted.
*/
Assert(Unix_socket_group);
if (Unix_socket_group[0] != '\0')
{
- char *endptr;
+ char *endptr;
unsigned long int val;
- gid_t gid;
+ gid_t gid;
val = strtoul(Unix_socket_group, &endptr, 10);
if (*endptr == '\0')
@@ -346,7 +348,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
return STATUS_ERROR;
}
}
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
listen(fd, SOMAXCONN);
@@ -385,9 +387,10 @@ StreamConnection(int server_fd, Port *port)
}
#ifdef SCO_ACCEPT_BUG
+
/*
- * UnixWare 7+ and OpenServer 5.0.4 are known to have this bug,
- * but it shouldn't hurt it catch if for all of them.
+ * UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
+	 * shouldn't hurt to catch it for all of them.
*/
if (port->raddr.sa.sa_family == 0)
port->raddr.sa.sa_family = AF_UNIX;
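
The Unix_socket_group handling above parses the setting with strtoul() and inspects *endptr to decide whether it was purely numeric. A sketch of that numeric-or-name resolution; the getgrnam() fallback shown here is an assumption about code outside the hunk, though it is the standard call for the job:

#include <grp.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

/* Resolve a group setting that may be either a numeric GID or a group name. */
static int
resolve_group(const char *spec, gid_t *gid_out)
{
	char	   *endptr;
	unsigned long val = strtoul(spec, &endptr, 10);

	if (*spec != '\0' && *endptr == '\0')
	{
		*gid_out = (gid_t) val;	/* purely numeric: use it directly */
		return 0;
	}
	else
	{
		struct group *gr = getgrnam(spec);

		if (gr == NULL)
			return -1;			/* unknown group name */
		*gid_out = gr->gr_gid;
		return 0;
	}
}

int
main(void)
{
	gid_t		gid;

	if (resolve_group("0", &gid) == 0)
		printf("resolved gid %ld\n", (long) gid);
	return 0;
}
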
diff --git a/src/backend/libpq/pqpacket.c b/src/backend/libpq/pqpacket.c
index 5e99b148f8e..5f9d3cdb48f 100644
--- a/src/backend/libpq/pqpacket.c
+++ b/src/backend/libpq/pqpacket.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.28 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.29 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,8 @@ PacketReceiveFragment(Port *port)
#ifndef __BEOS__
got = read(port->sock, pkt->ptr, pkt->nrtodo);
#else
- got = recv(port->sock, pkt->ptr, pkt->nrtodo, 0);
-#endif /* __BEOS__ */
+ got = recv(port->sock, pkt->ptr, pkt->nrtodo, 0);
+#endif /* __BEOS__ */
if (got > 0)
{
pkt->nrtodo -= got;
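
PacketReceiveFragment() above does partial-read bookkeeping: each successful read()/recv() shrinks pkt->nrtodo until the packet is complete. A standalone sketch that collapses the same bookkeeping into a blocking loop (the real function returns after one fragment so the caller can select() again):

#include <errno.h>
#include <unistd.h>

/* Read exactly len bytes, tolerating short reads and EINTR. */
static int
read_fully(int fd, char *buf, size_t len)
{
	size_t		nrtodo = len;

	while (nrtodo > 0)
	{
		ssize_t		got = read(fd, buf + (len - nrtodo), nrtodo);

		if (got > 0)
			nrtodo -= (size_t) got;
		else if (got == 0)
			return -1;			/* EOF before the buffer was filled */
		else if (errno != EINTR)
			return -1;			/* real error; EINTR just retries */
	}
	return 0;
}

int
main(void)
{
	char		buf[4];

	return read_fully(0, buf, sizeof(buf)) == 0 ? 0 : 1;
}
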
diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c
index 668d5f996dd..8cc8f140ace 100644
--- a/src/backend/libpq/pqsignal.c
+++ b/src/backend/libpq/pqsignal.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/pqsignal.c,v 1.19 2001/02/10 02:31:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/pqsignal.c,v 1.20 2001/03/22 03:59:30 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
@@ -61,10 +61,11 @@ pqinitmask(void)
#ifdef HAVE_SIGPROCMASK
sigemptyset(&UnBlockSig);
sigfillset(&BlockSig);
+
/*
- * Unmark those signals that should never be blocked.
- * Some of these signal names don't exist on all platforms. Most do,
- * but might as well ifdef them all for consistency...
+ * Unmark those signals that should never be blocked. Some of these
+ * signal names don't exist on all platforms. Most do, but might as
+ * well ifdef them all for consistency...
*/
#ifdef SIGTRAP
sigdelset(&BlockSig, SIGTRAP);
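
pqinitmask() above builds BlockSig by filling the set and then deleting the signals that must stay deliverable. A runnable sketch of that fill-then-punch-holes pattern; the SIGTERM line is only an example, not the backend's actual exception list:

#include <signal.h>
#include <stdio.h>

int
main(void)
{
	sigset_t	BlockSig;

	sigfillset(&BlockSig);			/* start by blocking everything */
#ifdef SIGTRAP
	sigdelset(&BlockSig, SIGTRAP);
#endif
	sigdelset(&BlockSig, SIGTERM);	/* example only */

	printf("SIGTERM blocked? %d\n", sigismember(&BlockSig, SIGTERM));
	return 0;
}
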
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index b8c6cffdf64..46e03d8e0ab 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.41 2001/02/06 17:00:01 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.42 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,17 +53,17 @@ main(int argc, char *argv[])
{
int len;
struct passwd *pw;
- char * pw_name_persist;
+ char *pw_name_persist;
/*
- * Place platform-specific startup hacks here. This is the right
- * place to put code that must be executed early in launch of either
- * a postmaster, a standalone backend, or a standalone bootstrap run.
+ * Place platform-specific startup hacks here. This is the right
+ * place to put code that must be executed early in launch of either a
+ * postmaster, a standalone backend, or a standalone bootstrap run.
* Note that this code will NOT be executed when a backend or
* sub-bootstrap run is forked by the postmaster.
*
- * XXX The need for code here is proof that the platform in question
- * is too brain-dead to provide a standard C execution environment
+ * XXX The need for code here is proof that the platform in question is
+ * too brain-dead to provide a standard C execution environment
* without help. Avoid adding more here, if you can.
*/
@@ -76,7 +76,7 @@ main(int argc, char *argv[])
int buffer[] = {SSIN_UACPROC, UAC_NOPRINT};
#endif /* NOPRINTADE */
-#endif /* __alpha */
+#endif /* __alpha */
#if defined(NOFIXADE) || defined(NOPRINTADE)
@@ -93,38 +93,39 @@ main(int argc, char *argv[])
#endif /* NOFIXADE || NOPRINTADE */
#ifdef __BEOS__
- /* BeOS-specific actions on startup */
- beos_startup(argc,argv);
+ /* BeOS-specific actions on startup */
+ beos_startup(argc, argv);
#endif
/*
- * Not-quite-so-platform-specific startup environment checks.
- * Still best to minimize these.
+ * Not-quite-so-platform-specific startup environment checks. Still
+ * best to minimize these.
*/
/*
* Make sure we are not running as root.
*
- * BeOS currently runs everything as root :-(, so this check must
- * be temporarily disabled there...
- */
+ * BeOS currently runs everything as root :-(, so this check must be
+ * temporarily disabled there...
+ */
#ifndef __BEOS__
if (!(argc > 1
- && ( strcmp(argv[1], "--help")==0 || strcmp(argv[1], "-?")==0
- || strcmp(argv[1], "--version")==0 || strcmp(argv[1], "-V")==0 ))
- && (geteuid() == 0) )
+ && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0
+ || strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0))
+ && (geteuid() == 0))
{
fprintf(stderr, "%s", NOROOTEXEC);
exit(1);
}
-#endif /* __BEOS__ */
+#endif /* __BEOS__ */
/*
* Set up locale information from environment, in only the categories
* needed by Postgres; leave other categories set to default "C".
- * (Note that CTYPE and COLLATE will be overridden later from pg_control
- * if we are in an already-initialized database. We set them here so
- * that they will be available to fill pg_control during initdb.)
+ * (Note that CTYPE and COLLATE will be overridden later from
+ * pg_control if we are in an already-initialized database. We set
+ * them here so that they will be available to fill pg_control during
+ * initdb.)
*/
#ifdef USE_LOCALE
setlocale(LC_CTYPE, "");
@@ -133,9 +134,10 @@ main(int argc, char *argv[])
#endif
/*
- * Now dispatch to one of PostmasterMain, PostgresMain, or BootstrapMain
- * depending on the program name (and possibly first argument) we
- * were called with. The lack of consistency here is historical.
+ * Now dispatch to one of PostmasterMain, PostgresMain, or
+ * BootstrapMain depending on the program name (and possibly first
+ * argument) we were called with. The lack of consistency here is
+ * historical.
*/
len = strlen(argv[0]);
@@ -146,15 +148,16 @@ main(int argc, char *argv[])
}
/*
- * If the first argument is "-boot", then invoke bootstrap mode.
- * Note we remove "-boot" from the arguments passed on to BootstrapMain.
+ * If the first argument is "-boot", then invoke bootstrap mode. Note
+ * we remove "-boot" from the arguments passed on to BootstrapMain.
*/
if (argc > 1 && strcmp(argv[1], "-boot") == 0)
exit(BootstrapMain(argc - 1, argv + 1));
/*
* Otherwise we're a standalone backend. Invoke PostgresMain,
- * specifying current userid as the "authenticated" Postgres user name.
+ * specifying current userid as the "authenticated" Postgres user
+ * name.
*/
pw = getpwuid(geteuid());
if (pw == NULL)
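
main() above dispatches to PostmasterMain, PostgresMain or BootstrapMain based on the program name and, for bootstrap, the first argument. A sketch of the program-name part of that dispatch; the exact suffix test is not visible in this hunk, so the "postmaster" check below is an assumption:

#include <stdio.h>
#include <string.h>

/* Pick an entry point from the trailing component of argv[0]. */
static const char *
pick_entry_point(const char *argv0)
{
	size_t		len = strlen(argv0);

	if (len >= 10 && strcmp(argv0 + len - 10, "postmaster") == 0)
		return "PostmasterMain";
	return "PostgresMain";
}

int
main(int argc, char *argv[])
{
	printf("%s\n", pick_entry_point(argc > 0 ? argv[0] : ""));
	return 0;
}
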
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index f7127443082..3dc2bf0373a 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -6,7 +6,7 @@
* NOTE: a general convention when copying or comparing plan nodes is
* that we ignore the executor state subnode. We do not need to look
* at it because no current uses of copyObject() or equal() need to
- * deal with already-executing plan trees. By leaving the state subnodes
+ * deal with already-executing plan trees. By leaving the state subnodes
* out, we avoid needing to write copy/compare routines for all the
* different executor state node types.
*
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.138 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.139 2001/03/22 03:59:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -305,7 +305,7 @@ _copyTidScan(TidScan *from)
static SubqueryScan *
_copySubqueryScan(SubqueryScan *from)
{
- SubqueryScan *newnode = makeNode(SubqueryScan);
+ SubqueryScan *newnode = makeNode(SubqueryScan);
/* ----------------
* copy node superclass fields
@@ -339,7 +339,7 @@ CopyJoinFields(Join *from, Join *newnode)
/* subPlan list must point to subplans in the new subtree, not the old */
if (from->plan.subPlan != NIL)
newnode->plan.subPlan = nconc(newnode->plan.subPlan,
- pull_subplans((Node *) newnode->joinqual));
+ pull_subplans((Node *) newnode->joinqual));
}
@@ -991,7 +991,7 @@ _copyRangeTblRef(RangeTblRef *from)
static FromExpr *
_copyFromExpr(FromExpr *from)
{
- FromExpr *newnode = makeNode(FromExpr);
+ FromExpr *newnode = makeNode(FromExpr);
Node_Copy(from, newnode, fromlist);
Node_Copy(from, newnode, quals);
@@ -1002,7 +1002,7 @@ _copyFromExpr(FromExpr *from)
static JoinExpr *
_copyJoinExpr(JoinExpr *from)
{
- JoinExpr *newnode = makeNode(JoinExpr);
+ JoinExpr *newnode = makeNode(JoinExpr);
newnode->jointype = from->jointype;
newnode->isNatural = from->isNatural;
@@ -1281,7 +1281,7 @@ _copyTidPath(TidPath *from)
static AppendPath *
_copyAppendPath(AppendPath *from)
{
- AppendPath *newnode = makeNode(AppendPath);
+ AppendPath *newnode = makeNode(AppendPath);
/* ----------------
* copy the node superclass fields
@@ -1424,7 +1424,11 @@ _copyRestrictInfo(RestrictInfo *from)
newnode->mergejoinoperator = from->mergejoinoperator;
newnode->left_sortop = from->left_sortop;
newnode->right_sortop = from->right_sortop;
- /* Do not copy pathkeys, since they'd not be canonical in a copied query */
+
+ /*
+ * Do not copy pathkeys, since they'd not be canonical in a copied
+ * query
+ */
newnode->left_pathkey = NIL;
newnode->right_pathkey = NIL;
newnode->hashjoinoperator = from->hashjoinoperator;
@@ -1525,7 +1529,7 @@ _copyRangeTblEntry(RangeTblEntry *from)
static FkConstraint *
_copyFkConstraint(FkConstraint *from)
{
- FkConstraint *newnode = makeNode(FkConstraint);
+ FkConstraint *newnode = makeNode(FkConstraint);
if (from->constr_name)
newnode->constr_name = pstrdup(from->constr_name);
@@ -1538,7 +1542,7 @@ _copyFkConstraint(FkConstraint *from)
newnode->actions = from->actions;
newnode->deferrable = from->deferrable;
newnode->initdeferred = from->initdeferred;
-
+
return newnode;
}
@@ -1556,7 +1560,7 @@ _copySortClause(SortClause *from)
static A_Expr *
_copyAExpr(A_Expr *from)
{
- A_Expr *newnode = makeNode(A_Expr);
+ A_Expr *newnode = makeNode(A_Expr);
newnode->oper = from->oper;
if (from->opname)
@@ -1593,7 +1597,7 @@ _copyParamNo(ParamNo *from)
static Ident *
_copyIdent(Ident *from)
{
- Ident *newnode = makeNode(Ident);
+ Ident *newnode = makeNode(Ident);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -1606,7 +1610,7 @@ _copyIdent(Ident *from)
static FuncCall *
_copyFuncCall(FuncCall *from)
{
- FuncCall *newnode = makeNode(FuncCall);
+ FuncCall *newnode = makeNode(FuncCall);
if (from->funcname)
newnode->funcname = pstrdup(from->funcname);
@@ -1620,7 +1624,7 @@ _copyFuncCall(FuncCall *from)
static A_Indices *
_copyAIndices(A_Indices *from)
{
- A_Indices *newnode = makeNode(A_Indices);
+ A_Indices *newnode = makeNode(A_Indices);
Node_Copy(from, newnode, lidx);
Node_Copy(from, newnode, uidx);
@@ -1631,7 +1635,7 @@ _copyAIndices(A_Indices *from)
static ResTarget *
_copyResTarget(ResTarget *from)
{
- ResTarget *newnode = makeNode(ResTarget);
+ ResTarget *newnode = makeNode(ResTarget);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -1659,7 +1663,7 @@ _copyTypeName(TypeName *from)
static SortGroupBy *
_copySortGroupBy(SortGroupBy *from)
{
- SortGroupBy *newnode = makeNode(SortGroupBy);
+ SortGroupBy *newnode = makeNode(SortGroupBy);
if (from->useOp)
newnode->useOp = pstrdup(from->useOp);
@@ -1684,7 +1688,7 @@ _copyRangeVar(RangeVar *from)
static RangeSubselect *
_copyRangeSubselect(RangeSubselect *from)
{
- RangeSubselect *newnode = makeNode(RangeSubselect);
+ RangeSubselect *newnode = makeNode(RangeSubselect);
Node_Copy(from, newnode, subquery);
Node_Copy(from, newnode, name);
@@ -1706,7 +1710,7 @@ _copyTypeCast(TypeCast *from)
static IndexElem *
_copyIndexElem(IndexElem *from)
{
- IndexElem *newnode = makeNode(IndexElem);
+ IndexElem *newnode = makeNode(IndexElem);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -1720,7 +1724,7 @@ _copyIndexElem(IndexElem *from)
static ColumnDef *
_copyColumnDef(ColumnDef *from)
{
- ColumnDef *newnode = makeNode(ColumnDef);
+ ColumnDef *newnode = makeNode(ColumnDef);
if (from->colname)
newnode->colname = pstrdup(from->colname);
@@ -1738,7 +1742,7 @@ _copyColumnDef(ColumnDef *from)
static Constraint *
_copyConstraint(Constraint *from)
{
- Constraint *newnode = makeNode(Constraint);
+ Constraint *newnode = makeNode(Constraint);
newnode->contype = from->contype;
if (from->name)
@@ -1754,7 +1758,7 @@ _copyConstraint(Constraint *from)
static DefElem *
_copyDefElem(DefElem *from)
{
- DefElem *newnode = makeNode(DefElem);
+ DefElem *newnode = makeNode(DefElem);
if (from->defname)
newnode->defname = pstrdup(from->defname);
@@ -1811,7 +1815,7 @@ static InsertStmt *
_copyInsertStmt(InsertStmt *from)
{
InsertStmt *newnode = makeNode(InsertStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, cols);
@@ -1825,7 +1829,7 @@ static DeleteStmt *
_copyDeleteStmt(DeleteStmt *from)
{
DeleteStmt *newnode = makeNode(DeleteStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, whereClause);
@@ -1838,7 +1842,7 @@ static UpdateStmt *
_copyUpdateStmt(UpdateStmt *from)
{
UpdateStmt *newnode = makeNode(UpdateStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, targetList);
@@ -1853,7 +1857,7 @@ static SelectStmt *
_copySelectStmt(SelectStmt *from)
{
SelectStmt *newnode = makeNode(SelectStmt);
-
+
Node_Copy(from, newnode, distinctClause);
if (from->into)
newnode->into = pstrdup(from->into);
@@ -1882,7 +1886,7 @@ static SetOperationStmt *
_copySetOperationStmt(SetOperationStmt *from)
{
SetOperationStmt *newnode = makeNode(SetOperationStmt);
-
+
newnode->op = from->op;
newnode->all = from->all;
Node_Copy(from, newnode, larg);
@@ -1896,7 +1900,7 @@ static AlterTableStmt *
_copyAlterTableStmt(AlterTableStmt *from)
{
AlterTableStmt *newnode = makeNode(AlterTableStmt);
-
+
newnode->subtype = from->subtype;
if (from->relname)
newnode->relname = pstrdup(from->relname);
@@ -1913,7 +1917,7 @@ static ChangeACLStmt *
_copyChangeACLStmt(ChangeACLStmt *from)
{
ChangeACLStmt *newnode = makeNode(ChangeACLStmt);
-
+
Node_Copy(from, newnode, relNames);
if (from->aclString)
newnode->aclString = pstrdup(from->aclString);
@@ -1936,7 +1940,7 @@ static ClusterStmt *
_copyClusterStmt(ClusterStmt *from)
{
ClusterStmt *newnode = makeNode(ClusterStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
if (from->indexname)
@@ -1948,8 +1952,8 @@ _copyClusterStmt(ClusterStmt *from)
static CopyStmt *
_copyCopyStmt(CopyStmt *from)
{
- CopyStmt *newnode = makeNode(CopyStmt);
-
+ CopyStmt *newnode = makeNode(CopyStmt);
+
newnode->binary = from->binary;
if (from->relname)
newnode->relname = pstrdup(from->relname);
@@ -1969,7 +1973,7 @@ static CreateStmt *
_copyCreateStmt(CreateStmt *from)
{
CreateStmt *newnode = makeNode(CreateStmt);
-
+
newnode->istemp = from->istemp;
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, tableElts);
@@ -1983,7 +1987,7 @@ static VersionStmt *
_copyVersionStmt(VersionStmt *from)
{
VersionStmt *newnode = makeNode(VersionStmt);
-
+
newnode->relname = pstrdup(from->relname);
newnode->direction = from->direction;
newnode->fromRelname = pstrdup(from->fromRelname);
@@ -1996,7 +2000,7 @@ static DefineStmt *
_copyDefineStmt(DefineStmt *from)
{
DefineStmt *newnode = makeNode(DefineStmt);
-
+
newnode->defType = from->defType;
newnode->defname = pstrdup(from->defname);
Node_Copy(from, newnode, definition);
@@ -2007,8 +2011,8 @@ _copyDefineStmt(DefineStmt *from)
static DropStmt *
_copyDropStmt(DropStmt *from)
{
- DropStmt *newnode = makeNode(DropStmt);
-
+ DropStmt *newnode = makeNode(DropStmt);
+
Node_Copy(from, newnode, names);
newnode->removeType = from->removeType;
@@ -2029,11 +2033,11 @@ static CommentStmt *
_copyCommentStmt(CommentStmt *from)
{
CommentStmt *newnode = makeNode(CommentStmt);
-
+
newnode->objtype = from->objtype;
newnode->objname = pstrdup(from->objname);
if (from->objproperty)
- newnode->objproperty = pstrdup(from->objproperty);
+ newnode->objproperty = pstrdup(from->objproperty);
Node_Copy(from, newnode, objlist);
newnode->comment = pstrdup(from->comment);
@@ -2044,7 +2048,7 @@ static ExtendStmt *
_copyExtendStmt(ExtendStmt *from)
{
ExtendStmt *newnode = makeNode(ExtendStmt);
-
+
newnode->idxname = pstrdup(from->idxname);
Node_Copy(from, newnode, whereClause);
Node_Copy(from, newnode, rangetable);
@@ -2055,8 +2059,8 @@ _copyExtendStmt(ExtendStmt *from)
static FetchStmt *
_copyFetchStmt(FetchStmt *from)
{
- FetchStmt *newnode = makeNode(FetchStmt);
-
+ FetchStmt *newnode = makeNode(FetchStmt);
+
newnode->direction = from->direction;
newnode->howMany = from->howMany;
newnode->portalname = pstrdup(from->portalname);
@@ -2068,8 +2072,8 @@ _copyFetchStmt(FetchStmt *from)
static IndexStmt *
_copyIndexStmt(IndexStmt *from)
{
- IndexStmt *newnode = makeNode(IndexStmt);
-
+ IndexStmt *newnode = makeNode(IndexStmt);
+
newnode->idxname = pstrdup(from->idxname);
newnode->relname = pstrdup(from->relname);
newnode->accessMethod = pstrdup(from->accessMethod);
@@ -2087,7 +2091,7 @@ static ProcedureStmt *
_copyProcedureStmt(ProcedureStmt *from)
{
ProcedureStmt *newnode = makeNode(ProcedureStmt);
-
+
newnode->funcname = pstrdup(from->funcname);
Node_Copy(from, newnode, argTypes);
Node_Copy(from, newnode, returnType);
@@ -2102,7 +2106,7 @@ static RemoveAggrStmt *
_copyRemoveAggrStmt(RemoveAggrStmt *from)
{
RemoveAggrStmt *newnode = makeNode(RemoveAggrStmt);
-
+
newnode->aggname = pstrdup(from->aggname);
Node_Copy(from, newnode, aggtype);
@@ -2113,7 +2117,7 @@ static RemoveFuncStmt *
_copyRemoveFuncStmt(RemoveFuncStmt *from)
{
RemoveFuncStmt *newnode = makeNode(RemoveFuncStmt);
-
+
newnode->funcname = pstrdup(from->funcname);
Node_Copy(from, newnode, args);
@@ -2124,7 +2128,7 @@ static RemoveOperStmt *
_copyRemoveOperStmt(RemoveOperStmt *from)
{
RemoveOperStmt *newnode = makeNode(RemoveOperStmt);
-
+
newnode->opname = pstrdup(from->opname);
Node_Copy(from, newnode, args);
@@ -2135,7 +2139,7 @@ static RenameStmt *
_copyRenameStmt(RenameStmt *from)
{
RenameStmt *newnode = makeNode(RenameStmt);
-
+
newnode->relname = pstrdup(from->relname);
newnode->inhOpt = from->inhOpt;
if (from->column)
@@ -2149,8 +2153,8 @@ _copyRenameStmt(RenameStmt *from)
static RuleStmt *
_copyRuleStmt(RuleStmt *from)
{
- RuleStmt *newnode = makeNode(RuleStmt);
-
+ RuleStmt *newnode = makeNode(RuleStmt);
+
newnode->rulename = pstrdup(from->rulename);
Node_Copy(from, newnode, whereClause);
newnode->event = from->event;
@@ -2231,7 +2235,7 @@ _copyLoadStmt(LoadStmt *from)
static CreatedbStmt *
_copyCreatedbStmt(CreatedbStmt *from)
{
- CreatedbStmt *newnode = makeNode(CreatedbStmt);
+ CreatedbStmt *newnode = makeNode(CreatedbStmt);
if (from->dbname)
newnode->dbname = pstrdup(from->dbname);
@@ -2247,7 +2251,7 @@ _copyCreatedbStmt(CreatedbStmt *from)
static DropdbStmt *
_copyDropdbStmt(DropdbStmt *from)
{
- DropdbStmt *newnode = makeNode(DropdbStmt);
+ DropdbStmt *newnode = makeNode(DropdbStmt);
if (from->dbname)
newnode->dbname = pstrdup(from->dbname);
@@ -2258,7 +2262,7 @@ _copyDropdbStmt(DropdbStmt *from)
static VacuumStmt *
_copyVacuumStmt(VacuumStmt *from)
{
- VacuumStmt *newnode = makeNode(VacuumStmt);
+ VacuumStmt *newnode = makeNode(VacuumStmt);
newnode->verbose = from->verbose;
newnode->analyze = from->analyze;
@@ -2272,7 +2276,7 @@ _copyVacuumStmt(VacuumStmt *from)
static ExplainStmt *
_copyExplainStmt(ExplainStmt *from)
{
- ExplainStmt *newnode = makeNode(ExplainStmt);
+ ExplainStmt *newnode = makeNode(ExplainStmt);
Node_Copy(from, newnode, query);
newnode->verbose = from->verbose;
@@ -2283,7 +2287,7 @@ _copyExplainStmt(ExplainStmt *from)
static CreateSeqStmt *
_copyCreateSeqStmt(CreateSeqStmt *from)
{
- CreateSeqStmt *newnode = makeNode(CreateSeqStmt);
+ CreateSeqStmt *newnode = makeNode(CreateSeqStmt);
if (from->seqname)
newnode->seqname = pstrdup(from->seqname);
@@ -2346,6 +2350,7 @@ _copyCreateTrigStmt(CreateTrigStmt *from)
newnode->lang = pstrdup(from->lang);
if (from->text)
newnode->text = pstrdup(from->text);
+
Node_Copy(from, newnode, attr);
if (from->when)
newnode->when = pstrdup(from->when);
@@ -2459,7 +2464,7 @@ _copyLockStmt(LockStmt *from)
static ConstraintsSetStmt *
_copyConstraintsSetStmt(ConstraintsSetStmt *from)
{
- ConstraintsSetStmt *newnode = makeNode(ConstraintsSetStmt);
+ ConstraintsSetStmt *newnode = makeNode(ConstraintsSetStmt);
Node_Copy(from, newnode, constraints);
newnode->deferred = from->deferred;
@@ -2470,7 +2475,7 @@ _copyConstraintsSetStmt(ConstraintsSetStmt *from)
static CreateGroupStmt *
_copyCreateGroupStmt(CreateGroupStmt *from)
{
- CreateGroupStmt *newnode = makeNode(CreateGroupStmt);
+ CreateGroupStmt *newnode = makeNode(CreateGroupStmt);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -2483,7 +2488,7 @@ _copyCreateGroupStmt(CreateGroupStmt *from)
static AlterGroupStmt *
_copyAlterGroupStmt(AlterGroupStmt *from)
{
- AlterGroupStmt *newnode = makeNode(AlterGroupStmt);
+ AlterGroupStmt *newnode = makeNode(AlterGroupStmt);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -2497,7 +2502,7 @@ _copyAlterGroupStmt(AlterGroupStmt *from)
static DropGroupStmt *
_copyDropGroupStmt(DropGroupStmt *from)
{
- DropGroupStmt *newnode = makeNode(DropGroupStmt);
+ DropGroupStmt *newnode = makeNode(DropGroupStmt);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -2508,7 +2513,7 @@ _copyDropGroupStmt(DropGroupStmt *from)
static ReindexStmt *
_copyReindexStmt(ReindexStmt *from)
{
- ReindexStmt *newnode = makeNode(ReindexStmt);
+ ReindexStmt *newnode = makeNode(ReindexStmt);
newnode->reindexType = from->reindexType;
if (from->name)
@@ -2919,7 +2924,7 @@ copyObject(void *from)
retval = _copyReindexStmt(from);
break;
case T_CheckPointStmt:
- retval = (void*)makeNode(CheckPointStmt);
+ retval = (void *) makeNode(CheckPointStmt);
break;
case T_A_Expr:
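
The copy functions above all follow one shape: makeNode() the new node, assign scalars, pstrdup() strings, and Node_Copy() sub-nodes. A sketch of that convention applied to a hypothetical FooStmt node (the node type and its fields are invented for illustration; makeNode, pstrdup and Node_Copy are the real helpers):

/* FooStmt, its fields, and this function are illustrative only. */
static FooStmt *
_copyFooStmt(FooStmt *from)
{
	FooStmt    *newnode = makeNode(FooStmt);

	/* scalar fields copy by plain assignment */
	newnode->option = from->option;

	/* string fields are duplicated into the current memory context */
	if (from->name)
		newnode->name = pstrdup(from->name);

	/* sub-node fields go through the recursive copy macro */
	Node_Copy(from, newnode, query);

	return newnode;
}
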
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9e16b79c6cc..06ee63bbacd 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -6,7 +6,7 @@
* NOTE: a general convention when copying or comparing plan nodes is
* that we ignore the executor state subnode. We do not need to look
* at it because no current uses of copyObject() or equal() need to
- * deal with already-executing plan trees. By leaving the state subnodes
+ * deal with already-executing plan trees. By leaving the state subnodes
* out, we avoid needing to write copy/compare routines for all the
* different executor state node types.
*
@@ -20,7 +20,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.87 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.88 2001/03/22 03:59:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
/* Macro for comparing string fields that might be NULL */
-#define equalstr(a, b) \
+#define equalstr(a, b) \
(((a) != NULL && (b) != NULL) ? (strcmp(a, b) == 0) : (a) == (b))
@@ -134,9 +134,9 @@ _equalOper(Oper *a, Oper *b)
return false;
/*
- * We do not examine opid or op_fcache, since these are
- * logically derived from opno, and they may not be set yet depending
- * on how far along the node is in the parse/plan pipeline.
+ * We do not examine opid or op_fcache, since these are logically
+ * derived from opno, and they may not be set yet depending on how far
+ * along the node is in the parse/plan pipeline.
*
* (Besides, op_fcache is executor state, which we don't check --- see
* notes at head of file.)
@@ -514,6 +514,7 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
{
if (!equal(a->clause, b->clause))
return false;
+
/*
* ignore eval_cost, left/right_pathkey, and left/right_dispersion,
* since they may not be set yet, and should be derivable from the
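
The equalstr() macro re-indented above gives NULL-safe string equality: two NULLs are equal, and NULL never equals a non-NULL string. A standalone check of those cases:

#include <stdio.h>
#include <string.h>

/* NULL-safe string equality, as defined by equalstr() above. */
#define equalstr(a, b) \
	(((a) != NULL && (b) != NULL) ? (strcmp(a, b) == 0) : (a) == (b))

int
main(void)
{
	const char *x = "abc";
	const char *y = NULL;

	/* prints "1 0 1" */
	printf("%d %d %d\n",
		   equalstr(x, "abc"),
		   equalstr(x, y),
		   equalstr(y, (const char *) NULL));
	return 0;
}
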
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index aa83006c840..9b588150fda 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.38 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.39 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* XXX a few of the following functions are duplicated to handle
@@ -557,13 +557,12 @@ set_differencei(List *l1, List *l2)
List *
lreverse(List *l)
{
- List *result = NIL;
- List *i;
- foreach(i, l)
- {
- result = lcons(lfirst(i), result);
- }
- return result;
+ List *result = NIL;
+ List *i;
+
+ foreach(i, l)
+ result = lcons(lfirst(i), result);
+ return result;
}
/*
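
The tidied lreverse() above reverses a List by walking it and prepending each cell with lcons(). The same idea on a plain C singly-linked list; cons() here is a malloc-based stand-in for lcons(), and error handling is omitted:

#include <stdio.h>
#include <stdlib.h>

typedef struct Cell
{
	int			value;
	struct Cell *next;
} Cell;

/* Prepend: a stand-in for lcons(). */
static Cell *
cons(int v, Cell *tail)
{
	Cell	   *c = malloc(sizeof(Cell));

	c->value = v;
	c->next = tail;
	return c;
}

/* Reverse by prepending each element, the same shape as lreverse(). */
static Cell *
reverse(Cell *l)
{
	Cell	   *result = NULL;

	for (; l != NULL; l = l->next)
		result = cons(l->value, result);
	return result;
}

int
main(void)
{
	Cell	   *l = cons(1, cons(2, cons(3, NULL)));
	Cell	   *r;

	for (r = reverse(l); r != NULL; r = r->next)
		printf("%d ", r->value);	/* prints 3 2 1 */
	printf("\n");
	return 0;
}
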
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index f8bbb117291..d8f8310c5b7 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.25 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.26 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* Creator functions in POSTGRES 4.2 are generated automatically. Most of
@@ -109,9 +109,10 @@ makeResdom(AttrNumber resno,
resdom->resname = resname;
/*
- * We always set the sorting/grouping fields to 0. If the caller wants
- * to change them he must do so explicitly. Few if any callers should
- * be doing that, so omitting these arguments reduces the chance of error.
+ * We always set the sorting/grouping fields to 0. If the caller
+ * wants to change them he must do so explicitly. Few if any callers
+ * should be doing that, so omitting these arguments reduces the
+ * chance of error.
*/
resdom->ressortgroupref = 0;
resdom->reskey = 0;
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index c8baef5fbfa..42dd9a2dab8 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/nodeFuncs.c,v 1.13 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/nodeFuncs.c,v 1.14 2001/03/22 03:59:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,5 +114,5 @@ non_null(Expr *c)
else
return false;
}
-#endif
+#endif
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 97f931f5cc6..2c0cfed7ee4 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.139 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.140 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* Every (plan) node in POSTGRES has an associated "out" routine which
@@ -224,6 +224,7 @@ _outQuery(StringInfo str, Query *node)
if (node->utilityStmt)
{
+
/*
* Hack to make up for lack of outfuncs for utility-stmt nodes
*/
@@ -602,7 +603,7 @@ _outGroup(StringInfo str, Group *node)
static void
_outUnique(StringInfo str, Unique *node)
{
- int i;
+ int i;
appendStringInfo(str, " UNIQUE ");
_outPlanInfo(str, (Plan *) node);
@@ -616,7 +617,7 @@ _outUnique(StringInfo str, Unique *node)
static void
_outSetOp(StringInfo str, SetOp *node)
{
- int i;
+ int i;
appendStringInfo(str, " SETOP ");
_outPlanInfo(str, (Plan *) node);
@@ -889,7 +890,7 @@ _outFieldSelect(StringInfo str, FieldSelect *node)
_outNode(str, node->arg);
appendStringInfo(str, " :fieldnum %d :resulttype %u :resulttypmod %d ",
- node->fieldnum, node->resulttype, node->resulttypmod);
+ node->fieldnum, node->resulttype, node->resulttypmod);
}
/*
@@ -1279,14 +1280,14 @@ static void
_outStream(StringInfo str, Stream *node)
{
appendStringInfo(str,
- " STREAM :pathptr @ %p :cinfo @ %p :clausetype %p :upstream @ %p ",
+ " STREAM :pathptr @ %p :cinfo @ %p :clausetype %p :upstream @ %p ",
node->pathptr,
node->cinfo,
node->clausetype,
node->upstream);
appendStringInfo(str,
- " :downstream @ %p :groupup %d :groupcost %f :groupsel %f ",
+ " :downstream @ %p :groupup %d :groupcost %f :groupsel %f ",
node->downstream,
node->groupup,
node->groupcost,
@@ -1332,7 +1333,7 @@ _outValue(StringInfo str, Value *value)
{
switch (value->type)
{
- case T_Integer:
+ case T_Integer:
appendStringInfo(str, " %ld ", value->val.ival);
break;
case T_Float:
diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c
index 4f57a365c45..932f55ab885 100644
--- a/src/backend/nodes/print.c
+++ b/src/backend/nodes/print.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.46 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.47 2001/03/22 03:59:32 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -290,7 +290,7 @@ plannode_type(Plan *p)
{
switch (nodeTag(p))
{
- case T_Plan:
+ case T_Plan:
return "PLAN";
case T_Result:
return "RESULT";
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index beebe262f9f..1f41cc85718 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.28 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.29 2001/03/22 03:59:32 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -41,9 +41,9 @@ stringToNode(char *str)
void *retval;
/*
- * We save and restore the pre-existing state of pg_strtok.
- * This makes the world safe for re-entrant invocation of stringToNode,
- * without incurring a lot of notational overhead by having to pass the
+ * We save and restore the pre-existing state of pg_strtok. This makes
+ * the world safe for re-entrant invocation of stringToNode, without
+ * incurring a lot of notational overhead by having to pass the
* next-character pointer around through all the readfuncs.c code.
*/
save_strtok = pg_strtok_ptr;
@@ -213,7 +213,7 @@ nodeTokenType(char *token, int length)
if (*numptr == '+' || *numptr == '-')
numptr++, numlen--;
if ((numlen > 0 && isdigit((unsigned char) *numptr)) ||
- (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
+ (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
{
/*
@@ -357,14 +357,15 @@ nodeRead(bool read_car_only)
make_dotted_pair_cell = true;
break;
case T_BitString:
- {
- char * val = palloc(tok_len);
- /* skip leading 'b'*/
- strncpy(val, token + 1, tok_len - 1);
- val[tok_len - 1] = '\0';
- this_value = (Node *) makeBitString(val);
- break;
- }
+ {
+ char *val = palloc(tok_len);
+
+ /* skip leading 'b' */
+ strncpy(val, token + 1, tok_len - 1);
+ val[tok_len - 1] = '\0';
+ this_value = (Node *) makeBitString(val);
+ break;
+ }
default:
elog(ERROR, "nodeRead: Bad type %d", type);
this_value = NULL; /* keep compiler happy */
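
The reformatted T_BitString branch above copies the token while dropping its leading 'b' marker and adds the terminating NUL by hand. A standalone version of that counted copy, with malloc() standing in for palloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Copy a counted token while dropping its one-character type marker. */
static char *
strip_marker(const char *token, size_t tok_len)
{
	char	   *val = malloc(tok_len);	/* tok_len - 1 chars plus the NUL */

	strncpy(val, token + 1, tok_len - 1);
	val[tok_len - 1] = '\0';
	return val;
}

int
main(void)
{
	const char	token[] = "b1010";		/* leading 'b' marks a bit string */
	char	   *val = strip_marker(token, strlen(token));

	printf("%s\n", val);				/* prints 1010 */
	free(val);
	return 0;
}
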
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index a6eb9de4dce..9a071e7a250 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.106 2001/02/12 21:03:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.107 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* Most of the read functions for plan nodes are tested. (In fact, they
@@ -35,7 +35,7 @@
/*
* NOTE: use atoi() to read values written with %d, or atoui() to read
* values written with %u in outfuncs.c. An exception is OID values,
- * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
+ * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
* but this will probably change in the future.)
*/
#define atoui(x) ((unsigned int) strtoul((x), NULL, 10))
@@ -64,7 +64,7 @@ toIntList(List *list)
foreach(l, list)
{
- Value *v = (Value *) lfirst(l);
+ Value *v = (Value *) lfirst(l);
if (!IsA(v, Integer))
elog(ERROR, "toIntList: unexpected datatype");
@@ -82,12 +82,12 @@ toOidList(List *list)
foreach(l, list)
{
- Value *v = (Value *) lfirst(l);
+ Value *v = (Value *) lfirst(l);
/*
* This is a bit tricky because OID is unsigned, and so nodeRead
- * might have concluded the value doesn't fit in an integer.
- * Must cope with T_Float as well.
+ * might have concluded the value doesn't fit in an integer. Must
+ * cope with T_Float as well.
*/
if (IsA(v, Integer))
{
@@ -119,16 +119,17 @@ _readQuery(void)
local_node = makeNode(Query);
- token = pg_strtok(&length); /* skip the :command */
- token = pg_strtok(&length); /* get the commandType */
+ token = pg_strtok(&length); /* skip the :command */
+ token = pg_strtok(&length); /* get the commandType */
local_node->commandType = atoi(token);
- token = pg_strtok(&length); /* skip :utility */
+ token = pg_strtok(&length); /* skip :utility */
token = pg_strtok(&length);
if (length == 0)
local_node->utilityStmt = NULL;
else
{
+
/*
* Hack to make up for lack of readfuncs for utility-stmt nodes
*
@@ -140,68 +141,68 @@ _readQuery(void)
local_node->utilityStmt = (Node *) n;
}
- token = pg_strtok(&length); /* skip the :resultRelation */
- token = pg_strtok(&length); /* get the resultRelation */
+ token = pg_strtok(&length); /* skip the :resultRelation */
+ token = pg_strtok(&length); /* get the resultRelation */
local_node->resultRelation = atoi(token);
- token = pg_strtok(&length); /* skip :into */
- token = pg_strtok(&length); /* get into */
+ token = pg_strtok(&length); /* skip :into */
+ token = pg_strtok(&length); /* get into */
local_node->into = nullable_string(token, length);
- token = pg_strtok(&length); /* skip :isPortal */
- token = pg_strtok(&length); /* get isPortal */
+ token = pg_strtok(&length); /* skip :isPortal */
+ token = pg_strtok(&length); /* get isPortal */
local_node->isPortal = strtobool(token);
- token = pg_strtok(&length); /* skip :isBinary */
- token = pg_strtok(&length); /* get isBinary */
+ token = pg_strtok(&length); /* skip :isBinary */
+ token = pg_strtok(&length); /* get isBinary */
local_node->isBinary = strtobool(token);
- token = pg_strtok(&length); /* skip :isTemp */
- token = pg_strtok(&length); /* get isTemp */
+ token = pg_strtok(&length); /* skip :isTemp */
+ token = pg_strtok(&length); /* get isTemp */
local_node->isTemp = strtobool(token);
- token = pg_strtok(&length); /* skip the :hasAggs */
- token = pg_strtok(&length); /* get hasAggs */
+ token = pg_strtok(&length); /* skip the :hasAggs */
+ token = pg_strtok(&length); /* get hasAggs */
local_node->hasAggs = strtobool(token);
- token = pg_strtok(&length); /* skip the :hasSubLinks */
- token = pg_strtok(&length); /* get hasSubLinks */
+ token = pg_strtok(&length); /* skip the :hasSubLinks */
+ token = pg_strtok(&length); /* get hasSubLinks */
local_node->hasSubLinks = strtobool(token);
- token = pg_strtok(&length); /* skip :rtable */
+ token = pg_strtok(&length); /* skip :rtable */
local_node->rtable = nodeRead(true);
- token = pg_strtok(&length); /* skip :jointree */
+ token = pg_strtok(&length); /* skip :jointree */
local_node->jointree = nodeRead(true);
- token = pg_strtok(&length); /* skip :rowMarks */
+ token = pg_strtok(&length); /* skip :rowMarks */
local_node->rowMarks = toIntList(nodeRead(true));
- token = pg_strtok(&length); /* skip :targetlist */
+ token = pg_strtok(&length); /* skip :targetlist */
local_node->targetList = nodeRead(true);
- token = pg_strtok(&length); /* skip :groupClause */
+ token = pg_strtok(&length); /* skip :groupClause */
local_node->groupClause = nodeRead(true);
- token = pg_strtok(&length); /* skip :havingQual */
+ token = pg_strtok(&length); /* skip :havingQual */
local_node->havingQual = nodeRead(true);
- token = pg_strtok(&length); /* skip :distinctClause */
+ token = pg_strtok(&length); /* skip :distinctClause */
local_node->distinctClause = nodeRead(true);
- token = pg_strtok(&length); /* skip :sortClause */
+ token = pg_strtok(&length); /* skip :sortClause */
local_node->sortClause = nodeRead(true);
- token = pg_strtok(&length); /* skip :limitOffset */
+ token = pg_strtok(&length); /* skip :limitOffset */
local_node->limitOffset = nodeRead(true);
- token = pg_strtok(&length); /* skip :limitCount */
+ token = pg_strtok(&length); /* skip :limitCount */
local_node->limitCount = nodeRead(true);
- token = pg_strtok(&length); /* skip :setOperations */
+ token = pg_strtok(&length); /* skip :setOperations */
local_node->setOperations = nodeRead(true);
- token = pg_strtok(&length); /* skip :resultRelations */
+ token = pg_strtok(&length); /* skip :resultRelations */
local_node->resultRelations = toIntList(nodeRead(true));
return local_node;
@@ -220,12 +221,12 @@ _readSortClause(void)
local_node = makeNode(SortClause);
- token = pg_strtok(&length); /* skip :tleSortGroupRef */
- token = pg_strtok(&length); /* get tleSortGroupRef */
+ token = pg_strtok(&length); /* skip :tleSortGroupRef */
+ token = pg_strtok(&length); /* get tleSortGroupRef */
local_node->tleSortGroupRef = atoui(token);
- token = pg_strtok(&length); /* skip :sortop */
- token = pg_strtok(&length); /* get sortop */
+ token = pg_strtok(&length); /* skip :sortop */
+ token = pg_strtok(&length); /* get sortop */
local_node->sortop = atooid(token);
return local_node;
@@ -244,12 +245,12 @@ _readGroupClause(void)
local_node = makeNode(GroupClause);
- token = pg_strtok(&length); /* skip :tleSortGroupRef */
- token = pg_strtok(&length); /* get tleSortGroupRef */
+ token = pg_strtok(&length); /* skip :tleSortGroupRef */
+ token = pg_strtok(&length); /* get tleSortGroupRef */
local_node->tleSortGroupRef = atoui(token);
- token = pg_strtok(&length); /* skip :sortop */
- token = pg_strtok(&length); /* get sortop */
+ token = pg_strtok(&length); /* skip :sortop */
+ token = pg_strtok(&length); /* get sortop */
local_node->sortop = atooid(token);
return local_node;
@@ -268,21 +269,21 @@ _readSetOperationStmt(void)
local_node = makeNode(SetOperationStmt);
- token = pg_strtok(&length); /* eat :op */
- token = pg_strtok(&length); /* get op */
+ token = pg_strtok(&length); /* eat :op */
+ token = pg_strtok(&length); /* get op */
local_node->op = (SetOperation) atoi(token);
- token = pg_strtok(&length); /* eat :all */
- token = pg_strtok(&length); /* get all */
+ token = pg_strtok(&length); /* eat :all */
+ token = pg_strtok(&length); /* get all */
local_node->all = strtobool(token);
- token = pg_strtok(&length); /* eat :larg */
+ token = pg_strtok(&length); /* eat :larg */
local_node->larg = nodeRead(true); /* get larg */
- token = pg_strtok(&length); /* eat :rarg */
+ token = pg_strtok(&length); /* eat :rarg */
local_node->rarg = nodeRead(true); /* get rarg */
- token = pg_strtok(&length); /* eat :colTypes */
+ token = pg_strtok(&length); /* eat :colTypes */
local_node->colTypes = toOidList(nodeRead(true));
return local_node;
@@ -298,32 +299,32 @@ _getPlan(Plan *node)
char *token;
int length;
- token = pg_strtok(&length); /* first token is :startup_cost */
- token = pg_strtok(&length); /* next is the actual cost */
+ token = pg_strtok(&length); /* first token is :startup_cost */
+ token = pg_strtok(&length); /* next is the actual cost */
node->startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* skip the :total_cost */
- token = pg_strtok(&length); /* next is the actual cost */
+ token = pg_strtok(&length); /* skip the :total_cost */
+ token = pg_strtok(&length); /* next is the actual cost */
node->total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* skip the :rows */
- token = pg_strtok(&length); /* get the plan_rows */
+ token = pg_strtok(&length); /* skip the :rows */
+ token = pg_strtok(&length); /* get the plan_rows */
node->plan_rows = atof(token);
- token = pg_strtok(&length); /* skip the :width */
- token = pg_strtok(&length); /* get the plan_width */
+ token = pg_strtok(&length); /* skip the :width */
+ token = pg_strtok(&length); /* get the plan_width */
node->plan_width = atoi(token);
- token = pg_strtok(&length); /* eat :qptargetlist */
+ token = pg_strtok(&length); /* eat :qptargetlist */
node->targetlist = nodeRead(true);
- token = pg_strtok(&length); /* eat :qpqual */
+ token = pg_strtok(&length); /* eat :qpqual */
node->qual = nodeRead(true);
- token = pg_strtok(&length); /* eat :lefttree */
+ token = pg_strtok(&length); /* eat :lefttree */
node->lefttree = (Plan *) nodeRead(true);
- token = pg_strtok(&length); /* eat :righttree */
+ token = pg_strtok(&length); /* eat :righttree */
node->righttree = (Plan *) nodeRead(true);
node->state = (EState *) NULL; /* never read in */
@@ -366,7 +367,7 @@ _readResult(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :resconstantqual */
+ token = pg_strtok(&length); /* eat :resconstantqual */
local_node->resconstantqual = nodeRead(true); /* now read it */
return local_node;
@@ -390,11 +391,11 @@ _readAppend(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :appendplans */
+ token = pg_strtok(&length); /* eat :appendplans */
local_node->appendplans = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :isTarget */
- token = pg_strtok(&length); /* get isTarget */
+ token = pg_strtok(&length); /* eat :isTarget */
+ token = pg_strtok(&length); /* get isTarget */
local_node->isTarget = strtobool(token);
return local_node;
@@ -412,11 +413,11 @@ _getJoin(Join *node)
_getPlan((Plan *) node);
- token = pg_strtok(&length); /* skip the :jointype */
- token = pg_strtok(&length); /* get the jointype */
+ token = pg_strtok(&length); /* skip the :jointype */
+ token = pg_strtok(&length); /* get the jointype */
node->jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* skip the :joinqual */
+ token = pg_strtok(&length); /* skip the :joinqual */
node->joinqual = nodeRead(true); /* get the joinqual */
}
@@ -475,7 +476,7 @@ _readMergeJoin(void)
_getJoin((Join *) local_node);
- token = pg_strtok(&length); /* eat :mergeclauses */
+ token = pg_strtok(&length); /* eat :mergeclauses */
local_node->mergeclauses = nodeRead(true); /* now read it */
return local_node;
@@ -498,11 +499,11 @@ _readHashJoin(void)
_getJoin((Join *) local_node);
- token = pg_strtok(&length); /* eat :hashclauses */
+ token = pg_strtok(&length); /* eat :hashclauses */
local_node->hashclauses = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :hashjoinop */
- token = pg_strtok(&length); /* get hashjoinop */
+ token = pg_strtok(&length); /* eat :hashjoinop */
+ token = pg_strtok(&length); /* get hashjoinop */
local_node->hashjoinop = atooid(token);
return local_node;
@@ -524,8 +525,8 @@ _getScan(Scan *node)
_getPlan((Plan *) node);
- token = pg_strtok(&length); /* eat :scanrelid */
- token = pg_strtok(&length); /* get scanrelid */
+ token = pg_strtok(&length); /* eat :scanrelid */
+ token = pg_strtok(&length); /* get scanrelid */
node->scanrelid = atoui(token);
}
@@ -582,17 +583,17 @@ _readIndexScan(void)
_getScan((Scan *) local_node);
- token = pg_strtok(&length); /* eat :indxid */
+ token = pg_strtok(&length); /* eat :indxid */
local_node->indxid = toOidList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* eat :indxqual */
+ token = pg_strtok(&length); /* eat :indxqual */
local_node->indxqual = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :indxqualorig */
+ token = pg_strtok(&length); /* eat :indxqualorig */
local_node->indxqualorig = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :indxorderdir */
- token = pg_strtok(&length); /* get indxorderdir */
+ token = pg_strtok(&length); /* eat :indxorderdir */
+ token = pg_strtok(&length); /* get indxorderdir */
local_node->indxorderdir = atoi(token);
return local_node;
@@ -615,11 +616,11 @@ _readTidScan(void)
_getScan((Scan *) local_node);
- token = pg_strtok(&length); /* eat :needrescan */
- token = pg_strtok(&length); /* get needrescan */
+ token = pg_strtok(&length); /* eat :needrescan */
+ token = pg_strtok(&length); /* get needrescan */
local_node->needRescan = atoi(token);
- token = pg_strtok(&length); /* eat :tideval */
+ token = pg_strtok(&length); /* eat :tideval */
local_node->tideval = nodeRead(true); /* now read it */
return local_node;
@@ -634,7 +635,7 @@ _readTidScan(void)
static SubqueryScan *
_readSubqueryScan(void)
{
- SubqueryScan *local_node;
+ SubqueryScan *local_node;
char *token;
int length;
@@ -642,8 +643,8 @@ _readSubqueryScan(void)
_getScan((Scan *) local_node);
- token = pg_strtok(&length); /* eat :subplan */
- local_node->subplan = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :subplan */
+ local_node->subplan = nodeRead(true); /* now read it */
return local_node;
}
@@ -665,8 +666,8 @@ _readSort(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :keycount */
- token = pg_strtok(&length); /* get keycount */
+ token = pg_strtok(&length); /* eat :keycount */
+ token = pg_strtok(&length); /* get keycount */
local_node->keycount = atoi(token);
return local_node;
@@ -700,7 +701,7 @@ _readHash(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :hashkey */
+ token = pg_strtok(&length); /* eat :hashkey */
local_node->hashkey = nodeRead(true);
return local_node;
@@ -725,36 +726,36 @@ _readResdom(void)
local_node = makeNode(Resdom);
- token = pg_strtok(&length); /* eat :resno */
- token = pg_strtok(&length); /* get resno */
+ token = pg_strtok(&length); /* eat :resno */
+ token = pg_strtok(&length); /* get resno */
local_node->resno = atoi(token);
- token = pg_strtok(&length); /* eat :restype */
- token = pg_strtok(&length); /* get restype */
+ token = pg_strtok(&length); /* eat :restype */
+ token = pg_strtok(&length); /* get restype */
local_node->restype = atooid(token);
- token = pg_strtok(&length); /* eat :restypmod */
- token = pg_strtok(&length); /* get restypmod */
+ token = pg_strtok(&length); /* eat :restypmod */
+ token = pg_strtok(&length); /* get restypmod */
local_node->restypmod = atoi(token);
- token = pg_strtok(&length); /* eat :resname */
- token = pg_strtok(&length); /* get the name */
+ token = pg_strtok(&length); /* eat :resname */
+ token = pg_strtok(&length); /* get the name */
local_node->resname = nullable_string(token, length);
- token = pg_strtok(&length); /* eat :reskey */
- token = pg_strtok(&length); /* get reskey */
+ token = pg_strtok(&length); /* eat :reskey */
+ token = pg_strtok(&length); /* get reskey */
local_node->reskey = atoui(token);
- token = pg_strtok(&length); /* eat :reskeyop */
- token = pg_strtok(&length); /* get reskeyop */
+ token = pg_strtok(&length); /* eat :reskeyop */
+ token = pg_strtok(&length); /* get reskeyop */
local_node->reskeyop = atooid(token);
- token = pg_strtok(&length); /* eat :ressortgroupref */
- token = pg_strtok(&length); /* get ressortgroupref */
+ token = pg_strtok(&length); /* eat :ressortgroupref */
+ token = pg_strtok(&length); /* get ressortgroupref */
local_node->ressortgroupref = atoui(token);
- token = pg_strtok(&length); /* eat :resjunk */
- token = pg_strtok(&length); /* get resjunk */
+ token = pg_strtok(&length); /* eat :resjunk */
+ token = pg_strtok(&length); /* get resjunk */
local_node->resjunk = strtobool(token);
return local_node;
@@ -775,12 +776,12 @@ _readExpr(void)
local_node = makeNode(Expr);
- token = pg_strtok(&length); /* eat :typeOid */
- token = pg_strtok(&length); /* get typeOid */
+ token = pg_strtok(&length); /* eat :typeOid */
+ token = pg_strtok(&length); /* get typeOid */
local_node->typeOid = atooid(token);
- token = pg_strtok(&length); /* eat :opType */
- token = pg_strtok(&length); /* get opType */
+ token = pg_strtok(&length); /* eat :opType */
+ token = pg_strtok(&length); /* get opType */
if (strncmp(token, "op", 2) == 0)
local_node->opType = OP_EXPR;
else if (strncmp(token, "func", 4) == 0)
@@ -796,10 +797,10 @@ _readExpr(void)
else
elog(ERROR, "_readExpr: unknown opType \"%.*s\"", length, token);
- token = pg_strtok(&length); /* eat :oper */
+ token = pg_strtok(&length); /* eat :oper */
local_node->oper = nodeRead(true);
- token = pg_strtok(&length); /* eat :args */
+ token = pg_strtok(&length); /* eat :args */
local_node->args = nodeRead(true); /* now read it */
return local_node;
@@ -820,17 +821,17 @@ _readCaseExpr(void)
local_node = makeNode(CaseExpr);
- token = pg_strtok(&length); /* eat :casetype */
- token = pg_strtok(&length); /* get casetype */
+ token = pg_strtok(&length); /* eat :casetype */
+ token = pg_strtok(&length); /* get casetype */
local_node->casetype = atooid(token);
- token = pg_strtok(&length); /* eat :arg */
+ token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true);
- token = pg_strtok(&length); /* eat :args */
+ token = pg_strtok(&length); /* eat :args */
local_node->args = nodeRead(true);
- token = pg_strtok(&length); /* eat :defresult */
+ token = pg_strtok(&length); /* eat :defresult */
local_node->defresult = nodeRead(true);
return local_node;
@@ -852,7 +853,7 @@ _readCaseWhen(void)
local_node = makeNode(CaseWhen);
local_node->expr = nodeRead(true);
- token = pg_strtok(&length); /* eat :then */
+ token = pg_strtok(&length); /* eat :then */
local_node->result = nodeRead(true);
return local_node;
@@ -873,32 +874,32 @@ _readVar(void)
local_node = makeNode(Var);
- token = pg_strtok(&length); /* eat :varno */
- token = pg_strtok(&length); /* get varno */
+ token = pg_strtok(&length); /* eat :varno */
+ token = pg_strtok(&length); /* get varno */
local_node->varno = atoui(token);
- token = pg_strtok(&length); /* eat :varattno */
- token = pg_strtok(&length); /* get varattno */
+ token = pg_strtok(&length); /* eat :varattno */
+ token = pg_strtok(&length); /* get varattno */
local_node->varattno = atoi(token);
- token = pg_strtok(&length); /* eat :vartype */
- token = pg_strtok(&length); /* get vartype */
+ token = pg_strtok(&length); /* eat :vartype */
+ token = pg_strtok(&length); /* get vartype */
local_node->vartype = atooid(token);
- token = pg_strtok(&length); /* eat :vartypmod */
- token = pg_strtok(&length); /* get vartypmod */
+ token = pg_strtok(&length); /* eat :vartypmod */
+ token = pg_strtok(&length); /* get vartypmod */
local_node->vartypmod = atoi(token);
- token = pg_strtok(&length); /* eat :varlevelsup */
- token = pg_strtok(&length); /* get varlevelsup */
+ token = pg_strtok(&length); /* eat :varlevelsup */
+ token = pg_strtok(&length); /* get varlevelsup */
local_node->varlevelsup = atoui(token);
- token = pg_strtok(&length); /* eat :varnoold */
- token = pg_strtok(&length); /* get varnoold */
+ token = pg_strtok(&length); /* eat :varnoold */
+ token = pg_strtok(&length); /* get varnoold */
local_node->varnoold = atoui(token);
- token = pg_strtok(&length); /* eat :varoattno */
- token = pg_strtok(&length); /* eat :varoattno */
+ token = pg_strtok(&length); /* eat :varoattno */
+ token = pg_strtok(&length); /* eat :varoattno */
local_node->varoattno = atoi(token);
return local_node;
@@ -919,32 +920,32 @@ _readArrayRef(void)
local_node = makeNode(ArrayRef);
- token = pg_strtok(&length); /* eat :refelemtype */
- token = pg_strtok(&length); /* get refelemtype */
+ token = pg_strtok(&length); /* eat :refelemtype */
+ token = pg_strtok(&length); /* get refelemtype */
local_node->refelemtype = atooid(token);
- token = pg_strtok(&length); /* eat :refattrlength */
- token = pg_strtok(&length); /* get refattrlength */
+ token = pg_strtok(&length); /* eat :refattrlength */
+ token = pg_strtok(&length); /* get refattrlength */
local_node->refattrlength = atoi(token);
- token = pg_strtok(&length); /* eat :refelemlength */
- token = pg_strtok(&length); /* get refelemlength */
+ token = pg_strtok(&length); /* eat :refelemlength */
+ token = pg_strtok(&length); /* get refelemlength */
local_node->refelemlength = atoi(token);
- token = pg_strtok(&length); /* eat :refelembyval */
- token = pg_strtok(&length); /* get refelembyval */
+ token = pg_strtok(&length); /* eat :refelembyval */
+ token = pg_strtok(&length); /* get refelembyval */
local_node->refelembyval = strtobool(token);
- token = pg_strtok(&length); /* eat :refupperindex */
+ token = pg_strtok(&length); /* eat :refupperindex */
local_node->refupperindexpr = nodeRead(true);
- token = pg_strtok(&length); /* eat :reflowerindex */
+ token = pg_strtok(&length); /* eat :reflowerindex */
local_node->reflowerindexpr = nodeRead(true);
- token = pg_strtok(&length); /* eat :refexpr */
+ token = pg_strtok(&length); /* eat :refexpr */
local_node->refexpr = nodeRead(true);
- token = pg_strtok(&length); /* eat :refassgnexpr */
+ token = pg_strtok(&length); /* eat :refassgnexpr */
local_node->refassgnexpr = nodeRead(true);
return local_node;
@@ -965,32 +966,30 @@ _readConst(void)
local_node = makeNode(Const);
- token = pg_strtok(&length); /* get :consttype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :consttype */
+ token = pg_strtok(&length); /* now read it */
local_node->consttype = atooid(token);
- token = pg_strtok(&length); /* get :constlen */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :constlen */
+ token = pg_strtok(&length); /* now read it */
local_node->constlen = atoi(token);
- token = pg_strtok(&length); /* get :constbyval */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :constbyval */
+ token = pg_strtok(&length); /* now read it */
local_node->constbyval = strtobool(token);
- token = pg_strtok(&length); /* get :constisnull */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :constisnull */
+ token = pg_strtok(&length); /* now read it */
local_node->constisnull = strtobool(token);
- token = pg_strtok(&length); /* get :constvalue */
+ token = pg_strtok(&length); /* get :constvalue */
if (local_node->constisnull)
{
- token = pg_strtok(&length); /* skip "NIL" */
+ token = pg_strtok(&length); /* skip "NIL" */
}
else
- {
local_node->constvalue = readDatum(local_node->constbyval);
- }
return local_node;
}
@@ -1010,12 +1009,12 @@ _readFunc(void)
local_node = makeNode(Func);
- token = pg_strtok(&length); /* get :funcid */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :funcid */
+ token = pg_strtok(&length); /* now read it */
local_node->funcid = atooid(token);
- token = pg_strtok(&length); /* get :functype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :functype */
+ token = pg_strtok(&length); /* now read it */
local_node->functype = atooid(token);
local_node->func_fcache = NULL;
@@ -1038,16 +1037,16 @@ _readOper(void)
local_node = makeNode(Oper);
- token = pg_strtok(&length); /* get :opno */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :opno */
+ token = pg_strtok(&length); /* now read it */
local_node->opno = atooid(token);
- token = pg_strtok(&length); /* get :opid */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :opid */
+ token = pg_strtok(&length); /* now read it */
local_node->opid = atooid(token);
- token = pg_strtok(&length); /* get :opresulttype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :opresulttype */
+ token = pg_strtok(&length); /* now read it */
local_node->opresulttype = atooid(token);
local_node->op_fcache = NULL;
@@ -1070,20 +1069,20 @@ _readParam(void)
local_node = makeNode(Param);
- token = pg_strtok(&length); /* get :paramkind */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramkind */
+ token = pg_strtok(&length); /* now read it */
local_node->paramkind = atoi(token);
- token = pg_strtok(&length); /* get :paramid */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramid */
+ token = pg_strtok(&length); /* now read it */
local_node->paramid = atoi(token);
- token = pg_strtok(&length); /* get :paramname */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramname */
+ token = pg_strtok(&length); /* now read it */
local_node->paramname = nullable_string(token, length);
- token = pg_strtok(&length); /* get :paramtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramtype */
+ token = pg_strtok(&length); /* now read it */
local_node->paramtype = atooid(token);
return local_node;
@@ -1104,27 +1103,27 @@ _readAggref(void)
local_node = makeNode(Aggref);
- token = pg_strtok(&length); /* eat :aggname */
- token = pg_strtok(&length); /* get aggname */
+ token = pg_strtok(&length); /* eat :aggname */
+ token = pg_strtok(&length); /* get aggname */
local_node->aggname = debackslash(token, length);
- token = pg_strtok(&length); /* eat :basetype */
- token = pg_strtok(&length); /* get basetype */
+ token = pg_strtok(&length); /* eat :basetype */
+ token = pg_strtok(&length); /* get basetype */
local_node->basetype = atooid(token);
- token = pg_strtok(&length); /* eat :aggtype */
- token = pg_strtok(&length); /* get aggtype */
+ token = pg_strtok(&length); /* eat :aggtype */
+ token = pg_strtok(&length); /* get aggtype */
local_node->aggtype = atooid(token);
- token = pg_strtok(&length); /* eat :target */
+ token = pg_strtok(&length); /* eat :target */
local_node->target = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :aggstar */
- token = pg_strtok(&length); /* get aggstar */
+ token = pg_strtok(&length); /* eat :aggstar */
+ token = pg_strtok(&length); /* get aggstar */
local_node->aggstar = strtobool(token);
- token = pg_strtok(&length); /* eat :aggdistinct */
- token = pg_strtok(&length); /* get aggdistinct */
+ token = pg_strtok(&length); /* eat :aggdistinct */
+ token = pg_strtok(&length); /* get aggdistinct */
local_node->aggdistinct = strtobool(token);
return local_node;
@@ -1145,21 +1144,21 @@ _readSubLink(void)
local_node = makeNode(SubLink);
- token = pg_strtok(&length); /* eat :subLinkType */
- token = pg_strtok(&length); /* get subLinkType */
+ token = pg_strtok(&length); /* eat :subLinkType */
+ token = pg_strtok(&length); /* get subLinkType */
local_node->subLinkType = atoi(token);
- token = pg_strtok(&length); /* eat :useor */
- token = pg_strtok(&length); /* get useor */
+ token = pg_strtok(&length); /* eat :useor */
+ token = pg_strtok(&length); /* get useor */
local_node->useor = strtobool(token);
- token = pg_strtok(&length); /* eat :lefthand */
+ token = pg_strtok(&length); /* eat :lefthand */
local_node->lefthand = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :oper */
+ token = pg_strtok(&length); /* eat :oper */
local_node->oper = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :subselect */
+ token = pg_strtok(&length); /* eat :subselect */
local_node->subselect = nodeRead(true); /* now read it */
return local_node;
@@ -1180,19 +1179,19 @@ _readFieldSelect(void)
local_node = makeNode(FieldSelect);
- token = pg_strtok(&length); /* eat :arg */
+ token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :fieldnum */
- token = pg_strtok(&length); /* get fieldnum */
+ token = pg_strtok(&length); /* eat :fieldnum */
+ token = pg_strtok(&length); /* get fieldnum */
local_node->fieldnum = (AttrNumber) atoi(token);
- token = pg_strtok(&length); /* eat :resulttype */
- token = pg_strtok(&length); /* get resulttype */
+ token = pg_strtok(&length); /* eat :resulttype */
+ token = pg_strtok(&length); /* get resulttype */
local_node->resulttype = atooid(token);
- token = pg_strtok(&length); /* eat :resulttypmod */
- token = pg_strtok(&length); /* get resulttypmod */
+ token = pg_strtok(&length); /* eat :resulttypmod */
+ token = pg_strtok(&length); /* get resulttypmod */
local_node->resulttypmod = atoi(token);
return local_node;
@@ -1213,15 +1212,15 @@ _readRelabelType(void)
local_node = makeNode(RelabelType);
- token = pg_strtok(&length); /* eat :arg */
+ token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :resulttype */
- token = pg_strtok(&length); /* get resulttype */
+ token = pg_strtok(&length); /* eat :resulttype */
+ token = pg_strtok(&length); /* get resulttype */
local_node->resulttype = atooid(token);
- token = pg_strtok(&length); /* eat :resulttypmod */
- token = pg_strtok(&length); /* get resulttypmod */
+ token = pg_strtok(&length); /* eat :resulttypmod */
+ token = pg_strtok(&length); /* get resulttypmod */
local_node->resulttypmod = atoi(token);
return local_node;
@@ -1242,7 +1241,7 @@ _readRangeTblRef(void)
local_node = makeNode(RangeTblRef);
- token = pg_strtok(&length); /* get rtindex */
+ token = pg_strtok(&length); /* get rtindex */
local_node->rtindex = atoi(token);
return local_node;
@@ -1263,11 +1262,11 @@ _readFromExpr(void)
local_node = makeNode(FromExpr);
- token = pg_strtok(&length); /* eat :fromlist */
- local_node->fromlist = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :fromlist */
+ local_node->fromlist = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :quals */
- local_node->quals = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :quals */
+ local_node->quals = nodeRead(true); /* now read it */
return local_node;
}
@@ -1287,34 +1286,34 @@ _readJoinExpr(void)
local_node = makeNode(JoinExpr);
- token = pg_strtok(&length); /* eat :jointype */
- token = pg_strtok(&length); /* get jointype */
+ token = pg_strtok(&length); /* eat :jointype */
+ token = pg_strtok(&length); /* get jointype */
local_node->jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* eat :isNatural */
- token = pg_strtok(&length); /* get :isNatural */
+ token = pg_strtok(&length); /* eat :isNatural */
+ token = pg_strtok(&length); /* get :isNatural */
local_node->isNatural = strtobool(token);
- token = pg_strtok(&length); /* eat :larg */
+ token = pg_strtok(&length); /* eat :larg */
local_node->larg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :rarg */
+ token = pg_strtok(&length); /* eat :rarg */
local_node->rarg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :using */
- local_node->using = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :using */
+ local_node->using = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :quals */
- local_node->quals = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :quals */
+ local_node->quals = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :alias */
- local_node->alias = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :alias */
+ local_node->alias = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :colnames */
- local_node->colnames = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :colnames */
+ local_node->colnames = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :colvars */
- local_node->colvars = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :colvars */
+ local_node->colvars = nodeRead(true); /* now read it */
return local_node;
}
@@ -1336,66 +1335,66 @@ _readRelOptInfo(void)
local_node = makeNode(RelOptInfo);
- token = pg_strtok(&length); /* get :relids */
+ token = pg_strtok(&length); /* get :relids */
local_node->relids = toIntList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* get :rows */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :rows */
+ token = pg_strtok(&length); /* now read it */
local_node->rows = atof(token);
- token = pg_strtok(&length); /* get :width */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :width */
+ token = pg_strtok(&length); /* now read it */
local_node->width = atoi(token);
- token = pg_strtok(&length); /* get :targetlist */
+ token = pg_strtok(&length); /* get :targetlist */
local_node->targetlist = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :pathlist */
+ token = pg_strtok(&length); /* get :pathlist */
local_node->pathlist = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :cheapest_startup_path */
+ token = pg_strtok(&length); /* get :cheapest_startup_path */
local_node->cheapest_startup_path = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :cheapest_total_path */
+ token = pg_strtok(&length); /* get :cheapest_total_path */
local_node->cheapest_total_path = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :pruneable */
- token = pg_strtok(&length); /* get :pruneable */
+ token = pg_strtok(&length); /* eat :pruneable */
+ token = pg_strtok(&length); /* get :pruneable */
local_node->pruneable = strtobool(token);
- token = pg_strtok(&length); /* get :issubquery */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :issubquery */
+ token = pg_strtok(&length); /* now read it */
local_node->issubquery = strtobool(token);
- token = pg_strtok(&length); /* get :indexed */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :indexed */
+ token = pg_strtok(&length); /* now read it */
local_node->indexed = strtobool(token);
- token = pg_strtok(&length); /* get :pages */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pages */
+ token = pg_strtok(&length); /* now read it */
local_node->pages = atol(token);
- token = pg_strtok(&length); /* get :tuples */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :tuples */
+ token = pg_strtok(&length); /* now read it */
local_node->tuples = atof(token);
- token = pg_strtok(&length); /* get :subplan */
- local_node->subplan = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :subplan */
+ local_node->subplan = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :baserestrictinfo */
- local_node->baserestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :baserestrictinfo */
+ local_node->baserestrictinfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :baserestrictcost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :baserestrictcost */
+ token = pg_strtok(&length); /* now read it */
local_node->baserestrictcost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :outerjoinset */
- local_node->outerjoinset = toIntList(nodeRead(true)); /* now read it */
+ token = pg_strtok(&length); /* get :outerjoinset */
+ local_node->outerjoinset = toIntList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* get :joininfo */
+ token = pg_strtok(&length); /* get :joininfo */
local_node->joininfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoin */
+ token = pg_strtok(&length); /* get :innerjoin */
local_node->innerjoin = nodeRead(true); /* now read it */
return local_node;
@@ -1414,10 +1413,10 @@ _readTargetEntry(void)
local_node = makeNode(TargetEntry);
- token = pg_strtok(&length); /* get :resdom */
+ token = pg_strtok(&length); /* get :resdom */
local_node->resdom = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :expr */
+ token = pg_strtok(&length); /* get :expr */
local_node->expr = nodeRead(true); /* now read it */
return local_node;
@@ -1432,11 +1431,11 @@ _readAttr(void)
local_node = makeNode(Attr);
- token = pg_strtok(&length); /* eat :relname */
- token = pg_strtok(&length); /* get relname */
+ token = pg_strtok(&length); /* eat :relname */
+ token = pg_strtok(&length); /* get relname */
local_node->relname = debackslash(token, length);
- token = pg_strtok(&length); /* eat :attrs */
+ token = pg_strtok(&length); /* eat :attrs */
local_node->attrs = nodeRead(true); /* now read it */
return local_node;
@@ -1455,41 +1454,41 @@ _readRangeTblEntry(void)
local_node = makeNode(RangeTblEntry);
- token = pg_strtok(&length); /* eat :relname */
- token = pg_strtok(&length); /* get :relname */
+ token = pg_strtok(&length); /* eat :relname */
+ token = pg_strtok(&length); /* get :relname */
local_node->relname = nullable_string(token, length);
- token = pg_strtok(&length); /* eat :relid */
- token = pg_strtok(&length); /* get :relid */
+ token = pg_strtok(&length); /* eat :relid */
+ token = pg_strtok(&length); /* get :relid */
local_node->relid = atooid(token);
- token = pg_strtok(&length); /* eat :subquery */
- local_node->subquery = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :subquery */
+ local_node->subquery = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :alias */
- local_node->alias = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :alias */
+ local_node->alias = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :eref */
+ token = pg_strtok(&length); /* eat :eref */
local_node->eref = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :inh */
- token = pg_strtok(&length); /* get :inh */
+ token = pg_strtok(&length); /* eat :inh */
+ token = pg_strtok(&length); /* get :inh */
local_node->inh = strtobool(token);
- token = pg_strtok(&length); /* eat :inFromCl */
- token = pg_strtok(&length); /* get :inFromCl */
+ token = pg_strtok(&length); /* eat :inFromCl */
+ token = pg_strtok(&length); /* get :inFromCl */
local_node->inFromCl = strtobool(token);
- token = pg_strtok(&length); /* eat :checkForRead */
- token = pg_strtok(&length); /* get :checkForRead */
+ token = pg_strtok(&length); /* eat :checkForRead */
+ token = pg_strtok(&length); /* get :checkForRead */
local_node->checkForRead = strtobool(token);
- token = pg_strtok(&length); /* eat :checkForWrite */
- token = pg_strtok(&length); /* get :checkForWrite */
+ token = pg_strtok(&length); /* eat :checkForWrite */
+ token = pg_strtok(&length); /* get :checkForWrite */
local_node->checkForWrite = strtobool(token);
- token = pg_strtok(&length); /* eat :checkAsUser */
- token = pg_strtok(&length); /* get :checkAsUser */
+ token = pg_strtok(&length); /* eat :checkAsUser */
+ token = pg_strtok(&length); /* get :checkAsUser */
local_node->checkAsUser = atooid(token);
return local_node;
@@ -1510,19 +1509,19 @@ _readPath(void)
local_node = makeNode(Path);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->pathkeys = nodeRead(true); /* now read it */
return local_node;
@@ -1543,40 +1542,40 @@ _readIndexPath(void)
local_node = makeNode(IndexPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :indexid */
+ token = pg_strtok(&length); /* get :indexid */
local_node->indexid = toOidList(nodeRead(true));
- token = pg_strtok(&length); /* get :indexqual */
+ token = pg_strtok(&length); /* get :indexqual */
local_node->indexqual = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :indexscandir */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :indexscandir */
+ token = pg_strtok(&length); /* now read it */
local_node->indexscandir = (ScanDirection) atoi(token);
- token = pg_strtok(&length); /* get :joinrelids */
+ token = pg_strtok(&length); /* get :joinrelids */
local_node->joinrelids = toIntList(nodeRead(true));
- token = pg_strtok(&length); /* get :alljoinquals */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :alljoinquals */
+ token = pg_strtok(&length); /* now read it */
local_node->alljoinquals = strtobool(token);
- token = pg_strtok(&length); /* get :rows */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :rows */
+ token = pg_strtok(&length); /* now read it */
local_node->rows = atof(token);
return local_node;
@@ -1597,25 +1596,25 @@ _readTidPath(void)
local_node = makeNode(TidPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :tideval */
+ token = pg_strtok(&length); /* get :tideval */
local_node->tideval = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :unjoined_relids */
+ token = pg_strtok(&length); /* get :unjoined_relids */
local_node->unjoined_relids = toIntList(nodeRead(true));
return local_node;
@@ -1636,22 +1635,22 @@ _readAppendPath(void)
local_node = makeNode(AppendPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :subpaths */
+ token = pg_strtok(&length); /* get :subpaths */
local_node->subpaths = nodeRead(true); /* now read it */
return local_node;
@@ -1672,33 +1671,33 @@ _readNestPath(void)
local_node = makeNode(NestPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :jointype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :jointype */
+ token = pg_strtok(&length); /* now read it */
local_node->jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* get :outerjoinpath */
+ token = pg_strtok(&length); /* get :outerjoinpath */
local_node->outerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoinpath */
+ token = pg_strtok(&length); /* get :innerjoinpath */
local_node->innerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :joinrestrictinfo */
- local_node->joinrestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :joinrestrictinfo */
+ local_node->joinrestrictinfo = nodeRead(true); /* now read it */
return local_node;
}
@@ -1718,41 +1717,41 @@ _readMergePath(void)
local_node = makeNode(MergePath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :jointype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :jointype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* get :outerjoinpath */
+ token = pg_strtok(&length); /* get :outerjoinpath */
local_node->jpath.outerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoinpath */
+ token = pg_strtok(&length); /* get :innerjoinpath */
local_node->jpath.innerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :joinrestrictinfo */
- local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :joinrestrictinfo */
+ local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :path_mergeclauses */
+ token = pg_strtok(&length); /* get :path_mergeclauses */
local_node->path_mergeclauses = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :outersortkeys */
+ token = pg_strtok(&length); /* get :outersortkeys */
local_node->outersortkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innersortkeys */
+ token = pg_strtok(&length); /* get :innersortkeys */
local_node->innersortkeys = nodeRead(true); /* now read it */
return local_node;
@@ -1773,35 +1772,35 @@ _readHashPath(void)
local_node = makeNode(HashPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :jointype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :jointype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* get :outerjoinpath */
+ token = pg_strtok(&length); /* get :outerjoinpath */
local_node->jpath.outerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoinpath */
+ token = pg_strtok(&length); /* get :innerjoinpath */
local_node->jpath.innerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :joinrestrictinfo */
- local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :joinrestrictinfo */
+ local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :path_hashclauses */
+ token = pg_strtok(&length); /* get :path_hashclauses */
local_node->path_hashclauses = nodeRead(true); /* now read it */
return local_node;
@@ -1822,11 +1821,11 @@ _readPathKeyItem(void)
local_node = makeNode(PathKeyItem);
- token = pg_strtok(&length); /* get :sortop */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :sortop */
+ token = pg_strtok(&length); /* now read it */
local_node->sortop = atooid(token);
- token = pg_strtok(&length); /* get :key */
+ token = pg_strtok(&length); /* get :key */
local_node->key = nodeRead(true); /* now read it */
return local_node;
@@ -1847,30 +1846,30 @@ _readRestrictInfo(void)
local_node = makeNode(RestrictInfo);
- token = pg_strtok(&length); /* get :clause */
+ token = pg_strtok(&length); /* get :clause */
local_node->clause = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :ispusheddown */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :ispusheddown */
+ token = pg_strtok(&length); /* now read it */
local_node->ispusheddown = strtobool(token);
- token = pg_strtok(&length); /* get :subclauseindices */
+ token = pg_strtok(&length); /* get :subclauseindices */
local_node->subclauseindices = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :mergejoinoperator */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :mergejoinoperator */
+ token = pg_strtok(&length); /* now read it */
local_node->mergejoinoperator = atooid(token);
- token = pg_strtok(&length); /* get :left_sortop */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :left_sortop */
+ token = pg_strtok(&length); /* now read it */
local_node->left_sortop = atooid(token);
- token = pg_strtok(&length); /* get :right_sortop */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :right_sortop */
+ token = pg_strtok(&length); /* now read it */
local_node->right_sortop = atooid(token);
- token = pg_strtok(&length); /* get :hashjoinoperator */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :hashjoinoperator */
+ token = pg_strtok(&length); /* now read it */
local_node->hashjoinoperator = atooid(token);
/* eval_cost is not part of saved representation; compute on first use */
@@ -1899,10 +1898,10 @@ _readJoinInfo(void)
local_node = makeNode(JoinInfo);
- token = pg_strtok(&length); /* get :unjoined_relids */
+ token = pg_strtok(&length); /* get :unjoined_relids */
local_node->unjoined_relids = toIntList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* get :jinfo_restrictinfo */
+ token = pg_strtok(&length); /* get :jinfo_restrictinfo */
local_node->jinfo_restrictinfo = nodeRead(true); /* now read it */
return local_node;
@@ -1922,7 +1921,7 @@ _readIter(void)
local_node = makeNode(Iter);
- token = pg_strtok(&length); /* eat :iterexpr */
+ token = pg_strtok(&length); /* eat :iterexpr */
local_node->iterexpr = nodeRead(true); /* now read it */
return local_node;
@@ -2081,7 +2080,7 @@ readDatum(bool typbyval)
token = pg_strtok(&tokenLength);
length = atoui(token);
- token = pg_strtok(&tokenLength); /* skip the '[' */
+ token = pg_strtok(&tokenLength); /* skip the '[' */
if (typbyval)
{
@@ -2109,7 +2108,7 @@ readDatum(bool typbyval)
res = PointerGetDatum(s);
}
- token = pg_strtok(&tokenLength); /* skip the ']' */
+ token = pg_strtok(&tokenLength); /* skip the ']' */
if (token == NULL || token[0] != ']')
elog(ERROR, "readDatum: ']' expected, length = %lu",
(unsigned long) length);
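The readfuncs.c hunks above only re-align the "eat :label / get value" comments, but the pattern they annotate is worth spelling out: every field of a serialized node is written as a ":name" label token followed by its value token, and the reader consumes them in pairs. Below is a minimal standalone sketch of that two-token pattern using only the C standard library. It is an illustration, not the backend's pg_strtok/nodeRead API, and the field names are borrowed from the _readResdom() hunk.

/* Standalone sketch of the ":label value" token-pair pattern used in
 * readfuncs.c above.  NOT the backend's pg_strtok(); a plain whitespace
 * tokenizer over a static string stands in for it. */
#include <stdio.h>
#include <stdlib.h>

static char *cursor;                     /* shared parse position */

/* return the next space-delimited token, or NULL at end of input */
static char *next_token(int *length)
{
    while (*cursor == ' ')
        cursor++;
    if (*cursor == '\0')
        return NULL;
    char *start = cursor;
    while (*cursor != ' ' && *cursor != '\0')
        cursor++;
    *length = (int) (cursor - start);
    return start;
}

int main(void)
{
    char  input[] = ":resno 3 :restype 23";
    int   length = 0;
    char *token;

    cursor = input;

    token = next_token(&length);         /* eat :resno */
    token = next_token(&length);         /* get resno */
    int resno = atoi(token);

    token = next_token(&length);         /* eat :restype */
    token = next_token(&length);         /* get restype */
    int restype = atoi(token);

    printf("resno = %d, restype = %d (last token length %d)\n",
           resno, restype, length);
    return 0;
}

The real reader additionally dispatches on the node tag and uses nodeRead() recursively for list- and node-valued fields, which this sketch omits.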
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 4422e03cb40..dd3d6bd5372 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: geqo_eval.c,v 1.57 2001/01/24 19:42:57 momjian Exp $
+ * $Id: geqo_eval.c,v 1.58 2001/03/22 03:59:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,8 +49,8 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
* allocated inside gimme_tree().
*
* Since geqo_eval() will be called many times, we can't afford to let
- * all that memory go unreclaimed until end of statement. Note we make
- * the temp context a child of TransactionCommandContext, so that
+ * all that memory go unreclaimed until end of statement. Note we
+ * make the temp context a child of TransactionCommandContext, so that
* it will be freed even if we abort via elog(ERROR).
*/
mycontext = AllocSetContextCreate(TransactionCommandContext,
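The geqo_eval.c hunk only rewraps a comment, but the comment it rewraps describes a useful pattern: put all per-evaluation scratch allocations into one short-lived context and release it wholesale after each call, so repeated evaluations do not accumulate garbage until end of statement. The sketch below illustrates the idea with a trivial bump arena; it is not PostgreSQL's MemoryContext API, and the sizes and loop counts are arbitrary.

/* Standalone sketch of the "per-call scratch context" idea from the
 * geqo_eval() comment above.  A fixed-size bump arena stands in for
 * AllocSetContextCreate(); a real arena would grow on demand. */
#include <stdio.h>
#include <stdlib.h>

typedef struct Arena
{
    char  *buf;
    size_t used;
    size_t size;
} Arena;

static Arena *arena_create(size_t size)
{
    Arena *a = malloc(sizeof(Arena));
    a->buf = malloc(size);
    a->used = 0;
    a->size = size;
    return a;
}

static void *arena_alloc(Arena *a, size_t n)
{
    if (a->used + n > a->size)
        return NULL;                     /* illustration only: no growth */
    void *p = a->buf + a->used;
    a->used += n;
    return p;
}

static void arena_destroy(Arena *a)
{
    free(a->buf);                        /* everything reclaimed at once */
    free(a);
}

int main(void)
{
    /* emulate many evaluations, each with its own short-lived scratch space */
    for (int eval = 0; eval < 3; eval++)
    {
        Arena *scratch = arena_create(1024);
        int   *tour = arena_alloc(scratch, 8 * sizeof(int));

        for (int i = 0; i < 8; i++)
            tour[i] = i;

        printf("evaluation %d used %zu bytes of scratch\n", eval, scratch->used);
        arena_destroy(scratch);
    }
    return 0;
}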
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index 2300f8677c1..86b5b334097 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: geqo_main.c,v 1.26 2001/01/24 19:42:57 momjian Exp $
+ * $Id: geqo_main.c,v 1.27 2001/03/22 03:59:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,11 +36,11 @@
/*
* Configuration options
*/
-int Geqo_pool_size;
-int Geqo_effort;
-int Geqo_generations;
+int Geqo_pool_size;
+int Geqo_effort;
+int Geqo_generations;
double Geqo_selection_bias;
-int Geqo_random_seed;
+int Geqo_random_seed;
static int gimme_pool_size(int nr_rel);
@@ -101,10 +101,10 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
/* seed random number generator */
/* XXX why is this done every time around? */
- if (Geqo_random_seed >= 0)
- srandom((unsigned int) Geqo_random_seed);
- else
- srandom((unsigned int) time(NULL));
+ if (Geqo_random_seed >= 0)
+ srandom((unsigned int) Geqo_random_seed);
+ else
+ srandom((unsigned int) time(NULL));
/* allocate genetic pool memory */
pool = alloc_pool(pool_size, number_of_rels);
@@ -160,8 +160,8 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
{
/* SELECTION */
- geqo_selection(momma, daddy, pool, Geqo_selection_bias);/* using linear bias
- * function */
+ geqo_selection(momma, daddy, pool, Geqo_selection_bias); /* using linear bias
+ * function */
@@ -293,15 +293,15 @@ gimme_pool_size(int nr_rel)
{
double size;
- if (Geqo_pool_size != 0)
- {
- if (Geqo_pool_size < MIN_GEQO_POOL_SIZE)
- return MIN_GEQO_POOL_SIZE;
- else if (Geqo_pool_size > MAX_GEQO_POOL_SIZE)
- return MAX_GEQO_POOL_SIZE;
- else
- return Geqo_pool_size;
- }
+ if (Geqo_pool_size != 0)
+ {
+ if (Geqo_pool_size < MIN_GEQO_POOL_SIZE)
+ return MIN_GEQO_POOL_SIZE;
+ else if (Geqo_pool_size > MAX_GEQO_POOL_SIZE)
+ return MAX_GEQO_POOL_SIZE;
+ else
+ return Geqo_pool_size;
+ }
size = pow(2.0, nr_rel + 1.0);
@@ -323,8 +323,8 @@ gimme_pool_size(int nr_rel)
static int
gimme_number_generations(int pool_size, int effort)
{
- if (Geqo_generations <= 0)
- return effort * (int) ceil(log((double) pool_size) / log(2.0));
- else
- return Geqo_generations;
+ if (Geqo_generations <= 0)
+ return effort * (int) ceil(log((double) pool_size) / log(2.0));
+ else
+ return Geqo_generations;
}
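Beyond the reindentation, the geqo_main.c hunks show how the GEQO knobs are applied: an explicit Geqo_pool_size is clamped to the allowed range, the default pool size is 2^(nr_rel + 1), and the generation count defaults to effort * ceil(log2(pool_size)). The standalone sketch below reproduces that arithmetic; the MIN/MAX limits are placeholder values chosen for illustration, not the backend's actual constants.

/* Sketch of the pool-size and generation-count logic shown above.
 * Compile with -lm.  The limits are assumptions, not GEQO's real values. */
#include <math.h>
#include <stdio.h>

#define MIN_POOL_SIZE 128                /* placeholder limit (assumption) */
#define MAX_POOL_SIZE 1024               /* placeholder limit (assumption) */

static int pool_size_for(int nr_rel, int requested)
{
    if (requested != 0)
    {
        if (requested < MIN_POOL_SIZE)
            return MIN_POOL_SIZE;
        if (requested > MAX_POOL_SIZE)
            return MAX_POOL_SIZE;
        return requested;
    }
    /* default: 2^(nr_rel + 1), clamped to the same limits */
    double size = pow(2.0, nr_rel + 1.0);
    if (size < MIN_POOL_SIZE)
        return MIN_POOL_SIZE;
    if (size > MAX_POOL_SIZE)
        return MAX_POOL_SIZE;
    return (int) size;
}

static int generations_for(int pool_size, int effort, int requested)
{
    if (requested > 0)
        return requested;
    /* effort * ceil(log2(pool_size)), as in gimme_number_generations() */
    return effort * (int) ceil(log((double) pool_size) / log(2.0));
}

int main(void)
{
    int pool = pool_size_for(8, 0);
    printf("pool=%d generations=%d\n", pool, generations_for(pool, 1, 0));
    return 0;
}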
diff --git a/src/backend/optimizer/path/_deadcode/predmig.c b/src/backend/optimizer/path/_deadcode/predmig.c
index bb73132240a..1781f43db1d 100644
--- a/src/backend/optimizer/path/_deadcode/predmig.c
+++ b/src/backend/optimizer/path/_deadcode/predmig.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.8 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.9 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -485,7 +485,7 @@ xfunc_form_groups(Query *queryInfo, Stream root, Stream bottom)
}
-/* ------------------- UTILITY FUNCTIONS ------------------------- */
+/* ------------------- UTILITY FUNCTIONS ------------------------- */
/*
** xfunc_free_stream
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 7d44c4dcfa0..1cf73dffff7 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.71 2001/02/03 21:17:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.72 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,12 +33,12 @@ int geqo_rels = DEFAULT_GEQO_RELS;
static void set_base_rel_pathlists(Query *root);
static void set_plain_rel_pathlist(Query *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
- RangeTblEntry *rte,
- List *inheritlist);
+ RangeTblEntry *rte,
+ List *inheritlist);
static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
- List *initial_rels);
+ List *initial_rels);
#ifdef OPTIMIZER_DEBUG
static void debug_print_rel(Query *root, RelOptInfo *rel);
@@ -94,7 +94,7 @@ set_base_rel_pathlists(Query *root)
RangeTblEntry *rte;
List *inheritlist;
- Assert(length(rel->relids) == 1); /* better be base rel */
+ Assert(length(rel->relids) == 1); /* better be base rel */
rti = lfirsti(rel->relids);
rte = rt_fetch(rti, root->rtable);
@@ -103,24 +103,25 @@ set_base_rel_pathlists(Query *root)
/* Subquery --- generate a separate plan for it */
/*
- * If there are any restriction clauses that have been attached
- * to the subquery relation, consider pushing them down to become
- * HAVING quals of the subquery itself. (Not WHERE clauses, since
- * they may refer to subquery outputs that are aggregate results.
- * But planner.c will transfer them into the subquery's WHERE if
- * they do not.) This transformation is useful because it may
- * allow us to generate a better plan for the subquery than
- * evaluating all the subquery output rows and then filtering
- * them.
+ * If there are any restriction clauses that have been
+ * attached to the subquery relation, consider pushing them
+ * down to become HAVING quals of the subquery itself. (Not
+ * WHERE clauses, since they may refer to subquery outputs
+ * that are aggregate results. But planner.c will transfer
+ * them into the subquery's WHERE if they do not.) This
+ * transformation is useful because it may allow us to
+ * generate a better plan for the subquery than evaluating all
+ * the subquery output rows and then filtering them.
*
- * Currently, we do not push down clauses that contain subselects,
- * mainly because I'm not sure it will work correctly (the
- * subplan hasn't yet transformed sublinks to subselects).
- * Also, if the subquery contains set ops (UNION/INTERSECT/EXCEPT)
- * we do not push down any qual clauses, since the planner doesn't
- * support quals at the top level of a setop. (With suitable
- * analysis we could try to push the quals down into the component
- * queries of the setop, but getting it right is not trivial.)
+ * Currently, we do not push down clauses that contain
+ * subselects, mainly because I'm not sure it will work
+ * correctly (the subplan hasn't yet transformed sublinks to
+ * subselects). Also, if the subquery contains set ops
+ * (UNION/INTERSECT/EXCEPT) we do not push down any qual
+ * clauses, since the planner doesn't support quals at the top
+ * level of a setop. (With suitable analysis we could try to
+ * push the quals down into the component queries of the
+ * setop, but getting it right is not trivial.)
* Non-pushed-down clauses will get evaluated as qpquals of
* the SubqueryScan node.
*
@@ -136,8 +137,8 @@ set_base_rel_pathlists(Query *root)
foreach(lst, rel->baserestrictinfo)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lst);
- Node *clause = (Node *) rinfo->clause;
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lst);
+ Node *clause = (Node *) rinfo->clause;
if (contain_subplans(clause))
{
@@ -146,13 +147,14 @@ set_base_rel_pathlists(Query *root)
}
else
{
+
/*
- * We need to replace Vars in the clause (which must
- * refer to outputs of the subquery) with copies of
- * the subquery's targetlist expressions. Note that
- * at this point, any uplevel Vars in the clause
- * should have been replaced with Params, so they
- * need no work.
+ * We need to replace Vars in the clause (which
+ * must refer to outputs of the subquery) with
+ * copies of the subquery's targetlist
+ * expressions. Note that at this point, any
+ * uplevel Vars in the clause should have been
+ * replaced with Params, so they need no work.
*/
clause = ResolveNew(clause, rti, 0,
rte->subquery->targetList,
@@ -160,11 +162,12 @@ set_base_rel_pathlists(Query *root)
rte->subquery->havingQual =
make_and_qual(rte->subquery->havingQual,
clause);
+
/*
* We need not change the subquery's hasAggs or
- * hasSublinks flags, since we can't be pushing down
- * any aggregates that weren't there before, and we
- * don't push down subselects at all.
+ * hasSublinks flags, since we can't be pushing
+ * down any aggregates that weren't there before,
+ * and we don't push down subselects at all.
*/
}
}
@@ -215,9 +218,9 @@ set_plain_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte)
/*
* Generate paths and add them to the rel's pathlist.
*
- * Note: add_path() will discard any paths that are dominated by
- * another available path, keeping only those paths that are
- * superior along at least one dimension of cost or sortedness.
+ * Note: add_path() will discard any paths that are dominated by another
+ * available path, keeping only those paths that are superior along at
+ * least one dimension of cost or sortedness.
*/
/* Consider sequential scan */
@@ -230,9 +233,9 @@ set_plain_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte)
create_index_paths(root, rel, indices);
/*
- * Note: create_or_index_paths depends on create_index_paths to
- * have marked OR restriction clauses with relevant indices; this
- * is why it doesn't need to be given the list of indices.
+ * Note: create_or_index_paths depends on create_index_paths to have
+ * marked OR restriction clauses with relevant indices; this is why it
+ * doesn't need to be given the list of indices.
*/
create_or_index_paths(root, rel, rel->baserestrictinfo);
@@ -258,8 +261,8 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
List *il;
/*
- * XXX for now, can't handle inherited expansion of FOR UPDATE;
- * can we do better?
+ * XXX for now, can't handle inherited expansion of FOR UPDATE; can we
+ * do better?
*/
if (intMember(parentRTindex, root->rowMarks))
elog(ERROR, "SELECT FOR UPDATE is not supported for inherit queries");
@@ -271,14 +274,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
rel->width = 0;
/*
- * Generate access paths for each table in the tree (parent AND children),
- * and pick the cheapest path for each table.
+ * Generate access paths for each table in the tree (parent AND
+ * children), and pick the cheapest path for each table.
*/
foreach(il, inheritlist)
{
- int childRTindex = lfirsti(il);
+ int childRTindex = lfirsti(il);
RangeTblEntry *childrte;
- Oid childOID;
+ Oid childOID;
RelOptInfo *childrel;
childrte = rt_fetch(childRTindex, root->rtable);
@@ -289,16 +292,18 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
* attach the RelOptInfo to the query's base_rel_list, however.
*
* NOTE: when childRTindex == parentRTindex, we create a second
- * RelOptInfo for the same relation. This RelOptInfo will represent
- * the parent table alone, whereas the original RelOptInfo represents
- * the union of the inheritance tree members.
+ * RelOptInfo for the same relation. This RelOptInfo will
+ * represent the parent table alone, whereas the original
+ * RelOptInfo represents the union of the inheritance tree
+ * members.
*/
childrel = make_base_rel(root, childRTindex);
/*
- * Copy the parent's targetlist and restriction quals to the child,
- * with attribute-number adjustment if needed. We don't bother
- * to copy the join quals, since we can't do any joining here.
+ * Copy the parent's targetlist and restriction quals to the
+ * child, with attribute-number adjustment if needed. We don't
+ * bother to copy the join quals, since we can't do any joining
+ * here.
*/
childrel->targetlist = (List *)
adjust_inherited_attrs((Node *) rel->targetlist,
@@ -328,8 +333,8 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
}
/*
- * Finally, build Append path and install it as the only access
- * path for the parent rel.
+ * Finally, build Append path and install it as the only access path
+ * for the parent rel.
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
@@ -350,9 +355,9 @@ make_fromexpr_rel(Query *root, FromExpr *from)
List *jt;
/*
- * Count the number of child jointree nodes. This is the depth
- * of the dynamic-programming algorithm we must employ to consider
- * all ways of joining the child nodes.
+ * Count the number of child jointree nodes. This is the depth of the
+ * dynamic-programming algorithm we must employ to consider all ways
+ * of joining the child nodes.
*/
levels_needed = length(from->fromlist);
@@ -374,6 +379,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
if (levels_needed == 1)
{
+
/*
* Single jointree node, so we're done.
*/
@@ -381,6 +387,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
}
else
{
+
/*
* Consider the different orders in which we could join the rels,
* using either GEQO or regular optimizer.
@@ -401,7 +408,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
- * jointree item. These are the components to be joined together.
+ * jointree item. These are the components to be joined together.
*
* Returns the final level of join relations, i.e., the relation that is
* the result of joining all the original relations together.
@@ -423,8 +430,8 @@ make_one_rel_by_joins(Query *root, int levels_needed, List *initial_rels)
* joinitems[j] is a list of all the j-item rels. Initially we set
* joinitems[1] to represent all the single-jointree-item relations.
*/
- joinitems = (List **) palloc((levels_needed+1) * sizeof(List *));
- MemSet(joinitems, 0, (levels_needed+1) * sizeof(List *));
+ joinitems = (List **) palloc((levels_needed + 1) * sizeof(List *));
+ MemSet(joinitems, 0, (levels_needed + 1) * sizeof(List *));
joinitems[1] = initial_rels;
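The last allpaths.c hunk touches the joinitems allocation in make_one_rel_by_joins(): the array gets levels_needed + 1 slots so that levels 1..levels_needed can be indexed directly, with joinitems[1] seeded from the single-item relations. A small standalone sketch of that 1-based level array follows, with calloc standing in for palloc plus MemSet; it only fills level 1 and is not the planner's join-search code.

/* Sketch of the 1-based "joinitems" level array from the hunk above. */
#include <stdio.h>
#include <stdlib.h>

typedef struct ListCell
{
    int              relid;
    struct ListCell *next;
} ListCell;

int main(void)
{
    int levels_needed = 3;               /* number of independent join items */

    /* one list head per join level; index 0 is intentionally unused */
    ListCell **joinitems = calloc(levels_needed + 1, sizeof(ListCell *));

    /* joinitems[1]: the single-item relations */
    for (int relid = levels_needed; relid >= 1; relid--)
    {
        ListCell *cell = malloc(sizeof(ListCell));
        cell->relid = relid;
        cell->next = joinitems[1];
        joinitems[1] = cell;
    }

    for (ListCell *lc = joinitems[1]; lc; lc = lc->next)
        printf("level-1 rel %d\n", lc->relid);

    /* levels 2..levels_needed would be built by joining lower levels */
    for (ListCell *lc = joinitems[1]; lc;)
    {
        ListCell *next = lc->next;
        free(lc);
        lc = next;
    }
    free(joinitems);
    return 0;
}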
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index b404eabccdb..8493067f9a2 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.41 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.42 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,7 +128,8 @@ clauselist_selectivity(Query *root,
* behave in the simple way we are expecting.)
*
* NB: for consistency of results, this fragment of code had better
- * match what clause_selectivity() would do in the cases it handles.
+ * match what clause_selectivity() would do in the cases it
+ * handles.
*/
if (varRelid != 0 || NumRelids(clause) == 1)
{
@@ -148,7 +149,7 @@ clauselist_selectivity(Query *root,
get_leftop((Expr *) clause);
if (is_pseudo_constant_clause((Node *) other))
{
- Oid opno = ((Oper *) ((Expr *) clause)->oper)->opno;
+ Oid opno = ((Oper *) ((Expr *) clause)->oper)->opno;
RegProcedure oprrest = get_oprrest(opno);
if (!oprrest)
@@ -156,15 +157,16 @@ clauselist_selectivity(Query *root,
else
s2 = restriction_selectivity(oprrest, opno,
getrelid(relidx,
- root->rtable),
+ root->rtable),
attno,
constval, flag);
/*
- * If we reach here, we have computed the same result that
- * clause_selectivity would, so we can just use s2 if it's
- * the wrong oprrest. But if it's the right oprrest, add
- * the clause to rqlist for later processing.
+ * If we reach here, we have computed the same result
+ * that clause_selectivity would, so we can just use
+ * s2 if it's the wrong oprrest. But if it's the
+ * right oprrest, add the clause to rqlist for later
+ * processing.
*/
switch (oprrest)
{
@@ -384,18 +386,20 @@ clause_selectivity(Query *root,
if (rte->subquery)
{
+
/*
- * XXX not smart about subquery references...
- * any way to do better?
+ * XXX not smart about subquery references... any way to
+ * do better?
*/
s1 = 0.5;
}
else
{
+
/*
- * A Var at the top of a clause must be a bool Var.
- * This is equivalent to the clause reln.attribute = 't',
- * so we compute the selectivity as if that is what we have.
+ * A Var at the top of a clause must be a bool Var. This
+ * is equivalent to the clause reln.attribute = 't', so we
+ * compute the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(F_EQSEL,
BooleanEqualOperator,
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 8e88e46d571..d5b343a90cf 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -41,7 +41,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.68 2001/02/16 00:03:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.69 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,11 +67,11 @@
#define LOG6(x) (log(x) / 1.79175946922805)
-double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
-double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
-double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
-double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
-double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
+double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
+double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
+double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
+double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
+double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
Cost disable_cost = 100000000.0;
@@ -117,14 +117,14 @@ cost_seqscan(Path *path, RelOptInfo *baserel)
/*
* disk costs
*
- * The cost of reading a page sequentially is 1.0, by definition.
- * Note that the Unix kernel will typically do some amount of
- * read-ahead optimization, so that this cost is less than the
- * true cost of reading a page from disk. We ignore that issue
- * here, but must take it into account when estimating the cost of
- * non-sequential accesses!
+ * The cost of reading a page sequentially is 1.0, by definition. Note
+ * that the Unix kernel will typically do some amount of read-ahead
+ * optimization, so that this cost is less than the true cost of
+ * reading a page from disk. We ignore that issue here, but must take
+ * it into account when estimating the cost of non-sequential
+ * accesses!
*/
- run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
+ run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
/* CPU costs */
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
@@ -600,12 +600,12 @@ cost_hashjoin(Path *path,
/*
* The number of tuple comparisons needed is the number of outer
* tuples times the typical hash bucket size. nodeHash.c tries for
- * average bucket loading of NTUP_PER_BUCKET, but that goal will
- * be reached only if data values are uniformly distributed among
- * the buckets. To be conservative, we scale up the target bucket
- * size by the number of inner rows times inner dispersion, giving
- * an estimate of the typical number of duplicates of each value.
- * We then charge one cpu_operator_cost per tuple comparison.
+ * average bucket loading of NTUP_PER_BUCKET, but that goal will be
+ * reached only if data values are uniformly distributed among the
+ * buckets. To be conservative, we scale up the target bucket size by
+ * the number of inner rows times inner dispersion, giving an estimate
+ * of the typical number of duplicates of each value. We then charge
+ * one cpu_operator_cost per tuple comparison.
*/
run_cost += cpu_operator_cost * outer_path->parent->rows *
NTUP_PER_BUCKET * ceil(inner_path->parent->rows * innerdispersion);
@@ -672,7 +672,7 @@ cost_qual_eval(List *quals)
foreach(l, quals)
{
- Node *qual = (Node *) lfirst(l);
+ Node *qual = (Node *) lfirst(l);
/*
* RestrictInfo nodes contain an eval_cost field reserved for this
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index ed5a53db0b9..064a2fafa50 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.102 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.103 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -183,8 +183,8 @@ create_index_paths(Query *root,
restrictinfo_list);
/*
- * 3. Compute pathkeys describing index's ordering, if any,
- * then see how many of them are actually useful for this query.
+ * 3. Compute pathkeys describing index's ordering, if any, then
+ * see how many of them are actually useful for this query.
*/
index_pathkeys = build_index_pathkeys(root, rel, index,
ForwardScanDirection);
@@ -207,8 +207,9 @@ create_index_paths(Query *root,
NoMovementScanDirection));
/*
- * 5. If the index is ordered, a backwards scan might be interesting.
- * Currently this is only possible for a DESC query result ordering.
+ * 5. If the index is ordered, a backwards scan might be
+ * interesting. Currently this is only possible for a DESC query
+ * result ordering.
*/
if (index_is_ordered)
{
@@ -422,10 +423,11 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
if (and_clause((Node *) orsubclause))
{
+
/*
- * Extract relevant sub-subclauses in indexkey order. This is just
- * like group_clauses_by_indexkey() except that the input and output
- * are lists of bare clauses, not of RestrictInfo nodes.
+ * Extract relevant sub-subclauses in indexkey order. This is
+ * just like group_clauses_by_indexkey() except that the input and
+ * output are lists of bare clauses, not of RestrictInfo nodes.
*/
int *indexkeys = index->indexkeys;
Oid *classes = index->classlist;
@@ -446,8 +448,8 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
}
/*
- * If no clauses match this key, we're done; we don't want to look
- * at keys to its right.
+ * If no clauses match this key, we're done; we don't want to
+ * look at keys to its right.
*/
if (clausegroup == NIL)
break;
@@ -748,8 +750,8 @@ match_clause_to_indexkey(RelOptInfo *rel,
/*
* Check for an indexqual that could be handled by a nestloop
* join. We need the index key to be compared against an
- * expression that uses none of the indexed relation's vars
- * and contains no non-cachable functions.
+ * expression that uses none of the indexed relation's vars and
+ * contains no non-cachable functions.
*/
if (match_index_to_operand(indexkey, leftop, rel, index))
{
@@ -793,7 +795,7 @@ match_clause_to_indexkey(RelOptInfo *rel,
* recognizing binary-compatible datatypes. For example, if we have
* an expression like "oid = 123", the operator will be oideqint4,
* which we need to replace with oideq in order to recognize it as
- * matching an oid_ops index on the oid field. A variant case is where
+ * matching an oid_ops index on the oid field. A variant case is where
* the expression is like "oid::int4 = 123", where the given operator
* will be int4eq and again we need to intuit that we want to use oideq.
*
@@ -832,13 +834,13 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
/*
* Maybe the index uses a binary-compatible operator set.
*
- * Get the nominal input types of the given operator and the actual
- * type (before binary-compatible relabeling) of the index key.
+ * Get the nominal input types of the given operator and the actual type
+ * (before binary-compatible relabeling) of the index key.
*/
oldoptup = SearchSysCache(OPEROID,
ObjectIdGetDatum(expr_op),
0, 0, 0);
- if (! HeapTupleIsValid(oldoptup))
+ if (!HeapTupleIsValid(oldoptup))
return InvalidOid; /* probably can't happen */
oldopform = (Form_pg_operator) GETSTRUCT(oldoptup);
opname = pstrdup(NameStr(oldopform->oprname));
@@ -848,7 +850,7 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
if (indexkey_on_left)
{
- Node *leftop = (Node *) get_leftop(clause);
+ Node *leftop = (Node *) get_leftop(clause);
if (leftop && IsA(leftop, RelabelType))
leftop = ((RelabelType *) leftop)->arg;
@@ -856,7 +858,7 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
}
else
{
- Node *rightop = (Node *) get_rightop(clause);
+ Node *rightop = (Node *) get_rightop(clause);
if (rightop && IsA(rightop, RelabelType))
rightop = ((RelabelType *) rightop)->arg;
@@ -874,9 +876,10 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
return InvalidOid;
/*
- * OK, look for operator of the same name with the indexkey's data type.
- * (In theory this might find a non-semantically-comparable operator,
- * but in practice that seems pretty unlikely for binary-compatible types.)
+ * OK, look for operator of the same name with the indexkey's data
+ * type. (In theory this might find a non-semantically-comparable
+ * operator, but in practice that seems pretty unlikely for
+ * binary-compatible types.)
*/
new_op = compatible_oper_opid(opname, indexkeytype, indexkeytype, true);
@@ -886,8 +889,8 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
{
/*
- * OK, we found a binary-compatible operator of the same
- * name; now does it match the index?
+ * OK, we found a binary-compatible operator of the same name;
+ * now does it match the index?
*/
if (indexkey_on_left)
commuted_op = new_op;
@@ -1491,8 +1494,9 @@ match_index_to_operand(int indexkey,
RelOptInfo *rel,
IndexOptInfo *index)
{
+
/*
- * Ignore any RelabelType node above the indexkey. This is needed to
+ * Ignore any RelabelType node above the indexkey. This is needed to
* be able to apply indexscanning in binary-compatible-operator cases.
* Note: we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
@@ -1670,7 +1674,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1687,7 +1691,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1704,7 +1708,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1721,7 +1725,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1983,8 +1987,8 @@ prefix_quals(Var *leftop, Oid expr_op,
result = makeList1(expr);
/*
- * If we can create a string larger than the prefix, we can say
- * "x < greaterstr".
+ * If we can create a string larger than the prefix, we can say "x <
+ * greaterstr".
*/
greaterstr = make_greater_string(prefix, datatype);
if (greaterstr)
@@ -2025,6 +2029,7 @@ find_operator(const char *opname, Oid datatype)
static Datum
string_to_datum(const char *str, Oid datatype)
{
+
/*
* We cheat a little by assuming that textin() will do for bpchar and
* varchar constants too...
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index cfbfb56c902..bfd246388b4 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.61 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.62 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,32 +25,32 @@
#include "utils/lsyscache.h"
static void sort_inner_and_outer(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, List *mergeclause_list,
- JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, List *mergeclause_list,
+ JoinType jointype);
static void match_unsorted_outer(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, List *mergeclause_list,
- JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, List *mergeclause_list,
+ JoinType jointype);
#ifdef NOT_USED
static void match_unsorted_inner(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, List *mergeclause_list,
- JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, List *mergeclause_list,
+ JoinType jointype);
#endif
static void hash_inner_and_outer(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, JoinType jointype);
static Path *best_innerjoin(List *join_paths, List *outer_relid,
- JoinType jointype);
+ JoinType jointype);
static Selectivity estimate_dispersion(Query *root, Var *var);
static List *select_mergejoin_clauses(RelOptInfo *joinrel,
- RelOptInfo *outerrel,
- RelOptInfo *innerrel,
- List *restrictlist,
- JoinType jointype);
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ List *restrictlist,
+ JoinType jointype);
/*
@@ -160,26 +160,27 @@ sort_inner_and_outer(Query *root,
* generate a differently-sorted result path at essentially the same
* cost. We have no basis for choosing one over another at this level
* of joining, but some sort orders may be more useful than others for
- * higher-level mergejoins, so it's worth considering multiple orderings.
+ * higher-level mergejoins, so it's worth considering multiple
+ * orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * redundant. Therefore, what we do is convert the mergeclause list to
- * a list of canonical pathkeys, and then consider different orderings
- * of the pathkeys.
+ * redundant. Therefore, what we do is convert the mergeclause list
+ * to a list of canonical pathkeys, and then consider different
+ * orderings of the pathkeys.
*
- * Generating a path for *every* permutation of the pathkeys doesn't
- * seem like a winning strategy; the cost in planning time is too high.
- * For now, we generate one path for each pathkey, listing that pathkey
- * first and the rest in random order. This should allow at
- * least a one-clause mergejoin without re-sorting against any other
- * possible mergejoin partner path. But if we've not guessed the
- * right ordering of secondary keys, we may end up evaluating
- * clauses as qpquals when they could have been done as mergeclauses.
- * We need to figure out a better way. (Two possible approaches: look
- * at all the relevant index relations to suggest plausible sort
- * orders, or make just one output path and somehow mark it as having
- * a sort-order that can be rearranged freely.)
+ * Generating a path for *every* permutation of the pathkeys doesn't seem
+ * like a winning strategy; the cost in planning time is too high. For
+ * now, we generate one path for each pathkey, listing that pathkey
+ * first and the rest in random order. This should allow at least a
+ * one-clause mergejoin without re-sorting against any other possible
+ * mergejoin partner path. But if we've not guessed the right
+ * ordering of secondary keys, we may end up evaluating clauses as
+ * qpquals when they could have been done as mergeclauses. We need to
+ * figure out a better way. (Two possible approaches: look at all the
+ * relevant index relations to suggest plausible sort orders, or make
+ * just one output path and somehow mark it as having a sort-order
+ * that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
@@ -200,16 +201,17 @@ sort_inner_and_outer(Query *root,
lremove(front_pathkey,
listCopy(all_pathkeys)));
else
- cur_pathkeys = all_pathkeys; /* no work at first one... */
+ cur_pathkeys = all_pathkeys; /* no work at first one... */
/*
* Select mergeclause(s) that match this sort ordering. If we had
- * redundant merge clauses then we will get a subset of the original
- * clause list. There had better be some match, however...
+ * redundant merge clauses then we will get a subset of the
+ * original clause list. There had better be some match,
+ * however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
- mergeclause_list);
+ mergeclause_list);
Assert(cur_mergeclauses != NIL);
/*
@@ -334,10 +336,12 @@ match_unsorted_outer(Query *root,
if (nestjoinOK)
{
+
/*
- * Always consider a nestloop join with this outer and cheapest-
- * total-cost inner. Consider nestloops using the cheapest-
- * startup-cost inner as well, and the best innerjoin indexpath.
+ * Always consider a nestloop join with this outer and
+ * cheapest- total-cost inner. Consider nestloops using the
+ * cheapest- startup-cost inner as well, and the best
+ * innerjoin indexpath.
*/
add_path(joinrel, (Path *)
create_nestloop_path(joinrel,
@@ -352,7 +356,7 @@ match_unsorted_outer(Query *root,
create_nestloop_path(joinrel,
jointype,
outerpath,
- innerrel->cheapest_startup_path,
+ innerrel->cheapest_startup_path,
restrictlist,
merge_pathkeys));
if (bestinnerjoin != NULL)
@@ -382,8 +386,8 @@ match_unsorted_outer(Query *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest
* inner. Since a sort will be needed, only cheapest total cost
- * matters. (But create_mergejoin_path will do the right thing
- * if innerrel->cheapest_total_path is already correctly sorted.)
+ * matters. (But create_mergejoin_path will do the right thing if
+ * innerrel->cheapest_total_path is already correctly sorted.)
*/
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
@@ -400,13 +404,14 @@ match_unsorted_outer(Query *root,
* Look for presorted inner paths that satisfy the innersortkey
* list or any truncation thereof. Here, we consider both cheap
* startup cost and cheap total cost. Ignore
- * innerrel->cheapest_total_path, since we already made a path with it.
+ * innerrel->cheapest_total_path, since we already made a path
+ * with it.
*/
num_sortkeys = length(innersortkeys);
if (num_sortkeys > 1)
- trialsortkeys = listCopy(innersortkeys); /* need modifiable copy */
+ trialsortkeys = listCopy(innersortkeys); /* need modifiable copy */
else
- trialsortkeys = innersortkeys; /* won't really truncate */
+ trialsortkeys = innersortkeys; /* won't really truncate */
cheapest_startup_inner = NULL;
cheapest_total_inner = NULL;
@@ -417,8 +422,8 @@ match_unsorted_outer(Query *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list
- * is modified destructively, which is why we made a copy...
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
+ * modified destructively, which is why we made a copy...
*/
trialsortkeys = ltruncate(sortkeycnt, trialsortkeys);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
@@ -478,8 +483,8 @@ match_unsorted_outer(Query *root,
{
newclauses =
find_mergeclauses_for_pathkeys(root,
- trialsortkeys,
- mergeclauses);
+ trialsortkeys,
+ mergeclauses);
Assert(newclauses != NIL);
}
else
@@ -601,7 +606,7 @@ match_unsorted_inner(Query *root,
if (startupouterpath != NULL && startupouterpath != totalouterpath)
{
merge_pathkeys = build_join_pathkeys(root, joinrel,
- startupouterpath->pathkeys);
+ startupouterpath->pathkeys);
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
jointype,
@@ -696,8 +701,8 @@ hash_inner_and_outer(Query *root,
* estimate dispersion of inner var for costing purposes.
*
* Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the dispersion estimates in the
- * RestrictInfo node to avoid repeated lookups of statistics.
+ * planning a large query, we cache the dispersion estimates in
+ * the RestrictInfo node to avoid repeated lookups of statistics.
*/
if (intMember(left->varno, outerrelids) &&
intMember(right->varno, innerrelids))
@@ -793,13 +798,13 @@ best_innerjoin(List *join_paths, Relids outer_relids, JoinType jointype)
foreach(join_path, join_paths)
{
- IndexPath *path = (IndexPath *) lfirst(join_path);
+ IndexPath *path = (IndexPath *) lfirst(join_path);
Assert(IsA(path, IndexPath));
/*
- * If processing an outer join, only use explicit join clauses in the
- * inner indexscan. For inner joins we need not be so picky.
+ * If processing an outer join, only use explicit join clauses in
+ * the inner indexscan. For inner joins we need not be so picky.
*/
if (isouterjoin && !path->alljoinquals)
continue;
@@ -879,15 +884,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
*right;
/*
- * If processing an outer join, only use its own join clauses in the
- * merge. For inner joins we need not be so picky.
+ * If processing an outer join, only use its own join clauses in
+ * the merge. For inner joins we need not be so picky.
*
* Furthermore, if it is a right/full join then *all* the explicit
- * join clauses must be mergejoinable, else the executor will fail.
- * If we are asked for a right join then just return NIL to indicate
- * no mergejoin is possible (we can handle it as a left join instead).
- * If we are asked for a full join then emit an error, because there
- * is no fallback.
+ * join clauses must be mergejoinable, else the executor will
+ * fail. If we are asked for a right join then just return NIL to
+ * indicate no mergejoin is possible (we can handle it as a left
+ * join instead). If we are asked for a full join then emit an
+ * error, because there is no fallback.
*/
if (isouterjoin)
{
@@ -897,7 +902,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
{
case JOIN_RIGHT:
if (restrictinfo->mergejoinoperator == InvalidOid)
- return NIL; /* not mergejoinable */
+ return NIL; /* not mergejoinable */
break;
case JOIN_FULL:
if (restrictinfo->mergejoinoperator == InvalidOid)
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 2492f17ea9b..929a977112d 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.51 2001/02/16 00:03:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.52 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
static RelOptInfo *make_join_rel(Query *root, RelOptInfo *rel1,
- RelOptInfo *rel2, JoinType jointype);
+ RelOptInfo *rel2, JoinType jointype);
/*
@@ -44,18 +44,19 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* First, consider left-sided and right-sided plans, in which rels of
- * exactly level-1 member relations are joined against initial relations.
- * We prefer to join using join clauses, but if we find a rel of level-1
- * members that has no join clauses, we will generate Cartesian-product
- * joins against all initial rels not already contained in it.
+ * exactly level-1 member relations are joined against initial
+ * relations. We prefer to join using join clauses, but if we find a
+ * rel of level-1 members that has no join clauses, we will generate
+ * Cartesian-product joins against all initial rels not already
+ * contained in it.
*
- * In the first pass (level == 2), we try to join each initial rel to each
- * initial rel that appears later in joinrels[1]. (The mirror-image
- * joins are handled automatically by make_join_rel.) In later
- * passes, we try to join rels of size level-1 from joinrels[level-1]
- * to each initial rel in joinrels[1].
+ * In the first pass (level == 2), we try to join each initial rel to
+ * each initial rel that appears later in joinrels[1]. (The
+ * mirror-image joins are handled automatically by make_join_rel.) In
+ * later passes, we try to join rels of size level-1 from
+ * joinrels[level-1] to each initial rel in joinrels[1].
*/
- foreach(r, joinrels[level-1])
+ foreach(r, joinrels[level - 1])
{
RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
List *other_rels;
@@ -73,9 +74,9 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
* Note that if all available join clauses for this rel
* require more than one other rel, we will fail to make any
* joins against it here. That's OK; it'll be considered by
- * "bushy plan" join code in a higher-level pass where we
- * have those other rels collected into a join rel. See also
- * the last-ditch case below.
+ * "bushy plan" join code in a higher-level pass where we have
+ * those other rels collected into a join rel. See also the
+ * last-ditch case below.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
@@ -94,16 +95,16 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
}
/*
- * At levels above 2 we will generate the same joined relation
- * in multiple ways --- for example (a join b) join c is the same
+ * At levels above 2 we will generate the same joined relation in
+ * multiple ways --- for example (a join b) join c is the same
* RelOptInfo as (b join c) join a, though the second case will
- * add a different set of Paths to it. To avoid making extra work
- * for subsequent passes, do not enter the same RelOptInfo into our
- * output list multiple times.
+ * add a different set of Paths to it. To avoid making extra work
+ * for subsequent passes, do not enter the same RelOptInfo into
+ * our output list multiple times.
*/
foreach(nr, new_rels)
{
- RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
+ RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
@@ -111,20 +112,21 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
}
/*
- * Now, consider "bushy plans" in which relations of k initial rels are
- * joined to relations of level-k initial rels, for 2 <= k <= level-2.
+ * Now, consider "bushy plans" in which relations of k initial rels
+ * are joined to relations of level-k initial rels, for 2 <= k <=
+ * level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
* suitable join clause, in order to avoid unreasonable growth of
* planning time.
*/
- for (k = 2; ; k++)
+ for (k = 2;; k++)
{
int other_level = level - k;
/*
- * Since make_join_rel(x, y) handles both x,y and y,x cases,
- * we only need to go as far as the halfway point.
+ * Since make_join_rel(x, y) handles both x,y and y,x cases, we
+ * only need to go as far as the halfway point.
*/
if (k > other_level)
break;
@@ -139,7 +141,7 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
continue; /* we ignore clauseless joins here */
if (k == other_level)
- other_rels = lnext(r); /* only consider remaining rels */
+ other_rels = lnext(r); /* only consider remaining rels */
else
other_rels = joinrels[other_level];
@@ -153,8 +155,8 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* OK, we can build a rel of the right level from this
- * pair of rels. Do so if there is at least one usable
- * join clause.
+ * pair of rels. Do so if there is at least one
+ * usable join clause.
*/
foreach(i, old_rel->joininfo)
{
@@ -170,7 +172,8 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/* Avoid making duplicate entries ... */
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
- break; /* need not consider more joininfos */
+ break; /* need not consider more
+ * joininfos */
}
}
}
@@ -180,31 +183,34 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* Last-ditch effort: if we failed to find any usable joins so far,
- * force a set of cartesian-product joins to be generated. This
+ * force a set of cartesian-product joins to be generated. This
* handles the special case where all the available rels have join
- * clauses but we cannot use any of the joins yet. An example is
+ * clauses but we cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
- * The join clause will be usable at level 3, but at level 2 we have
- * no choice but to make cartesian joins. We consider only left-sided
+ * The join clause will be usable at level 3, but at level 2 we have no
+ * choice but to make cartesian joins. We consider only left-sided
* and right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
- /* This loop is just like the first one, except we always call
+
+ /*
+ * This loop is just like the first one, except we always call
* make_rels_by_clauseless_joins().
*/
- foreach(r, joinrels[level-1])
+ foreach(r, joinrels[level - 1])
{
RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
List *other_rels;
if (level == 2)
- other_rels = lnext(r); /* only consider remaining initial
- * rels */
+ other_rels = lnext(r); /* only consider remaining initial
+ * rels */
else
- other_rels = joinrels[1]; /* consider all initial rels */
+ other_rels = joinrels[1]; /* consider all initial
+ * rels */
new_rels = make_rels_by_clauseless_joins(root,
old_rel,
@@ -212,7 +218,7 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
foreach(nr, new_rels)
{
- RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
+ RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
@@ -266,6 +272,7 @@ make_rels_by_clause_joins(Query *root,
RelOptInfo *jrel;
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
+
/*
* Avoid entering same joinrel into our output list more
* than once. (make_rels_by_joins doesn't really care,
@@ -310,9 +317,10 @@ make_rels_by_clauseless_joins(Query *root,
RelOptInfo *jrel;
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
+
/*
- * As long as given other_rels are distinct, don't need
- * to test to see if jrel is already part of output list.
+ * As long as given other_rels are distinct, don't need to
+ * test to see if jrel is already part of output list.
*/
result = lcons(jrel, result);
}
@@ -325,7 +333,7 @@ make_rels_by_clauseless_joins(Query *root,
/*
* make_jointree_rel
 * Find or build a RelOptInfo join rel representing a specific
- * jointree item. For JoinExprs, we only consider the construction
+ * jointree item. For JoinExprs, we only consider the construction
* path that corresponds exactly to what the user wrote.
*/
RelOptInfo *
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index e96a96f6deb..f93a027cd53 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.30 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.31 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop);
static List *make_canonical_pathkey(Query *root, PathKeyItem *item);
static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
- AttrNumber varattno);
+ AttrNumber varattno);
/*
@@ -89,10 +89,10 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
* into our new set. When done, we add the new set to the front of
* equi_key_list.
*
- * It may well be that the two items we're given are already known to
- * be equijoin-equivalent, in which case we don't need to change our
- * data structure. If we find both of them in the same equivalence
- * set to start with, we can quit immediately.
+ * It may well be that the two items we're given are already known to be
+ * equijoin-equivalent, in which case we don't need to change our data
+ * structure. If we find both of them in the same equivalence set to
+ * start with, we can quit immediately.
*
* This is a standard UNION-FIND problem, for which there exist better
* data structures than simple lists. If this code ever proves to be
@@ -109,7 +109,11 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
if (item1here || item2here)
{
- /* If find both in same equivalence set, no need to do any more */
+
+ /*
+ * If find both in same equivalence set, no need to do any
+ * more
+ */
if (item1here && item2here)
{
/* Better not have seen only one in an earlier set... */
@@ -126,7 +130,8 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
/*
* Remove old set from equi_key_list. NOTE this does not
- * change lnext(cursetlink), so the foreach loop doesn't break.
+ * change lnext(cursetlink), so the foreach loop doesn't
+ * break.
*/
root->equi_key_list = lremove(curset, root->equi_key_list);
freeList(curset); /* might as well recycle old cons cells */
@@ -171,8 +176,8 @@ generate_implied_equalities(Query *root)
continue;
/*
- * Match each item in the set with all that appear after it
- * (it's sufficient to generate A=B, need not process B=A too).
+ * Match each item in the set with all that appear after it (it's
+ * sufficient to generate A=B, need not process B=A too).
*/
foreach(ptr1, curset)
{
@@ -246,11 +251,12 @@ canonicalize_pathkeys(Query *root, List *pathkeys)
Assert(pathkey != NIL);
item = (PathKeyItem *) lfirst(pathkey);
cpathkey = make_canonical_pathkey(root, item);
+
/*
- * Eliminate redundant ordering requests --- ORDER BY A,A
- * is the same as ORDER BY A. We want to check this only
- * after we have canonicalized the keys, so that equivalent-key
- * knowledge is used when deciding if an item is redundant.
+ * Eliminate redundant ordering requests --- ORDER BY A,A is the
+ * same as ORDER BY A. We want to check this only after we have
+ * canonicalized the keys, so that equivalent-key knowledge is
+ * used when deciding if an item is redundant.
*/
if (!ptrMember(cpathkey, new_pathkeys))
new_pathkeys = lappend(new_pathkeys, cpathkey);
@@ -285,8 +291,8 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey2 = lfirst(key2);
/*
- * XXX would like to check that we've been given canonicalized input,
- * but query root not accessible here...
+ * XXX would like to check that we've been given canonicalized
+ * input, but query root not accessible here...
*/
#ifdef NOT_USED
Assert(ptrMember(subkey1, root->equi_key_list));
@@ -295,7 +301,7 @@ compare_pathkeys(List *keys1, List *keys2)
/*
* We will never have two subkeys where one is a subset of the
- * other, because of the canonicalization process. Either they
+ * other, because of the canonicalization process. Either they
* are equal or they ain't. Furthermore, we only need pointer
* comparison to detect equality.
*/
@@ -555,9 +561,10 @@ build_index_pathkeys(Query *root,
/* OK, make a sublist for this sort key */
item = makePathKeyItem((Node *) relvar, sortop);
cpathkey = make_canonical_pathkey(root, item);
+
/*
- * Eliminate redundant ordering info; could happen if query
- * is such that index keys are equijoined...
+ * Eliminate redundant ordering info; could happen if query is
+ * such that index keys are equijoined...
*/
if (!ptrMember(cpathkey, retval))
retval = lappend(retval, cpathkey);
@@ -693,7 +700,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
*
* RestrictInfo contains fields in which we may cache the result
* of looking up the canonical pathkeys for the left and right sides
- * of the mergeclause. (Note that in normal cases they will be the
+ * of the mergeclause. (Note that in normal cases they will be the
* same, but not if the mergeclause appears above an OUTER JOIN.)
* This is a worthwhile savings because these routines will be invoked
* many times when dealing with a many-relation query.
@@ -756,8 +763,8 @@ find_mergeclauses_for_pathkeys(Query *root,
/*
* We can match a pathkey against either left or right side of any
* mergejoin clause we haven't used yet. For the moment we use a
- * dumb "greedy" algorithm with no backtracking. Is it worth being
- * any smarter to make a longer list of usable mergeclauses?
+ * dumb "greedy" algorithm with no backtracking. Is it worth
+ * being any smarter to make a longer list of usable mergeclauses?
* Probably not.
*/
foreach(j, restrictinfos)
@@ -765,9 +772,10 @@ find_mergeclauses_for_pathkeys(Query *root,
RestrictInfo *restrictinfo = lfirst(j);
cache_mergeclause_pathkeys(root, restrictinfo);
+
/*
- * We can compare canonical pathkey sublists by simple
- * pointer equality; see compare_pathkeys.
+ * We can compare canonical pathkey sublists by simple pointer
+ * equality; see compare_pathkeys.
*/
if ((pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey) &&
@@ -830,7 +838,7 @@ make_pathkeys_for_mergeclauses(Query *root,
cache_mergeclause_pathkeys(root, restrictinfo);
key = (Node *) get_leftop(restrictinfo->clause);
- if (IsA(key, Var) && intMember(((Var *) key)->varno, rel->relids))
+ if (IsA(key, Var) &&intMember(((Var *) key)->varno, rel->relids))
{
/* Rel is left side of mergeclause */
pathkey = restrictinfo->left_pathkey;
@@ -838,7 +846,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
key = (Node *) get_rightop(restrictinfo->clause);
- if (IsA(key, Var) && intMember(((Var *) key)->varno, rel->relids))
+ if (IsA(key, Var) &&intMember(((Var *) key)->varno, rel->relids))
{
/* Rel is right side of mergeclause */
pathkey = restrictinfo->right_pathkey;
@@ -851,13 +859,14 @@ make_pathkeys_for_mergeclauses(Query *root,
}
/*
- * When we are given multiple merge clauses, it's possible that some
- * clauses refer to the same vars as earlier clauses. There's no
- * reason for us to specify sort keys like (A,B,A) when (A,B) will
- * do --- and adding redundant sort keys makes add_path think that
- * this sort order is different from ones that are really the same,
- * so don't do it. Since we now have a canonicalized pathkey,
- * a simple ptrMember test is sufficient to detect redundant keys.
+ * When we are given multiple merge clauses, it's possible that
+ * some clauses refer to the same vars as earlier clauses.
+ * There's no reason for us to specify sort keys like (A,B,A) when
+ * (A,B) will do --- and adding redundant sort keys makes add_path
+ * think that this sort order is different from ones that are
+ * really the same, so don't do it. Since we now have a
+ * canonicalized pathkey, a simple ptrMember test is sufficient to
+ * detect redundant keys.
*/
if (!ptrMember(pathkey, pathkeys))
pathkeys = lappend(pathkeys, pathkey);
@@ -911,6 +920,7 @@ pathkeys_useful_for_merging(Query *root, RelOptInfo *rel, List *pathkeys)
if (restrictinfo->mergejoinoperator == InvalidOid)
continue;
cache_mergeclause_pathkeys(root, restrictinfo);
+
/*
* We can compare canonical pathkey sublists by simple
* pointer equality; see compare_pathkeys.
@@ -984,7 +994,9 @@ truncate_useless_pathkeys(Query *root,
nuseful2 = pathkeys_useful_for_ordering(root, pathkeys);
if (nuseful2 > nuseful)
nuseful = nuseful2;
- /* Note: not safe to modify input list destructively, but we can avoid
+
+ /*
+ * Note: not safe to modify input list destructively, but we can avoid
* copying the list if we're not actually going to change it
*/
if (nuseful == length(pathkeys))
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 738b696306a..8c3b00289d3 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.103 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.104 2001/03/22 03:59:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,19 +42,19 @@ static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
static TidScan *create_tidscan_plan(TidPath *best_path, List *tlist,
List *scan_clauses);
static SubqueryScan *create_subqueryscan_plan(Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static NestLoop *create_nestloop_plan(NestPath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
+ List *joinclauses, List *otherclauses,
+ Plan *outer_plan, List *outer_tlist,
+ Plan *inner_plan, List *inner_tlist);
static MergeJoin *create_mergejoin_plan(MergePath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
+ List *joinclauses, List *otherclauses,
+ Plan *outer_plan, List *outer_tlist,
+ Plan *inner_plan, List *inner_tlist);
static HashJoin *create_hashjoin_plan(HashPath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
+ List *joinclauses, List *otherclauses,
+ Plan *outer_plan, List *outer_tlist,
+ Plan *inner_plan, List *inner_tlist);
static List *fix_indxqual_references(List *indexquals, IndexPath *index_path);
static List *fix_indxqual_sublist(List *indexqual, int baserelid, Oid relam,
Form_pg_index index);
@@ -72,20 +72,20 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tideval);
static NestLoop *make_nestloop(List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *lefttree, Plan *righttree,
- JoinType jointype);
+ List *joinclauses, List *otherclauses,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype);
static HashJoin *make_hashjoin(List *tlist,
- List *joinclauses, List *otherclauses,
- List *hashclauses,
- Plan *lefttree, Plan *righttree,
- JoinType jointype);
+ List *joinclauses, List *otherclauses,
+ List *hashclauses,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype);
static Hash *make_hash(List *tlist, Node *hashkey, Plan *lefttree);
static MergeJoin *make_mergejoin(List *tlist,
- List *joinclauses, List *otherclauses,
- List *mergeclauses,
- Plan *lefttree, Plan *righttree,
- JoinType jointype);
+ List *joinclauses, List *otherclauses,
+ List *mergeclauses,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype);
/*
* create_plan
@@ -313,8 +313,8 @@ create_append_plan(Query *root, AppendPath *best_path)
foreach(subpaths, best_path->subpaths)
{
- Path *subpath = (Path *) lfirst(subpaths);
-
+ Path *subpath = (Path *) lfirst(subpaths);
+
subplans = lappend(subplans, create_plan(root, subpath));
}
@@ -344,7 +344,7 @@ create_seqscan_plan(Path *best_path, List *tlist, List *scan_clauses)
/* there should be exactly one base rel involved... */
Assert(length(best_path->parent->relids) == 1);
- Assert(! best_path->parent->issubquery);
+ Assert(!best_path->parent->issubquery);
scan_relid = (Index) lfirsti(best_path->parent->relids);
@@ -386,7 +386,7 @@ create_indexscan_plan(Query *root,
/* there should be exactly one base rel involved... */
Assert(length(best_path->path.parent->relids) == 1);
- Assert(! best_path->path.parent->issubquery);
+ Assert(!best_path->path.parent->issubquery);
baserelid = lfirsti(best_path->path.parent->relids);
@@ -496,7 +496,7 @@ create_tidscan_plan(TidPath *best_path, List *tlist, List *scan_clauses)
/* there should be exactly one base rel involved... */
Assert(length(best_path->path.parent->relids) == 1);
- Assert(! best_path->path.parent->issubquery);
+ Assert(!best_path->path.parent->issubquery);
scan_relid = (Index) lfirsti(best_path->path.parent->relids);
@@ -737,21 +737,22 @@ create_mergejoin_plan(MergePath *best_path,
best_path->innersortkeys);
/*
- * The executor requires the inner side of a mergejoin to support "mark"
- * and "restore" operations. Not all plan types do, so we must be careful
- * not to generate an invalid plan. If necessary, an invalid inner plan
- * can be handled by inserting a Materialize node.
+ * The executor requires the inner side of a mergejoin to support
+ * "mark" and "restore" operations. Not all plan types do, so we must
+ * be careful not to generate an invalid plan. If necessary, an
+ * invalid inner plan can be handled by inserting a Materialize node.
*
- * Since the inner side must be ordered, and only Sorts and IndexScans can
- * create order to begin with, you might think there's no problem --- but
- * you'd be wrong. Nestloop and merge joins can *preserve* the order of
- * their inputs, so they can be selected as the input of a mergejoin,
- * and that won't work in the present executor.
+ * Since the inner side must be ordered, and only Sorts and IndexScans
+ * can create order to begin with, you might think there's no problem
+ * --- but you'd be wrong. Nestloop and merge joins can *preserve*
+ * the order of their inputs, so they can be selected as the input of
+ * a mergejoin, and that won't work in the present executor.
*
* Doing this here is a bit of a kluge since the cost of the Materialize
- * wasn't taken into account in our earlier decisions. But Materialize
- * is hard to estimate a cost for, and the above consideration shows that
- * this is a rare case anyway, so this seems an acceptable way to proceed.
+ * wasn't taken into account in our earlier decisions. But
+ * Materialize is hard to estimate a cost for, and the above
+ * consideration shows that this is a rare case anyway, so this seems
+ * an acceptable way to proceed.
*
* This check must agree with ExecMarkPos/ExecRestrPos in
* executor/execAmi.c!
@@ -1015,6 +1016,7 @@ static Node *
fix_indxqual_operand(Node *node, int baserelid, Form_pg_index index,
Oid *opclass)
{
+
/*
* Remove any binary-compatible relabeling of the indexkey
*/
@@ -1025,8 +1027,8 @@ fix_indxqual_operand(Node *node, int baserelid, Form_pg_index index,
* We represent index keys by Var nodes having the varno of the base
* table but varattno equal to the index's attribute number (index
* column position). This is a bit hokey ... would be cleaner to use
- * a special-purpose node type that could not be mistaken for a regular
- * Var. But it will do for now.
+ * a special-purpose node type that could not be mistaken for a
+ * regular Var. But it will do for now.
*/
if (IsA(node, Var))
{
@@ -1062,7 +1064,7 @@ fix_indxqual_operand(Node *node, int baserelid, Form_pg_index index,
* the returned varattno must be 1.
*/
- Assert(is_funcclause(node)); /* not a very thorough check, but easy */
+ Assert(is_funcclause(node));/* not a very thorough check, but easy */
/* indclass[0] is the only class of a functional index */
*opclass = index->indclass[0];
@@ -1493,7 +1495,7 @@ make_sort_from_pathkeys(List *tlist, Plan *lefttree, List *pathkeys)
return make_sort(sort_tlist, lefttree, numsortkeys);
}
-Material *
+Material *
make_material(List *tlist, Plan *lefttree)
{
Material *node = makeNode(Material);
@@ -1734,10 +1736,10 @@ make_limit(List *tlist, Plan *lefttree,
copy_plan_costsize(plan, lefttree);
/*
- * If offset/count are constants, adjust the output rows count and costs
- * accordingly. This is only a cosmetic issue if we are at top level,
- * but if we are building a subquery then it's important to report
- * correct info to the outer planner.
+ * If offset/count are constants, adjust the output rows count and
+ * costs accordingly. This is only a cosmetic issue if we are at top
+ * level, but if we are building a subquery then it's important to
+ * report correct info to the outer planner.
*/
if (limitOffset && IsA(limitOffset, Const))
{
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 1025a9b7f6b..2b2ebf238fe 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.57 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.58 2001/03/22 03:59:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,11 +35,11 @@
static void mark_baserels_for_outer_join(Query *root, Relids rels,
- Relids outerrels);
+ Relids outerrels);
static void distribute_qual_to_rels(Query *root, Node *clause,
- bool ispusheddown,
- bool isouterjoin,
- Relids qualscope);
+ bool ispusheddown,
+ bool isouterjoin,
+ Relids qualscope);
static void add_join_info_to_rels(Query *root, RestrictInfo *restrictinfo,
Relids join_relids);
static void add_vars_to_targetlist(Query *root, List *vars);
@@ -57,7 +57,7 @@ static void check_hashjoinable(RestrictInfo *restrictinfo);
* build_base_rel_tlists
* Creates rel nodes for every relation mentioned in the target list
* 'tlist' (if a node hasn't already been created) and adds them to
- * root->base_rel_list. Creates targetlist entries for each var seen
+ * root->base_rel_list. Creates targetlist entries for each var seen
* in 'tlist' and adds them to the tlist of the appropriate rel node.
*/
void
@@ -118,6 +118,7 @@ add_missing_rels_to_query(Query *root, Node *jtnode)
if (IsA(jtnode, RangeTblRef))
{
int varno = ((RangeTblRef *) jtnode)->rtindex;
+
/* This call to get_base_rel does the primary work... */
RelOptInfo *rel = get_base_rel(root, varno);
@@ -160,7 +161,7 @@ add_missing_rels_to_query(Query *root, Node *jtnode)
* distribute_quals_to_rels
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate RestrictInfo and JoinInfo
- * lists belonging to base RelOptInfos. New base rel entries are created
+ * lists belonging to base RelOptInfos. New base rel entries are created
* as needed. Also, base RelOptInfos are marked with outerjoinset
* information, to aid in proper positioning of qual clauses that appear
* above outer joins.
@@ -169,7 +170,7 @@ add_missing_rels_to_query(Query *root, Node *jtnode)
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, rels appearing
+ * NULL row to be incorrectly emitted by the join. Therefore, rels appearing
* within the nullable side(s) of an outer join are marked with
* outerjoinset = list of Relids used at the outer join node.
* This list will be added to the list of rels referenced by quals using such
@@ -228,14 +229,14 @@ distribute_quals_to_rels(Query *root, Node *jtnode)
List *qual;
/*
- * Order of operations here is subtle and critical. First we recurse
- * to handle sub-JOINs. Their join quals will be placed without
- * regard for whether this level is an outer join, which is correct.
- * Then, if we are an outer join, we mark baserels contained within
- * the nullable side(s) with our own rel list; this will restrict
- * placement of subsequent quals using those rels, including our own
- * quals and quals above us in the join tree.
- * Finally we place our own join quals.
+ * Order of operations here is subtle and critical. First we
+ * recurse to handle sub-JOINs. Their join quals will be placed
+ * without regard for whether this level is an outer join, which
+ * is correct. Then, if we are an outer join, we mark baserels
+ * contained within the nullable side(s) with our own rel list;
+ * this will restrict placement of subsequent quals using those
+ * rels, including our own quals and quals above us in the join
+ * tree. Finally we place our own join quals.
*/
leftids = distribute_quals_to_rels(root, j->larg);
rightids = distribute_quals_to_rels(root, j->rarg);
@@ -261,9 +262,10 @@ distribute_quals_to_rels(Query *root, Node *jtnode)
isouterjoin = true;
break;
case JOIN_UNION:
+
/*
- * This is where we fail if upper levels of planner haven't
- * rewritten UNION JOIN as an Append ...
+ * This is where we fail if upper levels of planner
+ * haven't rewritten UNION JOIN as an Append ...
*/
elog(ERROR, "UNION JOIN is not implemented yet");
break;
@@ -338,12 +340,12 @@ distribute_qual_to_rels(Query *root, Node *clause,
bool can_be_equijoin;
restrictinfo->clause = (Expr *) clause;
- restrictinfo->eval_cost = -1; /* not computed until needed */
+ restrictinfo->eval_cost = -1; /* not computed until needed */
restrictinfo->subclauseindices = NIL;
restrictinfo->mergejoinoperator = InvalidOid;
restrictinfo->left_sortop = InvalidOid;
restrictinfo->right_sortop = InvalidOid;
- restrictinfo->left_pathkey = NIL; /* not computable yet */
+ restrictinfo->left_pathkey = NIL; /* not computable yet */
restrictinfo->right_pathkey = NIL;
restrictinfo->hashjoinoperator = InvalidOid;
restrictinfo->left_dispersion = -1; /* not computed until needed */
@@ -358,7 +360,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
* Cross-check: clause should contain no relids not within its scope.
* Otherwise the parser messed up.
*/
- if (! is_subseti(relids, qualscope))
+ if (!is_subseti(relids, qualscope))
elog(ERROR, "JOIN qualification may not refer to other relations");
/*
@@ -377,14 +379,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
* This ensures that the clause will be evaluated exactly at the level
* of joining corresponding to the outer join.
*
- * For a non-outer-join qual, we can evaluate the qual as soon as
- * (1) we have all the rels it mentions, and (2) we are at or above any
- * outer joins that can null any of these rels and are below the syntactic
- * location of the given qual. To enforce the latter, scan the base rels
- * listed in relids, and merge their outer-join lists into the clause's
- * own reference list. At the time we are called, the outerjoinset list
- * of each baserel will show exactly those outer joins that are below the
- * qual in the join tree.
+ * For a non-outer-join qual, we can evaluate the qual as soon as (1) we
+ * have all the rels it mentions, and (2) we are at or above any outer
+ * joins that can null any of these rels and are below the syntactic
+ * location of the given qual. To enforce the latter, scan the base
+ * rels listed in relids, and merge their outer-join lists into the
+ * clause's own reference list. At the time we are called, the
+ * outerjoinset list of each baserel will show exactly those outer
+ * joins that are below the qual in the join tree.
*/
if (isouterjoin)
{
@@ -396,19 +398,24 @@ distribute_qual_to_rels(Query *root, Node *clause,
Relids newrelids = relids;
List *relid;
- /* We rely on set_unioni to be nondestructive of its input lists... */
+ /*
+ * We rely on set_unioni to be nondestructive of its input
+ * lists...
+ */
can_be_equijoin = true;
foreach(relid, relids)
{
RelOptInfo *rel = get_base_rel(root, lfirsti(relid));
if (rel->outerjoinset &&
- ! is_subseti(rel->outerjoinset, relids))
+ !is_subseti(rel->outerjoinset, relids))
{
newrelids = set_unioni(newrelids, rel->outerjoinset);
+
/*
- * Because application of the qual will be delayed by outer
- * join, we mustn't assume its vars are equal everywhere.
+ * Because application of the qual will be delayed by
+ * outer join, we mustn't assume its vars are equal
+ * everywhere.
*/
can_be_equijoin = false;
}
@@ -419,10 +426,11 @@ distribute_qual_to_rels(Query *root, Node *clause,
}
/*
- * Mark the qual as "pushed down" if it can be applied at a level below
- * its original syntactic level. This allows us to distinguish original
- * JOIN/ON quals from higher-level quals pushed down to the same joinrel.
- * A qual originating from WHERE is always considered "pushed down".
+ * Mark the qual as "pushed down" if it can be applied at a level
+ * below its original syntactic level. This allows us to distinguish
+ * original JOIN/ON quals from higher-level quals pushed down to the
+ * same joinrel. A qual originating from WHERE is always considered
+ * "pushed down".
*/
restrictinfo->ispusheddown = ispusheddown || !sameseti(relids,
qualscope);
@@ -458,10 +466,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
* the relid list. Set additional RestrictInfo fields for
* joining.
*
- * We don't bother setting the merge/hashjoin info if we're not
- * going to need it. We do want to know about mergejoinable ops
- * in any potential equijoin clause (see later in this routine),
- * and we ignore enable_mergejoin if isouterjoin is true, because
+ * We don't bother setting the merge/hashjoin info if we're not going
+ * to need it. We do want to know about mergejoinable ops in any
+ * potential equijoin clause (see later in this routine), and we
+ * ignore enable_mergejoin if isouterjoin is true, because
* mergejoin is the only implementation we have for full and right
* outer joins.
*/
@@ -485,6 +493,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
}
else
{
+
/*
* 'clause' references no rels, and therefore we have no place to
* attach it. Shouldn't get here if callers are working properly.
@@ -493,12 +502,12 @@ distribute_qual_to_rels(Query *root, Node *clause,
}
/*
- * If the clause has a mergejoinable operator, and is not an outer-join
- * qualification nor bubbled up due to an outer join, then the two sides
- * represent equivalent PathKeyItems for path keys: any path that is
- * sorted by one side will also be sorted by the other (as soon as the
- * two rels are joined, that is). Record the key equivalence for future
- * use.
+ * If the clause has a mergejoinable operator, and is not an
+ * outer-join qualification nor bubbled up due to an outer join, then
+ * the two sides represent equivalent PathKeyItems for path keys: any
+ * path that is sorted by one side will also be sorted by the other
+ * (as soon as the two rels are joined, that is). Record the key
+ * equivalence for future use.
*/
if (can_be_equijoin && restrictinfo->mergejoinoperator != InvalidOid)
add_equijoined_keys(root, restrictinfo);
@@ -569,15 +578,16 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
Expr *clause;
/*
- * Currently, since check_mergejoinable only accepts Var = Var clauses,
- * we should only see Var nodes here. Would have to work a little
- * harder to locate the right rel(s) if more-general mergejoin clauses
- * were accepted.
+ * Currently, since check_mergejoinable only accepts Var = Var
+ * clauses, we should only see Var nodes here. Would have to work a
+ * little harder to locate the right rel(s) if more-general mergejoin
+ * clauses were accepted.
*/
Assert(IsA(item1, Var));
irel1 = ((Var *) item1)->varno;
Assert(IsA(item2, Var));
irel2 = ((Var *) item2)->varno;
+
/*
* If both vars belong to same rel, we need to look at that rel's
* baserestrictinfo list. If different rels, each will have a
@@ -593,6 +603,7 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
restrictlist = joininfo->jinfo_restrictinfo;
}
+
/*
* Scan to see if equality is already known.
*/
@@ -611,6 +622,7 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
(equal(item2, left) && equal(item1, right)))
return; /* found a matching clause */
}
+
/*
* This equality is new information, so construct a clause
* representing it to add to the query data structures.
@@ -620,6 +632,7 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
eq_operator = compatible_oper("=", ltype, rtype, true);
if (!HeapTupleIsValid(eq_operator))
{
+
/*
* Would it be safe to just not add the equality to the query if
* we have no suitable equality operator for the combination of
@@ -629,6 +642,7 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
typeidTypeName(ltype), typeidTypeName(rtype));
}
pgopform = (Form_pg_operator) GETSTRUCT(eq_operator);
+
/*
* Let's just make sure this appears to be a compatible operator.
*/
@@ -641,21 +655,21 @@ process_implied_equality(Query *root, Node *item1, Node *item2,
clause = makeNode(Expr);
clause->typeOid = BOOLOID;
clause->opType = OP_EXPR;
- clause->oper = (Node *) makeOper(oprid(eq_operator), /* opno */
- InvalidOid, /* opid */
- BOOLOID); /* operator result type */
+ clause->oper = (Node *) makeOper(oprid(eq_operator), /* opno */
+ InvalidOid, /* opid */
+ BOOLOID); /* operator result type */
clause->args = makeList2(item1, item2);
ReleaseSysCache(eq_operator);
/*
* Note: we mark the qual "pushed down" to ensure that it can never be
- * taken for an original JOIN/ON clause. We also claim it is an outer-
- * join clause, which it isn't, but that keeps distribute_qual_to_rels
- * from examining the outerjoinsets of the relevant rels (which are no
- * longer of interest, but could keep the qual from being pushed down
- * to where it should be). It'll also save a useless call to
- * add_equijoined keys...
+ * taken for an original JOIN/ON clause. We also claim it is an
+ * outer-join clause, which it isn't, but that keeps
+ * distribute_qual_to_rels from examining the outerjoinsets of the
+ * relevant rels (which are no longer of interest, but could keep the
+ * qual from being pushed down to where it should be). It'll also
+ * save a useless call to add_equijoined_keys...
*/
distribute_qual_to_rels(root, (Node *) clause,
true, true,
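The initsplan.c comments above lean on two set primitives: is_subseti() to test whether a baserel's outerjoinset is already covered by the qual's relids, and set_unioni(), which the code explicitly relies on being nondestructive of its inputs when it folds an outerjoinset into the qual's relid list. The following is a minimal standalone sketch of those two properties over sorted integer arrays; it is a toy stand-in, not PostgreSQL's relid-list implementation.

/* Toy illustration: subset test and nondestructive union over sorted,
 * duplicate-free int arrays (stand-ins for is_subseti()/set_unioni()). */
#include <stdio.h>
#include <stdlib.h>

static int is_subset(const int *a, int na, const int *b, int nb)
{
    int i = 0, j = 0;
    while (i < na && j < nb)
    {
        if (a[i] == b[j])
            i++, j++;
        else if (a[i] > b[j])
            j++;
        else
            return 0;           /* a[i] missing from b */
    }
    return i == na;
}

/* Returns a freshly allocated union; neither input is modified. */
static int *set_union(const int *a, int na, const int *b, int nb, int *nout)
{
    int *r = malloc((size_t) (na + nb) * sizeof(int));
    int i = 0, j = 0, k = 0;
    while (i < na || j < nb)
    {
        if (j >= nb || (i < na && a[i] < b[j]))
            r[k++] = a[i++];
        else if (i >= na || b[j] < a[i])
            r[k++] = b[j++];
        else
        {
            r[k++] = a[i];
            i++, j++;
        }
    }
    *nout = k;
    return r;
}

int main(void)
{
    int relids[] = {1, 3};      /* rels mentioned by the qual */
    int ojset[] = {2, 3};       /* an outer join below the qual */
    int n, *merged;

    if (!is_subset(ojset, 2, relids, 2))
    {
        merged = set_union(relids, 2, ojset, 2, &n);    /* delay the qual */
        for (int i = 0; i < n; i++)
            printf("%d ", merged[i]);
        printf("\n");           /* prints: 1 2 3 */
        free(merged);
    }
    return 0;
}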
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index d196e755d66..b2b362e84a5 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.63 2001/01/24 19:42:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.64 2001/03/22 03:59:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
static Plan *subplanner(Query *root, List *flat_tlist,
- double tuple_fraction);
+ double tuple_fraction);
/*--------------------
@@ -82,7 +82,7 @@ query_planner(Query *root,
/*
* If the query has an empty join tree, then it's something easy like
- * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
+ * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly.
*/
if (root->jointree->fromlist == NIL)
{
@@ -213,9 +213,9 @@ subplanner(Query *root,
foreach(brel, root->base_rel_list)
{
RelOptInfo *baserel = (RelOptInfo *) lfirst(brel);
- int relid = lfirsti(baserel->relids);
+ int relid = lfirsti(baserel->relids);
- if (! ptrMember(baserel, joined_rels))
+ if (!ptrMember(baserel, joined_rels))
elog(ERROR, "Internal error: no jointree entry for rel %s (%d)",
rt_fetch(relid, root->rtable)->eref->relname, relid);
}
@@ -334,9 +334,9 @@ subplanner(Query *root,
/*
* Nothing for it but to sort the cheapest-total-cost path --- but we
* let the caller do that. grouping_planner has to be able to add a
- * sort node anyway, so no need for extra code here. (Furthermore, the
- * given pathkeys might involve something we can't compute here, such
- * as an aggregate function...)
+ * sort node anyway, so no need for extra code here. (Furthermore,
+ * the given pathkeys might involve something we can't compute here,
+ * such as an aggregate function...)
*/
root->query_pathkeys = cheapestpath->pathkeys;
resultplan = create_plan(root, cheapestpath);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 9faf6b95e80..c5bd439587e 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.101 2001/01/27 04:42:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.102 2001/03/22 03:59:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,9 +33,9 @@
/* Expression kind codes for preprocess_expression */
-#define EXPRKIND_TARGET 0
+#define EXPRKIND_TARGET 0
#define EXPRKIND_WHERE 1
-#define EXPRKIND_HAVING 2
+#define EXPRKIND_HAVING 2
static Node *pull_up_subqueries(Query *parse, Node *jtnode);
@@ -68,16 +68,16 @@ planner(Query *parse)
/*
* The planner can be called recursively (an example is when
- * eval_const_expressions tries to pre-evaluate an SQL function).
- * So, these global state variables must be saved and restored.
+ * eval_const_expressions tries to pre-evaluate an SQL function). So,
+ * these global state variables must be saved and restored.
*
- * These vars cannot be moved into the Query structure since their
- * whole purpose is communication across multiple sub-Queries.
+ * These vars cannot be moved into the Query structure since their whole
+ * purpose is communication across multiple sub-Queries.
*
* Note we do NOT save and restore PlannerPlanId: it exists to assign
- * unique IDs to SubPlan nodes, and we want those IDs to be unique
- * for the life of a backend. Also, PlannerInitPlan is saved/restored
- * in subquery_planner, not here.
+ * unique IDs to SubPlan nodes, and we want those IDs to be unique for
+ * the life of a backend. Also, PlannerInitPlan is saved/restored in
+ * subquery_planner, not here.
*/
save_PlannerQueryLevel = PlannerQueryLevel;
save_PlannerParamVar = PlannerParamVar;
@@ -150,6 +150,7 @@ subquery_planner(Query *parse, double tuple_fraction)
*/
parse->jointree = (FromExpr *)
pull_up_subqueries(parse, (Node *) parse->jointree);
+
/*
* If so, we may have created opportunities to simplify the jointree.
*/
@@ -170,26 +171,26 @@ subquery_planner(Query *parse, double tuple_fraction)
/*
* A HAVING clause without aggregates is equivalent to a WHERE clause
- * (except it can only refer to grouped fields). Transfer any agg-free
- * clauses of the HAVING qual into WHERE. This may seem like wasting
- * cycles to cater to stupidly-written queries, but there are other
- * reasons for doing it. Firstly, if the query contains no aggs at all,
- * then we aren't going to generate an Agg plan node, and so there'll be
- * no place to execute HAVING conditions; without this transfer, we'd
- * lose the HAVING condition entirely, which is wrong. Secondly, when
- * we push down a qual condition into a sub-query, it's easiest to push
- * the qual into HAVING always, in case it contains aggs, and then let
- * this code sort it out.
+ * (except it can only refer to grouped fields). Transfer any
+ * agg-free clauses of the HAVING qual into WHERE. This may seem like
+ * wasting cycles to cater to stupidly-written queries, but there are
+ * other reasons for doing it. Firstly, if the query contains no aggs
+ * at all, then we aren't going to generate an Agg plan node, and so
+ * there'll be no place to execute HAVING conditions; without this
+ * transfer, we'd lose the HAVING condition entirely, which is wrong.
+ * Secondly, when we push down a qual condition into a sub-query, it's
+ * easiest to push the qual into HAVING always, in case it contains
+ * aggs, and then let this code sort it out.
*
* Note that both havingQual and parse->jointree->quals are in
* implicitly-ANDed-list form at this point, even though they are
- * declared as Node *. Also note that contain_agg_clause does not
+ * declared as Node *. Also note that contain_agg_clause does not
* recurse into sub-selects, which is exactly what we need here.
*/
newHaving = NIL;
foreach(lst, (List *) parse->havingQual)
{
- Node *havingclause = (Node *) lfirst(lst);
+ Node *havingclause = (Node *) lfirst(lst);
if (contain_agg_clause(havingclause))
newHaving = lappend(newHaving, havingclause);
@@ -201,30 +202,32 @@ subquery_planner(Query *parse, double tuple_fraction)
/*
* Do the main planning. If we have an inherited target relation,
- * that needs special processing, else go straight to grouping_planner.
+ * that needs special processing, else go straight to
+ * grouping_planner.
*/
if (parse->resultRelation &&
- (lst = expand_inherted_rtentry(parse, parse->resultRelation)) != NIL)
+ (lst = expand_inherted_rtentry(parse, parse->resultRelation)) != NIL)
plan = inheritance_planner(parse, lst);
else
plan = grouping_planner(parse, tuple_fraction);
/*
- * If any subplans were generated, or if we're inside a subplan,
- * build subPlan, extParam and locParam lists for plan nodes.
+ * If any subplans were generated, or if we're inside a subplan, build
+ * subPlan, extParam and locParam lists for plan nodes.
*/
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
{
(void) SS_finalize_plan(plan);
+
/*
- * At the moment, SS_finalize_plan doesn't handle initPlans
- * and so we assign them to the topmost plan node.
+ * At the moment, SS_finalize_plan doesn't handle initPlans and so
+ * we assign them to the topmost plan node.
*/
plan->initPlan = PlannerInitPlan;
/* Must add the initPlans' extParams to the topmost node's, too */
foreach(lst, plan->initPlan)
{
- SubPlan *subplan = (SubPlan *) lfirst(lst);
+ SubPlan *subplan = (SubPlan *) lfirst(lst);
plan->extParam = set_unioni(plan->extParam,
subplan->plan->extParam);
@@ -266,44 +269,47 @@ pull_up_subqueries(Query *parse, Node *jtnode)
Query *subquery = rte->subquery;
/*
- * Is this a subquery RTE, and if so, is the subquery simple enough
- * to pull up? (If not, do nothing at this node.)
+ * Is this a subquery RTE, and if so, is the subquery simple
+ * enough to pull up? (If not, do nothing at this node.)
*/
if (subquery && is_simple_subquery(subquery))
{
- int rtoffset;
- Node *subjointree;
- List *subtlist;
- List *l;
+ int rtoffset;
+ Node *subjointree;
+ List *subtlist;
+ List *l;
/*
- * First, recursively pull up the subquery's subqueries,
- * so that this routine's processing is complete for its
- * jointree and rangetable. NB: if the same subquery is
- * referenced from multiple jointree items (which can't happen
- * normally, but might after rule rewriting), then we will invoke
- * this processing multiple times on that subquery. OK because
+ * First, recursively pull up the subquery's subqueries, so
+ * that this routine's processing is complete for its jointree
+ * and rangetable. NB: if the same subquery is referenced
+ * from multiple jointree items (which can't happen normally,
+ * but might after rule rewriting), then we will invoke this
+ * processing multiple times on that subquery. OK because
* nothing will happen after the first time. We do have to be
* careful to copy everything we pull up, however, or risk
* having chunks of structure multiply linked.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subquery, (Node *) subquery->jointree);
+
/*
- * Append the subquery's rangetable to mine (currently,
- * no adjustments will be needed in the subquery's rtable).
+ * Append the subquery's rangetable to mine (currently, no
+ * adjustments will be needed in the subquery's rtable).
*/
rtoffset = length(parse->rtable);
parse->rtable = nconc(parse->rtable,
copyObject(subquery->rtable));
+
/*
- * Make copies of the subquery's jointree and targetlist
- * with varnos adjusted to match the merged rangetable.
+ * Make copies of the subquery's jointree and targetlist with
+ * varnos adjusted to match the merged rangetable.
*/
subjointree = copyObject(subquery->jointree);
OffsetVarNodes(subjointree, rtoffset, 0);
subtlist = copyObject(subquery->targetList);
OffsetVarNodes((Node *) subtlist, rtoffset, 0);
+
/*
* Replace all of the top query's references to the subquery's
* outputs with copies of the adjusted subtlist items, being
@@ -316,16 +322,18 @@ pull_up_subqueries(Query *parse, Node *jtnode)
parse->havingQual =
ResolveNew(parse->havingQual,
varno, 0, subtlist, CMD_SELECT, 0);
+
/*
* Pull up any FOR UPDATE markers, too.
*/
foreach(l, subquery->rowMarks)
{
- int submark = lfirsti(l);
+ int submark = lfirsti(l);
parse->rowMarks = lappendi(parse->rowMarks,
submark + rtoffset);
}
+
/*
* Miscellaneous housekeeping.
*/
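The pull_up_subqueries() hunk above appends the subquery's rangetable to the parent's and then shifts every varno in the pulled-up jointree and targetlist by the parent rangetable's old length (rtoffset, via OffsetVarNodes). Below is a rough standalone analogue of that renumbering step using plain arrays in place of the real Query/Var structures; all names here are hypothetical.

/* Toy analogue of the rtoffset/OffsetVarNodes step: merge two "rangetables"
 * and renumber 1-based references into the second one. */
#include <stdio.h>

#define MAXRT 8

int main(void)
{
    const char *parent_rt[MAXRT] = {"orders", "customers"};
    int         parent_len = 2;

    const char *sub_rt[] = {"items"};
    int         sub_len = 1;
    int         sub_varnos[] = {1};     /* subquery vars point at "items" */
    int         nvars = 1;

    int         rtoffset = parent_len;  /* length of the parent rangetable */

    /* "nconc": append the subquery rangetable to the parent's */
    for (int i = 0; i < sub_len; i++)
        parent_rt[parent_len++] = sub_rt[i];

    /* "OffsetVarNodes": shift the pulled-up references */
    for (int i = 0; i < nvars; i++)
        sub_varnos[i] += rtoffset;

    for (int i = 0; i < nvars; i++)
        printf("var %d now resolves to %s\n",
               sub_varnos[i], parent_rt[sub_varnos[i] - 1]);
    return 0;
}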
@@ -345,9 +353,7 @@ pull_up_subqueries(Query *parse, Node *jtnode)
List *l;
foreach(l, f->fromlist)
- {
lfirst(l) = pull_up_subqueries(parse, lfirst(l));
- }
}
else if (IsA(jtnode, JoinExpr))
{
@@ -370,6 +376,7 @@ pull_up_subqueries(Query *parse, Node *jtnode)
static bool
is_simple_subquery(Query *subquery)
{
+
/*
* Let's just make sure it's a valid subselect ...
*/
@@ -379,12 +386,14 @@ is_simple_subquery(Query *subquery)
subquery->into != NULL ||
subquery->isPortal)
elog(ERROR, "is_simple_subquery: subquery is bogus");
+
/*
- * Can't currently pull up a query with setops.
- * Maybe after querytree redesign...
+ * Can't currently pull up a query with setops. Maybe after querytree
+ * redesign...
*/
if (subquery->setOperations)
return false;
+
/*
* Can't pull up a subquery involving grouping, aggregation, sorting,
* or limiting.
@@ -397,12 +406,13 @@ is_simple_subquery(Query *subquery)
subquery->limitOffset ||
subquery->limitCount)
return false;
+
/*
* Hack: don't try to pull up a subquery with an empty jointree.
* query_planner() will correctly generate a Result plan for a
* jointree that's totally empty, but I don't think the right things
- * happen if an empty FromExpr appears lower down in a jointree.
- * Not worth working hard on this, just to collapse SubqueryScan/Result
+ * happen if an empty FromExpr appears lower down in a jointree. Not
+ * worth working hard on this, just to collapse SubqueryScan/Result
* into Result...
*/
if (subquery->jointree->fromlist == NIL)
@@ -443,7 +453,9 @@ resolvenew_in_jointree(Node *jtnode, int varno, List *subtlist)
resolvenew_in_jointree(j->rarg, varno, subtlist);
j->quals = ResolveNew(j->quals,
varno, 0, subtlist, CMD_SELECT, 0);
- /* We don't bother to update the colvars list, since it won't be
+
+ /*
+ * We don't bother to update the colvars list, since it won't be
* used again ...
*/
}
@@ -458,13 +470,13 @@ resolvenew_in_jointree(Node *jtnode, int varno, List *subtlist)
*
* If we succeed in pulling up a subquery then we might form a jointree
* in which a FromExpr is a direct child of another FromExpr. In that
- * case we can consider collapsing the two FromExprs into one. This is
+ * case we can consider collapsing the two FromExprs into one. This is
* an optional conversion, since the planner will work correctly either
* way. But we may find a better plan (at the cost of more planning time)
* if we merge the two nodes.
*
* NOTE: don't try to do this in the same jointree scan that does subquery
- * pullup! Since we're changing the jointree structure here, that wouldn't
+ * pullup! Since we're changing the jointree structure here, that wouldn't
* work reliably --- see comments for pull_up_subqueries().
*/
static Node *
@@ -484,27 +496,29 @@ preprocess_jointree(Query *parse, Node *jtnode)
foreach(l, f->fromlist)
{
- Node *child = (Node *) lfirst(l);
+ Node *child = (Node *) lfirst(l);
/* Recursively simplify the child... */
child = preprocess_jointree(parse, child);
/* Now, is it a FromExpr? */
if (child && IsA(child, FromExpr))
{
+
/*
- * Yes, so do we want to merge it into parent? Always do so
- * if child has just one element (since that doesn't make the
- * parent's list any longer). Otherwise we have to be careful
- * about the increase in planning time caused by combining the
- * two join search spaces into one. Our heuristic is to merge
- * if the merge will produce a join list no longer than
- * GEQO_RELS/2. (Perhaps need an additional user parameter?)
+ * Yes, so do we want to merge it into parent? Always do
+ * so if child has just one element (since that doesn't
+ * make the parent's list any longer). Otherwise we have
+ * to be careful about the increase in planning time
+ * caused by combining the two join search spaces into
+ * one. Our heuristic is to merge if the merge will
+ * produce a join list no longer than GEQO_RELS/2.
+ * (Perhaps need an additional user parameter?)
*/
FromExpr *subf = (FromExpr *) child;
int childlen = length(subf->fromlist);
int myothers = length(newlist) + length(lnext(l));
- if (childlen <= 1 || (childlen+myothers) <= geqo_rels/2)
+ if (childlen <= 1 || (childlen + myothers) <= geqo_rels / 2)
{
newlist = nconc(newlist, subf->fromlist);
f->quals = make_and_qual(f->quals, subf->quals);
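The heuristic spelled out in the preprocess_jointree() hunk just above, always merge a single-element child FromExpr, otherwise merge only if the combined join list stays within GEQO_RELS/2, fits in one predicate. A small sketch follows; the threshold value is assumed for illustration only.

/* Toy version of the FromExpr merge decision described above. */
#include <stdio.h>

static int geqo_rels = 11;      /* assumed planner threshold, illustrative */

static int should_merge_fromexpr(int childlen, int myothers)
{
    /* Always fold in a one-element child; otherwise keep the merged
     * join list no longer than geqo_rels / 2. */
    return childlen <= 1 || (childlen + myothers) <= geqo_rels / 2;
}

int main(void)
{
    printf("%d\n", should_merge_fromexpr(1, 7));    /* 1: single element */
    printf("%d\n", should_merge_fromexpr(3, 2));    /* 1: 5 <= 11/2 */
    printf("%d\n", should_merge_fromexpr(4, 3));    /* 0: 7 > 5 */
    return 0;
}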
@@ -540,6 +554,7 @@ preprocess_jointree(Query *parse, Node *jtnode)
static Node *
preprocess_expression(Query *parse, Node *expr, int kind)
{
+
/*
* Simplify constant expressions.
*
@@ -551,8 +566,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
expr = eval_const_expressions(expr);
/*
- * If it's a qual or havingQual, canonicalize it, and convert it
- * to implicit-AND format.
+ * If it's a qual or havingQual, canonicalize it, and convert it to
+ * implicit-AND format.
*
* XXX Is there any value in re-applying eval_const_expressions after
* canonicalize_qual?
@@ -575,10 +590,11 @@ preprocess_expression(Query *parse, Node *expr, int kind)
if (kind != EXPRKIND_WHERE &&
(parse->groupClause != NIL || parse->hasAggs))
{
+
/*
* Check for ungrouped variables passed to subplans. Note we
- * do NOT do this for subplans in WHERE (or JOIN/ON); it's legal
- * there because WHERE is evaluated pre-GROUP.
+ * do NOT do this for subplans in WHERE (or JOIN/ON); it's
+ * legal there because WHERE is evaluated pre-GROUP.
*/
check_subplans_for_ungrouped_vars(expr, parse);
}
@@ -635,12 +651,12 @@ preprocess_qual_conditions(Query *parse, Node *jtnode)
* inheritance set.
*
* We have to handle this case differently from cases where a source
- * relation is an inheritance set. Source inheritance is expanded at
+ * relation is an inheritance set. Source inheritance is expanded at
* the bottom of the plan tree (see allpaths.c), but target inheritance
* has to be expanded at the top. The reason is that for UPDATE, each
* target relation needs a different targetlist matching its own column
* set. (This is not so critical for DELETE, but for simplicity we treat
- * inherited DELETE the same way.) Fortunately, the UPDATE/DELETE target
+ * inherited DELETE the same way.) Fortunately, the UPDATE/DELETE target
* can never be the nullable side of an outer join, so it's OK to generate
* the plan this way.
*
@@ -661,17 +677,17 @@ inheritance_planner(Query *parse, List *inheritlist)
foreach(l, inheritlist)
{
- int childRTindex = lfirsti(l);
- Oid childOID = getrelid(childRTindex, parse->rtable);
- Query *subquery;
- Plan *subplan;
+ int childRTindex = lfirsti(l);
+ Oid childOID = getrelid(childRTindex, parse->rtable);
+ Query *subquery;
+ Plan *subplan;
/* Generate modified query with this rel as target */
subquery = (Query *) adjust_inherited_attrs((Node *) parse,
- parentRTindex, parentOID,
- childRTindex, childOID);
+ parentRTindex, parentOID,
+ childRTindex, childOID);
/* Generate plan */
- subplan = grouping_planner(subquery, 0.0 /* retrieve all tuples */);
+ subplan = grouping_planner(subquery, 0.0 /* retrieve all tuples */ );
subplans = lappend(subplans, subplan);
/* Save preprocessed tlist from first rel for use in Append */
if (tlist == NIL)
@@ -718,6 +734,7 @@ grouping_planner(Query *parse, double tuple_fraction)
if (parse->setOperations)
{
+
/*
* Construct the plan for set operations. The result will not
* need any work except perhaps a top-level sort and/or LIMIT.
@@ -736,17 +753,17 @@ grouping_planner(Query *parse, double tuple_fraction)
tlist = postprocess_setop_tlist(result_plan->targetlist, tlist);
/*
- * Can't handle FOR UPDATE here (parser should have checked already,
- * but let's make sure).
+ * Can't handle FOR UPDATE here (parser should have checked
+ * already, but let's make sure).
*/
if (parse->rowMarks)
elog(ERROR, "SELECT FOR UPDATE is not allowed with UNION/INTERSECT/EXCEPT");
/*
* We set current_pathkeys NIL indicating we do not know sort
- * order. This is correct when the top set operation is UNION ALL,
- * since the appended-together results are unsorted even if the
- * subplans were sorted. For other set operations we could be
+ * order. This is correct when the top set operation is UNION
+ * ALL, since the appended-together results are unsorted even if
+ * the subplans were sorted. For other set operations we could be
* smarter --- room for future improvement!
*/
current_pathkeys = NIL;
@@ -772,22 +789,26 @@ grouping_planner(Query *parse, double tuple_fraction)
/*
* Add TID targets for rels selected FOR UPDATE (should this be
- * done in preprocess_targetlist?). The executor uses the TID
- * to know which rows to lock, much as for UPDATE or DELETE.
+ * done in preprocess_targetlist?). The executor uses the TID to
+ * know which rows to lock, much as for UPDATE or DELETE.
*/
if (parse->rowMarks)
{
List *l;
/*
- * We've got trouble if the FOR UPDATE appears inside grouping,
- * since grouping renders a reference to individual tuple CTIDs
- * invalid. This is also checked at parse time, but that's
- * insufficient because of rule substitution, query pullup, etc.
+ * We've got trouble if the FOR UPDATE appears inside
+ * grouping, since grouping renders a reference to individual
+ * tuple CTIDs invalid. This is also checked at parse time,
+ * but that's insufficient because of rule substitution, query
+ * pullup, etc.
*/
CheckSelectForUpdate(parse);
- /* Currently the executor only supports FOR UPDATE at top level */
+ /*
+ * Currently the executor only supports FOR UPDATE at top
+ * level
+ */
if (PlannerQueryLevel > 1)
elog(ERROR, "SELECT FOR UPDATE is not allowed in subselects");
@@ -873,9 +894,9 @@ grouping_planner(Query *parse, double tuple_fraction)
int32 count = DatumGetInt32(limitc->constvalue);
/*
- * A NULL-constant LIMIT represents "LIMIT ALL",
- * which we treat the same as no limit (ie,
- * expect to retrieve all the tuples).
+ * A NULL-constant LIMIT represents "LIMIT ALL", which
+ * we treat the same as no limit (ie, expect to
+ * retrieve all the tuples).
*/
if (!limitc->constisnull && count > 0)
{
@@ -902,17 +923,19 @@ grouping_planner(Query *parse, double tuple_fraction)
}
else
{
+
/*
- * COUNT is an expression ... don't know exactly what the
- * limit will be, but for lack of a better idea assume
- * 10% of the plan's result is wanted.
+ * COUNT is an expression ... don't know exactly what
+ * the limit will be, but for lack of a better idea
+ * assume 10% of the plan's result is wanted.
*/
tuple_fraction = 0.10;
}
}
/*
- * If no LIMIT, check for retrieve-into-portal, ie DECLARE CURSOR.
+ * If no LIMIT, check for retrieve-into-portal, ie DECLARE
+ * CURSOR.
*
* We have no real idea how many tuples the user will ultimately
* FETCH from a cursor, but it seems a good bet that he
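The grouping_planner() comments above describe how a LIMIT shapes the fraction of tuples the planner expects to fetch: a NULL constant means LIMIT ALL and is treated like no limit, a positive constant count is used directly, and a LIMIT expression falls back to assuming 10% of the result is wanted. The sketch below captures only that decision; the real routine handles cursors and other cases not shown in this hunk, and the treatment of a non-positive count here is a simplification.

/* Toy sketch of choosing tuple_fraction from a LIMIT clause, following the
 * comments above.  >= 1.0 means an absolute tuple count, (0,1) a fraction,
 * 0.0 means "expect to retrieve everything". */
#include <stdio.h>

static double limit_tuple_fraction(int has_limit, int limit_is_const,
                                   int const_is_null, int count)
{
    if (!has_limit)
        return 0.0;             /* no LIMIT: fetch all tuples */
    if (limit_is_const)
    {
        if (const_is_null || count <= 0)
            return 0.0;         /* LIMIT ALL (or, here, a non-positive
                                 * count) treated like no limit */
        return (double) count;  /* absolute number of tuples */
    }
    return 0.10;                /* LIMIT <expression>: guess 10% of result */
}

int main(void)
{
    printf("%.2f\n", limit_tuple_fraction(0, 0, 0, 0));    /* 0.00 */
    printf("%.2f\n", limit_tuple_fraction(1, 1, 1, 0));    /* 0.00, LIMIT ALL */
    printf("%.2f\n", limit_tuple_fraction(1, 1, 0, 20));   /* 20.00 */
    printf("%.2f\n", limit_tuple_fraction(1, 0, 0, 0));    /* 0.10 */
    return 0;
}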
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 1f2ae69561b..e7f8361b9a7 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.70 2001/01/24 19:42:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.71 2001/03/22 03:59:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,16 +97,17 @@ set_plan_references(Plan *plan)
fix_expr_references(plan,
(Node *) ((IndexScan *) plan)->indxqual);
fix_expr_references(plan,
- (Node *) ((IndexScan *) plan)->indxqualorig);
+ (Node *) ((IndexScan *) plan)->indxqualorig);
break;
case T_TidScan:
fix_expr_references(plan, (Node *) plan->targetlist);
fix_expr_references(plan, (Node *) plan->qual);
break;
case T_SubqueryScan:
+
/*
- * We do not do set_uppernode_references() here, because
- * a SubqueryScan will always have been created with correct
+ * We do not do set_uppernode_references() here, because a
+ * SubqueryScan will always have been created with correct
* references to its subplan's outputs to begin with.
*/
fix_expr_references(plan, (Node *) plan->targetlist);
@@ -126,7 +127,7 @@ set_plan_references(Plan *plan)
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
- (Node *) ((MergeJoin *) plan)->mergeclauses);
+ (Node *) ((MergeJoin *) plan)->mergeclauses);
break;
case T_HashJoin:
set_join_references((Join *) plan);
@@ -134,7 +135,7 @@ set_plan_references(Plan *plan)
fix_expr_references(plan, (Node *) plan->qual);
fix_expr_references(plan, (Node *) ((Join *) plan)->joinqual);
fix_expr_references(plan,
- (Node *) ((HashJoin *) plan)->hashclauses);
+ (Node *) ((HashJoin *) plan)->hashclauses);
break;
case T_Material:
case T_Sort:
@@ -148,10 +149,10 @@ set_plan_references(Plan *plan)
* targetlists or quals (because they just return their
* unmodified input tuples). The optimizer is lazy about
* creating really valid targetlists for them. Best to just
- * leave the targetlist alone. In particular, we do not want
+ * leave the targetlist alone. In particular, we do not want
* to pull a subplan list for them, since we will likely end
- * up with duplicate list entries for subplans that also appear
- * in lower levels of the plan tree!
+ * up with duplicate list entries for subplans that also
+ * appear in lower levels of the plan tree!
*/
break;
case T_Agg:
@@ -175,11 +176,12 @@ set_plan_references(Plan *plan)
fix_expr_references(plan, ((Result *) plan)->resconstantqual);
break;
case T_Append:
+
/*
* Append, like Sort et al, doesn't actually evaluate its
- * targetlist or quals, and we haven't bothered to give it
- * its own tlist copy. So, don't fix targetlist/qual.
- * But do recurse into subplans.
+ * targetlist or quals, and we haven't bothered to give it its
+ * own tlist copy. So, don't fix targetlist/qual. But do
+ * recurse into subplans.
*/
foreach(pl, ((Append *) plan)->appendplans)
set_plan_references((Plan *) lfirst(pl));
@@ -296,7 +298,7 @@ set_uppernode_references(Plan *plan, Index subvarno)
subplanTargetList = NIL;
outputTargetList = NIL;
- foreach (l, plan->targetlist)
+ foreach(l, plan->targetlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
TargetEntry *subplantle;
@@ -306,8 +308,8 @@ set_uppernode_references(Plan *plan, Index subvarno)
if (subplantle)
{
/* Found a matching subplan output expression */
- Resdom *resdom = subplantle->resdom;
- Var *newvar;
+ Resdom *resdom = subplantle->resdom;
+ Var *newvar;
newvar = makeVar(subvarno,
resdom->resno,
@@ -317,7 +319,7 @@ set_uppernode_references(Plan *plan, Index subvarno)
/* If we're just copying a simple Var, copy up original info */
if (subplantle->expr && IsA(subplantle->expr, Var))
{
- Var *subvar = (Var *) subplantle->expr;
+ Var *subvar = (Var *) subplantle->expr;
newvar->varnoold = subvar->varnoold;
newvar->varoattno = subvar->varoattno;
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 0a7d4926b76..9d47b60430a 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.48 2001/01/24 19:42:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.49 2001/03/22 03:59:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,12 +82,12 @@ replace_var(Var *var)
/*
* If there's already a PlannerParamVar entry for this same Var, just
- * use it. NOTE: in sufficiently complex querytrees, it is
- * possible for the same varno/varlevel to refer to different RTEs in
- * different parts of the parsetree, so that different fields might
- * end up sharing the same Param number. As long as we check the
- * vartype as well, I believe that this sort of aliasing will cause no
- * trouble. The correct field should get stored into the Param slot at
+ * use it. NOTE: in sufficiently complex querytrees, it is possible
+ * for the same varno/varlevel to refer to different RTEs in different
+ * parts of the parsetree, so that different fields might end up
+ * sharing the same Param number. As long as we check the vartype as
+ * well, I believe that this sort of aliasing will cause no trouble.
+ * The correct field should get stored into the Param slot at
* execution in each part of the tree.
*/
i = 0;
@@ -142,10 +142,10 @@ make_subplan(SubLink *slink)
elog(ERROR, "make_subplan: invalid expression structure (subquery already processed?)");
/*
- * Copy the source Query node. This is a quick and dirty kluge to resolve
- * the fact that the parser can generate trees with multiple links to the
- * same sub-Query node, but the planner wants to scribble on the Query.
- * Try to clean this up when we do querytree redesign...
+ * Copy the source Query node. This is a quick and dirty kluge to
+ * resolve the fact that the parser can generate trees with multiple
+ * links to the same sub-Query node, but the planner wants to scribble
+ * on the Query. Try to clean this up when we do querytree redesign...
*/
subquery = (Query *) copyObject(subquery);
@@ -183,7 +183,8 @@ make_subplan(SubLink *slink)
*/
node->plan = plan = subquery_planner(subquery, tuple_fraction);
- node->plan_id = PlannerPlanId++; /* Assign unique ID to this SubPlan */
+ node->plan_id = PlannerPlanId++; /* Assign unique ID to this
+ * SubPlan */
node->rtable = subquery->rtable;
node->sublink = slink;
@@ -191,8 +192,8 @@ make_subplan(SubLink *slink)
slink->subselect = NULL; /* cool ?! see error check above! */
/*
- * Make parParam list of params that current query level will pass
- * to this child plan.
+ * Make parParam list of params that current query level will pass to
+ * this child plan.
*/
foreach(lst, plan->extParam)
{
@@ -275,7 +276,7 @@ make_subplan(SubLink *slink)
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(oper->opno),
0, 0, 0);
- if (! HeapTupleIsValid(tup))
+ if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for operator %u", oper->opno);
opform = (Form_pg_operator) GETSTRUCT(tup);
@@ -413,7 +414,7 @@ make_subplan(SubLink *slink)
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(oper->opno),
0, 0, 0);
- if (! HeapTupleIsValid(tup))
+ if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for operator %u", oper->opno);
opform = (Form_pg_operator) GETSTRUCT(tup);
@@ -614,15 +615,16 @@ SS_finalize_plan(Plan *plan)
break;
case T_SubqueryScan:
+
/*
- * In a SubqueryScan, SS_finalize_plan has already been run
- * on the subplan by the inner invocation of subquery_planner,
- * so there's no need to do it again. Instead, just pull out
- * the subplan's extParams list, which represents the params
- * it needs from my level and higher levels.
+ * In a SubqueryScan, SS_finalize_plan has already been run on
+ * the subplan by the inner invocation of subquery_planner, so
+ * there's no need to do it again. Instead, just pull out the
+ * subplan's extParams list, which represents the params it
+ * needs from my level and higher levels.
*/
results.paramids = set_unioni(results.paramids,
- ((SubqueryScan *) plan)->subplan->extParam);
+ ((SubqueryScan *) plan)->subplan->extParam);
break;
case T_IndexScan:
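Per the comment at the top of the subselect.c hunks, replace_var() reuses an existing PlannerParamVar slot when a Var matching on position and vartype has already been assigned a Param, and appends a new entry only otherwise. The following standalone sketch shows that interning pattern with plain arrays and a simplified (varno, varattno, vartype) key; the names are illustrative, not the real PlannerParamVar structure.

/* Toy interning of (varno, varattno, vartype) -> param index, echoing the
 * "reuse an existing entry if one matches" logic described above. */
#include <stdio.h>

typedef struct
{
    int varno;
    int varattno;
    int vartype;
} ToyVar;

static ToyVar params[32];
static int  nparams = 0;

static int assign_param(ToyVar v)
{
    for (int i = 0; i < nparams; i++)
        if (params[i].varno == v.varno &&
            params[i].varattno == v.varattno &&
            params[i].vartype == v.vartype)
            return i;           /* already have a Param for this Var */
    params[nparams] = v;
    return nparams++;           /* make a new slot */
}

int main(void)
{
    ToyVar a = {1, 2, 7};       /* arbitrary varno/attno/type ids */
    ToyVar b = {1, 2, 7};       /* same Var again */
    ToyVar c = {1, 2, 9};       /* same position, different type */

    int pa = assign_param(a);
    int pb = assign_param(b);
    int pc = assign_param(c);

    printf("%d %d %d\n", pa, pb, pc);   /* prints: 0 0 1 */
    return 0;
}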
diff --git a/src/backend/optimizer/prep/prepkeyset.c b/src/backend/optimizer/prep/prepkeyset.c
index 64d1d818108..180637d099e 100644
--- a/src/backend/optimizer/prep/prepkeyset.c
+++ b/src/backend/optimizer/prep/prepkeyset.c
@@ -104,7 +104,8 @@ transformKeySetQuery(Query *origNode)
Node_Copy(origNode, unionNode, distinctClause);
Node_Copy(origNode, unionNode, sortClause);
Node_Copy(origNode, unionNode, rtable);
- origNode->jointree->quals = NULL; /* avoid unnecessary copying */
+ origNode->jointree->quals = NULL; /* avoid unnecessary
+ * copying */
Node_Copy(origNode, unionNode, jointree);
Node_Copy(origNode, unionNode, targetList);
@@ -216,4 +217,4 @@ inspectOpNode(Expr *expr)
return (firstExpr && secondExpr && nodeTag(firstExpr) == T_Var && nodeTag(secondExpr) == T_Const);
}
-#endif /* ENABLE_KEY_SET_QUERY */
+#endif /* ENABLE_KEY_SET_QUERY */
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index 6fa9c4db29e..2dc92deca17 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.28 2001/01/24 19:42:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.29 2001/03/22 03:59:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -260,6 +260,7 @@ dnfify(Expr *qual)
return newqual;
}
+
#endif
/*--------------------
@@ -663,11 +664,11 @@ or_normalize(List *orlist)
* We are going to insert the orlist into multiple places in the
* result expression. For most expression types, it'd be OK to
* just have multiple links to the same subtree, but this fails
- * badly for SubLinks (and perhaps other cases?). For safety,
- * we make a distinct copy for each place the orlist is inserted.
+ * badly for SubLinks (and perhaps other cases?). For safety, we
+ * make a distinct copy for each place the orlist is inserted.
*/
if (lnext(temp) == NIL)
- neworlist = orlist; /* can use original tree at the end */
+ neworlist = orlist; /* can use original tree at the end */
else
neworlist = copyObject(orlist);
@@ -791,11 +792,12 @@ and_normalize(List *andlist)
* We are going to insert the andlist into multiple places in the
* result expression. For most expression types, it'd be OK to
* just have multiple links to the same subtree, but this fails
- * badly for SubLinks (and perhaps other cases?). For safety,
- * we make a distinct copy for each place the andlist is inserted.
+ * badly for SubLinks (and perhaps other cases?). For safety, we
+ * make a distinct copy for each place the andlist is inserted.
*/
if (lnext(temp) == NIL)
- newandlist = andlist; /* can use original tree at the end */
+ newandlist = andlist; /* can use original tree at the
+ * end */
else
newandlist = copyObject(andlist);
@@ -957,8 +959,10 @@ count_bool_nodes(Expr *qual,
}
else if (contain_subplans((Node *) qual))
{
- /* charge extra for subexpressions containing sub-SELECTs,
- * to discourage us from rearranging them in a way that might
+
+ /*
+ * charge extra for subexpressions containing sub-SELECTs, to
+ * discourage us from rearranging them in a way that might
* generate N copies of a subselect rather than one. The magic
* constant here interacts with the "4x maximum growth" heuristic
* in canonicalize_qual().
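The or_normalize()/and_normalize() hunks above insert the same sub-list into several places in the rewritten expression; because shared subtrees are unsafe for SubLinks, they copy the list for every insertion except the final one, where the original can be reused. Here is that "copy for all but the last use" pattern in miniature, over a toy linked list rather than PostgreSQL's List type.

/* Toy "distribute a subtree into N places, copying all but the last use". */
#include <stdio.h>
#include <stdlib.h>

typedef struct Cell
{
    int          val;
    struct Cell *next;
} Cell;

static Cell *cons(int val, Cell *next)
{
    Cell *c = malloc(sizeof(Cell));
    c->val = val;
    c->next = next;
    return c;
}

static Cell *copy_list(const Cell *l)
{
    return l ? cons(l->val, copy_list(l->next)) : NULL;
}

int main(void)
{
    Cell *orlist = cons(1, cons(2, NULL));  /* the list to distribute */
    Cell *uses[3];
    int   nplaces = 3;

    for (int i = 0; i < nplaces; i++)
    {
        if (i == nplaces - 1)
            uses[i] = orlist;               /* last place reuses the original */
        else
            uses[i] = copy_list(orlist);    /* earlier places get private copies */
    }

    printf("distinct copies: %d, original reused by last slot: %s\n",
           nplaces - 1, uses[nplaces - 1] == orlist ? "yes" : "no");
    return 0;
}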
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index f8a5afbbc45..da51a76d3f3 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.41 2001/01/24 19:42:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.42 2001/03/22 03:59:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,8 +33,8 @@
static List *expand_targetlist(List *tlist, int command_type,
Index result_relation, List *range_table);
static TargetEntry *process_matched_tle(TargetEntry *src_tle,
- TargetEntry *prior_tle,
- int attrno);
+ TargetEntry *prior_tle,
+ int attrno);
/*
@@ -49,9 +49,10 @@ preprocess_targetlist(List *tlist,
Index result_relation,
List *range_table)
{
+
/*
- * Sanity check: if there is a result relation, it'd better be a
- * real relation not a subquery. Else parser or rewriter messed up.
+ * Sanity check: if there is a result relation, it'd better be a real
+ * relation not a subquery. Else parser or rewriter messed up.
*/
if (result_relation)
{
@@ -250,7 +251,7 @@ expand_targetlist(List *tlist, int command_type,
new_tle = makeTargetEntry(makeResdom(attrno,
atttype,
atttypmod,
- pstrdup(attrname),
+ pstrdup(attrname),
false),
(Node *) temp_var);
break;
@@ -280,7 +281,7 @@ expand_targetlist(List *tlist, int command_type,
{
Resdom *resdom = old_tle->resdom;
- if (! resdom->resjunk)
+ if (!resdom->resjunk)
elog(ERROR, "Unexpected assignment to attribute \"%s\"",
resdom->resname);
/* Get the resno right, but don't copy unnecessarily */
@@ -314,9 +315,10 @@ expand_targetlist(List *tlist, int command_type,
* Essentially, the expression we want to produce in this case is like
* foo = array_set(array_set(foo, 2, 42), 4, 43)
*/
-static TargetEntry *process_matched_tle(TargetEntry *src_tle,
- TargetEntry *prior_tle,
- int attrno)
+static TargetEntry *
+process_matched_tle(TargetEntry *src_tle,
+ TargetEntry *prior_tle,
+ int attrno)
{
Resdom *resdom = src_tle->resdom;
Node *priorbottom;
@@ -324,11 +326,13 @@ static TargetEntry *process_matched_tle(TargetEntry *src_tle,
if (prior_tle == NULL)
{
+
/*
- * Normal case where this is the first assignment to the attribute.
+ * Normal case where this is the first assignment to the
+ * attribute.
*
- * We can recycle the old TLE+resdom if right resno; else make a
- * new one to avoid modifying the old tlist structure. (Is preserving
+ * We can recycle the old TLE+resdom if right resno; else make a new
+ * one to avoid modifying the old tlist structure. (Is preserving
* old tlist actually necessary? Not sure, be safe.)
*/
if (resdom->resno == attrno)
@@ -339,7 +343,7 @@ static TargetEntry *process_matched_tle(TargetEntry *src_tle,
}
/*
- * Multiple assignments to same attribute. Allow only if all are
+ * Multiple assignments to same attribute. Allow only if all are
* array-assign operators with same bottom array object.
*/
if (src_tle->expr == NULL || !IsA(src_tle->expr, ArrayRef) ||
@@ -350,16 +354,19 @@ static TargetEntry *process_matched_tle(TargetEntry *src_tle,
((ArrayRef *) prior_tle->expr)->refelemtype)
elog(ERROR, "Multiple assignments to same attribute \"%s\"",
resdom->resname);
+
/*
- * Prior TLE could be a nest of ArrayRefs if we do this more than once.
+ * Prior TLE could be a nest of ArrayRefs if we do this more than
+ * once.
*/
priorbottom = ((ArrayRef *) prior_tle->expr)->refexpr;
while (priorbottom != NULL && IsA(priorbottom, ArrayRef) &&
((ArrayRef *) priorbottom)->refassgnexpr != NULL)
priorbottom = ((ArrayRef *) priorbottom)->refexpr;
- if (! equal(priorbottom, ((ArrayRef *) src_tle->expr)->refexpr))
+ if (!equal(priorbottom, ((ArrayRef *) src_tle->expr)->refexpr))
elog(ERROR, "Multiple assignments to same attribute \"%s\"",
resdom->resname);
+
/*
* Looks OK to nest 'em.
*/
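The process_matched_tle() comment above gives the target form for multiple assignments to the same array column: foo = array_set(array_set(foo, 2, 42), 4, 43). The self-contained sketch below shows how such nesting composes, using a toy array_set that returns an updated copy with 1-based subscripts as in the example; it is not PostgreSQL's ArrayRef machinery.

/* Toy nesting of array "set" operations, mirroring the comment's example
 * foo = array_set(array_set(foo, 2, 42), 4, 43). */
#include <stdio.h>

#define N 5

typedef struct
{
    int elems[N];
} ToyArray;

/* Return a copy of 'a' with 1-based element 'idx' replaced by 'val'. */
static ToyArray array_set(ToyArray a, int idx, int val)
{
    a.elems[idx - 1] = val;     /* struct was passed by value: 'a' is a copy */
    return a;
}

int main(void)
{
    ToyArray foo = {{10, 20, 30, 40, 50}};

    foo = array_set(array_set(foo, 2, 42), 4, 43);

    for (int i = 0; i < N; i++)
        printf("%d ", foo.elems[i]);
    printf("\n");               /* prints: 10 42 30 43 50 */
    return 0;
}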
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 85058757e5e..968b9ded780 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -5,7 +5,7 @@
* from a time when only UNIONs were implemented.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). This no longer has much connection
+ * inheritance (SELECT FROM foo*). This no longer has much connection
* to the processing of UNION queries, but it's still here.
*
*
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.60 2001/01/24 19:42:59 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.61 2001/03/22 03:59:38 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,22 +50,22 @@ typedef struct
} adjust_inherited_attrs_context;
static Plan *recurse_set_operations(Node *setOp, Query *parse,
- List *colTypes, bool junkOK,
- int flag, List *refnames_tlist);
+ List *colTypes, bool junkOK,
+ int flag, List *refnames_tlist);
static Plan *generate_union_plan(SetOperationStmt *op, Query *parse,
- List *refnames_tlist);
+ List *refnames_tlist);
static Plan *generate_nonunion_plan(SetOperationStmt *op, Query *parse,
- List *refnames_tlist);
+ List *refnames_tlist);
static List *recurse_union_children(Node *setOp, Query *parse,
- SetOperationStmt *top_union,
- List *refnames_tlist);
+ SetOperationStmt *top_union,
+ List *refnames_tlist);
static List *generate_setop_tlist(List *colTypes, int flag,
- bool hack_constants,
- List *input_tlist,
- List *refnames_tlist);
+ bool hack_constants,
+ List *input_tlist,
+ List *refnames_tlist);
static bool tlist_same_datatypes(List *tlist, List *colTypes, bool junkOK);
static Node *adjust_inherited_attrs_mutator(Node *node,
- adjust_inherited_attrs_context *context);
+ adjust_inherited_attrs_context *context);
/*
@@ -99,10 +99,10 @@ plan_set_operations(Query *parse)
Assert(leftmostQuery != NULL);
/*
- * Recurse on setOperations tree to generate plans for set ops.
- * The final output plan should have just the column types shown
- * as the output from the top-level node, plus possibly a resjunk
- * working column (we can rely on upper-level nodes to deal with that).
+ * Recurse on setOperations tree to generate plans for set ops. The
+ * final output plan should have just the column types shown as the
+ * output from the top-level node, plus possibly a resjunk working
+ * column (we can rely on upper-level nodes to deal with that).
*/
return recurse_set_operations((Node *) topop, parse,
topop->colTypes, true, -1,
@@ -127,16 +127,18 @@ recurse_set_operations(Node *setOp, Query *parse,
{
RangeTblRef *rtr = (RangeTblRef *) setOp;
RangeTblEntry *rte = rt_fetch(rtr->rtindex, parse->rtable);
- Query *subquery = rte->subquery;
- Plan *subplan,
- *plan;
+ Query *subquery = rte->subquery;
+ Plan *subplan,
+ *plan;
Assert(subquery != NULL);
+
/*
* Generate plan for primitive subquery
*/
subplan = subquery_planner(subquery,
-1.0 /* default case */ );
+
/*
* Add a SubqueryScan with the caller-requested targetlist
*/
@@ -152,28 +154,30 @@ recurse_set_operations(Node *setOp, Query *parse,
else if (IsA(setOp, SetOperationStmt))
{
SetOperationStmt *op = (SetOperationStmt *) setOp;
- Plan *plan;
+ Plan *plan;
/* UNIONs are much different from INTERSECT/EXCEPT */
if (op->op == SETOP_UNION)
plan = generate_union_plan(op, parse, refnames_tlist);
else
plan = generate_nonunion_plan(op, parse, refnames_tlist);
+
/*
* If necessary, add a Result node to project the caller-requested
* output columns.
*
* XXX you don't really want to know about this: setrefs.c will apply
* replace_vars_with_subplan_refs() to the Result node's tlist.
- * This would fail if the input plan's non-resjunk tlist entries were
- * not all simple Vars equal() to the referencing Vars generated by
- * generate_setop_tlist(). However, since the input plan was
- * generated by generate_union_plan() or generate_nonunion_plan(),
- * the referencing Vars will equal the tlist entries they reference.
- * Ugly but I don't feel like making that code more general right now.
+ * This would fail if the input plan's non-resjunk tlist entries
+ * were not all simple Vars equal() to the referencing Vars
+ * generated by generate_setop_tlist(). However, since the input
+ * plan was generated by generate_union_plan() or
+ * generate_nonunion_plan(), the referencing Vars will equal the
+ * tlist entries they reference. Ugly but I don't feel like making
+ * that code more general right now.
*/
if (flag >= 0 ||
- ! tlist_same_datatypes(plan->targetlist, colTypes, junkOK))
+ !tlist_same_datatypes(plan->targetlist, colTypes, junkOK))
{
plan = (Plan *)
make_result(generate_setop_tlist(colTypes, flag, false,
@@ -199,8 +203,8 @@ static Plan *
generate_union_plan(SetOperationStmt *op, Query *parse,
List *refnames_tlist)
{
- List *planlist;
- Plan *plan;
+ List *planlist;
+ Plan *plan;
/*
* If any of my children are identical UNION nodes (same op, all-flag,
@@ -212,27 +216,29 @@ generate_union_plan(SetOperationStmt *op, Query *parse,
op, refnames_tlist),
recurse_union_children(op->rarg, parse,
op, refnames_tlist));
+
/*
* Append the child results together.
*
- * The tlist for an Append plan isn't important as far as the Append
- * is concerned, but we must make it look real anyway for the benefit
- * of the next plan level up.
+ * The tlist for an Append plan isn't important as far as the Append is
+ * concerned, but we must make it look real anyway for the benefit of
+ * the next plan level up.
*/
plan = (Plan *)
make_append(planlist,
false,
generate_setop_tlist(op->colTypes, -1, false,
- ((Plan *) lfirst(planlist))->targetlist,
- refnames_tlist));
+ ((Plan *) lfirst(planlist))->targetlist,
+ refnames_tlist));
+
/*
- * For UNION ALL, we just need the Append plan. For UNION,
- * need to add Sort and Unique nodes to produce unique output.
+ * For UNION ALL, we just need the Append plan. For UNION, need to
+ * add Sort and Unique nodes to produce unique output.
*/
- if (! op->all)
+ if (!op->all)
{
- List *tlist,
- *sortList;
+ List *tlist,
+ *sortList;
tlist = new_unsorted_tlist(plan->targetlist);
sortList = addAllTargetsToSortList(NIL, tlist);
@@ -249,12 +255,12 @@ static Plan *
generate_nonunion_plan(SetOperationStmt *op, Query *parse,
List *refnames_tlist)
{
- Plan *lplan,
- *rplan,
- *plan;
- List *tlist,
- *sortList;
- SetOpCmd cmd;
+ Plan *lplan,
+ *rplan,
+ *plan;
+ List *tlist,
+ *sortList;
+ SetOpCmd cmd;
/* Recurse on children, ensuring their outputs are marked */
lplan = recurse_set_operations(op->larg, parse,
@@ -263,12 +269,13 @@ generate_nonunion_plan(SetOperationStmt *op, Query *parse,
rplan = recurse_set_operations(op->rarg, parse,
op->colTypes, false, 1,
refnames_tlist);
+
/*
* Append the child results together.
*
- * The tlist for an Append plan isn't important as far as the Append
- * is concerned, but we must make it look real anyway for the benefit
- * of the next plan level up.
+ * The tlist for an Append plan isn't important as far as the Append is
+ * concerned, but we must make it look real anyway for the benefit of
+ * the next plan level up.
*/
plan = (Plan *)
make_append(makeList2(lplan, rplan),
@@ -276,9 +283,10 @@ generate_nonunion_plan(SetOperationStmt *op, Query *parse,
generate_setop_tlist(op->colTypes, 0, false,
lplan->targetlist,
refnames_tlist));
+
/*
- * Sort the child results, then add a SetOp plan node to
- * generate the correct output.
+ * Sort the child results, then add a SetOp plan node to generate the
+ * correct output.
*/
tlist = new_unsorted_tlist(plan->targetlist);
sortList = addAllTargetsToSortList(NIL, tlist);
@@ -293,11 +301,11 @@ generate_nonunion_plan(SetOperationStmt *op, Query *parse,
break;
default:
elog(ERROR, "generate_nonunion_plan: bogus operation code");
- cmd = SETOPCMD_INTERSECT; /* keep compiler quiet */
+ cmd = SETOPCMD_INTERSECT; /* keep compiler quiet */
break;
}
plan = (Plan *) make_setop(cmd, tlist, plan, sortList,
- length(op->colTypes)+1);
+ length(op->colTypes) + 1);
return plan;
}
@@ -322,20 +330,21 @@ recurse_union_children(Node *setOp, Query *parse,
{
/* Same UNION, so fold children into parent's subplan list */
return nconc(recurse_union_children(op->larg, parse,
- top_union, refnames_tlist),
+ top_union, refnames_tlist),
recurse_union_children(op->rarg, parse,
- top_union, refnames_tlist));
+ top_union, refnames_tlist));
}
}
+
/*
* Not same, so plan this child separately.
*
- * Note we disallow any resjunk columns in child results. This
- * is necessary since the Append node that implements the union
- * won't do any projection, and upper levels will get confused if
- * some of our output tuples have junk and some don't. This case
- * only arises when we have an EXCEPT or INTERSECT as child, else
- * there won't be resjunk anyway.
+ * Note we disallow any resjunk columns in child results. This is
+ * necessary since the Append node that implements the union won't do
+ * any projection, and upper levels will get confused if some of our
+ * output tuples have junk and some don't. This case only arises when
+ * we have an EXCEPT or INTERSECT as child, else there won't be
+ * resjunk anyway.
*/
return makeList1(recurse_set_operations(setOp, parse,
top_union->colTypes, false,
@@ -359,7 +368,7 @@ generate_setop_tlist(List *colTypes, int flag,
foreach(i, colTypes)
{
- Oid colType = (Oid) lfirsti(i);
+ Oid colType = (Oid) lfirsti(i);
TargetEntry *inputtle = (TargetEntry *) lfirst(input_tlist);
TargetEntry *reftle = (TargetEntry *) lfirst(refnames_tlist);
@@ -367,18 +376,19 @@ generate_setop_tlist(List *colTypes, int flag,
Assert(reftle->resdom->resno == resno);
Assert(!inputtle->resdom->resjunk);
Assert(!reftle->resdom->resjunk);
+
/*
* Generate columns referencing input columns and having
* appropriate data types and column names. Insert datatype
* coercions where necessary.
*
* HACK: constants in the input's targetlist are copied up as-is
- * rather than being referenced as subquery outputs. This is mainly
- * to ensure that when we try to coerce them to the output column's
- * datatype, the right things happen for UNKNOWN constants. But do
- * this only at the first level of subquery-scan plans; we don't
- * want phony constants appearing in the output tlists of upper-level
- * nodes!
+ * rather than being referenced as subquery outputs. This is
+ * mainly to ensure that when we try to coerce them to the output
+ * column's datatype, the right things happen for UNKNOWN
+ * constants. But do this only at the first level of
+ * subquery-scan plans; we don't want phony constants appearing in
+ * the output tlists of upper-level nodes!
*/
resdom = makeResdom((AttrNumber) resno++,
colType,
@@ -440,7 +450,7 @@ tlist_same_datatypes(List *tlist, List *colTypes, bool junkOK)
if (tle->resdom->resjunk)
{
- if (! junkOK)
+ if (!junkOK)
return false;
}
else
@@ -484,11 +494,11 @@ find_all_inheritors(Oid parentrel)
currentchildren = find_inheritance_children(currentrel);
/*
- * Add to the queue only those children not already seen.
- * This avoids making duplicate entries in case of multiple
- * inheritance paths from the same parent. (It'll also keep
- * us from getting into an infinite loop, though theoretically
- * there can't be any cycles in the inheritance graph anyway.)
+ * Add to the queue only those children not already seen. This
+ * avoids making duplicate entries in case of multiple inheritance
+ * paths from the same parent. (It'll also keep us from getting
+ * into an infinite loop, though theoretically there can't be any
+ * cycles in the inheritance graph anyway.)
*/
currentchildren = set_differencei(currentchildren, examined_relids);
unexamined_relids = set_unioni(unexamined_relids, currentchildren);
@@ -524,20 +534,21 @@ expand_inherted_rtentry(Query *parse, Index rti)
List *l;
/* Does RT entry allow inheritance? */
- if (! rte->inh)
+ if (!rte->inh)
return NIL;
Assert(parentOID != InvalidOid && rte->subquery == NULL);
/* Always clear the parent's inh flag, see above comments */
rte->inh = false;
/* Fast path for common case of childless table */
- if (! has_subclass(parentOID))
+ if (!has_subclass(parentOID))
return NIL;
/* Scan for all members of inheritance set */
inhOIDs = find_all_inheritors(parentOID);
+
/*
- * Check that there's at least one descendant, else treat as
- * no-child case. This could happen despite above has_subclass()
- * check, if table once had a child but no longer does.
+ * Check that there's at least one descendant, else treat as no-child
+ * case. This could happen despite above has_subclass() check, if
+ * table once had a child but no longer does.
*/
if (lnext(inhOIDs) == NIL)
return NIL;
@@ -545,18 +556,19 @@ expand_inherted_rtentry(Query *parse, Index rti)
inhRTIs = makeListi1(rti);
foreach(l, inhOIDs)
{
- Oid childOID = (Oid) lfirsti(l);
+ Oid childOID = (Oid) lfirsti(l);
RangeTblEntry *childrte;
- Index childRTindex;
+ Index childRTindex;
/* parent will be in the list too, so ignore it */
if (childOID == parentOID)
continue;
/*
- * Build an RTE for the child, and attach to query's rangetable list.
- * We copy most fields of the parent's RTE, but replace relation
- * real name and OID. Note that inh will be false at this point.
+ * Build an RTE for the child, and attach to query's rangetable
+ * list. We copy most fields of the parent's RTE, but replace
+ * relation real name and OID. Note that inh will be false at
+ * this point.
*/
childrte = copyObject(rte);
childrte->relname = get_rel_name(childOID);
@@ -575,12 +587,12 @@ expand_inherted_rtentry(Query *parse, Index rti)
* to old_rt_index to refer to new_rt_index.
*
* We also adjust varattno to match the new table by column name, rather
- * than column number. This hack makes it possible for child tables to have
+ * than column number. This hack makes it possible for child tables to have
* different column positions for the "same" attribute as a parent, which
* helps ALTER TABLE ADD COLUMN. Unfortunately this isn't nearly enough to
* make it work transparently; there are other places where things fall down
* if children and parents don't have the same column numbers for inherited
- * attributes. It'd be better to rip this code out and fix ALTER TABLE...
+ * attributes. It'd be better to rip this code out and fix ALTER TABLE...
*/
Node *
adjust_inherited_attrs(Node *node,
@@ -643,12 +655,13 @@ adjust_inherited_attrs_mutator(Node *node,
}
if (IsA(node, RangeTblRef))
{
- RangeTblRef *rtr = (RangeTblRef *) copyObject(node);
+ RangeTblRef *rtr = (RangeTblRef *) copyObject(node);
if (rtr->rtindex == context->old_rt_index)
rtr->rtindex = context->new_rt_index;
return (Node *) rtr;
}
+
/*
* We have to process RestrictInfo nodes specially: we do NOT want to
* copy the original subclauseindices list, since the new rel may have
@@ -656,8 +669,8 @@ adjust_inherited_attrs_mutator(Node *node,
*/
if (IsA(node, RestrictInfo))
{
- RestrictInfo *oldinfo = (RestrictInfo *) node;
- RestrictInfo *newinfo = makeNode(RestrictInfo);
+ RestrictInfo *oldinfo = (RestrictInfo *) node;
+ RestrictInfo *newinfo = makeNode(RestrictInfo);
/* Copy all flat-copiable fields */
memcpy(newinfo, oldinfo, sizeof(RestrictInfo));
@@ -666,18 +679,19 @@ adjust_inherited_attrs_mutator(Node *node,
adjust_inherited_attrs_mutator((Node *) oldinfo->clause, context);
newinfo->subclauseindices = NIL;
- newinfo->eval_cost = -1; /* reset this too */
- newinfo->left_pathkey = NIL; /* and these */
+ newinfo->eval_cost = -1;/* reset this too */
+ newinfo->left_pathkey = NIL; /* and these */
newinfo->right_pathkey = NIL;
newinfo->left_dispersion = -1;
newinfo->right_dispersion = -1;
return (Node *) newinfo;
}
+
/*
* NOTE: we do not need to recurse into sublinks, because they should
* already have been converted to subplans before we see them.
*/
return expression_tree_mutator(node, adjust_inherited_attrs_mutator,
- (void *) context);
+ (void *) context);
}
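
The comment above adjust_inherited_attrs() notes that varattno is remapped by column name rather than by column position, so a child table may store the "same" inherited attribute at a different attno than its parent. As a rough standalone restatement of that by-name matching (a hypothetical helper, not code from this patch; attribute numbers are 1-based as in the backend):

    #include <string.h>

    /*
     * Return the child's 1-based attribute number whose column name matches
     * the parent's attribute parent_attno, or -1 if no such column exists.
     */
    static int
    translate_attno_by_name(const char *parent_cols[], int nparent,
                            const char *child_cols[], int nchild,
                            int parent_attno)
    {
        const char *name;
        int         i;

        if (parent_attno < 1 || parent_attno > nparent)
            return -1;
        name = parent_cols[parent_attno - 1];
        for (i = 0; i < nchild; i++)
        {
            if (strcmp(child_cols[i], name) == 0)
                return i + 1;
        }
        return -1;
    }

A child created by ALTER TABLE ADD COLUMN can thus resolve the inherited column correctly even though its position differs from the parent's.
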
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index bc05908148b..83d2468b02c 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.82 2001/03/08 01:49:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.83 2001/03/22 03:59:39 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -544,8 +544,8 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
/*
* If we find an aggregate function, do not recurse into its
- * arguments. Subplans invoked within aggregate calls are allowed
- * to receive ungrouped variables.
+ * arguments. Subplans invoked within aggregate calls are allowed to
+ * receive ungrouped variables.
*/
if (IsA(node, Aggref))
return false;
@@ -630,7 +630,7 @@ check_subplans_for_ungrouped_vars_walker(Node *node,
* Recursively search for noncachable functions within a clause.
*
* Returns true if any noncachable function (or operator implemented by a
- * noncachable function) is found. This test is needed so that we don't
+ * noncachable function) is found. This test is needed so that we don't
* mistakenly think that something like "WHERE random() < 0.5" can be treated
* as a constant qualification.
*
@@ -655,11 +655,11 @@ contain_noncachable_functions_walker(Node *node, void *context)
switch (expr->opType)
{
case OP_EXPR:
- if (! op_iscachable(((Oper *) expr->oper)->opno))
+ if (!op_iscachable(((Oper *) expr->oper)->opno))
return true;
break;
case FUNC_EXPR:
- if (! func_iscachable(((Func *) expr->oper)->funcid))
+ if (!func_iscachable(((Func *) expr->oper)->funcid))
return true;
break;
default:
@@ -680,13 +680,14 @@ contain_noncachable_functions_walker(Node *node, void *context)
* Detect whether a clause is "constant", ie, it contains no variables
* of the current query level and no uses of noncachable functions.
* Such a clause is not necessarily a true constant: it can still contain
- * Params and outer-level Vars. However, its value will be constant over
+ * Params and outer-level Vars. However, its value will be constant over
* any one scan of the current query, so it can be used as an indexscan
* key or (if a top-level qual) can be pushed up to become a gating qual.
*/
bool
is_pseudo_constant_clause(Node *clause)
{
+
/*
* We could implement this check in one recursive scan. But since the
* check for noncachable functions is both moderately expensive and
@@ -716,7 +717,7 @@ pull_constant_clauses(List *quals, List **constantQual)
foreach(q, quals)
{
- Node *qual = (Node *) lfirst(q);
+ Node *qual = (Node *) lfirst(q);
if (is_pseudo_constant_clause(qual))
constqual = lappend(constqual, qual);
@@ -1277,8 +1278,8 @@ eval_const_expressions_mutator(Node *node, void *context)
arg = eval_const_expressions_mutator(relabel->arg, context);
/*
- * If we find stacked RelabelTypes (eg, from foo :: int :: oid)
- * we can discard all but the top one.
+ * If we find stacked RelabelTypes (eg, from foo :: int :: oid) we
+ * can discard all but the top one.
*/
while (arg && IsA(arg, RelabelType))
arg = ((RelabelType *) arg)->arg;
@@ -1472,8 +1473,8 @@ simplify_op_or_func(Expr *expr, List *args)
* If the function is strict and has a constant-NULL input, it will
* never be called at all, so we can replace the call by a NULL
* constant even if there are other inputs that aren't constant.
- * Otherwise, we can only simplify if all inputs are constants.
- * We can skip the function lookup if neither case applies.
+ * Otherwise, we can only simplify if all inputs are constants. We can
+ * skip the function lookup if neither case applies.
*/
if (has_nonconst_input && !has_null_input)
return NULL;
@@ -1500,9 +1501,10 @@ simplify_op_or_func(Expr *expr, List *args)
funcid = func->funcid;
result_typeid = func->functype;
}
+
/*
- * we could use func_iscachable() here, but we need several fields
- * out of the func tuple, so might as well just look it up once.
+ * we could use func_iscachable() here, but we need several fields out
+ * of the func tuple, so might as well just look it up once.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
@@ -1530,6 +1532,7 @@ simplify_op_or_func(Expr *expr, List *args)
*/
if (proisstrict && has_null_input)
{
+
/*
* It's strict and has NULL input, so must produce NULL output.
* Return a NULL constant of the right type.
@@ -1538,9 +1541,9 @@ simplify_op_or_func(Expr *expr, List *args)
}
/*
- * Otherwise, can simplify only if all inputs are constants.
- * (For a non-strict function, constant NULL inputs are treated
- * the same as constant non-NULL inputs.)
+ * Otherwise, can simplify only if all inputs are constants. (For a
+ * non-strict function, constant NULL inputs are treated the same as
+ * constant non-NULL inputs.)
*/
if (has_nonconst_input)
return NULL;
@@ -1565,10 +1568,10 @@ simplify_op_or_func(Expr *expr, List *args)
get_typlenbyval(result_typeid, &resultTypLen, &resultTypByVal);
/*
- * It is OK to pass a dummy econtext because none of the ExecEvalExpr()
- * code used in this situation will use econtext. That might seem
- * fortuitous, but it's not so unreasonable --- a constant expression
- * does not depend on context, by definition, n'est ce pas?
+ * It is OK to pass a dummy econtext because none of the
+ * ExecEvalExpr() code used in this situation will use econtext. That
+ * might seem fortuitous, but it's not so unreasonable --- a constant
+ * expression does not depend on context, by definition, n'est ce pas?
*/
econtext = MakeExprContext(NULL, CurrentMemoryContext);
@@ -1657,10 +1660,10 @@ simplify_op_or_func(Expr *expr, List *args)
* expression_tree_walker itself is called on a Query node, it does nothing
* and returns "false". The net effect is that unless the walker does
* something special at a Query node, sub-selects will not be visited
- * during an expression tree walk. This is exactly the behavior wanted
+ * during an expression tree walk. This is exactly the behavior wanted
* in many cases --- and for those walkers that do want to recurse into
* sub-selects, special behavior is typically needed anyway at the entry
- * to a sub-select (such as incrementing a depth counter). A walker that
+ * to a sub-select (such as incrementing a depth counter). A walker that
* wants to examine sub-selects should include code along the lines of:
*
* if (IsA(node, Query))
@@ -1780,9 +1783,10 @@ expression_tree_walker(Node *node,
/*
* If the SubLink has already been processed by
* subselect.c, it will have lefthand=NIL, and we need to
- * scan the oper list. Otherwise we only need to look at
- * the lefthand list (the incomplete Oper nodes in the oper
- * list are deemed uninteresting, perhaps even confusing).
+ * scan the oper list. Otherwise we only need to look at
+ * the lefthand list (the incomplete Oper nodes in the
+ * oper list are deemed uninteresting, perhaps even
+ * confusing).
*/
if (sublink->lefthand)
{
@@ -1794,9 +1798,10 @@ expression_tree_walker(Node *node,
if (walker((Node *) sublink->oper, context))
return true;
}
+
/*
- * Also invoke the walker on the sublink's Query node,
- * so it can recurse into the sub-query if it wants to.
+ * Also invoke the walker on the sublink's Query node, so
+ * it can recurse into the sub-query if it wants to.
*/
return walker(sublink->subselect, context);
}
@@ -1815,7 +1820,7 @@ expression_tree_walker(Node *node,
return walker(((TargetEntry *) node)->expr, context);
case T_FromExpr:
{
- FromExpr *from = (FromExpr *) node;
+ FromExpr *from = (FromExpr *) node;
if (walker(from->fromlist, context))
return true;
@@ -1825,7 +1830,7 @@ expression_tree_walker(Node *node,
break;
case T_JoinExpr:
{
- JoinExpr *join = (JoinExpr *) node;
+ JoinExpr *join = (JoinExpr *) node;
if (walker(join->larg, context))
return true;
@@ -1835,7 +1840,9 @@ expression_tree_walker(Node *node,
return true;
if (walker((Node *) join->colvars, context))
return true;
- /* alias clause, using list, colnames list are deemed
+
+ /*
+ * alias clause, using list, colnames list are deemed
* uninteresting.
*/
}
@@ -1890,7 +1897,7 @@ query_tree_walker(Query *query,
return true;
if (visitQueryRTEs)
{
- List *rt;
+ List *rt;
foreach(rt, query->rtable)
{
@@ -2176,8 +2183,8 @@ expression_tree_mutator(Node *node,
break;
case T_FromExpr:
{
- FromExpr *from = (FromExpr *) node;
- FromExpr *newnode;
+ FromExpr *from = (FromExpr *) node;
+ FromExpr *newnode;
FLATCOPY(newnode, from, FromExpr);
MUTATE(newnode->fromlist, from->fromlist, List *);
@@ -2187,8 +2194,8 @@ expression_tree_mutator(Node *node,
break;
case T_JoinExpr:
{
- JoinExpr *join = (JoinExpr *) node;
- JoinExpr *newnode;
+ JoinExpr *join = (JoinExpr *) node;
+ JoinExpr *newnode;
FLATCOPY(newnode, join, JoinExpr);
MUTATE(newnode->larg, join->larg, Node *);
@@ -2226,7 +2233,7 @@ expression_tree_mutator(Node *node,
* This routine exists just to reduce the number of places that need to know
* where all the expression subtrees of a Query are. Note it can be used
* for starting a walk at top level of a Query regardless of whether the
- * mutator intends to descend into subqueries. It is also useful for
+ * mutator intends to descend into subqueries. It is also useful for
* descending into subqueries within a mutator.
*
* The specified Query node is modified-in-place; do a FLATCOPY() beforehand
@@ -2252,8 +2259,8 @@ query_tree_mutator(Query *query,
MUTATE(query->havingQual, query->havingQual, Node *);
if (visitQueryRTEs)
{
- List *newrt = NIL;
- List *rt;
+ List *newrt = NIL;
+ List *rt;
foreach(rt, query->rtable)
{
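
The expression_tree_walker() comment above describes the convention for walkers that want to descend into sub-selects: test for a Query node and hand it to query_tree_walker(). Spelled out, a walker following that convention looks roughly like this (hypothetical walker and context type, written against the backend's Node/Query types; the query_tree_walker() signature with its visitQueryRTEs flag is taken from the hunks above):

    typedef struct
    {
        int         sublevels_up;   /* example of per-walk state */
    } my_walker_context;

    static bool
    my_walker(Node *node, my_walker_context *context)
    {
        if (node == NULL)
            return false;
        if (IsA(node, Query))
        {
            bool        result;

            /* do whatever is special at a sub-select boundary, then recurse */
            context->sublevels_up++;
            result = query_tree_walker((Query *) node, my_walker,
                                       (void *) context, true);
            context->sublevels_up--;
            return result;
        }
        /* ... per-node-type checks would go here ... */
        return expression_tree_walker(node, my_walker, (void *) context);
    }

Without the IsA(node, Query) case, expression_tree_walker() simply returns false at Query nodes and the sub-select is never visited, which is the default behavior the comment describes.
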
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index 81bb3bb9e88..ec98dbcb24f 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.28 2001/01/24 19:43:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.29 2001/03/22 03:59:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@ static JoinInfo *joininfo_member(List *join_relids, List *joininfo_list);
* exists.
*
*/
-static JoinInfo *
+static JoinInfo *
joininfo_member(List *join_relids, List *joininfo_list)
{
List *i;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 1a0dfc67df7..cfba3ee395f 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.70 2001/01/24 19:43:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.71 2001/03/22 03:59:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -192,7 +192,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* possible for more than one old path to be tossed out because
* new_path dominates it.
*/
- p1 = parent_rel->pathlist; /* cannot use foreach here */
+ p1 = parent_rel->pathlist; /* cannot use foreach here */
while (p1 != NIL)
{
Path *old_path = (Path *) lfirst(p1);
@@ -243,7 +243,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
*/
if (remove_old && parent_rel->pruneable)
{
- List *p1_next = lnext(p1);
+ List *p1_next = lnext(p1);
if (p1_prev)
lnext(p1_prev) = p1_next;
@@ -409,14 +409,15 @@ create_append_path(RelOptInfo *rel, List *subpaths)
pathnode->path.pathtype = T_Append;
pathnode->path.parent = rel;
- pathnode->path.pathkeys = NIL; /* result is always considered unsorted */
+ pathnode->path.pathkeys = NIL; /* result is always considered
+ * unsorted */
pathnode->subpaths = subpaths;
pathnode->path.startup_cost = 0;
pathnode->path.total_cost = 0;
foreach(l, subpaths)
{
- Path *subpath = (Path *) lfirst(l);
+ Path *subpath = (Path *) lfirst(l);
if (l == subpaths) /* first node? */
pathnode->path.startup_cost = subpath->startup_cost;
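
In create_append_path() above, the Append path's startup cost is taken from its first subpath, and the remainder of the loop (cut off at this hunk boundary) presumably accumulates the children's total costs. A toy restatement of that cost rule (hypothetical standalone helper; in the backend, Cost is a double):

    typedef double Cost;

    /*
     * Combine child path costs the way create_append_path() appears to:
     * startup cost of the first child, summed total costs (the summing is
     * an assumption, since that part of the loop is not shown here).
     */
    static void
    append_path_costs(const Cost startup[], const Cost total[], int nchildren,
                      Cost *startup_out, Cost *total_out)
    {
        int         i;

        *startup_out = (nchildren > 0) ? startup[0] : 0;
        *total_out = 0;
        for (i = 0; i < nchildren; i++)
            *total_out += total[i];
    }
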
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index e4c3a077a30..4f711df203c 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.63 2001/01/24 19:43:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.64 2001/03/22 03:59:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,8 +83,8 @@ find_secondary_indexes(Oid relationObjectId)
Relation relation;
/*
- * We used to scan pg_index directly, but now the relcache offers
- * a cached list of OID indexes for each relation. So, get that list
+ * We used to scan pg_index directly, but now the relcache offers a
+ * cached list of OID indexes for each relation. So, get that list
* and then use the syscache to obtain pg_index entries.
*/
relation = heap_open(relationObjectId, AccessShareLock);
@@ -126,7 +126,7 @@ find_secondary_indexes(Oid relationObjectId)
char *predString;
predString = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&index->indpred)));
+ PointerGetDatum(&index->indpred)));
info->indpred = (List *) stringToNode(predString);
pfree(predString);
}
@@ -213,11 +213,11 @@ restriction_selectivity(Oid functionObjectId,
float8 result;
result = DatumGetFloat8(OidFunctionCall5(functionObjectId,
- ObjectIdGetDatum(operatorObjectId),
- ObjectIdGetDatum(relationObjectId),
- Int16GetDatum(attributeNumber),
- constValue,
- Int32GetDatum(constFlag)));
+ ObjectIdGetDatum(operatorObjectId),
+ ObjectIdGetDatum(relationObjectId),
+ Int16GetDatum(attributeNumber),
+ constValue,
+ Int32GetDatum(constFlag)));
if (result < 0.0 || result > 1.0)
elog(ERROR, "restriction_selectivity: bad value %f", result);
@@ -246,11 +246,11 @@ join_selectivity(Oid functionObjectId,
float8 result;
result = DatumGetFloat8(OidFunctionCall5(functionObjectId,
- ObjectIdGetDatum(operatorObjectId),
- ObjectIdGetDatum(relationObjectId1),
- Int16GetDatum(attributeNumber1),
- ObjectIdGetDatum(relationObjectId2),
- Int16GetDatum(attributeNumber2)));
+ ObjectIdGetDatum(operatorObjectId),
+ ObjectIdGetDatum(relationObjectId1),
+ Int16GetDatum(attributeNumber1),
+ ObjectIdGetDatum(relationObjectId2),
+ Int16GetDatum(attributeNumber2)));
if (result < 0.0 || result > 1.0)
elog(ERROR, "join_selectivity: bad value %f", result);
@@ -277,13 +277,13 @@ find_inheritance_children(Oid inhparent)
HeapScanDesc scan;
HeapTuple inheritsTuple;
Oid inhrelid;
- ScanKeyData key[1];
+ ScanKeyData key[1];
/*
- * Can skip the scan if pg_class shows the relation has never had
- * a subclass.
+ * Can skip the scan if pg_class shows the relation has never had a
+ * subclass.
*/
- if (! has_subclass(inhparent))
+ if (!has_subclass(inhparent))
return NIL;
ScanKeyEntryInitialize(&key[0],
@@ -306,7 +306,7 @@ find_inheritance_children(Oid inhparent)
/*
* has_subclass
*
- * In the current implementation, has_subclass returns whether a
+ * In the current implementation, has_subclass returns whether a
* particular class *might* have a subclass. It will not return the
* correct result if a class had a subclass which was later dropped.
* This is because relhassubclass in pg_class is not updated when a
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index b9296fdfa00..9d5d6c080e7 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.49 2001/01/24 19:43:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.50 2001/03/22 03:59:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,6 +60,7 @@ matching_tlist_expr(Node *node, List *targetlist)
return (Node *) NULL;
}
+
#endif
/*
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 1347bd9a522..cac0eee8276 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.29 2001/01/24 19:43:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.30 2001/03/22 03:59:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@ typedef struct
} pull_var_clause_context;
static bool pull_varnos_walker(Node *node,
- pull_varnos_context *context);
+ pull_varnos_context *context);
static bool contain_var_clause_walker(Node *node, void *context);
static bool pull_var_clause_walker(Node *node,
pull_var_clause_context *context);
@@ -90,15 +90,16 @@ pull_varnos_walker(Node *node, pull_varnos_context *context)
}
if (is_subplan(node))
{
+
/*
- * Already-planned subquery. Examine the args list (parameters
- * to be passed to subquery), as well as the "oper" list which
- * is executed by the outer query. But short-circuit recursion into
+ * Already-planned subquery. Examine the args list (parameters to
+ * be passed to subquery), as well as the "oper" list which is
+ * executed by the outer query. But short-circuit recursion into
* the subquery itself, which would be a waste of effort.
*/
Expr *expr = (Expr *) node;
- if (pull_varnos_walker((Node*) ((SubPlan*) expr->oper)->sublink->oper,
+ if (pull_varnos_walker((Node *) ((SubPlan *) expr->oper)->sublink->oper,
context))
return true;
if (pull_varnos_walker((Node *) expr->args,
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index ba0a3cfa348..a502cea5d67 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.181 2001/02/15 01:10:28 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.182 2001/03/22 03:59:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -228,7 +228,7 @@ transformStmt(ParseState *pstate, Node *parseTree)
(SelectStmt *) parseTree);
else
result = transformSetOperationStmt(pstate,
- (SelectStmt *) parseTree);
+ (SelectStmt *) parseTree);
break;
default:
@@ -302,11 +302,11 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* If a non-nil rangetable/namespace was passed in, and we are doing
* INSERT/SELECT, arrange to pass the rangetable/namespace down to the
- * SELECT. This can only happen if we are inside a CREATE RULE,
- * and in that case we want the rule's OLD and NEW rtable entries to
+ * SELECT. This can only happen if we are inside a CREATE RULE, and
+ * in that case we want the rule's OLD and NEW rtable entries to
* appear as part of the SELECT's rtable, not as outer references for
- * it. (Kluge!) The SELECT's joinlist is not affected however.
- * We must do this before adding the target table to the INSERT's rtable.
+ * it. (Kluge!) The SELECT's joinlist is not affected however. We
+ * must do this before adding the target table to the INSERT's rtable.
*/
if (stmt->selectStmt)
{
@@ -324,7 +324,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
/*
* Must get write lock on INSERT target table before scanning SELECT,
* else we will grab the wrong kind of initial lock if the target
- * table is also mentioned in the SELECT part. Note that the target
+ * table is also mentioned in the SELECT part. Note that the target
* table is not added to the joinlist or namespace.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relname,
@@ -336,17 +336,17 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
if (stmt->selectStmt)
{
ParseState *sub_pstate = make_parsestate(pstate->parentParseState);
- Query *selectQuery;
+ Query *selectQuery;
RangeTblEntry *rte;
RangeTblRef *rtr;
/*
* Process the source SELECT.
*
- * It is important that this be handled just like a standalone SELECT;
- * otherwise the behavior of SELECT within INSERT might be different
- * from a stand-alone SELECT. (Indeed, Postgres up through 6.5 had
- * bugs of just that nature...)
+ * It is important that this be handled just like a standalone
+ * SELECT; otherwise the behavior of SELECT within INSERT might be
+ * different from a stand-alone SELECT. (Indeed, Postgres up
+ * through 6.5 had bugs of just that nature...)
*/
sub_pstate->p_rtable = sub_rtable;
sub_pstate->p_namespace = sub_namespace;
@@ -360,9 +360,10 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
Assert(selectQuery->commandType == CMD_SELECT);
if (selectQuery->into || selectQuery->isPortal)
elog(ERROR, "INSERT ... SELECT may not specify INTO");
+
/*
- * Make the source be a subquery in the INSERT's rangetable,
- * and add it to the INSERT's joinlist.
+ * Make the source be a subquery in the INSERT's rangetable, and
+ * add it to the INSERT's joinlist.
*/
rte = addRangeTableEntryForSubquery(pstate,
selectQuery,
@@ -373,18 +374,19 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
rtr->rtindex = length(pstate->p_rtable);
Assert(rte == rt_fetch(rtr->rtindex, pstate->p_rtable));
pstate->p_joinlist = lappend(pstate->p_joinlist, rtr);
+
/*
- * Generate a targetlist for the INSERT that selects all
- * the non-resjunk columns from the subquery. (We need this to
- * be separate from the subquery's tlist because we may add
- * columns, insert datatype coercions, etc.)
+ * Generate a targetlist for the INSERT that selects all the
+ * non-resjunk columns from the subquery. (We need this to be
+ * separate from the subquery's tlist because we may add columns,
+ * insert datatype coercions, etc.)
*
* HACK: constants in the INSERT's targetlist are copied up as-is
- * rather than being referenced as subquery outputs. This is mainly
- * to ensure that when we try to coerce them to the target column's
- * datatype, the right things happen for UNKNOWN constants.
- * Otherwise this fails:
- * INSERT INTO foo SELECT 'bar', ... FROM baz
+ * rather than being referenced as subquery outputs. This is
+ * mainly to ensure that when we try to coerce them to the target
+ * column's datatype, the right things happen for UNKNOWN
+ * constants. Otherwise this fails: INSERT INTO foo SELECT 'bar',
+ * ... FROM baz
*/
qry->targetList = NIL;
foreach(tl, selectQuery->targetList)
@@ -411,9 +413,10 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
}
else
{
+
/*
- * For INSERT ... VALUES, transform the given list of values
- * to form a targetlist for the INSERT.
+ * For INSERT ... VALUES, transform the given list of values to
+ * form a targetlist for the INSERT.
*/
qry->targetList = transformTargetList(pstate, stmt->targetList);
}
@@ -466,8 +469,8 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
* have defaults and were not assigned to by the user.
*
* XXX wouldn't it make more sense to do this further downstream, after
- * the rule rewriter? As is, altering a column default will not change
- * the behavior of INSERTs in already-defined rules.
+ * the rule rewriter? As is, altering a column default will not
+ * change the behavior of INSERTs in already-defined rules.
*/
rd_att = pstate->p_target_relation->rd_att;
if (rd_att->constr && rd_att->constr->num_defval > 0)
@@ -618,8 +621,8 @@ CreateIndexName(char *table_name, char *column_name,
* The type name for makeObjectName is label, or labelN if that's
* necessary to prevent collisions among multiple indexes for the same
* table. Note there is no check for collisions with already-existing
- * indexes, only among the indexes we're about to create now; this ought
- * to be improved someday.
+ * indexes, only among the indexes we're about to create now; this
+ * ought to be improved someday.
*/
strcpy(typename, label);
@@ -748,7 +751,7 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
constraint = makeNode(Constraint);
constraint->contype = CONSTR_UNIQUE;
- constraint->name = NULL; /* assign later */
+ constraint->name = NULL; /* assign later */
column->constraints = lappend(column->constraints,
constraint);
@@ -948,7 +951,7 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
else if (constraint->contype == CONSTR_PRIMARY)
index->idxname = makeObjectName(stmt->relname, NULL, "pkey");
else
- index->idxname = NULL; /* will set it later */
+ index->idxname = NULL; /* will set it later */
index->relname = stmt->relname;
index->accessMethod = "btree";
@@ -956,9 +959,9 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
index->withClause = NIL;
index->whereClause = NULL;
- foreach (keys, constraint->keys)
+ foreach(keys, constraint->keys)
{
- bool found = false;
+ bool found = false;
key = (Ident *) lfirst(keys);
Assert(IsA(key, Ident));
@@ -982,14 +985,14 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
else
{
/* try inherited tables */
- List *inhRelnames = stmt->inhRelnames;
- List *inher;
+ List *inhRelnames = stmt->inhRelnames;
+ List *inher;
- foreach (inher, inhRelnames)
+ foreach(inher, inhRelnames)
{
- Value *inh = lfirst(inher);
- Relation rel;
- int count;
+ Value *inh = lfirst(inher);
+ Relation rel;
+ int count;
Assert(IsA(inh, String));
rel = heap_openr(strVal(inh), AccessShareLock);
@@ -999,26 +1002,28 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = rel->rd_att->attrs[count];
- char *inhname = NameStr(inhattr->attname);
+ char *inhname = NameStr(inhattr->attname);
if (strcmp(key->name, inhname) == 0)
{
found = true;
+
/*
- * If the column is inherited, we currently have
- * no easy way to force it to be NOT NULL.
- * Only way I can see to fix this would be to
- * convert the inherited-column info to ColumnDef
- * nodes before we reach this point, and then
- * create the table from those nodes rather than
- * referencing the parent tables later. That
- * would likely be cleaner, but too much work
- * to contemplate right now. Instead, raise an
- * error if the inherited column won't be NOT NULL.
- * (Would a NOTICE be more reasonable?)
+ * If the column is inherited, we currently
+ * have no easy way to force it to be NOT
+ * NULL. Only way I can see to fix this would
+ * be to convert the inherited-column info to
+ * ColumnDef nodes before we reach this point,
+ * and then create the table from those nodes
+ * rather than referencing the parent tables
+ * later. That would likely be cleaner, but
+ * too much work to contemplate right now.
+ * Instead, raise an error if the inherited
+ * column won't be NOT NULL. (Would a NOTICE
+ * be more reasonable?)
*/
if (constraint->contype == CONSTR_PRIMARY &&
- ! inhattr->attnotnull)
+ !inhattr->attnotnull)
elog(ERROR, "inherited attribute \"%s\" cannot be a PRIMARY KEY because it is not marked NOT NULL",
inhname);
break;
@@ -1047,10 +1052,10 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
/*
* Scan the index list and remove any redundant index specifications.
- * This can happen if, for instance, the user writes SERIAL PRIMARY KEY
- * or SERIAL UNIQUE. A strict reading of SQL92 would suggest raising
- * an error instead, but that strikes me as too anal-retentive.
- * - tgl 2001-02-14
+ * This can happen if, for instance, the user writes SERIAL PRIMARY
+ * KEY or SERIAL UNIQUE. A strict reading of SQL92 would suggest
+ * raising an error instead, but that strikes me as too
+ * anal-retentive. - tgl 2001-02-14
*/
dlist = ilist;
ilist = NIL;
@@ -1075,12 +1080,13 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
if (equal(index->indexParams, priorindex->indexParams))
{
+
/*
* If the prior index is as yet unnamed, and this one
- * is named, then transfer the name to the prior index.
- * This ensures that if we have named and unnamed
- * constraints, we'll use (at least one of) the names
- * for the index.
+ * is named, then transfer the name to the prior
+ * index. This ensures that if we have named and
+ * unnamed constraints, we'll use (at least one of)
+ * the names for the index.
*/
if (priorindex->idxname == NULL)
priorindex->idxname = index->idxname;
@@ -1149,46 +1155,57 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
if (fkconstraint->constr_name == NULL)
fkconstraint->constr_name = "<unnamed>";
- /*
+ /*
* Check to see if the attributes mentioned by the constraint
* actually exist on this table.
*/
- if (fkconstraint->fk_attrs!=NIL) {
- int found=0;
- List *cols;
- List *fkattrs;
- Ident *fkattr = NULL;
- ColumnDef *col;
- foreach(fkattrs, fkconstraint->fk_attrs) {
- found=0;
- fkattr=lfirst(fkattrs);
- foreach(cols, stmt->tableElts) {
- col=lfirst(cols);
- if (strcmp(col->colname, fkattr->name)==0) {
- found=1;
+ if (fkconstraint->fk_attrs != NIL)
+ {
+ int found = 0;
+ List *cols;
+ List *fkattrs;
+ Ident *fkattr = NULL;
+ ColumnDef *col;
+
+ foreach(fkattrs, fkconstraint->fk_attrs)
+ {
+ found = 0;
+ fkattr = lfirst(fkattrs);
+ foreach(cols, stmt->tableElts)
+ {
+ col = lfirst(cols);
+ if (strcmp(col->colname, fkattr->name) == 0)
+ {
+ found = 1;
break;
}
}
if (!found)
break;
}
- if (!found) { /* try inherited tables */
- List *inher;
- List *inhRelnames=stmt->inhRelnames;
- Relation rel;
- foreach (inher, inhRelnames) {
- Value *inh=lfirst(inher);
- int count;
+ if (!found)
+ { /* try inherited tables */
+ List *inher;
+ List *inhRelnames = stmt->inhRelnames;
+ Relation rel;
+
+ foreach(inher, inhRelnames)
+ {
+ Value *inh = lfirst(inher);
+ int count;
Assert(IsA(inh, String));
- rel=heap_openr(strVal(inh), AccessShareLock);
+ rel = heap_openr(strVal(inh), AccessShareLock);
if (rel->rd_rel->relkind != RELKIND_RELATION)
elog(ERROR, "inherited table \"%s\" is not a relation",
- strVal(inh));
- for (count = 0; count < rel->rd_att->natts; count++) {
- char *name=NameStr(rel->rd_att->attrs[count]->attname);
- if (strcmp(fkattr->name, name) == 0) {
- found=1;
+ strVal(inh));
+ for (count = 0; count < rel->rd_att->natts; count++)
+ {
+ char *name = NameStr(rel->rd_att->attrs[count]->attname);
+
+ if (strcmp(fkattr->name, name) == 0)
+ {
+ found = 1;
break;
}
}
@@ -1197,9 +1214,8 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
break;
}
}
- else {
- found=1;
- }
+ else
+ found = 1;
if (!found)
elog(ERROR, "columns referenced in foreign key constraint not found.");
}
@@ -1238,35 +1254,44 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
fkconstraint->pktable_name);
}
}
- else {
- if (strcmp(fkconstraint->pktable_name, stmt->relname)!=0)
+ else
+ {
+ if (strcmp(fkconstraint->pktable_name, stmt->relname) != 0)
transformFkeyCheckAttrs(fkconstraint);
- else {
+ else
+ {
/* Get a unique/pk constraint from above */
- List *index;
- int found=0;
+ List *index;
+ int found = 0;
+
foreach(index, ilist)
{
- IndexStmt *ind=lfirst(index);
- IndexElem *indparm;
- List *indparms;
- List *pkattrs;
- Ident *pkattr;
- if (ind->unique) {
- int count=0;
- foreach(indparms, ind->indexParams) {
+ IndexStmt *ind = lfirst(index);
+ IndexElem *indparm;
+ List *indparms;
+ List *pkattrs;
+ Ident *pkattr;
+
+ if (ind->unique)
+ {
+ int count = 0;
+
+ foreach(indparms, ind->indexParams)
count++;
- }
- if (count!=length(fkconstraint->pk_attrs))
- found=0;
- else {
- foreach(pkattrs, fkconstraint->pk_attrs) {
- found=0;
- pkattr=lfirst(pkattrs);
- foreach(indparms, ind->indexParams) {
- indparm=lfirst(indparms);
- if (strcmp(indparm->name, pkattr->name)==0) {
- found=1;
+ if (count != length(fkconstraint->pk_attrs))
+ found = 0;
+ else
+ {
+ foreach(pkattrs, fkconstraint->pk_attrs)
+ {
+ found = 0;
+ pkattr = lfirst(pkattrs);
+ foreach(indparms, ind->indexParams)
+ {
+ indparm = lfirst(indparms);
+ if (strcmp(indparm->name, pkattr->name) == 0)
+ {
+ found = 1;
break;
}
}
@@ -1283,6 +1308,7 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
fkconstraint->pktable_name);
}
}
+
/*
* Build a CREATE CONSTRAINT TRIGGER statement for the CHECK
* action.
@@ -1309,13 +1335,13 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
fk_trigger->args = NIL;
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->constr_name));
+ makeString(fkconstraint->constr_name));
fk_trigger->args = lappend(fk_trigger->args,
makeString(stmt->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable_name));
+ makeString(fkconstraint->pktable_name));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->match_type));
+ makeString(fkconstraint->match_type));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
if (length(fk_attr) != length(pk_attr))
@@ -1388,13 +1414,13 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
fk_trigger->args = NIL;
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->constr_name));
+ makeString(fkconstraint->constr_name));
fk_trigger->args = lappend(fk_trigger->args,
makeString(stmt->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable_name));
+ makeString(fkconstraint->pktable_name));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->match_type));
+ makeString(fkconstraint->match_type));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
while (fk_attr != NIL)
@@ -1461,13 +1487,13 @@ transformCreateStmt(ParseState *pstate, CreateStmt *stmt)
fk_trigger->args = NIL;
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->constr_name));
+ makeString(fkconstraint->constr_name));
fk_trigger->args = lappend(fk_trigger->args,
makeString(stmt->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable_name));
+ makeString(fkconstraint->pktable_name));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->match_type));
+ makeString(fkconstraint->match_type));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
while (fk_attr != NIL)
@@ -1558,18 +1584,18 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
/*
* To avoid deadlock, make sure the first thing we do is grab
- * AccessExclusiveLock on the target relation. This will be
- * needed by DefineQueryRewrite(), and we don't want to grab a lesser
- * lock beforehand. We don't need to hold a refcount on the relcache
+ * AccessExclusiveLock on the target relation. This will be needed by
+ * DefineQueryRewrite(), and we don't want to grab a lesser lock
+ * beforehand. We don't need to hold a refcount on the relcache
* entry, however.
*/
heap_close(heap_openr(stmt->object->relname, AccessExclusiveLock),
NoLock);
/*
- * NOTE: 'OLD' must always have a varno equal to 1 and 'NEW'
- * equal to 2. Set up their RTEs in the main pstate for use
- * in parsing the rule qualification.
+ * NOTE: 'OLD' must always have a varno equal to 1 and 'NEW' equal to
+ * 2. Set up their RTEs in the main pstate for use in parsing the
+ * rule qualification.
*/
Assert(pstate->p_rtable == NIL);
oldrte = addRangeTableEntry(pstate, stmt->object->relname,
@@ -1581,13 +1607,15 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
/* Must override addRangeTableEntry's default access-check flags */
oldrte->checkForRead = false;
newrte->checkForRead = false;
+
/*
* They must be in the namespace too for lookup purposes, but only add
* the one(s) that are relevant for the current kind of rule. In an
* UPDATE rule, quals must refer to OLD.field or NEW.field to be
- * unambiguous, but there's no need to be so picky for INSERT & DELETE.
- * (Note we marked the RTEs "inFromCl = true" above to allow unqualified
- * references to their fields.) We do not add them to the joinlist.
+ * unambiguous, but there's no need to be so picky for INSERT &
+ * DELETE. (Note we marked the RTEs "inFromCl = true" above to allow
+ * unqualified references to their fields.) We do not add them to the
+ * joinlist.
*/
switch (stmt->event)
{
@@ -1613,7 +1641,7 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
/* take care of the where clause */
stmt->whereClause = transformWhereClause(pstate, stmt->whereClause);
- if (length(pstate->p_rtable) != 2) /* naughty, naughty... */
+ if (length(pstate->p_rtable) != 2) /* naughty, naughty... */
elog(ERROR, "Rule WHERE condition may not contain references to other relations");
/* save info about sublinks in where clause */
@@ -1632,7 +1660,7 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
nothing_qry->commandType = CMD_NOTHING;
nothing_qry->rtable = pstate->p_rtable;
- nothing_qry->jointree = makeFromExpr(NIL, NULL); /* no join wanted */
+ nothing_qry->jointree = makeFromExpr(NIL, NULL); /* no join wanted */
stmt->actions = makeList1(nothing_qry);
}
@@ -1652,12 +1680,12 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
has_new;
/*
- * Set up OLD/NEW in the rtable for this statement. The entries
- * are marked not inFromCl because we don't want them to be
- * referred to by unqualified field names nor "*" in the rule
- * actions. We must add them to the namespace, however, or they
- * won't be accessible at all. We decide later whether to put
- * them in the joinlist.
+ * Set up OLD/NEW in the rtable for this statement. The
+ * entries are marked not inFromCl because we don't want them
+ * to be referred to by unqualified field names nor "*" in the
+ * rule actions. We must add them to the namespace, however,
+ * or they won't be accessible at all. We decide later
+ * whether to put them in the joinlist.
*/
oldrte = addRangeTableEntry(sub_pstate, stmt->object->relname,
makeAttr("*OLD*", NULL),
@@ -1676,7 +1704,8 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
/*
* If the action is INSERT...SELECT, OLD/NEW have been pushed
* down into the SELECT, and that's what we need to look at.
- * (Ugly kluge ... try to fix this when we redesign querytrees.)
+ * (Ugly kluge ... try to fix this when we redesign
+ * querytrees.)
*/
sub_qry = getInsertSelectQuery(top_subqry, NULL);
@@ -1716,19 +1745,21 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt)
}
/*
- * For efficiency's sake, add OLD to the rule action's jointree
- * only if it was actually referenced in the statement or qual.
+ * For efficiency's sake, add OLD to the rule action's
+ * jointree only if it was actually referenced in the
+ * statement or qual.
*
* For INSERT, NEW is not really a relation (only a reference to
* the to-be-inserted tuple) and should never be added to the
* jointree.
*
* For UPDATE, we treat NEW as being another kind of reference to
- * OLD, because it represents references to *transformed* tuples
- * of the existing relation. It would be wrong to enter NEW
- * separately in the jointree, since that would cause a double
- * join of the updated relation. It's also wrong to fail to make
- * a jointree entry if only NEW and not OLD is mentioned.
+ * OLD, because it represents references to *transformed*
+ * tuples of the existing relation. It would be wrong to
+ * enter NEW separately in the jointree, since that would
+ * cause a double join of the updated relation. It's also
+ * wrong to fail to make a jointree entry if only NEW and not
+ * OLD is mentioned.
*/
if (has_old || (has_new && stmt->event == CMD_UPDATE))
{
@@ -1772,12 +1803,12 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
if (stmt->forUpdate)
elog(ERROR, "DECLARE/UPDATE is not supported"
"\n\tCursors must be READ ONLY");
+
/*
- * 15 august 1991 -- since 3.0 postgres does locking
- * right, we discovered that portals were violating
- * locking protocol. portal locks cannot span xacts.
- * as a short-term fix, we installed the check here.
- * -- mao
+ * 15 august 1991 -- since 3.0 postgres does locking right, we
+ * discovered that portals were violating locking protocol.
+ * portal locks cannot span xacts. as a short-term fix, we
+ * installed the check here. -- mao
*/
if (!IsTransactionBlock())
elog(ERROR, "DECLARE CURSOR may only be used in begin/end transaction blocks");
@@ -1785,7 +1816,7 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
qry->into = stmt->portalname;
qry->isTemp = stmt->istemp;
qry->isPortal = TRUE;
- qry->isBinary = stmt->binary; /* internal portal */
+ qry->isBinary = stmt->binary; /* internal portal */
}
else
{
@@ -1881,8 +1912,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
qry->commandType = CMD_SELECT;
/*
- * Find leftmost leaf SelectStmt; extract the one-time-only items
- * from it and from the top-level node.
+ * Find leftmost leaf SelectStmt; extract the one-time-only items from
+ * it and from the top-level node.
*/
leftmostSelect = stmt->larg;
while (leftmostSelect && leftmostSelect->op != SETOP_NONE)
@@ -1902,8 +1933,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* These are not one-time, exactly, but we want to process them here
- * and not let transformSetOperationTree() see them --- else it'll just
- * recurse right back here!
+ * and not let transformSetOperationTree() see them --- else it'll
+ * just recurse right back here!
*/
sortClause = stmt->sortClause;
limitOffset = stmt->limitOffset;
@@ -1936,11 +1967,12 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
leftmostRTI = ((RangeTblRef *) node)->rtindex;
leftmostQuery = rt_fetch(leftmostRTI, pstate->p_rtable)->subquery;
Assert(leftmostQuery != NULL);
+
/*
* Generate dummy targetlist for outer query using column names of
- * leftmost select and common datatypes of topmost set operation.
- * Also make lists of the dummy vars and their names for use in
- * parsing ORDER BY.
+ * leftmost select and common datatypes of topmost set operation. Also
+ * make lists of the dummy vars and their names for use in parsing
+ * ORDER BY.
*/
qry->targetList = NIL;
targetvars = NIL;
@@ -1948,11 +1980,11 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
lefttl = leftmostQuery->targetList;
foreach(dtlist, sostmt->colTypes)
{
- Oid colType = (Oid) lfirsti(dtlist);
- Resdom *leftResdom = ((TargetEntry *) lfirst(lefttl))->resdom;
- char *colName = pstrdup(leftResdom->resname);
- Resdom *resdom;
- Node *expr;
+ Oid colType = (Oid) lfirsti(dtlist);
+ Resdom *leftResdom = ((TargetEntry *) lfirst(lefttl))->resdom;
+ char *colName = pstrdup(leftResdom->resname);
+ Resdom *resdom;
+ Node *expr;
resdom = makeResdom((AttrNumber) pstate->p_last_resno++,
colType,
@@ -1970,6 +2002,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
targetnames = lappend(targetnames, makeString(colName));
lefttl = lnext(lefttl);
}
+
/*
* Insert one-time items into top-level query
*
@@ -1983,12 +2016,12 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
if (forUpdate)
elog(ERROR, "DECLARE/UPDATE is not supported"
"\n\tCursors must be READ ONLY");
+
/*
- * 15 august 1991 -- since 3.0 postgres does locking
- * right, we discovered that portals were violating
- * locking protocol. portal locks cannot span xacts.
- * as a short-term fix, we installed the check here.
- * -- mao
+ * 15 august 1991 -- since 3.0 postgres does locking right, we
+ * discovered that portals were violating locking protocol.
+ * portal locks cannot span xacts. as a short-term fix, we
+ * installed the check here. -- mao
*/
if (!IsTransactionBlock())
elog(ERROR, "DECLARE CURSOR may only be used in begin/end transaction blocks");
@@ -2008,10 +2041,11 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
}
/*
- * As a first step towards supporting sort clauses that are expressions
- * using the output columns, generate a namespace entry that makes the
- * output columns visible. A JoinExpr node is handy for this, since
- * we can easily control the Vars generated upon matches.
+ * As a first step towards supporting sort clauses that are
+ * expressions using the output columns, generate a namespace entry
+ * that makes the output columns visible. A JoinExpr node is handy
+ * for this, since we can easily control the Vars generated upon
+ * matches.
*
* Note: we don't yet do anything useful with such cases, but at least
* "ORDER BY upper(foo)" will draw the right error message rather than
@@ -2065,7 +2099,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
static Node *
transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
{
- bool isLeaf;
+ bool isLeaf;
Assert(stmt && IsA(stmt, SelectStmt));
@@ -2104,9 +2138,9 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
if (isLeaf)
{
/* Process leaf SELECT */
- List *selectList;
- Query *selectQuery;
- char selectName[32];
+ List *selectList;
+ Query *selectQuery;
+ char selectName[32];
RangeTblEntry *rte;
RangeTblRef *rtr;
@@ -2114,13 +2148,14 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
* Transform SelectStmt into a Query.
*
* Note: previously transformed sub-queries don't affect the parsing
- * of this sub-query, because they are not in the toplevel pstate's
- * namespace list.
+ * of this sub-query, because they are not in the toplevel
+ * pstate's namespace list.
*/
selectList = parse_analyze((Node *) stmt, pstate);
Assert(length(selectList) == 1);
selectQuery = (Query *) lfirst(selectList);
+
/*
* Make the leaf query be a subquery in the top-level rangetable.
*/
@@ -2130,8 +2165,10 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
makeAttr(pstrdup(selectName),
NULL),
false);
+
/*
- * Return a RangeTblRef to replace the SelectStmt in the set-op tree.
+ * Return a RangeTblRef to replace the SelectStmt in the set-op
+ * tree.
*/
rtr = makeNode(RangeTblRef);
/* assume new rte is at end */
@@ -2143,8 +2180,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
{
/* Process an internal node (set operation node) */
SetOperationStmt *op = makeNode(SetOperationStmt);
- List *lcoltypes;
- List *rcoltypes;
+ List *lcoltypes;
+ List *rcoltypes;
const char *context;
context = (stmt->op == SETOP_UNION ? "UNION" :
@@ -2159,6 +2196,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
*/
op->larg = transformSetOperationTree(pstate, stmt->larg);
op->rarg = transformSetOperationTree(pstate, stmt->rarg);
+
/*
* Verify that the two children have the same number of non-junk
* columns, and determine the types of the merged output columns.
@@ -2171,9 +2209,9 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
op->colTypes = NIL;
while (lcoltypes != NIL)
{
- Oid lcoltype = (Oid) lfirsti(lcoltypes);
- Oid rcoltype = (Oid) lfirsti(rcoltypes);
- Oid rescoltype;
+ Oid lcoltype = (Oid) lfirsti(lcoltypes);
+ Oid rcoltype = (Oid) lfirsti(rcoltypes);
+ Oid rescoltype;
rescoltype = select_common_type(makeListi2(lcoltype, rcoltype),
context);
@@ -2197,9 +2235,9 @@ getSetColTypes(ParseState *pstate, Node *node)
{
RangeTblRef *rtr = (RangeTblRef *) node;
RangeTblEntry *rte = rt_fetch(rtr->rtindex, pstate->p_rtable);
- Query *selectQuery = rte->subquery;
- List *result = NIL;
- List *tl;
+ Query *selectQuery = rte->subquery;
+ List *result = NIL;
+ List *tl;
Assert(selectQuery != NULL);
/* Get types of non-junk columns */
@@ -2392,13 +2430,13 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt)
fk_trigger->args = NIL;
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->constr_name));
+ makeString(fkconstraint->constr_name));
fk_trigger->args = lappend(fk_trigger->args,
makeString(stmt->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable_name));
+ makeString(fkconstraint->pktable_name));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->match_type));
+ makeString(fkconstraint->match_type));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
if (length(fk_attr) != length(pk_attr))
@@ -2469,13 +2507,13 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt)
fk_trigger->args = NIL;
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->constr_name));
+ makeString(fkconstraint->constr_name));
fk_trigger->args = lappend(fk_trigger->args,
makeString(stmt->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable_name));
+ makeString(fkconstraint->pktable_name));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->match_type));
+ makeString(fkconstraint->match_type));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
while (fk_attr != NIL)
@@ -2540,13 +2578,13 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt)
fk_trigger->args = NIL;
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->constr_name));
+ makeString(fkconstraint->constr_name));
fk_trigger->args = lappend(fk_trigger->args,
makeString(stmt->relname));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->pktable_name));
+ makeString(fkconstraint->pktable_name));
fk_trigger->args = lappend(fk_trigger->args,
- makeString(fkconstraint->match_type));
+ makeString(fkconstraint->match_type));
fk_attr = fkconstraint->fk_attrs;
pk_attr = fkconstraint->pk_attrs;
while (fk_attr != NIL)
@@ -2613,7 +2651,7 @@ transformForUpdate(Query *qry, List *forUpdate)
}
else
{
- if (!intMember(i, rowMarks)) /* avoid duplicates */
+ if (!intMember(i, rowMarks)) /* avoid duplicates */
rowMarks = lappendi(rowMarks, i);
rte->checkForWrite = true;
}
@@ -2641,7 +2679,7 @@ transformForUpdate(Query *qry, List *forUpdate)
}
else
{
- if (!intMember(i, rowMarks)) /* avoid duplicates */
+ if (!intMember(i, rowMarks)) /* avoid duplicates */
rowMarks = lappendi(rowMarks, i);
rte->checkForWrite = true;
}
@@ -2662,10 +2700,10 @@ transformForUpdate(Query *qry, List *forUpdate)
* transformFkeyCheckAttrs -
*
* Try to make sure that the attributes of a referenced table
- * belong to a unique (or primary key) constraint.
+ * belong to a unique (or primary key) constraint.
*
*/
-static void
+static void
transformFkeyCheckAttrs(FkConstraint *fkconstraint)
{
Relation pkrel;
@@ -2696,7 +2734,7 @@ transformFkeyCheckAttrs(FkConstraint *fkconstraint)
foreach(indexoidscan, indexoidlist)
{
- Oid indexoid = lfirsti(indexoidscan);
+ Oid indexoid = lfirsti(indexoidscan);
HeapTuple indexTuple;
Form_pg_index indexStruct;
@@ -2710,24 +2748,28 @@ transformFkeyCheckAttrs(FkConstraint *fkconstraint)
if (indexStruct->indisunique)
{
- List *attrl;
+ List *attrl;
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
- if (i!=length(fkconstraint->pk_attrs))
- found=false;
- else {
+ if (i != length(fkconstraint->pk_attrs))
+ found = false;
+ else
+ {
/* go through the fkconstraint->pk_attrs list */
foreach(attrl, fkconstraint->pk_attrs)
{
- Ident *attr=lfirst(attrl);
+ Ident *attr = lfirst(attrl);
+
found = false;
for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
- int pkattno = indexStruct->indkey[i];
- if (pkattno>0)
+ int pkattno = indexStruct->indkey[i];
+
+ if (pkattno > 0)
{
- char *name = NameStr(pkrel_attrs[pkattno - 1]->attname);
- if (strcmp(name, attr->name)==0)
+ char *name = NameStr(pkrel_attrs[pkattno - 1]->attname);
+
+ if (strcmp(name, attr->name) == 0)
{
found = true;
break;
@@ -2741,7 +2783,7 @@ transformFkeyCheckAttrs(FkConstraint *fkconstraint)
}
ReleaseSysCache(indexTuple);
if (found)
- break;
+ break;
}
if (!found)
elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
@@ -2790,7 +2832,7 @@ transformFkeyGetPrimaryKey(FkConstraint *fkconstraint)
foreach(indexoidscan, indexoidlist)
{
- Oid indexoid = lfirsti(indexoidscan);
+ Oid indexoid = lfirsti(indexoidscan);
indexTuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -2826,7 +2868,7 @@ transformFkeyGetPrimaryKey(FkConstraint *fkconstraint)
Ident *pkattr = makeNode(Ident);
pkattr->name = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(pkrel_attrs[pkattno - 1]->attname))));
+ NameGetDatum(&(pkrel_attrs[pkattno - 1]->attname))));
pkattr->indirection = NIL;
pkattr->isRel = false;
@@ -2935,7 +2977,7 @@ transformConstraintAttrs(List *constraintList)
static FromExpr *
makeFromExpr(List *fromlist, Node *quals)
{
- FromExpr *f = makeNode(FromExpr);
+ FromExpr *f = makeNode(FromExpr);
f->fromlist = fromlist;
f->quals = quals;
@@ -2978,19 +3020,20 @@ transformColumnType(ParseState *pstate, ColumnDef *column)
}
/*
- * Is this the name of a complex type? If so, implement
- * it as a set.
+ * Is this the name of a complex type? If so, implement it as a set.
*
* XXX this is a hangover from ancient Berkeley code that probably
* doesn't work anymore anyway.
*/
- if (typeTypeRelid(ctype) != InvalidOid)
- {
- /* (Eventually add in here that the set can only
- * contain one element.)
- */
- typename->setof = true;
- }
-
- ReleaseSysCache(ctype);
+ if (typeTypeRelid(ctype) != InvalidOid)
+ {
+
+ /*
+ * (Eventually add in here that the set can only contain one
+ * element.)
+ */
+ typename->setof = true;
+ }
+
+ ReleaseSysCache(ctype);
}
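
transformFkeyCheckAttrs() above accepts a unique index on the referenced table only if its key columns match fkconstraint->pk_attrs: the counts must be equal and every referenced attribute must appear among the index keys by name. That matching rule, reduced to a standalone sketch over plain name arrays (hypothetical helper, not backend code):

    #include <string.h>

    /*
     * Return 1 if the column-name sets match the way transformFkeyCheckAttrs()
     * requires: same number of columns, and every referenced column appears
     * among the unique index's key columns.
     */
    static int
    unique_index_matches(const char *index_cols[], int nindex,
                         const char *ref_cols[], int nref)
    {
        int         i,
                    j,
                    found;

        if (nindex != nref)
            return 0;
        for (i = 0; i < nref; i++)
        {
            found = 0;
            for (j = 0; j < nindex; j++)
            {
                if (strcmp(ref_cols[i], index_cols[j]) == 0)
                {
                    found = 1;
                    break;
                }
            }
            if (!found)
                return 0;
        }
        return 1;
    }

If no index passes this test, the code above raises the "UNIQUE constraint matching given keys ... not found" error.
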
diff --git a/src/backend/parser/keywords.c b/src/backend/parser/keywords.c
index c8f5f2c0e92..402dbfd28ca 100644
--- a/src/backend/parser/keywords.c
+++ b/src/backend/parser/keywords.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/keywords.c,v 1.89 2001/02/21 18:53:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/keywords.c,v 1.90 2001/03/22 03:59:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@ static ScanKeyword ScanKeywords[] = {
{"aggregate", AGGREGATE},
{"all", ALL},
{"alter", ALTER},
- {"analyse", ANALYSE}, /* British spelling */
+ {"analyse", ANALYSE}, /* British spelling */
{"analyze", ANALYZE},
{"and", AND},
{"any", ANY},
@@ -312,16 +312,16 @@ ScanKeywordLookup(char *text)
return NULL;
/*
- * Apply an ASCII-only downcasing. We must not use tolower() since
- * it may produce the wrong translation in some locales (eg, Turkish),
+ * Apply an ASCII-only downcasing. We must not use tolower() since it
+ * may produce the wrong translation in some locales (eg, Turkish),
* and we don't trust isupper() very much either. In an ASCII-based
- * encoding the tests against A and Z are sufficient, but we also check
- * isupper() so that we will work correctly under EBCDIC. The actual
- * case conversion step should work for either ASCII or EBCDIC.
+ * encoding the tests against A and Z are sufficient, but we also
+ * check isupper() so that we will work correctly under EBCDIC. The
+ * actual case conversion step should work for either ASCII or EBCDIC.
*/
for (i = 0; i < len; i++)
{
- char ch = text[i];
+ char ch = text[i];
if (ch >= 'A' && ch <= 'Z' && isupper((unsigned char) ch))
ch += 'a' - 'A';
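
The reindented comment in ScanKeywordLookup() explains why keyword matching does its own downcasing: tolower() can misbehave in some locales (the Turkish example), so only the ASCII range 'A'..'Z' is folded, with an extra isupper() test so the same code stays sane on EBCDIC. A standalone sketch of that ASCII-only fold, assuming nothing beyond the C library:

#include <ctype.h>
#include <stdio.h>

/*
 * ASCII-only downcasing as described in the comment above: avoid tolower()
 * because in some locales it maps uppercase letters surprisingly.  The range
 * test covers ASCII; the isupper() test keeps the code correct on EBCDIC.
 */
static void
downcase_ascii(char *s)
{
    for (; *s; s++)
    {
        char ch = *s;

        if (ch >= 'A' && ch <= 'Z' && isupper((unsigned char) ch))
            *s = ch + 'a' - 'A';
    }
}

int
main(void)
{
    char word[] = "SELECT";

    downcase_ascii(word);
    printf("%s\n", word);       /* prints "select" */
    return 0;
}
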
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 5d44bbefbde..cae712c1bbb 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_clause.c,v 1.77 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_clause.c,v 1.78 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,17 +39,17 @@
static char *clauseText[] = {"ORDER BY", "GROUP BY", "DISTINCT ON"};
static void extractUniqueColumns(List *common_colnames,
- List *src_colnames, List *src_colvars,
- List **res_colnames, List **res_colvars);
+ List *src_colnames, List *src_colvars,
+ List **res_colnames, List **res_colvars);
static Node *transformJoinUsingClause(ParseState *pstate,
- List *leftVars, List *rightVars);
+ List *leftVars, List *rightVars);
static Node *transformJoinOnClause(ParseState *pstate, JoinExpr *j,
- List *containedRels);
+ List *containedRels);
static RangeTblRef *transformTableEntry(ParseState *pstate, RangeVar *r);
static RangeTblRef *transformRangeSubselect(ParseState *pstate,
- RangeSubselect *r);
+ RangeSubselect *r);
static Node *transformFromClauseItem(ParseState *pstate, Node *n,
- List **containedRels);
+ List **containedRels);
static TargetEntry *findTargetlistEntry(ParseState *pstate, Node *node,
List *tlist, int clause);
static List *addTargetToSortList(TargetEntry *tle, List *sortlist,
@@ -78,10 +78,10 @@ transformFromClause(ParseState *pstate, List *frmList)
List *fl;
/*
- * The grammar will have produced a list of RangeVars, RangeSubselects,
- * and/or JoinExprs. Transform each one (possibly adding entries to the
- * rtable), check for duplicate refnames, and then add it to the joinlist
- * and namespace.
+ * The grammar will have produced a list of RangeVars,
+ * RangeSubselects, and/or JoinExprs. Transform each one (possibly
+ * adding entries to the rtable), check for duplicate refnames, and
+ * then add it to the joinlist and namespace.
*/
foreach(fl, frmList)
{
@@ -126,11 +126,11 @@ setTargetTable(ParseState *pstate, char *relname,
heap_close(pstate->p_target_relation, NoLock);
/*
- * Open target rel and grab suitable lock (which we will hold till
- * end of transaction).
+ * Open target rel and grab suitable lock (which we will hold till end
+ * of transaction).
*
- * analyze.c will eventually do the corresponding heap_close(),
- * but *not* release the lock.
+ * analyze.c will eventually do the corresponding heap_close(), but *not*
+ * release the lock.
*/
pstate->p_target_relation = heap_openr(relname, RowExclusiveLock);
@@ -148,10 +148,10 @@ setTargetTable(ParseState *pstate, char *relname,
* Override addRangeTableEntry's default checkForRead, and instead
* mark target table as requiring write access.
*
- * If we find an explicit reference to the rel later during
- * parse analysis, scanRTEForColumn will change checkForRead
- * to 'true' again. That can't happen for INSERT but it is
- * possible for UPDATE and DELETE.
+ * If we find an explicit reference to the rel later during parse
+ * analysis, scanRTEForColumn will change checkForRead to 'true'
+ * again. That can't happen for INSERT but it is possible for UPDATE
+ * and DELETE.
*/
rte->checkForRead = false;
rte->checkForWrite = true;
@@ -169,7 +169,7 @@ setTargetTable(ParseState *pstate, char *relname,
* Simplify InhOption (yes/no/default) into boolean yes/no.
*
* The reason we do things this way is that we don't want to examine the
- * SQL_inheritance option flag until parse_analyze is run. Otherwise,
+ * SQL_inheritance option flag until parse_analyze is run. Otherwise,
* we'd do the wrong thing with query strings that intermix SET commands
* with queries.
*/
@@ -178,7 +178,7 @@ interpretInhOption(InhOption inhOpt)
{
switch (inhOpt)
{
- case INH_NO:
+ case INH_NO:
return false;
case INH_YES:
return true;
@@ -246,7 +246,7 @@ transformJoinUsingClause(ParseState *pstate, List *leftVars, List *rightVars)
/*
* We cheat a little bit here by building an untransformed operator
- * tree whose leaves are the already-transformed Vars. This is OK
+ * tree whose leaves are the already-transformed Vars. This is OK
* because transformExpr() won't complain about already-transformed
* subnodes.
*/
@@ -288,7 +288,11 @@ transformJoinUsingClause(ParseState *pstate, List *leftVars, List *rightVars)
if (exprType(result) != BOOLOID)
{
- /* This could only happen if someone defines a funny version of '=' */
+
+ /*
+ * This could only happen if someone defines a funny version of
+ * '='
+ */
elog(ERROR, "JOIN/USING clause must return type bool, not type %s",
typeidTypeName(exprType(result)));
}
@@ -312,11 +316,12 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
/*
* This is a tad tricky, for two reasons. First, the namespace that
* the join expression should see is just the two subtrees of the JOIN
- * plus any outer references from upper pstate levels. So, temporarily
- * set this pstate's namespace accordingly. (We need not check for
- * refname conflicts, because transformFromClauseItem() already did.)
- * NOTE: this code is OK only because the ON clause can't legally alter
- * the namespace by causing implicit relation refs to be added.
+ * plus any outer references from upper pstate levels. So,
+ * temporarily set this pstate's namespace accordingly. (We need not
+ * check for refname conflicts, because transformFromClauseItem()
+ * already did.) NOTE: this code is OK only because the ON clause
+ * can't legally alter the namespace by causing implicit relation refs
+ * to be added.
*/
save_namespace = pstate->p_namespace;
pstate->p_namespace = makeList2(j->larg, j->rarg);
@@ -333,17 +338,18 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
/*
* Second, we need to check that the ON condition doesn't refer to any
- * rels outside the input subtrees of the JOIN. It could do that despite
- * our hack on the namespace if it uses fully-qualified names. So, grovel
- * through the transformed clause and make sure there are no bogus
- * references. (Outer references are OK, and are ignored here.)
+ * rels outside the input subtrees of the JOIN. It could do that
+ * despite our hack on the namespace if it uses fully-qualified names.
+ * So, grovel through the transformed clause and make sure there are
+ * no bogus references. (Outer references are OK, and are ignored
+ * here.)
*/
clause_varnos = pull_varnos(result);
foreach(l, clause_varnos)
{
- int varno = lfirsti(l);
+ int varno = lfirsti(l);
- if (! intMember(varno, containedRels))
+ if (!intMember(varno, containedRels))
{
elog(ERROR, "JOIN/ON clause refers to \"%s\", which is not part of JOIN",
rt_fetch(varno, pstate->p_rtable)->eref->relname);
@@ -400,21 +406,21 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
RangeTblRef *rtr;
/*
- * We require user to supply an alias for a subselect, per SQL92.
- * To relax this, we'd have to be prepared to gin up a unique alias
- * for an unlabeled subselect.
+ * We require user to supply an alias for a subselect, per SQL92. To
+ * relax this, we'd have to be prepared to gin up a unique alias for
+ * an unlabeled subselect.
*/
if (r->name == NULL)
elog(ERROR, "sub-select in FROM must have an alias");
/*
- * Analyze and transform the subquery. This is a bit tricky because
+ * Analyze and transform the subquery. This is a bit tricky because
* we don't want the subquery to be able to see any FROM items already
* created in the current query (per SQL92, the scope of a FROM item
- * does not include other FROM items). But it does need to be able to
- * see any further-up parent states, so we can't just pass a null parent
- * pstate link. So, temporarily make the current query level have an
- * empty namespace.
+ * does not include other FROM items). But it does need to be able to
+ * see any further-up parent states, so we can't just pass a null
+ * parent pstate link. So, temporarily make the current query level
+ * have an empty namespace.
*/
save_namespace = pstate->p_namespace;
pstate->p_namespace = NIL;
@@ -422,7 +428,7 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
pstate->p_namespace = save_namespace;
/*
- * Check that we got something reasonable. Some of these conditions
+ * Check that we got something reasonable. Some of these conditions
* are probably impossible given restrictions of the grammar, but
* check 'em anyway.
*/
@@ -513,9 +519,9 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
*containedRels = nconc(l_containedRels, r_containedRels);
/*
- * Check for conflicting refnames in left and right subtrees. Must
- * do this because higher levels will assume I hand back a self-
- * consistent namespace subtree.
+ * Check for conflicting refnames in left and right subtrees.
+ * Must do this because higher levels will assume I hand back a
+ * self- consistent namespace subtree.
*/
checkNameSpaceConflicts(pstate, j->larg, j->rarg);
@@ -556,12 +562,11 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
}
/*
- * Natural join does not explicitly specify columns; must
- * generate columns to join. Need to run through the list of
- * columns from each table or join result and match up the
- * column names. Use the first table, and check every column
- * in the second table for a match. (We'll check that the
- * matches were unique later on.)
+ * Natural join does not explicitly specify columns; must generate
+ * columns to join. Need to run through the list of columns from
+ * each table or join result and match up the column names. Use
+ * the first table, and check every column in the second table for
+ * a match. (We'll check that the matches were unique later on.)
* The result of this step is a list of column names just like an
* explicitly-written USING list.
*/
@@ -571,7 +576,7 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
List *lx,
*rx;
- Assert(j->using == NIL); /* shouldn't have USING() too */
+ Assert(j->using == NIL); /* shouldn't have USING() too */
foreach(lx, l_colnames)
{
@@ -605,17 +610,18 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
if (j->using)
{
+
/*
* JOIN/USING (or NATURAL JOIN, as transformed above).
- * Transform the list into an explicit ON-condition,
- * and generate a list of result columns.
+ * Transform the list into an explicit ON-condition, and
+ * generate a list of result columns.
*/
List *ucols = j->using;
List *l_usingvars = NIL;
List *r_usingvars = NIL;
List *ucol;
- Assert(j->quals == NULL); /* shouldn't have ON() too */
+ Assert(j->quals == NULL); /* shouldn't have ON() too */
foreach(ucol, ucols)
{
@@ -679,22 +685,22 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
colvar = r_colvar;
break;
default:
- {
- /* Need COALESCE(l_colvar, r_colvar) */
- CaseExpr *c = makeNode(CaseExpr);
- CaseWhen *w = makeNode(CaseWhen);
- A_Expr *a = makeNode(A_Expr);
-
- a->oper = NOTNULL;
- a->lexpr = l_colvar;
- w->expr = (Node *) a;
- w->result = l_colvar;
- c->args = makeList1(w);
- c->defresult = r_colvar;
- colvar = transformExpr(pstate, (Node *) c,
- EXPR_COLUMN_FIRST);
- break;
- }
+ {
+ /* Need COALESCE(l_colvar, r_colvar) */
+ CaseExpr *c = makeNode(CaseExpr);
+ CaseWhen *w = makeNode(CaseWhen);
+ A_Expr *a = makeNode(A_Expr);
+
+ a->oper = NOTNULL;
+ a->lexpr = l_colvar;
+ w->expr = (Node *) a;
+ w->result = l_colvar;
+ c->args = makeList1(w);
+ c->defresult = r_colvar;
+ colvar = transformExpr(pstate, (Node *) c,
+ EXPR_COLUMN_FIRST);
+ break;
+ }
}
res_colvars = lappend(res_colvars, colvar);
}
@@ -730,6 +736,7 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
*/
if (j->alias)
{
+
/*
* If a column alias list is specified, substitute the alias
* names into my output-column list
@@ -751,7 +758,8 @@ transformFromClauseItem(ParseState *pstate, Node *n, List **containedRels)
else
elog(ERROR, "transformFromClauseItem: unexpected node (internal error)"
"\n\t%s", nodeToString(n));
- return NULL; /* can't get here, just keep compiler quiet */
+ return NULL; /* can't get here, just keep compiler
+ * quiet */
}
@@ -848,8 +856,8 @@ findTargetlistEntry(ParseState *pstate, Node *node, List *tlist, int clause)
* is a matching column. If so, fall through to let
* transformExpr() do the rest. NOTE: if name could refer
* ambiguously to more than one column name exposed by FROM,
- * colnameToVar will elog(ERROR). That's just what
- * we want here.
+ * colnameToVar will elog(ERROR). That's just what we want
+ * here.
*/
if (colnameToVar(pstate, name) != NULL)
name = NULL;
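
The reindented default: branch above is the interesting part of the merged-column logic: when neither input alone can supply the merged USING (or NATURAL) column, presumably the full outer join case, the parser builds the equivalent of COALESCE(l_colvar, r_colvar) as CASE WHEN l IS NOT NULL THEN l ELSE r END. A sketch of that per-row rule with the Vars replaced by nullable C strings (names and values are illustrative only):

#include <stdio.h>

/*
 * Merged join column for JOIN ... USING(col) when either side may be null:
 * take the left value when it is not null, else the right one, i.e.
 * COALESCE(l, r) spelled as CASE WHEN l IS NOT NULL THEN l ELSE r END.
 */
static const char *
merged_using_column(const char *l_val, const char *r_val)
{
    return (l_val != NULL) ? l_val : r_val;
}

int
main(void)
{
    /* A right-only row from a full join: the left side is null. */
    printf("%s\n", merged_using_column(NULL, "42"));   /* prints "42" */
    return 0;
}
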
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 69731ff89e1..1e7c8af7b0b 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_coerce.c,v 2.55 2001/02/27 07:07:00 ishii Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_coerce.c,v 2.56 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,7 +78,7 @@ coerce_type(ParseState *pstate, Node *node, Oid inputTypeId,
{
/* We know the source constant is really of type 'text' */
char *val = DatumGetCString(DirectFunctionCall1(textout,
- con->constvalue));
+ con->constvalue));
newcon->constvalue = stringTypeDatum(targetType, val, atttypmod);
pfree(val);
@@ -227,9 +227,9 @@ can_coerce_type(int nargs, Oid *input_typeids, Oid *func_typeids)
/* don't choke on references to no-longer-existing types */
if (!typeidIsValid(inputTypeId))
- return false;
- if (!typeidIsValid(targetTypeId))
- return false;
+ return false;
+ if (!typeidIsValid(targetTypeId))
+ return false;
/*
* Else, try for explicit conversion using functions: look for a
@@ -240,7 +240,7 @@ can_coerce_type(int nargs, Oid *input_typeids, Oid *func_typeids)
oid_array[0] = inputTypeId;
ftup = SearchSysCache(PROCNAME,
- PointerGetDatum(typeidTypeName(targetTypeId)),
+ PointerGetDatum(typeidTypeName(targetTypeId)),
Int32GetDatum(1),
PointerGetDatum(oid_array),
0);
@@ -333,7 +333,7 @@ coerce_type_typmod(ParseState *pstate, Node *node,
*
* XXX this code is WRONG, since (for example) given the input (int4,int8)
* it will select int4, whereas according to SQL92 clause 9.3 the correct
- * answer is clearly int8. To fix this we need a notion of a promotion
+ * answer is clearly int8. To fix this we need a notion of a promotion
* hierarchy within type categories --- something more complete than
* just a single preferred type.
*/
@@ -349,7 +349,7 @@ select_common_type(List *typeids, const char *context)
pcategory = TypeCategory(ptype);
foreach(l, lnext(typeids))
{
- Oid ntype = (Oid) lfirsti(l);
+ Oid ntype = (Oid) lfirsti(l);
/* move on to next one if no new information... */
if (ntype && (ntype != UNKNOWNOID) && (ntype != ptype))
@@ -362,20 +362,21 @@ select_common_type(List *typeids, const char *context)
}
else if (TypeCategory(ntype) != pcategory)
{
+
/*
- * both types in different categories? then
- * not much hope...
+ * both types in different categories? then not much
+ * hope...
*/
elog(ERROR, "%s types \"%s\" and \"%s\" not matched",
- context, typeidTypeName(ptype), typeidTypeName(ntype));
+ context, typeidTypeName(ptype), typeidTypeName(ntype));
}
else if (IsPreferredType(pcategory, ntype)
&& !IsPreferredType(pcategory, ptype)
&& can_coerce_type(1, &ptype, &ntype))
{
+
/*
- * new one is preferred and can convert? then
- * take it...
+ * new one is preferred and can convert? then take it...
*/
ptype = ntype;
pcategory = TypeCategory(ptype);
@@ -384,16 +385,15 @@ select_common_type(List *typeids, const char *context)
}
/*
- * If all the inputs were UNKNOWN type --- ie, unknown-type literals ---
- * then resolve as type TEXT. This situation comes up with constructs
- * like
- * SELECT (CASE WHEN foo THEN 'bar' ELSE 'baz' END);
- * SELECT 'foo' UNION SELECT 'bar';
- * It might seem desirable to leave the construct's output type as
- * UNKNOWN, but that really doesn't work, because we'd probably end up
- * needing a runtime coercion from UNKNOWN to something else, and we
- * usually won't have it. We need to coerce the unknown literals while
- * they are still literals, so a decision has to be made now.
+ * If all the inputs were UNKNOWN type --- ie, unknown-type literals
+ * --- then resolve as type TEXT. This situation comes up with
+ * constructs like SELECT (CASE WHEN foo THEN 'bar' ELSE 'baz' END);
+ * SELECT 'foo' UNION SELECT 'bar'; It might seem desirable to leave
+ * the construct's output type as UNKNOWN, but that really doesn't
+ * work, because we'd probably end up needing a runtime coercion from
+ * UNKNOWN to something else, and we usually won't have it. We need
+ * to coerce the unknown literals while they are still literals, so a
+ * decision has to be made now.
*/
if (ptype == UNKNOWNOID)
ptype = TEXTOID;
@@ -420,9 +420,7 @@ coerce_to_common_type(ParseState *pstate, Node *node,
if (inputTypeId == targetTypeId)
return node; /* no work */
if (can_coerce_type(1, &inputTypeId, &targetTypeId))
- {
node = coerce_type(pstate, node, inputTypeId, targetTypeId, -1);
- }
else
{
elog(ERROR, "%s unable to convert to type \"%s\"",
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 69f34c890ed..9c60b17cf8c 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_expr.c,v 1.91 2001/02/16 03:16:58 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_expr.c,v 1.92 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -384,9 +384,9 @@ transformExpr(ParseState *pstate, Node *expr, int precedence)
left_list = lnext(left_list);
/*
- * It's OK to use oper() not compatible_oper() here,
- * because make_subplan() will insert type coercion
- * calls if needed.
+ * It's OK to use oper() not compatible_oper()
+ * here, because make_subplan() will insert type
+ * coercion calls if needed.
*/
optup = oper(op,
exprType(lexpr),
@@ -458,10 +458,12 @@ transformExpr(ParseState *pstate, Node *expr, int precedence)
c->defresult = (Node *) n;
}
c->defresult = transformExpr(pstate, c->defresult, precedence);
+
/*
* Note: default result is considered the most significant
- * type in determining preferred type. This is how the code
- * worked before, but it seems a little bogus to me --- tgl
+ * type in determining preferred type. This is how the
+ * code worked before, but it seems a little bogus to me
+ * --- tgl
*/
typeids = lconsi(exprType(c->defresult), typeids);
@@ -571,7 +573,7 @@ transformIdent(ParseState *pstate, Ident *ident, int precedence)
* appear
*/
if (ident->indirection == NIL &&
- refnameRangeOrJoinEntry(pstate, ident->name, &sublevels_up) != NULL)
+ refnameRangeOrJoinEntry(pstate, ident->name, &sublevels_up) != NULL)
{
ident->isRel = TRUE;
result = (Node *) ident;
@@ -580,7 +582,7 @@ transformIdent(ParseState *pstate, Ident *ident, int precedence)
if (result == NULL || precedence == EXPR_COLUMN_FIRST)
{
/* try to find the ident as a column */
- Node *var = colnameToVar(pstate, ident->name);
+ Node *var = colnameToVar(pstate, ident->name);
if (var != NULL)
result = transformIndirection(pstate, var, ident->indirection);
@@ -852,7 +854,7 @@ parser_typecast_constant(Value *expr, TypeName *typename)
{
case T_Integer:
const_string = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum(expr->val.ival)));
+ Int32GetDatum(expr->val.ival)));
string_palloced = true;
break;
case T_Float:
@@ -931,7 +933,7 @@ parser_typecast_expression(ParseState *pstate,
/*
* Given a TypeName node as returned by the grammar, generate the internal
- * name of the corresponding type. Note this does NOT check if the type
+ * name of the corresponding type. Note this does NOT check if the type
* exists or not.
*/
char *
@@ -939,11 +941,12 @@ TypeNameToInternalName(TypeName *typename)
{
if (typename->arrayBounds != NIL)
{
+
/*
* By convention, the name of an array type is the name of its
* element type with "_" prepended.
*/
- char *arrayname = palloc(strlen(typename->name) + 2);
+ char *arrayname = palloc(strlen(typename->name) + 2);
sprintf(arrayname, "_%s", typename->name);
return arrayname;
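
TypeNameToInternalName() above depends on the catalog convention that an array type is named after its element type with a leading underscore. A standalone sketch of that string construction, using malloc in place of the backend's palloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Array types are conventionally named "_<element type name>". */
static char *
array_type_name(const char *elem_name)
{
    char *arrayname = malloc(strlen(elem_name) + 2);    /* '_' + name + '\0' */

    if (arrayname != NULL)
        sprintf(arrayname, "_%s", elem_name);
    return arrayname;
}

int
main(void)
{
    char *name = array_type_name("int4");

    printf("%s\n", name);       /* prints "_int4" */
    free(name);
    return 0;
}
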
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index bece816a7f4..8d2f632f33a 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_func.c,v 1.100 2001/03/14 23:55:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_func.c,v 1.101 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@ ParseNestedFuncOrColumn(ParseState *pstate, Attr *attr, int precedence)
if (attr->paramNo != NULL)
{
Param *param = (Param *) transformExpr(pstate,
- (Node *) attr->paramNo,
+ (Node *) attr->paramNo,
EXPR_RELATION_FIRST);
retval = ParseFuncOrColumn(pstate, strVal(lfirst(attr->attrs)),
@@ -277,7 +277,7 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
if (nargs == 1 && !must_be_agg)
{
/* Is it a plain Relation name from the parser? */
- if (IsA(first_arg, Ident) && ((Ident *) first_arg)->isRel)
+ if (IsA(first_arg, Ident) &&((Ident *) first_arg)->isRel)
{
Ident *ident = (Ident *) first_arg;
@@ -337,7 +337,7 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
if (nargs != 1)
elog(ERROR, "Aggregate functions may only have one parameter");
/* Agg's argument can't be a relation name, either */
- if (IsA(first_arg, Ident) && ((Ident *) first_arg)->isRel)
+ if (IsA(first_arg, Ident) &&((Ident *) first_arg)->isRel)
elog(ERROR, "Aggregate functions cannot be applied to relation names");
could_be_agg = true;
}
@@ -345,7 +345,7 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
{
/* Try to parse as an aggregate if above-mentioned checks are OK */
could_be_agg = (nargs == 1) &&
- !(IsA(first_arg, Ident) && ((Ident *) first_arg)->isRel);
+ !(IsA(first_arg, Ident) &&((Ident *) first_arg)->isRel);
}
if (could_be_agg)
@@ -424,7 +424,7 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
{
Node *arg = lfirst(i);
- if (IsA(arg, Ident) && ((Ident *) arg)->isRel)
+ if (IsA(arg, Ident) &&((Ident *) arg)->isRel)
{
RangeTblEntry *rte;
int vnum;
@@ -440,21 +440,18 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
&sublevels_up);
if (rteorjoin == NULL)
- {
rte = addImplicitRTE(pstate, refname);
- }
else if (IsA(rteorjoin, RangeTblEntry))
- {
rte = (RangeTblEntry *) rteorjoin;
- }
else if (IsA(rteorjoin, JoinExpr))
{
+
/*
* We have f(x) or more likely x.f where x is a join and f
- * is not one of the attribute names of the join (else we'd
- * have recognized it above). We don't support functions on
- * join tuples (since we don't have a named type for the join
- * tuples), so error out.
+ * is not one of the attribute names of the join (else
+ * we'd have recognized it above). We don't support
+ * functions on join tuples (since we don't have a named
+ * type for the join tuples), so error out.
*/
elog(ERROR, "No such attribute or function %s.%s",
refname, funcname);
@@ -525,14 +522,14 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
{ /* we know all of these fields already */
/*
- * We create a funcnode with a placeholder function seteval().
- * At runtime, seteval() will execute the function identified
- * by the funcid it receives as parameter.
+ * We create a funcnode with a placeholder function seteval(). At
+ * runtime, seteval() will execute the function identified by the
+ * funcid it receives as parameter.
*
* Example: retrieve (emp.mgr.name). The plan for this will scan the
- * emp relation, projecting out the mgr attribute, which is a funcid.
- * This function is then called (via seteval()) and "name" is
- * projected from its result.
+ * emp relation, projecting out the mgr attribute, which is a
+ * funcid. This function is then called (via seteval()) and "name"
+ * is projected from its result.
*/
funcid = F_SETEVAL;
rettype = toid;
@@ -639,11 +636,12 @@ ParseFuncOrColumn(ParseState *pstate, char *funcname, List *fargs,
retval = (Node *) expr;
/*
- * For sets, we want to project out the desired attribute of the tuples.
+ * For sets, we want to project out the desired attribute of the
+ * tuples.
*/
if (attisset)
{
- FieldSelect *fselect;
+ FieldSelect *fselect;
fselect = setup_field_select(retval, funcname, argrelid);
rettype = fselect->resulttype;
@@ -952,31 +950,31 @@ func_select_candidate(int nargs,
* columns.
*
* We do this by examining each unknown argument position to see if we
- * can determine a "type category" for it. If any candidate has an
+ * can determine a "type category" for it. If any candidate has an
* input datatype of STRING category, use STRING category (this bias
* towards STRING is appropriate since unknown-type literals look like
* strings). Otherwise, if all the candidates agree on the type
* category of this argument position, use that category. Otherwise,
* fail because we cannot determine a category.
*
- * If we are able to determine a type category, also notice whether
- * any of the candidates takes a preferred datatype within the category.
+ * If we are able to determine a type category, also notice whether any
+ * of the candidates takes a preferred datatype within the category.
*
- * Having completed this examination, remove candidates that accept
- * the wrong category at any unknown position. Also, if at least one
- * candidate accepted a preferred type at a position, remove candidates
- * that accept non-preferred types.
+ * Having completed this examination, remove candidates that accept the
+ * wrong category at any unknown position. Also, if at least one
+ * candidate accepted a preferred type at a position, remove
+ * candidates that accept non-preferred types.
*
* If we are down to one candidate at the end, we win.
*/
resolved_unknowns = false;
for (i = 0; i < nargs; i++)
{
- bool have_conflict;
+ bool have_conflict;
if (input_typeids[i] != UNKNOWNOID)
continue;
- resolved_unknowns = true; /* assume we can do it */
+ resolved_unknowns = true; /* assume we can do it */
slot_category[i] = INVALID_TYPE;
slot_has_preferred_type[i] = false;
have_conflict = false;
@@ -1012,7 +1010,11 @@ func_select_candidate(int nargs,
}
else
{
- /* Remember conflict, but keep going (might find STRING) */
+
+ /*
+ * Remember conflict, but keep going (might find
+ * STRING)
+ */
have_conflict = true;
}
}
@@ -1034,7 +1036,7 @@ func_select_candidate(int nargs,
current_candidate != NULL;
current_candidate = current_candidate->next)
{
- bool keepit = true;
+ bool keepit = true;
current_typeids = current_candidate->args;
for (i = 0; i < nargs; i++)
@@ -1185,7 +1187,7 @@ func_get_detail(char *funcname,
ftup = SearchSysCache(PROCNAME,
PointerGetDatum(funcname),
Int32GetDatum(nargs),
- PointerGetDatum(*true_typeids),
+ PointerGetDatum(*true_typeids),
0);
Assert(HeapTupleIsValid(ftup));
break;
@@ -1542,12 +1544,12 @@ ParseComplexProjection(ParseState *pstate,
Iter *iter = (Iter *) first_arg;
/*
- * If the argument of the Iter returns a tuple,
- * funcname may be a projection. If so, we stick
- * the FieldSelect *inside* the Iter --- this is
- * klugy, but necessary because ExecTargetList()
- * currently does the right thing only when the
- * Iter node is at the top level of a targetlist item.
+ * If the argument of the Iter returns a tuple, funcname
+ * may be a projection. If so, we stick the FieldSelect
+ * *inside* the Iter --- this is klugy, but necessary
+ * because ExecTargetList() currently does the right thing
+ * only when the Iter node is at the top level of a
+ * targetlist item.
*/
argtype = iter->itertype;
argrelid = typeidTypeRelid(argtype);
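
The long comment reflowed inside func_select_candidate() describes how unknown-type arguments are disambiguated: for each unknown position, look at the type category each surviving candidate expects there, bias towards STRING (unknown literals look like strings), accept a unanimous category, and otherwise give up. A toy per-position version of that decision (categories are plain strings here; the real code also tracks preferred types within a category):

#include <stdio.h>
#include <string.h>

/*
 * Decide the type category for one unknown argument position, given the
 * categories the surviving candidates expect there.  Returns NULL when the
 * candidates conflict and none of them wants STRING.
 */
static const char *
resolve_unknown_category(const char *cand_categories[], int ncands)
{
    const char *category = NULL;

    for (int i = 0; i < ncands; i++)
    {
        if (strcmp(cand_categories[i], "STRING") == 0)
            return "STRING";    /* unknown literals look like strings */
        if (category == NULL)
            category = cand_categories[i];
        else if (strcmp(category, cand_categories[i]) != 0)
            category = "CONFLICT";  /* remember conflict, keep going */
    }
    return (category != NULL && strcmp(category, "CONFLICT") != 0) ? category : NULL;
}

int
main(void)
{
    const char *cands[] = {"NUMERIC", "NUMERIC", "STRING"};

    printf("%s\n", resolve_unknown_category(cands, 3));    /* prints "STRING" */
    return 0;
}
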
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 36e43166aa9..b23dd7f1b98 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_node.c,v 1.52 2001/02/14 21:35:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_node.c,v 1.53 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -136,7 +136,7 @@ make_op(char *opname, Node *ltree, Node *rtree)
newop = makeOper(oprid(tup),/* opno */
InvalidOid,/* opid */
- opform->oprresult); /* operator result type */
+ opform->oprresult); /* operator result type */
result = makeNode(Expr);
result->typeOid = opform->oprresult;
@@ -235,7 +235,7 @@ make_var(ParseState *pstate, RangeTblEntry *rte, int attrno)
* forceSlice If true, treat subscript as array slice in all cases
* assignFrom NULL for array fetch, else transformed expression for source.
*/
-ArrayRef *
+ArrayRef *
transformArraySubscripts(ParseState *pstate,
Node *arrayBase,
Oid arrayType,
@@ -449,7 +449,7 @@ make_const(Value *value)
typeid = FLOAT8OID;
typelen = sizeof(float8);
- typebyval = false; /* XXX might change someday */
+ typebyval = false; /* XXX might change someday */
}
else
{
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index cd557994f17..fdca83115d3 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_oper.c,v 1.47 2001/02/16 03:16:58 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_oper.c,v 1.48 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ oprid(Operator op)
Oid
oprfuncid(Operator op)
{
- Form_pg_operator pgopform = (Form_pg_operator) GETSTRUCT(op);
+ Form_pg_operator pgopform = (Form_pg_operator) GETSTRUCT(op);
return pgopform->oprcode;
}
@@ -416,34 +416,35 @@ oper_select_candidate(int nargs,
}
/*
- * Second try: same algorithm as for unknown resolution in parse_func.c.
+ * Second try: same algorithm as for unknown resolution in
+ * parse_func.c.
*
* We do this by examining each unknown argument position to see if we
- * can determine a "type category" for it. If any candidate has an
+ * can determine a "type category" for it. If any candidate has an
* input datatype of STRING category, use STRING category (this bias
* towards STRING is appropriate since unknown-type literals look like
* strings). Otherwise, if all the candidates agree on the type
* category of this argument position, use that category. Otherwise,
* fail because we cannot determine a category.
*
- * If we are able to determine a type category, also notice whether
- * any of the candidates takes a preferred datatype within the category.
+ * If we are able to determine a type category, also notice whether any
+ * of the candidates takes a preferred datatype within the category.
*
- * Having completed this examination, remove candidates that accept
- * the wrong category at any unknown position. Also, if at least one
- * candidate accepted a preferred type at a position, remove candidates
- * that accept non-preferred types.
+ * Having completed this examination, remove candidates that accept the
+ * wrong category at any unknown position. Also, if at least one
+ * candidate accepted a preferred type at a position, remove
+ * candidates that accept non-preferred types.
*
* If we are down to one candidate at the end, we win.
*/
resolved_unknowns = false;
for (i = 0; i < nargs; i++)
{
- bool have_conflict;
+ bool have_conflict;
if (input_typeids[i] != UNKNOWNOID)
continue;
- resolved_unknowns = true; /* assume we can do it */
+ resolved_unknowns = true; /* assume we can do it */
slot_category[i] = INVALID_TYPE;
slot_has_preferred_type[i] = false;
have_conflict = false;
@@ -479,7 +480,11 @@ oper_select_candidate(int nargs,
}
else
{
- /* Remember conflict, but keep going (might find STRING) */
+
+ /*
+ * Remember conflict, but keep going (might find
+ * STRING)
+ */
have_conflict = true;
}
}
@@ -501,7 +506,7 @@ oper_select_candidate(int nargs,
current_candidate != NULL;
current_candidate = current_candidate->next)
{
- bool keepit = true;
+ bool keepit = true;
current_typeids = current_candidate->args;
for (i = 0; i < nargs; i++)
@@ -602,7 +607,8 @@ oper_inexact(char *op, Oid arg1, Oid arg2)
if (ncandidates == 0)
return NULL;
- /* Otherwise, check for compatible datatypes, and then try to resolve
+ /*
+ * Otherwise, check for compatible datatypes, and then try to resolve
* the conflict if more than one candidate remains.
*/
inputOids[0] = arg1;
@@ -659,18 +665,18 @@ oper(char *opname, Oid ltypeId, Oid rtypeId, bool noError)
*
* This is tighter than oper() because it will not return an operator that
* requires coercion of the input datatypes (but binary-compatible operators
- * are accepted). Otherwise, the semantics are the same.
+ * are accepted). Otherwise, the semantics are the same.
*/
Operator
compatible_oper(char *op, Oid arg1, Oid arg2, bool noError)
{
Operator optup;
- Form_pg_operator opform;
+ Form_pg_operator opform;
/* oper() will find the best available match */
optup = oper(op, arg1, arg2, noError);
if (optup == (Operator) NULL)
- return (Operator) NULL; /* must be noError case */
+ return (Operator) NULL; /* must be noError case */
/* but is it good enough? */
opform = (Form_pg_operator) GETSTRUCT(optup);
@@ -825,8 +831,11 @@ right_oper(char *op, Oid arg)
unary_op_error(op, arg, FALSE);
else
{
- /* We must run oper_select_candidate even if only one candidate,
- * otherwise we may falsely return a non-type-compatible operator.
+
+ /*
+ * We must run oper_select_candidate even if only one
+ * candidate, otherwise we may falsely return a
+ * non-type-compatible operator.
*/
targetOid = oper_select_candidate(1, &arg, candidates);
if (targetOid != NULL)
@@ -879,8 +888,11 @@ left_oper(char *op, Oid arg)
unary_op_error(op, arg, TRUE);
else
{
- /* We must run oper_select_candidate even if only one candidate,
- * otherwise we may falsely return a non-type-compatible operator.
+
+ /*
+ * We must run oper_select_candidate even if only one
+ * candidate, otherwise we may falsely return a
+ * non-type-compatible operator.
*/
targetOid = oper_select_candidate(1, &arg, candidates);
if (targetOid != NULL)
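
compatible_oper() above is documented as a tighter oper(): it locates the best candidate the same way, but then refuses it unless the operator's declared input types need no coercion, with binary-compatible types counting as acceptable. A sketch of that final acceptance test, with binary compatibility reduced to a tiny hardcoded table purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Illustrative only: treat text and varchar as binary-compatible. */
static bool
binary_compatible(const char *a, const char *b)
{
    return (strcmp(a, "text") == 0 && strcmp(b, "varchar") == 0) ||
           (strcmp(a, "varchar") == 0 && strcmp(b, "text") == 0);
}

/* Accept an operator only when no input coercion would be required. */
static bool
compatible_inputs(const char *declared, const char *actual)
{
    return strcmp(declared, actual) == 0 || binary_compatible(declared, actual);
}

int
main(void)
{
    printf("%d\n", compatible_inputs("text", "varchar"));   /* 1: accepted */
    printf("%d\n", compatible_inputs("text", "int4"));      /* 0: would need coercion */
    return 0;
}
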
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index d9280529c4f..59be1044748 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.52 2001/02/14 21:35:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.53 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,11 +31,11 @@
static Node *scanNameSpaceForRefname(ParseState *pstate, Node *nsnode,
- char *refname);
+ char *refname);
static Node *scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte,
- char *colname);
+ char *colname);
static Node *scanJoinForColumn(JoinExpr *join, char *colname,
- int sublevels_up);
+ int sublevels_up);
static bool isForUpdate(ParseState *pstate, char *relname);
static List *expandNamesVars(ParseState *pstate, List *names, List *vars);
static void warnAutoRange(ParseState *pstate, char *refname);
@@ -145,7 +145,8 @@ scanNameSpaceForRefname(ParseState *pstate, Node *nsnode,
if (j->alias)
{
if (strcmp(j->alias->relname, refname) == 0)
- return (Node *) j; /* matched a join alias */
+ return (Node *) j; /* matched a join alias */
+
/*
* Tables within an aliased join are invisible from outside
* the join, according to the scope rules of SQL92 (the join
@@ -154,7 +155,7 @@ scanNameSpaceForRefname(ParseState *pstate, Node *nsnode,
return NULL;
}
result = scanNameSpaceForRefname(pstate, j->larg, refname);
- if (! result)
+ if (!result)
result = scanNameSpaceForRefname(pstate, j->rarg, refname);
}
else if (IsA(nsnode, List))
@@ -185,7 +186,7 @@ scanNameSpaceForConflict(ParseState *pstate, Node *nsnode,
/*
* Recursively check for refname conflicts between two namespaces or
- * namespace subtrees. Raise an error if any is found.
+ * namespace subtrees. Raise an error if any is found.
*
* Works by recursively scanning namespace1 in the same way that
* scanNameSpaceForRefname does, and then looking in namespace2 for
@@ -214,6 +215,7 @@ checkNameSpaceConflicts(ParseState *pstate, Node *namespace1,
if (j->alias)
{
scanNameSpaceForConflict(pstate, namespace2, j->alias->relname);
+
/*
* Tables within an aliased join are invisible from outside
* the join, according to the scope rules of SQL92 (the join
@@ -229,9 +231,7 @@ checkNameSpaceConflicts(ParseState *pstate, Node *namespace1,
List *l;
foreach(l, (List *) namespace1)
- {
checkNameSpaceConflicts(pstate, lfirst(l), namespace2);
- }
}
else
elog(ERROR, "checkNameSpaceConflicts: unexpected node type %d",
@@ -290,8 +290,8 @@ scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname)
List *c;
/*
- * Scan the user column names (or aliases) for a match.
- * Complain if multiple matches.
+ * Scan the user column names (or aliases) for a match. Complain if
+ * multiple matches.
*/
foreach(c, rte->eref->attrs)
{
@@ -354,7 +354,8 @@ scanJoinForColumn(JoinExpr *join, char *colname, int sublevels_up)
{
if (result)
elog(ERROR, "Column reference \"%s\" is ambiguous", colname);
- result = copyObject(nth(attnum-1, join->colvars));
+ result = copyObject(nth(attnum - 1, join->colvars));
+
/*
* If referencing an uplevel join item, we must adjust
* sublevels settings in the copied expression.
@@ -385,20 +386,20 @@ colnameToVar(ParseState *pstate, char *colname)
/*
* We need to look only at top-level namespace items, and even for
- * those, ignore RTEs that are marked as not inFromCl and not
- * the query's target relation.
+ * those, ignore RTEs that are marked as not inFromCl and not the
+ * query's target relation.
*/
foreach(ns, pstate->p_namespace)
{
- Node *nsnode = (Node *) lfirst(ns);
- Node *newresult = NULL;
+ Node *nsnode = (Node *) lfirst(ns);
+ Node *newresult = NULL;
if (IsA(nsnode, RangeTblRef))
{
int varno = ((RangeTblRef *) nsnode)->rtindex;
RangeTblEntry *rte = rt_fetch(varno, pstate->p_rtable);
- if (! rte->inFromCl &&
+ if (!rte->inFromCl &&
rte != pstate->p_target_rangetblentry)
continue;
@@ -452,7 +453,7 @@ qualifiedNameToVar(ParseState *pstate, char *refname, char *colname,
if (rteorjoin == NULL)
{
- if (! implicitRTEOK)
+ if (!implicitRTEOK)
return NULL;
rteorjoin = (Node *) addImplicitRTE(pstate, refname);
sublevels_up = 0;
@@ -505,9 +506,9 @@ addRangeTableEntry(ParseState *pstate,
/*
* Get the rel's OID. This access also ensures that we have an
- * up-to-date relcache entry for the rel. Since this is typically
- * the first access to a rel in a statement, be careful to get the
- * right access level depending on whether we're doing SELECT FOR UPDATE.
+ * up-to-date relcache entry for the rel. Since this is typically the
+ * first access to a rel in a statement, be careful to get the right
+ * access level depending on whether we're doing SELECT FOR UPDATE.
*/
lockmode = isForUpdate(pstate, relname) ? RowShareLock : AccessShareLock;
rel = heap_openr(relname, lockmode);
@@ -517,8 +518,8 @@ addRangeTableEntry(ParseState *pstate,
numaliases = length(eref->attrs);
/*
- * Since the rel is open anyway, let's check that the
- * number of column aliases is reasonable. - Thomas 2000-02-04
+ * Since the rel is open anyway, let's check that the number of column
+ * aliases is reasonable. - Thomas 2000-02-04
*/
maxattrs = RelationGetNumberOfAttributes(rel);
if (maxattrs < numaliases)
@@ -536,9 +537,9 @@ addRangeTableEntry(ParseState *pstate,
rte->eref = eref;
/*
- * Drop the rel refcount, but keep the access lock till end of transaction
- * so that the table can't be deleted or have its schema modified
- * underneath us.
+ * Drop the rel refcount, but keep the access lock till end of
+ * transaction so that the table can't be deleted or have its schema
+ * modified underneath us.
*/
heap_close(rel, NoLock);
@@ -557,11 +558,11 @@ addRangeTableEntry(ParseState *pstate,
rte->checkForRead = true;
rte->checkForWrite = false;
- rte->checkAsUser = InvalidOid; /* not set-uid by default, either */
+ rte->checkAsUser = InvalidOid; /* not set-uid by default, either */
/*
- * Add completed RTE to pstate's range table list, but not to join list
- * nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join
+ * list nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -637,8 +638,8 @@ addRangeTableEntryForSubquery(ParseState *pstate,
rte->checkAsUser = InvalidOid;
/*
- * Add completed RTE to pstate's range table list, but not to join list
- * nor namespace --- caller must do that if appropriate.
+ * Add completed RTE to pstate's range table list, but not to join
+ * list nor namespace --- caller must do that if appropriate.
*/
if (pstate != NULL)
pstate->p_rtable = lappend(pstate->p_rtable, rte);
@@ -665,7 +666,7 @@ isForUpdate(ParseState *pstate, char *relname)
else
{
/* just the named tables */
- List *l;
+ List *l;
foreach(l, pstate->p_forUpdate)
{
@@ -683,7 +684,7 @@ isForUpdate(ParseState *pstate, char *relname)
/*
* Add the given RTE as a top-level entry in the pstate's join list
- * and/or name space list. (We assume caller has checked for any
+ * and/or name space list. (We assume caller has checked for any
* namespace conflict.)
*/
void
@@ -854,9 +855,10 @@ expandJoinAttrs(ParseState *pstate, JoinExpr *join, int sublevels_up)
List *vars;
vars = copyObject(join->colvars);
+
/*
- * If referencing an uplevel join item, we must adjust
- * sublevels settings in the copied expression.
+ * If referencing an uplevel join item, we must adjust sublevels
+ * settings in the copied expression.
*/
if (sublevels_up > 0)
IncrementVarSublevelsUp((Node *) vars, sublevels_up, 0);
@@ -922,15 +924,17 @@ get_rte_attribute_name(RangeTblEntry *rte, AttrNumber attnum)
* If there is an alias, use it
*/
if (attnum > 0 && attnum <= length(rte->eref->attrs))
- return strVal(nth(attnum-1, rte->eref->attrs));
+ return strVal(nth(attnum - 1, rte->eref->attrs));
+
/*
- * Can get here for a system attribute (which never has an alias),
- * or if alias name list is too short (which probably can't happen
+ * Can get here for a system attribute (which never has an alias), or
+ * if alias name list is too short (which probably can't happen
* anymore). Neither of these cases is valid for a subselect RTE.
*/
if (rte->relid == InvalidOid)
elog(ERROR, "Invalid attnum %d for rangetable entry %s",
attnum, rte->eref->relname);
+
/*
* Use the real name of the table's column
*/
@@ -1007,6 +1011,7 @@ attnameIsSet(Relation rd, char *name)
}
return get_attisset(RelationGetRelid(rd), name);
}
+
#endif
#ifdef NOT_USED
@@ -1020,6 +1025,7 @@ attnumAttNelems(Relation rd, int attid)
{
return rd->rd_att->attrs[attid - 1]->attnelems;
}
+
#endif
/* given attribute id, return type of that attribute */
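
Several of the reindented scans above (scanRTEForColumn(), scanJoinForColumn()) share one shape: walk the visible column names, remember the first hit, and raise an ambiguity error on a second hit instead of silently picking one. A standalone sketch of that pattern:

#include <stdio.h>
#include <string.h>

/*
 * Return the 1-based attribute number of colname, 0 if it is absent,
 * and -1 if the name matches more than one column (ambiguous reference).
 */
static int
lookup_column(const char *colnames[], int ncols, const char *colname)
{
    int attnum = 0;

    for (int i = 0; i < ncols; i++)
    {
        if (strcmp(colnames[i], colname) == 0)
        {
            if (attnum != 0)
                return -1;      /* second match: ambiguous */
            attnum = i + 1;
        }
    }
    return attnum;
}

int
main(void)
{
    const char *cols[] = {"id", "name", "id"};  /* "id" exposed twice, e.g. via aliases */

    printf("%d\n", lookup_column(cols, 3, "name"));  /* 2 */
    printf("%d\n", lookup_column(cols, 3, "id"));    /* -1: ambiguous */
    return 0;
}
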
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 6b566da7475..8f8ea4e168b 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_target.c,v 1.65 2001/02/14 21:35:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_target.c,v 1.66 2001/03/22 03:59:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,11 +131,11 @@ transformTargetList(ParseState *pstate, List *targetlist)
if (IsA(rteorjoin, RangeTblEntry))
p_target = nconc(p_target,
expandRelAttrs(pstate,
- (RangeTblEntry *) rteorjoin));
+ (RangeTblEntry *) rteorjoin));
else if (IsA(rteorjoin, JoinExpr))
p_target = nconc(p_target,
expandJoinAttrs(pstate,
- (JoinExpr *) rteorjoin,
+ (JoinExpr *) rteorjoin,
sublevels_up));
else
elog(ERROR, "transformTargetList: unexpected node type %d",
@@ -217,6 +217,7 @@ updateTargetListEntry(ParseState *pstate,
if (pstate->p_is_insert)
{
+
/*
* The command is INSERT INTO table (arraycol[subscripts]) ...
* so there is not really a source array value to work with.
@@ -229,6 +230,7 @@ updateTargetListEntry(ParseState *pstate,
}
else
{
+
/*
* Build a Var for the array to be updated.
*/
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 5935b565b62..856affbdbe3 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_type.c,v 1.34 2001/01/24 19:43:03 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_type.c,v 1.35 2001/03/22 03:59:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -136,6 +136,7 @@ typeTypElem(Type typ)
return typtup->typelem;
}
+
#endif
#ifdef NOT_USED
@@ -149,6 +150,7 @@ typeInfunc(Type typ)
return typtup->typinput;
}
+
#endif
#ifdef NOT_USED
@@ -162,6 +164,7 @@ typeOutfunc(Type typ)
return typtup->typoutput;
}
+
#endif
/* Given a type structure and a string, returns the internal form of
@@ -218,10 +221,11 @@ typeidTypeName(Oid id)
if (!HeapTupleIsValid(tup))
elog(ERROR, "Unable to locate type oid %u in catalog", id);
typetuple = (Form_pg_type) GETSTRUCT(tup);
+
/*
* pstrdup here because result may need to outlive the syscache entry
- * (eg, it might end up as part of a parse tree that will outlive
- * the current transaction...)
+ * (eg, it might end up as part of a parse tree that will outlive the
+ * current transaction...)
*/
result = pstrdup(NameStr(typetuple->typname));
ReleaseSysCache(tup);
diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c
index 63eff93edf5..11f1fa6df37 100644
--- a/src/backend/parser/parser.c
+++ b/src/backend/parser/parser.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parser.c,v 1.48 2001/01/24 19:43:03 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parser.c,v 1.49 2001/03/22 03:59:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,6 +30,7 @@
#if defined(FLEX_SCANNER)
extern void DeleteBuffer(void);
+
#endif /* FLEX_SCANNER */
char *parseString; /* the char* which holds the string to be
@@ -82,7 +83,7 @@ parser(char *str, Oid *typev, int nargs)
* token lookahead. We reduce these cases to one-token lookahead by combining
* tokens here, in order to keep the grammar LR(1).
*
- * Using a filter is simpler than trying to recognize multiword tokens
+ * Using a filter is simpler than trying to recognize multiword tokens
* directly in scan.l, because we'd have to allow for comments between the
* words ...
*/
diff --git a/src/backend/port/beos/sem.c b/src/backend/port/beos/sem.c
index 1a6d0d97a19..3b829817ee7 100644
--- a/src/backend/port/beos/sem.c
+++ b/src/backend/port/beos/sem.c
@@ -4,7 +4,7 @@
* BeOS System V Semaphores Emulation
*
* Copyright (c) 1999-2000, Cyril VELTER
- *
+ *
*-------------------------------------------------------------------------
*/
@@ -29,204 +29,227 @@
/* Control of a semaphore pool. The pool is an area in which we stored all
the semIds of the pool. The first 4 bytes are the number of semaphore allocated
-in the pool followed by SemIds */
+in the pool followed by SemIds */
-int semctl(int semId,int semNum,int flag,union semun semun)
+int
+semctl(int semId, int semNum, int flag, union semun semun)
{
- int32* Address;
- area_info info;
+ int32 *Address;
+ area_info info;
TRACEDBG("->semctl");
/* Try to find the pool */
- if (get_area_info(semId,&info)!=B_OK)
+ if (get_area_info(semId, &info) != B_OK)
{
/* pool is invalid (BeOS area id is invalid) */
- errno=EINVAL;
+ errno = EINVAL;
TRACEDBG("<-semctl invalid pool");
return -1;
}
-
+
/* Get the pool address */
- Address=(int32*)info.address;
- TRACEDBGP("--semctl address %d",Address);
-
-
+ Address = (int32 *) info.address;
+ TRACEDBGP("--semctl address %d", Address);
+
+
/* semNum might be 0 */
/* semun.array contain the sem initial values */
-
+
/* Fix the count of all sem of the pool to semun.array */
- if (flag==SETALL)
+ if (flag == SETALL)
{
- long i;
+ long i;
+
TRACEDBG("--semctl setall");
- for (i=0;i<Address[0];i++)
+ for (i = 0; i < Address[0]; i++)
{
- int32 cnt;
+ int32 cnt;
+
/* Get the current count */
- get_sem_count(Address[2*i+1],&cnt);
-
- TRACEDBGP("--semctl setall %d",semun.array[i]);
+ get_sem_count(Address[2 * i + 1], &cnt);
+
+ TRACEDBGP("--semctl setall %d", semun.array[i]);
/* Compute and set the new count (relative to the old one) */
- cnt-=semun.array[i];
- TRACEDBGPP("--semctl acquire id : %d cnt : %d",Address[2*i+1],cnt);
+ cnt -= semun.array[i];
+ TRACEDBGPP("--semctl acquire id : %d cnt : %d", Address[2 * i + 1], cnt);
if (cnt > 0)
- while(acquire_sem_etc(Address[2*i+1],cnt,0,0)==B_INTERRUPTED);
+ while (acquire_sem_etc(Address[2 * i + 1], cnt, 0, 0) == B_INTERRUPTED);
if (cnt < 0)
- release_sem_etc(Address[2*i+1],-cnt,0);
+ release_sem_etc(Address[2 * i + 1], -cnt, 0);
}
return 1;
}
-
+
/* Fix the count of one semaphore to semun.val */
- if (flag==SETVAL)
+ if (flag == SETVAL)
{
- int32 cnt;
- TRACEDBGP("--semctl setval %d",semun.val);
+ int32 cnt;
+
+ TRACEDBGP("--semctl setval %d", semun.val);
/* Get the current count */
- get_sem_count(Address[2*semNum+1],&cnt);
+ get_sem_count(Address[2 * semNum + 1], &cnt);
/* Compute and set the new count (relative to the old one) */
- cnt-=semun.val;
- TRACEDBGPP("--semctl acquire id : %d cnt : %d",Address[2*semNum+1],cnt);
+ cnt -= semun.val;
+ TRACEDBGPP("--semctl acquire id : %d cnt : %d", Address[2 * semNum + 1], cnt);
if (cnt > 0)
- while(acquire_sem_etc(Address[2*semNum+1],cnt,0,0)==B_INTERRUPTED);
+ while (acquire_sem_etc(Address[2 * semNum + 1], cnt, 0, 0) == B_INTERRUPTED);
if (cnt < 0)
- release_sem_etc(Address[2*semNum+1],-cnt,0);
+ release_sem_etc(Address[2 * semNum + 1], -cnt, 0);
return 1;
}
-
+
/* Get the last pid which accesed the sem */
- if (flag==GETPID)
+ if (flag == GETPID)
{
TRACEDBG("->semctl getpid");
- return Address[2*semNum+2];
+ return Address[2 * semNum + 2];
}
-
+
/* Delete the pool */
- if (flag==IPC_RMID)
+ if (flag == IPC_RMID)
{
- long i;
+ long i;
thread_info ti;
+
TRACEDBG("->semctl rmid");
- get_thread_info(find_thread(NULL),&ti);
-
+ get_thread_info(find_thread(NULL), &ti);
+
/* Loop over all semaphore to delete them */
- TRACEDBGP("->semctl nmbre %d",Address[0]);
- for (i=0;i<Address[0];i++)
+ TRACEDBGP("->semctl nmbre %d", Address[0]);
+ for (i = 0; i < Address[0]; i++)
{
- /* Make sure to have ownership of the semaphore (if created by another team) */
- TRACEDBGP("->semctl id %d",Address[2*i+1]);
- set_sem_owner(Address[2*i+1],ti.team);
-
+
+ /*
+ * Make sure to have ownership of the semaphore (if created by
+ * another team)
+ */
+ TRACEDBGP("->semctl id %d", Address[2 * i + 1]);
+ set_sem_owner(Address[2 * i + 1], ti.team);
+
/* Delete the semaphore */
- delete_sem(Address[2*i+1]);
+ delete_sem(Address[2 * i + 1]);
- /* Reset to an invalid semId (in case other process try to get the infos from a cloned area */
- Address[2*i+1]=0;
+ /*
+ * Reset to an invalid semId (in case another process tries to get
+ * the info from a cloned area)
+ */
+ Address[2 * i + 1] = 0;
}
-
+
/* Set the semaphore count to 0 */
- Address[0]=0;
-
- /* Delete the area (it might be cloned by other process. Let them live with it,
- in all cases semIds are 0 so if another process try to use it, it will fail */
+ Address[0] = 0;
+
+ /*
+ * Delete the area (it might be cloned by other processes; let them
+ * live with it: in all cases the semIds are 0, so if another
+ * process tries to use it, it will fail)
+ */
delete_area(semId);
return 1;
}
-
+
/* Get the current semaphore count */
- if (flag==GETNCNT)
+ if (flag == GETNCNT)
{
/* TO BE IMPLEMENTED */
TRACEDBG("--semctl getncnt");
- elog(ERROR,"beos : semctl error : GETNCNT not implemented");
+ elog(ERROR, "beos : semctl error : GETNCNT not implemented");
return 0;
}
-
+
/* Get the current semaphore count of the first semaphore in the pool */
- if (flag==GETVAL)
+ if (flag == GETVAL)
{
- int32 cnt;
+ int32 cnt;
+
TRACEDBG("--semctl getval");
- get_sem_count(Address[2*semNum+1],&cnt);
- TRACEDBGP("--semctl val %d",cnt);
+ get_sem_count(Address[2 * semNum + 1], &cnt);
+ TRACEDBGP("--semctl val %d", cnt);
return cnt;
}
- elog(ERROR,"beos : semctl error : unknown flag");
+ elog(ERROR, "beos : semctl error : unknown flag");
TRACEDBG("<-semctl unknown flag");
return 0;
}
/* Find a pool id based on IPC key */
-int semget(int semKey, int semNum, int flags)
+int
+semget(int semKey, int semNum, int flags)
{
- char Nom[50];
- area_id parea;
- void* Address;
+ char Nom[50];
+ area_id parea;
+ void *Address;
- TRACEDBGPP("->semget key : %d num : %d",semKey,semNum);
+ TRACEDBGPP("->semget key : %d num : %d", semKey, semNum);
/* Name of the area to find */
- sprintf(Nom,"SYSV_IPC_SEM : %d",semKey);
+ sprintf(Nom, "SYSV_IPC_SEM : %d", semKey);
/* find area */
- parea=find_area(Nom);
+ parea = find_area(Nom);
/* Test of area existance */
- if (parea!=B_NAME_NOT_FOUND)
+ if (parea != B_NAME_NOT_FOUND)
{
/* Area exist and creation is requested, error */
- if ((flags&IPC_CREAT)&&(flags&IPC_EXCL))
+ if ((flags & IPC_CREAT) && (flags & IPC_EXCL))
{
- errno=EEXIST;
+ errno = EEXIST;
return -1;
}
-
+
/* Get an area clone (in case it's not in our address space) */
- /* TODO : a check of address space might be done to avoid duplicate areas in the same address space*/
- parea=clone_area(Nom,&Address,B_ANY_ADDRESS,B_READ_AREA | B_WRITE_AREA,parea);
+
+ /*
+ * TODO : a check of address space might be done to avoid
+ * duplicate areas in the same address space
+ */
+ parea = clone_area(Nom, &Address, B_ANY_ADDRESS, B_READ_AREA | B_WRITE_AREA, parea);
return parea;
}
else
{
/* Area does not exist, but creation is requested, so create it */
- if (flags&IPC_CREAT)
+ if (flags & IPC_CREAT)
{
- int32* Address;
- void* Ad;
- long i;
+ int32 *Address;
+ void *Ad;
+ long i;
- /* Limit to 250 (8 byte per sem : 4 for the semid and 4 for the last pid
- which acceced the semaphore in a pool */
- if (semNum>250)
+ /*
+ * Limit to 250 (8 bytes per sem: 4 for the semId and 4 for
+ * the last pid which accessed the semaphore in the pool)
+ */
+ if (semNum > 250)
{
- errno=ENOSPC;
+ errno = ENOSPC;
return -1;
}
/* Create the shared memory area which will hold the pool */
- parea=create_area(Nom,&Ad,B_ANY_ADDRESS,4096,B_NO_LOCK,B_READ_AREA | B_WRITE_AREA);
- if ((parea==B_BAD_VALUE)|| (parea==B_NO_MEMORY)||(parea==B_ERROR))
+ parea = create_area(Nom, &Ad, B_ANY_ADDRESS, 4096, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
+ if ((parea == B_BAD_VALUE) || (parea == B_NO_MEMORY) || (parea == B_ERROR))
{
- errno=ENOMEM;
+ errno = ENOMEM;
return -1;
}
-
+
/* fill up informations (sem number and sem ids) */
- Address=(int32*)Ad;
- Address[0]=semNum;
- for (i=0;i<Address[0];i++)
+ Address = (int32 *) Ad;
+ Address[0] = semNum;
+ for (i = 0; i < Address[0]; i++)
{
/* Create the semaphores */
- Address[2*i+1]=create_sem(0,Nom);
-
- if ((Address[2*i+1]==B_BAD_VALUE)|| (Address[2*i+1]==B_NO_MEMORY)||(Address[2*i+1]==B_NO_MORE_SEMS))
+ Address[2 * i + 1] = create_sem(0, Nom);
+
+ if ((Address[2 * i + 1] == B_BAD_VALUE) || (Address[2 * i + 1] == B_NO_MEMORY) || (Address[2 * i + 1] == B_NO_MORE_SEMS))
{
- errno=ENOMEM;
+ errno = ENOMEM;
return -1;
}
}
@@ -236,59 +259,58 @@ int semget(int semKey, int semNum, int flags)
else
{
/* Area does not exist and no creation is requested */
- errno=ENOENT;
+ errno = ENOENT;
return -1;
}
}
}
/* Acquire or release in the semaphore pool */
-int semop(int semId, struct sembuf *sops, int nsops)
+int
+semop(int semId, struct sembuf * sops, int nsops)
{
- int32* Address; /*Pool address*/
- area_info info;
- long i;
- long ret;
+ int32 *Address; /* Pool address */
+ area_info info;
+ long i;
+ long ret;
/* Get the pool address (semId IS an area id) */
- get_area_info(semId,&info);
- Address=(int32*)info.address;
-
+ get_area_info(semId, &info);
+ Address = (int32 *) info.address;
+
/* Check the validity of semId (it should be an area id) */
- if ((semId==B_BAD_VALUE)||(semId==B_NO_MEMORY)||(semId==B_ERROR))
+ if ((semId == B_BAD_VALUE) || (semId == B_NO_MEMORY) || (semId == B_ERROR))
{
- errno=EINVAL;
+ errno = EINVAL;
return -1;
}
/* Perform acquire or release */
- for(i=0;i<nsops;i++)
+ for (i = 0; i < nsops; i++)
{
/* remember the PID */
- Address[2*(sops[i].sem_num)+2]=getpid();
-
+ Address[2 * (sops[i].sem_num) + 2] = getpid();
+
/* For each sem in the pool, check the operation to perform */
if (sops[i].sem_op < 0)
{
- /* Try acuiring the semaphore till we are not inteerupted by a signal */
- if (sops[i].sem_flg==IPC_NOWAIT)
+
+ /*
+			 * Try acquiring the semaphore until we are no longer
+			 * interrupted by a signal
+ */
+ if (sops[i].sem_flg == IPC_NOWAIT)
{
/* Try to lock ... */
- while ((ret=acquire_sem_etc(Address[2*(sops[i].sem_num)+1],-sops[i].sem_op,B_RELATIVE_TIMEOUT,0))==B_INTERRUPTED);
- if (ret!=B_OK)
- {
+ while ((ret = acquire_sem_etc(Address[2 * (sops[i].sem_num) + 1], -sops[i].sem_op, B_RELATIVE_TIMEOUT, 0)) == B_INTERRUPTED);
+ if (ret != B_OK)
return EWOULDBLOCK;
- }
}
else
- {
- while (acquire_sem_etc(Address[2*(sops[i].sem_num)+1],-sops[i].sem_op,0,0)==B_INTERRUPTED);
- }
+ while (acquire_sem_etc(Address[2 * (sops[i].sem_num) + 1], -sops[i].sem_op, 0, 0) == B_INTERRUPTED);
}
if (sops[i].sem_op > 0)
- {
- release_sem_etc(Address[2*(sops[i].sem_num)+1],sops[i].sem_op,0);
- }
+ release_sem_etc(Address[2 * (sops[i].sem_num) + 1], sops[i].sem_op, 0);
}
return 0;
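
A minimal caller-side sketch of the System V semaphore API that sem.c above emulates.
Assumptions: the BeOS shims (semget/semop) are linked in, struct sembuf and IPC_CREAT come
from the usual SysV headers or their port equivalents, and the key value 4242 is arbitrary.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int
main(void)
{
	struct sembuf op;
	int			semid;

	/* create (or attach to) a pool of two semaphores under key 4242 */
	semid = semget(4242, 2, IPC_CREAT | 0600);
	if (semid < 0)
	{
		perror("semget");
		return 1;
	}

	/* release semaphore 0 once ... */
	op.sem_num = 0;
	op.sem_op = 1;
	op.sem_flg = 0;
	if (semop(semid, &op, 1) < 0)
		perror("semop(release)");

	/* ... then acquire it again (would block if it were unavailable) */
	op.sem_op = -1;
	if (semop(semid, &op, 1) < 0)
		perror("semop(acquire)");

	return 0;
}
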
diff --git a/src/backend/port/beos/shm.c b/src/backend/port/beos/shm.c
index b30fc2b6b37..56e44d38535 100644
--- a/src/backend/port/beos/shm.c
+++ b/src/backend/port/beos/shm.c
@@ -4,7 +4,7 @@
* BeOS System V Shared Memory Emulation
*
* Copyright (c) 1999-2001, Cyril VELTER
- *
+ *
*-------------------------------------------------------------------------
*/
@@ -18,48 +18,61 @@ areas in copy on write mode */
/* Detach from a shared mem area based on its address */
-int shmdt(char* shmaddr)
+int
+shmdt(char *shmaddr)
{
/* Find area id for this address */
- area_id s;
- s=area_for(shmaddr);
+ area_id s;
+
+ s = area_for(shmaddr);
/* Delete area */
return delete_area(s);
}
/* Attach to an existing area */
-int* shmat(int memId,int m1,int m2)
+int *
+shmat(int memId, int m1, int m2)
{
/* Get our team id */
thread_info thinfo;
- team_info teinfo;
- area_info ainfo;
-
- get_thread_info(find_thread(NULL),&thinfo);
- get_team_info(thinfo.team,&teinfo);
-
+ team_info teinfo;
+ area_info ainfo;
+
+ get_thread_info(find_thread(NULL), &thinfo);
+ get_team_info(thinfo.team, &teinfo);
+
/* Get area teamid */
- if (get_area_info(memId,&ainfo)!=B_OK)
- printf("AREA %d Invalide\n",memId);
-
- if (ainfo.team==teinfo.team)
+ if (get_area_info(memId, &ainfo) != B_OK)
+ printf("AREA %d Invalide\n", memId);
+
+ if (ainfo.team == teinfo.team)
{
- /* the area is already in our address space, just return the address */
- return (int*)ainfo.address;
- }
+
+ /*
+ * the area is already in our address space, just return the
+ * address
+ */
+ return (int *) ainfo.address;
+ }
else
{
- /* the area is not in our address space, clone it before and return the address */
- area_id narea;
- narea = clone_area(ainfo.name,&(ainfo.address),B_CLONE_ADDRESS,B_READ_AREA | B_WRITE_AREA,memId);
- get_area_info(narea,&ainfo);
- return (int*)ainfo.address;
+
+ /*
+ * the area is not in our address space, clone it before and
+ * return the address
+ */
+ area_id narea;
+
+ narea = clone_area(ainfo.name, &(ainfo.address), B_CLONE_ADDRESS, B_READ_AREA | B_WRITE_AREA, memId);
+ get_area_info(narea, &ainfo);
+ return (int *) ainfo.address;
}
}
/* Control a shared mem area */
-int shmctl(int shmid, int flag, struct shmid_ds* dummy)
+int
+shmctl(int shmid, int flag, struct shmid_ds * dummy)
{
if (flag == IPC_RMID)
{
@@ -71,26 +84,26 @@ int shmctl(int shmid, int flag, struct shmid_ds* dummy)
{
/* Find any SYSV area with the shmid in its name */
- area_info inf;
- team_info infteam;
- int32 cookteam=0;
- char name[50];
- sprintf(name,"SYSV_IPC %d",shmid);
-
- dummy->shm_nattch=0;
-
+ area_info inf;
+ team_info infteam;
+ int32 cookteam = 0;
+ char name[50];
+
+ sprintf(name, "SYSV_IPC %d", shmid);
+
+ dummy->shm_nattch = 0;
+
while (get_next_team_info(&cookteam, &infteam) == B_OK)
- {
- int32 cook=0;
+ {
+ int32 cook = 0;
+
while (get_next_area_info(infteam.team, &cook, &inf) == B_OK)
{
- if (strcmp(name,inf.name) == 0)
- {
- dummy->shm_nattch++;
- }
+ if (strcmp(name, inf.name) == 0)
+ dummy->shm_nattch++;
}
}
-
+
errno = 0;
return 0;
}
@@ -99,31 +112,30 @@ int shmctl(int shmid, int flag, struct shmid_ds* dummy)
}
/* Get an area based on the IPC key */
-int shmget(int memKey,int size,int flag)
+int
+shmget(int memKey, int size, int flag)
{
- char nom[50];
- void* Address;
- area_id parea;
+ char nom[50];
+ void *Address;
+ area_id parea;
/* Area name */
- sprintf(nom,"SYSV_IPC_SHM : %d",memKey);
+ sprintf(nom, "SYSV_IPC_SHM : %d", memKey);
/* Find area */
- parea=find_area(nom);
-
+ parea = find_area(nom);
+
	/* area exists, just return its id */
- if (parea!=B_NAME_NOT_FOUND)
- {
+ if (parea != B_NAME_NOT_FOUND)
return parea;
- }
/* area does not exist and no creation is requested : error */
- if (flag==0)
- {
+ if (flag == 0)
return -1;
- }
-
- /* area does not exist and its creation is requested, create it (be sure to have a 4ko multiple size */
- return create_area(nom,&Address,B_ANY_ADDRESS,((size/4096)+1)*4096,B_NO_LOCK,B_READ_AREA | B_WRITE_AREA);
-}
+ /*
+	 * area does not exist and its creation is requested, so create it
+	 * (make sure the size is a multiple of 4 KB)
+ */
+ return create_area(nom, &Address, B_ANY_ADDRESS, ((size / 4096) + 1) * 4096, B_NO_LOCK, B_READ_AREA | B_WRITE_AREA);
+}
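
The shm.c emulation above maps each SysV shared memory key onto a named BeOS area
("SYSV_IPC_SHM : <key>") whose size is rounded up to a 4 KB multiple. A caller-side sketch
of that API, assuming the shims above and their signatures as shown (key 4242 and the
8192-byte size are arbitrary example values; m1/m2 are unused by the emulation):

#include <stdio.h>

/* provided by the BeOS emulation above (assumed declared in a port header) */
extern int	shmget(int memKey, int size, int flag);
extern int *shmat(int memId, int m1, int m2);
extern int	shmdt(char *shmaddr);

int
main(void)
{
	int			shmid;
	int		   *addr;

	/* find or create the segment; any non-zero flag requests creation */
	shmid = shmget(4242, 8192, 1);
	if (shmid < 0)
	{
		fprintf(stderr, "shmget failed\n");
		return 1;
	}

	/* map it into this team's address space */
	addr = shmat(shmid, 0, 0);
	addr[0] = 42;				/* use the shared memory */

	/* unmap it again */
	shmdt((char *) addr);
	return 0;
}
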
diff --git a/src/backend/port/beos/support.c b/src/backend/port/beos/support.c
index 5dfe9e31976..bc5264e5490 100644
--- a/src/backend/port/beos/support.c
+++ b/src/backend/port/beos/support.c
@@ -4,19 +4,19 @@
* BeOS Support functions
*
* Copyright (c) 1999-2001, Cyril VELTER
- *
+ *
*-------------------------------------------------------------------------
*/
#include "postgres.h"
/* Support Globals */
-port_id beos_dl_port_in=0;
-port_id beos_dl_port_out=0;
-sem_id beos_shm_sem;
+port_id beos_dl_port_in = 0;
+port_id beos_dl_port_out = 0;
+sem_id beos_shm_sem;
/* Global var containing the postgres path */
-extern char pg_pathname[];
+extern char pg_pathname[];
/* Shared library loading doesn't work after fork in beos. The solution is to use an exact
@@ -27,185 +27,194 @@ postgres executable just run a loop to wait command on a port. Its only action i
the beos_dl_open will then remap the good areas in the backend address space. */
-image_id beos_dl_open(char * filename)
+image_id
+beos_dl_open(char *filename)
{
- image_id im;
+ image_id im;
- /* If a port doesn't exist, lauch support server */
- if ((beos_dl_port_in<=0)||(beos_dl_port_out<=0))
+	/* If a port doesn't exist, launch the support server */
+ if ((beos_dl_port_in <= 0) || (beos_dl_port_out <= 0))
{
/* Create communication port */
- beos_dl_port_in=create_port(50,"beos_support_in");
- beos_dl_port_out=create_port(50,"beos_support_in");
+ beos_dl_port_in = create_port(50, "beos_support_in");
+ beos_dl_port_out = create_port(50, "beos_support_in");
- if ((beos_dl_port_in<=0)||(beos_dl_port_out<=0))
+ if ((beos_dl_port_in <= 0) || (beos_dl_port_out <= 0))
{
- elog(NOTICE, "Error loading BeOS support server : can't create communication ports");
+ elog(NOTICE, "Error loading BeOS support server : can't create communication ports");
return B_ERROR;
}
else
{
- char Cmd[4000];
-
+ char Cmd[4000];
+
/* Build arg list */
- sprintf(Cmd,"%s -beossupportserver %d %d &",pg_pathname,(int)beos_dl_port_in,(int)beos_dl_port_out);
+ sprintf(Cmd, "%s -beossupportserver %d %d &", pg_pathname, (int) beos_dl_port_in, (int) beos_dl_port_out);
			/* Launch process */
system(Cmd);
}
}
-
+
/* Add-on loading */
-
+
/* Send command '1' (load) to the support server */
- write_port(beos_dl_port_in,1,filename,strlen(filename)+1);
-
+ write_port(beos_dl_port_in, 1, filename, strlen(filename) + 1);
+
/* Read Object Id */
- read_port(beos_dl_port_out,&im,NULL,0);
+ read_port(beos_dl_port_out, &im, NULL, 0);
/* Checking integrity */
- if (im<0)
- {
+ if (im < 0)
+ {
elog(NOTICE, "Can't load this add-on ");
- return B_ERROR;
+ return B_ERROR;
}
else
{
/* Map text and data segment in our address space */
- char datas[4000];
- int32 area;
- int32 resu;
- void* add;
-
+ char datas[4000];
+ int32 area;
+ int32 resu;
+ void *add;
+
/* read text segment id and address */
- read_port(beos_dl_port_out,&area,datas,4000);
- read_port(beos_dl_port_out,(void*)&add,datas,4000);
+ read_port(beos_dl_port_out, &area, datas, 4000);
+ read_port(beos_dl_port_out, (void *) &add, datas, 4000);
/* map text segment in our address space */
- resu=clone_area(datas,&add,B_EXACT_ADDRESS,B_READ_AREA|B_WRITE_AREA,area);
- if (resu<0)
+ resu = clone_area(datas, &add, B_EXACT_ADDRESS, B_READ_AREA | B_WRITE_AREA, area);
+ if (resu < 0)
{
/* If we can't map, we are in reload case */
/* delete the mapping */
- resu=delete_area(area_for(add));
+ resu = delete_area(area_for(add));
/* Remap */
- resu=clone_area(datas,&add,B_EXACT_ADDRESS,B_READ_AREA|B_WRITE_AREA,area);
- if (resu<0)
- {
+ resu = clone_area(datas, &add, B_EXACT_ADDRESS, B_READ_AREA | B_WRITE_AREA, area);
+ if (resu < 0)
elog(NOTICE, "Can't load this add-on : map text error");
- }
}
-
+
		/* read data segment id and address */
- read_port(beos_dl_port_out,&area,datas,4000);
- read_port(beos_dl_port_out,(void*)&add,datas,4000);
+ read_port(beos_dl_port_out, &area, datas, 4000);
+ read_port(beos_dl_port_out, (void *) &add, datas, 4000);
		/* map data segment in our address space */
- resu=clone_area(datas,&add,B_EXACT_ADDRESS,B_READ_AREA|B_WRITE_AREA,area);
- if (resu<0)
+ resu = clone_area(datas, &add, B_EXACT_ADDRESS, B_READ_AREA | B_WRITE_AREA, area);
+ if (resu < 0)
{
/* If we can't map, we are in reload case */
/* delete the mapping */
- resu=delete_area(area_for(add));
+ resu = delete_area(area_for(add));
/* Remap */
- resu=clone_area(datas,&add,B_EXACT_ADDRESS,B_READ_AREA|B_WRITE_AREA,area);
- if (resu<0)
- {
+ resu = clone_area(datas, &add, B_EXACT_ADDRESS, B_READ_AREA | B_WRITE_AREA, area);
+ if (resu < 0)
elog(NOTICE, "Can't load this add-on : map data error");
- }
}
-
+
return im;
}
}
-status_t beos_dl_close(image_id im)
+status_t
+beos_dl_close(image_id im)
{
/* unload add-on */
- int32 resu;
- write_port(beos_dl_port_in,2,&im,4);
- read_port(beos_dl_port_out,&resu,NULL,0);
+ int32 resu;
+
+ write_port(beos_dl_port_in, 2, &im, 4);
+ read_port(beos_dl_port_out, &resu, NULL, 0);
return resu;
}
/* Main support server loop */
-void beos_startup(int argc,char** argv)
+void
+beos_startup(int argc, char **argv)
{
if (strlen(argv[0]) >= 10 && !strcmp(argv[0] + strlen(argv[0]) - 10, "postmaster"))
{
- /* We are in the postmaster, create the protection semaphore for shared mem remapping */
- beos_shm_sem=create_sem(1,"beos_shm_sem");
+
+ /*
+ * We are in the postmaster, create the protection semaphore for
+ * shared mem remapping
+ */
+ beos_shm_sem = create_sem(1, "beos_shm_sem");
}
if (argc > 1 && strcmp(argv[1], "-beossupportserver") == 0)
{
/* We are in the support server, run it ... */
- port_id port_in;
- port_id port_out;
-
+ port_id port_in;
+ port_id port_out;
+
/* Get back port ids from arglist */
- sscanf(argv[2],"%d",(int*)(&port_in));
- sscanf(argv[3],"%d",(int*)(&port_out));
-
+ sscanf(argv[2], "%d", (int *) (&port_in));
+ sscanf(argv[3], "%d", (int *) (&port_out));
+
/* Main server loop */
for (;;)
- {
- int32 opcode=0;
- char datas[4000];
-
- /* Wait for a message from the backend :
- 1 : load a shared object
- 2 : unload a shared object
- any other : exit support server */
- read_port(port_in,&opcode,datas,4000);
-
- switch(opcode)
+ {
+ int32 opcode = 0;
+ char datas[4000];
+
+ /*
+ * Wait for a message from the backend : 1 : load a shared
+ * object 2 : unload a shared object any other : exit support
+ * server
+ */
+ read_port(port_in, &opcode, datas, 4000);
+
+ switch (opcode)
{
- image_id addon;
- image_info info_im;
- area_info info_ar;
-
- /* Load Add-On */
- case 1 :
-
+ image_id addon;
+ image_info info_im;
+ area_info info_ar;
+
+ /* Load Add-On */
+ case 1:
+
/* Load shared object */
- addon=load_add_on(datas);
-
+ addon = load_add_on(datas);
+
/* send back the shared object Id */
- write_port(port_out,addon,NULL,0);
-
+ write_port(port_out, addon, NULL, 0);
+
/* Get Shared Object infos */
- get_image_info(addon,&info_im);
-
+ get_image_info(addon, &info_im);
+
/* get text segment info */
- get_area_info(area_for(info_im.text),&info_ar);
+ get_area_info(area_for(info_im.text), &info_ar);
/* Send back area_id of text segment */
- write_port(port_out,info_ar.area,info_ar.name,strlen(info_ar.name)+1);
+ write_port(port_out, info_ar.area, info_ar.name, strlen(info_ar.name) + 1);
/* Send back real address of text segment */
- write_port(port_out,(int)info_ar.address,info_ar.name,strlen(info_ar.name)+1);
-
-
+ write_port(port_out, (int) info_ar.address, info_ar.name, strlen(info_ar.name) + 1);
+
+
/* get data segment info */
- get_area_info(area_for(info_im.data),&info_ar);
+ get_area_info(area_for(info_im.data), &info_ar);
/* Send back area_id of data segment */
- write_port(port_out,info_ar.area,info_ar.name,strlen(info_ar.name)+1);
+ write_port(port_out, info_ar.area, info_ar.name, strlen(info_ar.name) + 1);
/* Send back real address of data segment */
- write_port(port_out,(int)info_ar.address,info_ar.name,strlen(info_ar.name)+1);
- break;
- /* UnLoad Add-On */
- case 2 :
- /* Unload shared object and send back the result of the operation */
- write_port(port_out,unload_add_on(*((int*)(datas))),NULL,0);
- break;
- /* Cleanup and exit */
+ write_port(port_out, (int) info_ar.address, info_ar.name, strlen(info_ar.name) + 1);
+ break;
+ /* UnLoad Add-On */
+ case 2:
+
+ /*
+ * Unload shared object and send back the result of
+ * the operation
+ */
+ write_port(port_out, unload_add_on(*((int *) (datas))), NULL, 0);
+ break;
+ /* Cleanup and exit */
default:
/* Free system resources */
delete_port(port_in);
delete_port(port_out);
/* Exit */
exit(0);
- break;
+ break;
}
}
/* Never be there */
@@ -215,76 +224,80 @@ void beos_startup(int argc,char** argv)
-/* The behavior of fork is borken on beos regarding shared memory. In fact
+/* The behavior of fork is broken on beos regarding shared memory. In fact
all shared memory areas are cloned in copy-on-write mode in the new process.
We need to remap these areas. Just after the fork we perform the
following actions :
	* Find all areas with a name beginning with SYSV_IPC_ in our process
- (areas created by the SYSV IPC emulation functions). The name is
- followed by the IPC KEY in decimal format
-
+ (areas created by the SYSV IPC emulation functions). The name is
+ followed by the IPC KEY in decimal format
+
* For each area we do :
-
+
* 1 : Get its name
* 2 : destroy it
- * 3 : find another area with the exact same name
+ * 3 : find another area with the exact same name
* 4 : clone it in our address space with a different name
-
+
There is a race condition between steps 3 and 4 : if there are two forks in a very
short time, in step 3 we might end up with two areas with the same name, and no
way to find the postmaster one. So the whole process is protected
by a semaphore which is acquired just before the fork and released in case
of fork failure or just after the end of the remapping.*/
-
-void beos_before_backend_startup(void)
+
+void
+beos_before_backend_startup(void)
{
- /* Just before forking, acquire the semaphore */
- if(acquire_sem(beos_shm_sem)!=B_OK)
- exit(1); /* Fatal error, exiting with error */
+ /* Just before forking, acquire the semaphore */
+ if (acquire_sem(beos_shm_sem) != B_OK)
+ exit(1); /* Fatal error, exiting with error */
}
-void beos_backend_startup_failed(void)
+void
+beos_backend_startup_failed(void)
{
	/* The fork failed, just release the semaphore */
release_sem(beos_shm_sem);
}
-void beos_backend_startup(void)
+void
+beos_backend_startup(void)
{
- char nom[50];
- char nvnom[50];
- area_info inf;
- int32 cook=0;
+ char nom[50];
+ char nvnom[50];
+ area_info inf;
+ int32 cook = 0;
/* Perform the remapping process */
/* Loop in all our team areas */
while (get_next_area_info(0, &cook, &inf) == B_OK)
{
- strcpy(nom,inf.name);
- strcpy(nvnom,inf.name);
- nom[9]=0;
- nvnom[5]='i';
+ strcpy(nom, inf.name);
+ strcpy(nvnom, inf.name);
+ nom[9] = 0;
+ nvnom[5] = 'i';
/* Is it a SYS V area ? */
- if (!strcmp(nom,"SYSV_IPC_"))
+ if (!strcmp(nom, "SYSV_IPC_"))
{
- void* area_address;
- area_id area_postmaster;
+ void *area_address;
+ area_id area_postmaster;
+
/* Get the area address */
- area_address=inf.address;
+ area_address = inf.address;
/* Destroy the bad area */
delete_area(inf.area);
/* Find the postmaster area */
- area_postmaster=find_area(inf.name);
+ area_postmaster = find_area(inf.name);
/* Compute new area name */
- sprintf(nvnom,"SYSV_IPC %d",area_postmaster);
+ sprintf(nvnom, "SYSV_IPC %d", area_postmaster);
/* Clone it at the exact same address */
- clone_area(nvnom,&area_address,B_CLONE_ADDRESS,B_READ_AREA|B_WRITE_AREA,area_postmaster);
+ clone_area(nvnom, &area_address, B_CLONE_ADDRESS, B_READ_AREA | B_WRITE_AREA, area_postmaster);
}
- }
+ }
/* remapping done, release semaphore to allow other backends to start up */
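
The remapping protocol described above is driven from the postmaster's fork path. A sketch
of the calling sequence, assuming only the three beos_* hooks implemented in support.c
(the surrounding error handling is reduced to the bare minimum and the function name
fork_backend is illustrative):

#include <sys/types.h>
#include <unistd.h>

/* hooks implemented in support.c above (assumed declared in a port header) */
extern void beos_before_backend_startup(void);
extern void beos_backend_startup(void);
extern void beos_backend_startup_failed(void);

static pid_t
fork_backend(void)
{
	pid_t		pid;

	/* take the protection semaphore before forking */
	beos_before_backend_startup();

	pid = fork();
	if (pid < 0)
	{
		/* fork failed: release the semaphore in the parent */
		beos_backend_startup_failed();
		return -1;
	}

	if (pid == 0)
	{
		/* child: re-clone the SYSV_IPC_* areas at their old addresses */
		beos_backend_startup();
		/* ... backend startup would continue here ... */
	}

	/* parent continues; the child releases the semaphore once remapped */
	return pid;
}
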
diff --git a/src/backend/port/darwin/sem.c b/src/backend/port/darwin/sem.c
index 7dc7b0c9194..6130e6d72b2 100644
--- a/src/backend/port/darwin/sem.c
+++ b/src/backend/port/darwin/sem.c
@@ -4,13 +4,13 @@
* System V Semaphore Emulation
*
* Copyright (c) 1999, repas AEG Automation GmbH
- *
- * 2000-12-1 pmb@mac.com
- * - changed from anonymous to named semaphores for darwin
- * - this required changing sem_info from containig an array of sem_t to an array of sem_t*
+ *
+ * 2000-12-1 pmb@mac.com
+ * - changed from anonymous to named semaphores for darwin
+ *	- this required changing sem_info from containing an array of sem_t to an array of sem_t*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/darwin/Attic/sem.c,v 1.2 2001/01/17 22:11:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/darwin/Attic/sem.c,v 1.3 2001/03/22 03:59:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,13 +27,13 @@
#include "storage/proc.h"
#include "port/darwin/sem.h"
-#define SEMMAX IPC_NMAXSEM
+#define SEMMAX IPC_NMAXSEM
#define SETMAX ((MAXBACKENDS + SEMMAX - 1) / SEMMAX)
#define OPMAX 8
#define MODE 0700
#define SHM_INFO_NAME "SysV_Sem_Info"
-#define SEM_NAME "/pgsql-darwin"
+#define SEM_NAME "/pgsql-darwin"
struct pending_ops
{
@@ -43,12 +43,12 @@ struct pending_ops
struct sem_info
{
- sem_t* sem;
+ sem_t *sem;
struct
{
key_t key;
int nsems;
- sem_t* sem[SEMMAX];/* array of POSIX semaphores */
+ sem_t *sem[SEMMAX];/* array of POSIX semaphores */
struct sem semV[SEMMAX]; /* array of System V semaphore
* structures */
struct pending_ops pendingOps[SEMMAX]; /* array of pending
@@ -64,12 +64,12 @@ semctl(int semid, int semnum, int cmd, /* ... */ union semun arg)
{
int r = 0;
- sem_wait(SemInfo->sem);
+ sem_wait(SemInfo->sem);
if (semid < 0 || semid >= SETMAX ||
semnum < 0 || semnum >= SemInfo->set[semid].nsems)
{
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EINVAL;
return -1;
}
@@ -109,7 +109,7 @@ semctl(int semid, int semnum, int cmd, /* ... */ union semun arg)
case IPC_RMID:
for (semnum = 0; semnum < SemInfo->set[semid].nsems; semnum++)
{
- if (sem_close(SemInfo->set[semid].sem[semnum]) == -1)
+ if (sem_close(SemInfo->set[semid].sem[semnum]) == -1)
r = -1;
}
SemInfo->set[semid].key = -1;
@@ -117,12 +117,12 @@ semctl(int semid, int semnum, int cmd, /* ... */ union semun arg)
break;
default:
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EINVAL;
return -1;
}
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
return r;
}
@@ -134,12 +134,12 @@ semget(key_t key, int nsems, int semflg)
semid,
semnum /* , semnum1 */ ;
int exist = 0;
- char semname[64];
+ char semname[64];
if (nsems < 0 || nsems > SEMMAX)
{
#ifdef DEBUG_IPC
- fprintf(stderr, "darwin semget aborting because nsems out of range. (%d)\n", nsems);
+ fprintf(stderr, "darwin semget aborting because nsems out of range. (%d)\n", nsems);
#endif
errno = EINVAL;
return -1;
@@ -149,13 +149,13 @@ semget(key_t key, int nsems, int semflg)
if (SemInfo == (struct sem_info *) - 1)
{
#ifdef DEBUG_IPC
- fprintf(stderr, "darwin initializing shared mem for semaphore shim.\n");
+ fprintf(stderr, "darwin initializing shared mem for semaphore shim.\n");
#endif
/* test if the shared memory already exists */
fd = shm_open(SHM_INFO_NAME, O_RDWR | O_CREAT | O_EXCL, MODE);
if (fd == -1 && errno == EEXIST)
{
-/* exist = 1; */
+/* exist = 1; */
shm_unlink(SHM_INFO_NAME);
fd = shm_open(SHM_INFO_NAME, O_RDWR | O_CREAT | O_EXCL, MODE);
}
@@ -163,7 +163,7 @@ semget(key_t key, int nsems, int semflg)
return fd;
shm_unlink(SHM_INFO_NAME);
/* The size may only be set once. Ignore errors. */
- ftruncate(fd, sizeof(struct sem_info));
+ ftruncate(fd, sizeof(struct sem_info));
SemInfo = mmap(NULL, sizeof(struct sem_info),
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (SemInfo == MAP_FAILED)
@@ -171,22 +171,22 @@ semget(key_t key, int nsems, int semflg)
if (!exist)
{
/* create semaphore for locking */
- sprintf(semname, "%s-map", SEM_NAME);
+ sprintf(semname, "%s-map", SEM_NAME);
#ifdef DEBUG_IPC
- fprintf(stderr, "darwin creating sem %s to cover shared mem.\n", semname);
+ fprintf(stderr, "darwin creating sem %s to cover shared mem.\n", semname);
#endif
- SemInfo->sem = sem_open(semname, O_CREAT, semflg & 0777, 1);
- sem_unlink(semname);
- sem_wait(SemInfo->sem);
+ SemInfo->sem = sem_open(semname, O_CREAT, semflg & 0777, 1);
+ sem_unlink(semname);
+ sem_wait(SemInfo->sem);
			/* initialize shared memory */
memset(SemInfo->set, 0, sizeof(SemInfo->set));
for (semid = 0; semid < SETMAX; semid++)
SemInfo->set[semid].key = -1;
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
}
}
- sem_wait(SemInfo->sem);
+ sem_wait(SemInfo->sem);
if (key != IPC_PRIVATE)
{
@@ -196,7 +196,7 @@ semget(key_t key, int nsems, int semflg)
semid++;
if (!(semflg & IPC_CREAT) && semid >= SETMAX)
{
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = ENOENT;
return -1;
}
@@ -204,7 +204,7 @@ semget(key_t key, int nsems, int semflg)
{
if (semflg & IPC_CREAT && semflg & IPC_EXCL)
{
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EEXIST;
return -1;
}
@@ -213,14 +213,14 @@ semget(key_t key, int nsems, int semflg)
if (nsems != 0 && SemInfo->set[semid].nsems < nsems)
{
#ifdef DEBUG_IPC
-fprintf(stderr, "darwin semget failed because if (nsems != 0 && SemInfo->set[semid].nsems < nsems) %d %d\n",
- nsems, SemInfo->set[semid].nsems);
+ fprintf(stderr, "darwin semget failed because if (nsems != 0 && SemInfo->set[semid].nsems < nsems) %d %d\n",
+ nsems, SemInfo->set[semid].nsems);
#endif
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EINVAL;
return -1;
}
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
return semid;
}
}
@@ -233,28 +233,28 @@ fprintf(stderr, "darwin semget failed because if (nsems != 0 && SemInfo->set[sem
if (semid >= SETMAX)
{
#ifdef DEBUG_IPC
- fprintf(stderr, "darwin semget failed because all keys were -1 up to SETMAX\n");
+ fprintf(stderr, "darwin semget failed because all keys were -1 up to SETMAX\n");
#endif
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = ENOSPC;
return -1;
}
for (semnum = 0; semnum < nsems; semnum++)
{
- sprintf(semname, "%s-%d-%d", SEM_NAME, semid, semnum);
+ sprintf(semname, "%s-%d-%d", SEM_NAME, semid, semnum);
#ifdef DEBUG_IPC
- fprintf(stderr, "darwin creating sem %s to cover set %d num %dm.\n", semname, semid, semnum);
+ fprintf(stderr, "darwin creating sem %s to cover set %d num %dm.\n", semname, semid, semnum);
#endif
- SemInfo->set[semid].sem[semnum] = sem_open(semname, O_CREAT, semflg & 0777, 0);
- sem_unlink(semname);
+ SemInfo->set[semid].sem[semnum] = sem_open(semname, O_CREAT, semflg & 0777, 0);
+ sem_unlink(semname);
/* Currently sem_init always returns -1.
if( sem_init( &SemInfo->set[semid].sem[semnum], 1, 0 ) == -1 ) {
for( semnum1 = 0; semnum1 < semnum; semnum1++ ) {
- sem_close( SemInfo->set[semid].sem[semnum1] );
+ sem_close( SemInfo->set[semid].sem[semnum1] );
}
- sem_post( SemInfo->sem );
+ sem_post( SemInfo->sem );
return -1;
}
*/
@@ -263,7 +263,7 @@ fprintf(stderr, "darwin semget failed because if (nsems != 0 && SemInfo->set[sem
SemInfo->set[semid].key = key;
SemInfo->set[semid].nsems = nsems;
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
return semid;
}
@@ -277,11 +277,11 @@ semop(int semid, struct sembuf * sops, size_t nsops)
errno1 = 0,
op;
- sem_wait(SemInfo->sem);
+ sem_wait(SemInfo->sem);
if (semid < 0 || semid >= SETMAX)
{
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EINVAL;
return -1;
}
@@ -289,7 +289,7 @@ semop(int semid, struct sembuf * sops, size_t nsops)
{
if ( /* sops[i].sem_num < 0 || */ sops[i].sem_num >= SemInfo->set[semid].nsems)
{
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EFBIG;
return -1;
}
@@ -303,7 +303,7 @@ semop(int semid, struct sembuf * sops, size_t nsops)
{
if (sops[i].sem_flg & IPC_NOWAIT)
{
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = EAGAIN;
return -1;
}
@@ -311,15 +311,15 @@ semop(int semid, struct sembuf * sops, size_t nsops)
if (SemInfo->set[semid].pendingOps[sops[i].sem_num].idx >= OPMAX)
{
/* pending operations array overflow */
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = ERANGE;
return -1;
}
SemInfo->set[semid].pendingOps[sops[i].sem_num].op[SemInfo->set[semid].pendingOps[sops[i].sem_num].idx++] = sops[i].sem_op;
/* suspend */
- sem_post(SemInfo->sem); /* avoid deadlock */
- r1 = sem_wait(SemInfo->set[semid].sem[sops[i].sem_num]);
- sem_wait(SemInfo->sem);
+ sem_post(SemInfo->sem); /* avoid deadlock */
+ r1 = sem_wait(SemInfo->set[semid].sem[sops[i].sem_num]);
+ sem_wait(SemInfo->sem);
if (r1)
{
errno1 = errno;
@@ -343,7 +343,7 @@ semop(int semid, struct sembuf * sops, size_t nsops)
if (SemInfo->set[semid].pendingOps[sops[i].sem_num].op[SemInfo->set[semid].pendingOps[sops[i].sem_num].idx - 1] + op >= 0)
{
/* unsuspend processes */
- if (sem_post(SemInfo->set[semid].sem[sops[i].sem_num]))
+ if (sem_post(SemInfo->set[semid].sem[sops[i].sem_num]))
{
errno1 = errno;
r = -1;
@@ -364,14 +364,14 @@ semop(int semid, struct sembuf * sops, size_t nsops)
/* sops[i].sem_op == 0 */
{
/* not supported */
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = ENOSYS;
return -1;
}
SemInfo->set[semid].semV[sops[i].sem_num].sempid = getpid();
}
- sem_post(SemInfo->sem);
+ sem_post(SemInfo->sem);
errno = errno1;
return r;
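
The darwin shim above backs each System V semaphore with a named POSIX semaphore
("SEM_NAME-setid-semnum") that is unlinked right after sem_open, so only the already-open
descriptors keep it alive. A stand-alone sketch of that create-then-unlink pattern, assuming
only the POSIX <semaphore.h> API (the name components 0/0 are arbitrary examples):

#include <fcntl.h>
#include <semaphore.h>
#include <stdio.h>

int
main(void)
{
	sem_t	   *sem;
	char		semname[64];

	/* name the semaphore the same way the shim does: base-set-num */
	sprintf(semname, "%s-%d-%d", "/pgsql-darwin", 0, 0);

	/* create it with an initial count of 0 ... */
	sem = sem_open(semname, O_CREAT, 0700, 0);
	if (sem == SEM_FAILED)
	{
		perror("sem_open");
		return 1;
	}

	/* ... and unlink the name at once; the open descriptor stays valid */
	sem_unlink(semname);

	sem_post(sem);				/* V operation */
	sem_wait(sem);				/* P operation */

	sem_close(sem);
	return 0;
}
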
diff --git a/src/backend/port/dynloader/aix.c b/src/backend/port/dynloader/aix.c
index c1556bb1c20..44ae28d67de 100644
--- a/src/backend/port/dynloader/aix.c
+++ b/src/backend/port/dynloader/aix.c
@@ -609,4 +609,4 @@ findMain(void)
return ret;
}
-#endif /* HAVE_DLOPEN */
+#endif /* HAVE_DLOPEN */
diff --git a/src/backend/port/dynloader/aix.h b/src/backend/port/dynloader/aix.h
index 9d8ff105c44..f34d07b0653 100644
--- a/src/backend/port/dynloader/aix.h
+++ b/src/backend/port/dynloader/aix.h
@@ -1,5 +1,5 @@
/*
- * $Id: aix.h,v 1.4 2001/02/10 02:31:26 tgl Exp $
+ * $Id: aix.h,v 1.5 2001/03/22 03:59:42 momjian Exp $
*
* @(#)dlfcn.h 1.4 revision of 95/04/25 09:36:52
* This is an unpublished work copyright (c) 1992 HELIOS Software GmbH
@@ -13,7 +13,7 @@
#include <dlfcn.h>
-#else /* HAVE_DLOPEN */
+#else /* HAVE_DLOPEN */
#ifdef __cplusplus
extern "C"
@@ -54,13 +54,13 @@ extern "C"
#endif
-#endif /* HAVE_DLOPEN */
+#endif /* HAVE_DLOPEN */
#include "utils/dynamic_loader.h"
#define pg_dlopen(f) dlopen(f, RTLD_LAZY)
#define pg_dlsym dlsym
-#define pg_dlclose dlclose
-#define pg_dlerror dlerror
+#define pg_dlclose dlclose
+#define pg_dlerror dlerror
#endif /* PORT_PROTOS_H */
diff --git a/src/backend/port/dynloader/beos.c b/src/backend/port/dynloader/beos.c
index 80bdbca0373..dd28c5c5f81 100644
--- a/src/backend/port/dynloader/beos.c
+++ b/src/backend/port/dynloader/beos.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/dynloader/Attic/beos.c,v 1.6 2001/02/10 02:31:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/dynloader/Attic/beos.c,v 1.7 2001/03/22 03:59:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,59 +18,66 @@
#include "utils/dynamic_loader.h"
-void *
+void *
pg_dlopen(char *filename)
{
- image_id* im;
-
- /* Handle memory allocation to store the Id of the shared object*/
- im=(image_id*)(malloc(sizeof(image_id)));
-
+ image_id *im;
+
+ /* Handle memory allocation to store the Id of the shared object */
+ im = (image_id *) (malloc(sizeof(image_id)));
+
/* Add-on loading */
- *im=beos_dl_open(filename);
-
+ *im = beos_dl_open(filename);
+
return im;
}
-char *
+char *
pg_dlerror()
{
static char errmsg[] = "Load Add-On failed";
+
return errmsg;
}
-PGFunction
+PGFunction
pg_dlsym(void *handle, char *funcname)
{
- PGFunction fpt;
+ PGFunction fpt;
/* Checking that "Handle" is valid */
- if ((handle) && ((*(int*)(handle))>=0))
+ if ((handle) && ((*(int *) (handle)) >= 0))
{
/* Loading symbol */
- if(get_image_symbol(*((int*)(handle)),funcname,B_SYMBOL_TYPE_TEXT,(void**)&fpt)==B_OK);
+	if (get_image_symbol(*((int *) (handle)), funcname, B_SYMBOL_TYPE_TEXT, (void **) &fpt) == B_OK)
{
- /* Sometime the loader return B_OK for an inexistant function with an invalid address !!!
- Check that the return address is in the image range */
- image_info info;
- get_image_info(*((int*)(handle)),&info);
- if ((fpt<info.text) || (fpt>=(info.text+info.text_size))) return NULL;
+
+ /*
+			 * Sometimes the loader returns B_OK for a nonexistent function
+ * with an invalid address !!! Check that the return address
+ * is in the image range
+ */
+ image_info info;
+
+ get_image_info(*((int *) (handle)), &info);
+		if ((fpt < info.text) || (fpt >= (info.text + info.text_size)))
+ return NULL;
return fpt;
}
- elog(NOTICE, "loading symbol '%s' failed ",funcname);
+ elog(NOTICE, "loading symbol '%s' failed ", funcname);
}
elog(NOTICE, "add-on not loaded correctly");
return NULL;
}
-void
+void
pg_dlclose(void *handle)
{
/* Checking that "Handle" is valid */
- if ((handle) && ((*(int*)(handle))>=0))
+ if ((handle) && ((*(int *) (handle)) >= 0))
{
- if (beos_dl_close(*(image_id*)handle)!=B_OK)
+ if (beos_dl_close(*(image_id *) handle) != B_OK)
elog(NOTICE, "error while unloading add-on");
free(handle);
}
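
All of the dynloader ports touched by this diff expose the same entry points (pg_dlopen,
pg_dlsym, pg_dlclose, pg_dlerror). A small usage sketch of that interface, assuming the
declarations from utils/dynamic_loader.h; the helper name load_one_symbol and its arguments
are purely illustrative:

#include "postgres.h"

#include "utils/dynamic_loader.h"

static PGFunction
load_one_symbol(char *path, char *symbol)
{
	void	   *handle;
	PGFunction	fn;

	/* load the shared object (add-on on BeOS, bundle on darwin, ...) */
	handle = pg_dlopen(path);
	if (handle == NULL)
	{
		elog(NOTICE, "pg_dlopen failed: %s", pg_dlerror());
		return NULL;
	}

	/* resolve the function; NULL means the symbol was not found */
	fn = pg_dlsym(handle, symbol);
	if (fn == NULL)
		pg_dlclose(handle);

	return fn;
}
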
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 6c149b9f55e..9f95d4011f9 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -1,16 +1,17 @@
/*
* These routines were taken from the Apache source, but were made
- * available with a PostgreSQL-compatible license. Kudos Wilfredo
+ * available with a PostgreSQL-compatible license. Kudos Wilfredo
 * Sánchez <wsanchez@apple.com>.
*
- * $Header: /cvsroot/pgsql/src/backend/port/dynloader/darwin.c,v 1.4 2000/12/11 00:49:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/dynloader/darwin.c,v 1.5 2001/03/22 03:59:42 momjian Exp $
*/
#include "postgres.h"
#include <mach-o/dyld.h>
#include "dynloader.h"
-void *pg_dlopen(char *filename)
+void *
+pg_dlopen(char *filename)
{
NSObjectFileImage image;
@@ -20,16 +21,18 @@ void *pg_dlopen(char *filename)
return NSLinkModule(image, filename, TRUE);
}
-void pg_dlclose(void *handle)
+void
+pg_dlclose(void *handle)
{
- NSUnLinkModule(handle,FALSE);
+ NSUnLinkModule(handle, FALSE);
return;
}
-PGFunction pg_dlsym(void *handle, char *funcname)
+PGFunction
+pg_dlsym(void *handle, char *funcname)
{
- NSSymbol symbol;
- char *symname = (char*)malloc(strlen(funcname)+2);
+ NSSymbol symbol;
+ char *symname = (char *) malloc(strlen(funcname) + 2);
sprintf(symname, "_%s", funcname);
if (NSIsSymbolNameDefined(symname))
@@ -41,11 +44,12 @@ PGFunction pg_dlsym(void *handle, char *funcname)
else
{
free(symname);
- return (PGFunction)NULL;
+ return (PGFunction) NULL;
}
}
-char *pg_dlerror(void)
+char *
+pg_dlerror(void)
{
return "no error message available";
}
diff --git a/src/backend/port/dynloader/darwin.h b/src/backend/port/dynloader/darwin.h
index 0e73eb2be67..da56c7690e0 100644
--- a/src/backend/port/dynloader/darwin.h
+++ b/src/backend/port/dynloader/darwin.h
@@ -1,8 +1,8 @@
-/* $Header: /cvsroot/pgsql/src/backend/port/dynloader/darwin.h,v 1.3 2000/12/11 00:49:54 tgl Exp $ */
+/* $Header: /cvsroot/pgsql/src/backend/port/dynloader/darwin.h,v 1.4 2001/03/22 03:59:42 momjian Exp $ */
#include "fmgr.h"
-void* pg_dlopen(char *filename);
+void *pg_dlopen(char *filename);
PGFunction pg_dlsym(void *handle, char *funcname);
void pg_dlclose(void *handle);
-char* pg_dlerror(void);
+char *pg_dlerror(void);
diff --git a/src/backend/port/dynloader/hpux.c b/src/backend/port/dynloader/hpux.c
index 73e50b71806..967967e0269 100644
--- a/src/backend/port/dynloader/hpux.c
+++ b/src/backend/port/dynloader/hpux.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/dynloader/hpux.c,v 1.17 2001/02/10 02:31:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/dynloader/hpux.c,v 1.18 2001/03/22 03:59:43 momjian Exp $
*
* NOTES
* all functions are defined here -- it's impossible to trace the
@@ -28,13 +28,14 @@
void *
pg_dlopen(char *filename)
{
+
/*
* Use BIND_IMMEDIATE so that undefined symbols cause a failure return
* from shl_load(), rather than an abort() later on when we attempt to
* call the library!
*/
shl_t handle = shl_load(filename,
- BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH,
+ BIND_IMMEDIATE | BIND_VERBOSE | DYNAMIC_PATH,
0L);
return (void *) handle;
diff --git a/src/backend/port/dynloader/solaris.h b/src/backend/port/dynloader/solaris.h
index 3c70b909524..c5f6ec18741 100644
--- a/src/backend/port/dynloader/solaris.h
+++ b/src/backend/port/dynloader/solaris.h
@@ -1,4 +1,4 @@
-/* $Header: /cvsroot/pgsql/src/backend/port/dynloader/solaris.h,v 1.2 2001/02/10 02:31:26 tgl Exp $ */
+/* $Header: /cvsroot/pgsql/src/backend/port/dynloader/solaris.h,v 1.3 2001/03/22 03:59:43 momjian Exp $ */
#ifndef DYNLOADER_SOLARIS_H
#define DYNLOADER_SOLARIS_H
@@ -11,4 +11,4 @@
#define pg_dlclose dlclose
#define pg_dlerror dlerror
-#endif /* DYNLOADER_SOLARIS_H */
+#endif /* DYNLOADER_SOLARIS_H */
diff --git a/src/backend/port/qnx4/shm.c b/src/backend/port/qnx4/shm.c
index 9958f799384..14c77f4fc9c 100644
--- a/src/backend/port/qnx4/shm.c
+++ b/src/backend/port/qnx4/shm.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/qnx4/Attic/shm.c,v 1.3 2001/03/13 01:17:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/qnx4/Attic/shm.c,v 1.4 2001/03/22 03:59:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -184,8 +184,10 @@ shmctl(int shmid, int cmd, struct shmid_ds * buf)
}
if (cmd == IPC_STAT)
{
- /* Can we support IPC_STAT? We only need shm_nattch ...
- * For now, punt and assume the shm seg does not exist.
+
+ /*
+ * Can we support IPC_STAT? We only need shm_nattch ... For now,
+ * punt and assume the shm seg does not exist.
*/
errno = EINVAL;
return -1;
diff --git a/src/backend/port/strtol.c b/src/backend/port/strtol.c
index d70f9590102..4e722755297 100644
--- a/src/backend/port/strtol.c
+++ b/src/backend/port/strtol.c
@@ -109,7 +109,7 @@ int base;
cutoff = neg ? -(unsigned long) LONG_MIN : LONG_MAX;
cutlim = cutoff % (unsigned long) base;
cutoff /= (unsigned long) base;
- for (acc = 0, any = 0; ; c = *s++)
+ for (acc = 0, any = 0;; c = *s++)
{
if (isdigit(c))
c -= '0';
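
The strtol.c hunk above only reflows the accumulation loop; the overflow guard it sits in
works by precomputing the largest safe accumulator (cutoff) and the largest digit that may
then still be added (cutlim). A reduced sketch of that check for non-negative decimal input,
using the same cutoff/cutlim idea but not the original's sign and base handling:

#include <ctype.h>
#include <limits.h>

/* parse a non-negative decimal string, clamping to LONG_MAX on overflow */
static long
parse_long10(const char *s)
{
	unsigned long acc = 0;
	unsigned long cutoff = LONG_MAX / 10;		/* largest acc we may multiply */
	int			cutlim = LONG_MAX % 10; /* largest digit we may then add */
	int			c;

	while (isdigit((unsigned char) *s))
	{
		c = *s++ - '0';
		if (acc > cutoff || (acc == cutoff && c > cutlim))
			return LONG_MAX;	/* would overflow */
		acc = acc * 10 + c;
	}
	return (long) acc;
}
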
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 044bc207f92..5ee2257e445 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -7,7 +7,7 @@
* message to setup a backend process.
*
* The postmaster also manages system-wide operations such as
- * startup, shutdown, and periodic checkpoints. The postmaster
+ * startup, shutdown, and periodic checkpoints. The postmaster
* itself doesn't do those operations, mind you --- it just forks
* off a subprocess to do them at the right times. It also takes
* care of resetting the system if a backend crashes.
@@ -15,7 +15,7 @@
* The postmaster process creates the shared memory and semaphore
* pools during startup, but as a rule does not touch them itself.
* In particular, it is not a member of the PROC array of backends
- * and so it cannot participate in lock-manager operations. Keeping
+ * and so it cannot participate in lock-manager operations. Keeping
* the postmaster away from shared memory operations makes it simpler
* and more reliable. The postmaster is almost always able to recover
* from crashes of individual backends by resetting shared memory;
@@ -28,7 +28,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.210 2001/03/14 17:58:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.211 2001/03/22 03:59:43 momjian Exp $
*
* NOTES
*
@@ -127,9 +127,9 @@ static Dllist *BackendList;
static Dllist *PortList;
/* The socket number we are listening for connections on */
-int PostPortNumber;
-char *UnixSocketDir;
-char *VirtualHost;
+int PostPortNumber;
+char *UnixSocketDir;
+char *VirtualHost;
/*
* MaxBackends is the actual limit on the number of backends we will
@@ -139,7 +139,7 @@ char *VirtualHost;
* memory area as well as cause the postmaster to grab more kernel
* semaphores, even if you never actually use that many backends.
*/
-int MaxBackends = DEF_MAXBACKENDS;
+int MaxBackends = DEF_MAXBACKENDS;
static char *progname = (char *) NULL;
@@ -158,10 +158,12 @@ static int ServerSock_INET = INVALID_SOCK; /* stream socket server */
#ifdef HAVE_UNIX_SOCKETS
static int ServerSock_UNIX = INVALID_SOCK; /* stream socket server */
+
#endif
#ifdef USE_SSL
static SSL_CTX *SSL_context = NULL; /* Global SSL context */
+
#endif
/*
@@ -179,16 +181,16 @@ static char ExtraOptions[MAXPGPATH];
static bool Reinit = true;
static int SendStop = false;
-bool NetServer = false; /* listen on TCP/IP */
-bool EnableSSL = false;
-bool SilentMode = false; /* silent mode (-S) */
+bool NetServer = false; /* listen on TCP/IP */
+bool EnableSSL = false;
+bool SilentMode = false; /* silent mode (-S) */
-int CheckPointTimeout = 300;
+int CheckPointTimeout = 300;
-static pid_t StartupPID = 0,
- ShutdownPID = 0,
- CheckPointPID = 0;
-static time_t checkpointed = 0;
+static pid_t StartupPID = 0,
+ ShutdownPID = 0,
+ CheckPointPID = 0;
+static time_t checkpointed = 0;
#define NoShutdown 0
#define SmartShutdown 1
@@ -196,7 +198,7 @@ static time_t checkpointed = 0;
static int Shutdown = NoShutdown;
-static bool FatalError = false; /* T if recovering from backend crash */
+static bool FatalError = false; /* T if recovering from backend crash */
/*
* State for assigning random salts and cancel keys.
@@ -262,7 +264,7 @@ checkDataDir(const char *checkdir)
fprintf(stderr, "%s does not know where to find the database system "
"data. You must specify the directory that contains the "
"database system either by specifying the -D invocation "
- "option or by setting the PGDATA environment variable.\n\n",
+ "option or by setting the PGDATA environment variable.\n\n",
progname);
ExitPostmaster(2);
}
@@ -292,7 +294,7 @@ PostmasterMain(int argc, char *argv[])
int opt;
int status;
char original_extraoptions[MAXPGPATH];
- char *potential_DataDir = NULL;
+ char *potential_DataDir = NULL;
IsUnderPostmaster = true; /* so that backends know this */
@@ -303,22 +305,22 @@ PostmasterMain(int argc, char *argv[])
real_argc = argc;
/*
- * Catch standard options before doing much else. This even works
- * on systems without getopt_long.
+ * Catch standard options before doing much else. This even works on
+ * systems without getopt_long.
*/
if (argc > 1)
{
- if (strcmp(argv[1], "--help")==0 || strcmp(argv[1], "-?")==0)
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
usage(progname);
ExitPostmaster(0);
}
- if (strcmp(argv[1], "--version")==0 || strcmp(argv[1], "-V")==0)
+ if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
puts("postmaster (PostgreSQL) " PG_VERSION);
ExitPostmaster(0);
}
- }
+ }
/*
@@ -351,27 +353,26 @@ PostmasterMain(int argc, char *argv[])
/*
* Options setup
*/
- potential_DataDir = getenv("PGDATA"); /* default value */
+ potential_DataDir = getenv("PGDATA"); /* default value */
ResetAllOptions();
/*
- * First we must scan for a -D argument to get the data dir. Then
- * read the config file. Finally, scan all the other arguments.
- * (Command line switches override config file.)
+ * First we must scan for a -D argument to get the data dir. Then read
+ * the config file. Finally, scan all the other arguments. (Command
+ * line switches override config file.)
*
- * Note: The two lists of options must be exactly the same, even
- * though perhaps the first one would only have to be "D:" with
- * opterr turned off. But some versions of getopt (notably GNU)
- * are going to arbitrarily permute some "non-options" (according
- * to the local world view) which will result in some switches
- * being associated with the wrong argument. Death and destruction
- * will occur.
+ * Note: The two lists of options must be exactly the same, even though
+ * perhaps the first one would only have to be "D:" with opterr turned
+ * off. But some versions of getopt (notably GNU) are going to
+ * arbitrarily permute some "non-options" (according to the local
+ * world view) which will result in some switches being associated
+ * with the wrong argument. Death and destruction will occur.
*/
opterr = 1;
while ((opt = getopt(argc, argv, "A:a:B:b:c:D:d:Fh:ik:lm:MN:no:p:Ss-:")) != EOF)
{
- switch(opt)
+ switch (opt)
{
case 'D':
potential_DataDir = optarg;
@@ -400,7 +401,7 @@ PostmasterMain(int argc, char *argv[])
IgnoreSystemIndexes(false);
- optind = 1; /* start over */
+ optind = 1; /* start over */
#ifdef HAVE_INT_OPTRESET
optreset = 1;
#endif
@@ -449,7 +450,7 @@ PostmasterMain(int argc, char *argv[])
break;
#ifdef USE_SSL
case 'l':
- EnableSSL = true;
+ EnableSSL = true;
break;
#endif
case 'm':
@@ -514,25 +515,26 @@ PostmasterMain(int argc, char *argv[])
break;
case 'c':
case '-':
- {
- char *name, *value;
-
- ParseLongOption(optarg, &name, &value);
- if (!value)
{
- if (opt == '-')
- elog(ERROR, "--%s requires argument", optarg);
- else
- elog(ERROR, "-c %s requires argument", optarg);
+ char *name,
+ *value;
+
+ ParseLongOption(optarg, &name, &value);
+ if (!value)
+ {
+ if (opt == '-')
+ elog(ERROR, "--%s requires argument", optarg);
+ else
+ elog(ERROR, "-c %s requires argument", optarg);
+ }
+
+ SetConfigOption(name, value, PGC_POSTMASTER);
+ free(name);
+ if (value)
+ free(value);
+ break;
}
- SetConfigOption(name, value, PGC_POSTMASTER);
- free(name);
- if (value)
- free(value);
- break;
- }
-
default:
/* shouldn't get here */
fprintf(stderr, "Try '%s --help' for more information.\n", progname);
@@ -583,11 +585,11 @@ PostmasterMain(int argc, char *argv[])
*
* We want to do this before we try to grab the input sockets, because
* the data directory interlock is more reliable than the socket-file
- * interlock (thanks to whoever decided to put socket files in /tmp :-().
- * For the same reason, it's best to grab the TCP socket before the
- * Unix socket.
+ * interlock (thanks to whoever decided to put socket files in /tmp
+ * :-(). For the same reason, it's best to grab the TCP socket before
+ * the Unix socket.
*/
- if (! CreateDataDirLockFile(DataDir, true))
+ if (!CreateDataDirLockFile(DataDir, true))
ExitPostmaster(1);
/*
@@ -601,14 +603,14 @@ PostmasterMain(int argc, char *argv[])
ExitPostmaster(1);
}
if (EnableSSL)
- InitSSL();
+ InitSSL();
#endif
if (NetServer)
{
status = StreamServerPort(AF_INET, VirtualHost,
- (unsigned short) PostPortNumber, UnixSocketDir,
- &ServerSock_INET);
+ (unsigned short) PostPortNumber, UnixSocketDir,
+ &ServerSock_INET);
if (status != STATUS_OK)
{
fprintf(stderr, "%s: cannot create INET stream port\n",
@@ -619,8 +621,8 @@ PostmasterMain(int argc, char *argv[])
#ifdef HAVE_UNIX_SOCKETS
status = StreamServerPort(AF_UNIX, VirtualHost,
- (unsigned short) PostPortNumber, UnixSocketDir,
- &ServerSock_UNIX);
+ (unsigned short) PostPortNumber, UnixSocketDir,
+ &ServerSock_UNIX);
if (status != STATUS_OK)
{
fprintf(stderr, "%s: cannot create UNIX stream port\n",
@@ -644,8 +646,9 @@ PostmasterMain(int argc, char *argv[])
PortList = DLNewList();
/*
- * Record postmaster options. We delay this till now to avoid recording
- * bogus options (eg, NBuffers too high for available memory).
+ * Record postmaster options. We delay this till now to avoid
+ * recording bogus options (eg, NBuffers too high for available
+ * memory).
*/
if (!CreateOptsFile(argc, argv))
ExitPostmaster(1);
@@ -656,13 +659,15 @@ PostmasterMain(int argc, char *argv[])
pqinitmask();
PG_SETMASK(&BlockSig);
- pqsignal(SIGHUP, SIGHUP_handler); /* reread config file and have children do same */
+ pqsignal(SIGHUP, SIGHUP_handler); /* reread config file and have
+ * children do same */
pqsignal(SIGINT, pmdie); /* send SIGTERM and ShutdownDataBase */
pqsignal(SIGQUIT, pmdie); /* send SIGQUIT and die */
pqsignal(SIGTERM, pmdie); /* wait for children and ShutdownDataBase */
pqsignal(SIGALRM, SIG_IGN); /* ignored */
pqsignal(SIGPIPE, SIG_IGN); /* ignored */
- pqsignal(SIGUSR1, schedule_checkpoint); /* start a background checkpoint */
+ pqsignal(SIGUSR1, schedule_checkpoint); /* start a background
+ * checkpoint */
pqsignal(SIGUSR2, pmdie); /* send SIGUSR2, don't die */
pqsignal(SIGCHLD, reaper); /* handle child termination */
pqsignal(SIGTTIN, SIG_IGN); /* ignored */
@@ -677,7 +682,8 @@ PostmasterMain(int argc, char *argv[])
status = ServerLoop();
/*
- * ServerLoop probably shouldn't ever return, but if it does, close down.
+ * ServerLoop probably shouldn't ever return, but if it does, close
+ * down.
*/
ExitPostmaster(status != STATUS_OK);
@@ -749,7 +755,7 @@ usage(const char *progname)
printf(" -l enable SSL connections\n");
#endif
printf(" -N MAX-CONNECT maximum number of allowed connections (1..%d, default %d)\n",
- MAXBACKENDS, DEF_MAXBACKENDS);
+ MAXBACKENDS, DEF_MAXBACKENDS);
printf(" -o OPTIONS pass 'OPTIONS' to each backend server\n");
printf(" -p PORT port number to listen on (default %d)\n", DEF_PGPORT);
printf(" -S silent mode (start in background without logging output)\n");
@@ -782,20 +788,24 @@ ServerLoop(void)
for (;;)
{
- Port *port;
- fd_set rmask,
- wmask;
+ Port *port;
+ fd_set rmask,
+ wmask;
struct timeval *timeout = NULL;
- struct timeval timeout_tv;
+ struct timeval timeout_tv;
if (CheckPointPID == 0 && checkpointed &&
Shutdown == NoShutdown && !FatalError)
{
- time_t now = time(NULL);
+ time_t now = time(NULL);
if (CheckPointTimeout + checkpointed > now)
{
- /* Not time for checkpoint yet, so set a timeout for select */
+
+ /*
+ * Not time for checkpoint yet, so set a timeout for
+ * select
+ */
timeout_tv.tv_sec = CheckPointTimeout + checkpointed - now;
timeout_tv.tv_usec = 0;
timeout = &timeout_tv;
@@ -804,15 +814,18 @@ ServerLoop(void)
{
/* Time to make the checkpoint... */
CheckPointPID = CheckPointDataBase();
- /* if fork failed, schedule another try at 0.1 normal delay */
+
+ /*
+ * if fork failed, schedule another try at 0.1 normal
+ * delay
+ */
if (CheckPointPID == 0)
- {
checkpointed = now - (9 * CheckPointTimeout) / 10;
- }
}
}
#ifdef USE_SSL
+
/*
* If we are using SSL, there may be input data already read and
* pending in SSL's input buffers. If so, check for additional
@@ -955,19 +968,18 @@ ServerLoop(void)
if (status == STATUS_OK && port->pktInfo.state == Idle)
{
+
/*
* Can we accept a connection now?
*
- * Even though readStartupPacket() already checked,
- * we have to check again in case conditions changed
- * while negotiating authentication.
+ * Even though readStartupPacket() already checked, we have
+ * to check again in case conditions changed while
+ * negotiating authentication.
*/
- char *rejectMsg = canAcceptConnections();
+ char *rejectMsg = canAcceptConnections();
if (rejectMsg != NULL)
- {
PacketSendError(&port->pktInfo, rejectMsg);
- }
else
{
@@ -1008,7 +1020,7 @@ ServerLoop(void)
}
curr = next;
- } /* loop over active ports */
+ } /* loop over active ports */
}
}
@@ -1087,20 +1099,20 @@ readStartupPacket(void *arg, PacketLen len, void *pkt)
if (send(port->sock, &SSLok, 1, 0) != 1)
{
perror("Failed to send SSL negotiation response");
- return STATUS_ERROR; /* Close connection */
+ return STATUS_ERROR;/* Close connection */
}
#ifdef USE_SSL
if (SSLok == 'S')
{
- if (!(port->ssl = SSL_new(SSL_context)) ||
- !SSL_set_fd(port->ssl, port->sock) ||
- SSL_accept(port->ssl) <= 0)
- {
- fprintf(stderr, "Failed to initialize SSL connection: %s, errno: %d (%s)\n",
- ERR_reason_error_string(ERR_get_error()), errno, strerror(errno));
- return STATUS_ERROR;
- }
+ if (!(port->ssl = SSL_new(SSL_context)) ||
+ !SSL_set_fd(port->ssl, port->sock) ||
+ SSL_accept(port->ssl) <= 0)
+ {
+ fprintf(stderr, "Failed to initialize SSL connection: %s, errno: %d (%s)\n",
+ ERR_reason_error_string(ERR_get_error()), errno, strerror(errno));
+ return STATUS_ERROR;
+ }
}
#endif
/* ready for the normal startup packet */
@@ -1140,13 +1152,16 @@ readStartupPacket(void *arg, PacketLen len, void *pkt)
if (port->database[0] == '\0')
StrNCpy(port->database, si->user, sizeof(port->database));
- /* Truncate given database and user names to length of a Postgres name. */
+ /*
+ * Truncate given database and user names to length of a Postgres
+ * name.
+ */
/* This avoids lookup failures when overlength names are given. */
if ((int) sizeof(port->database) >= NAMEDATALEN)
- port->database[NAMEDATALEN-1] = '\0';
+ port->database[NAMEDATALEN - 1] = '\0';
if ((int) sizeof(port->user) >= NAMEDATALEN)
- port->user[NAMEDATALEN-1] = '\0';
+ port->user[NAMEDATALEN - 1] = '\0';
/* Check a user name was given. */
@@ -1158,8 +1173,8 @@ readStartupPacket(void *arg, PacketLen len, void *pkt)
}
/*
- * If we're going to reject the connection due to database state,
- * say so now instead of wasting cycles on an authentication exchange.
+ * If we're going to reject the connection due to database state, say
+ * so now instead of wasting cycles on an authentication exchange.
* (This also allows a pg_ping utility to be written.)
*/
rejectMsg = canAcceptConnections();
@@ -1358,13 +1373,15 @@ ClosePostmasterPorts(Port *myConn)
static void
reset_shared(unsigned short port)
{
+
/*
- * Reset assignment of shared mem and semaphore IPC keys.
- * Doing this means that in normal cases we'll assign the same keys
- * on each "cycle of life", and thereby avoid leaving dead IPC objects
+ * Reset assignment of shared mem and semaphore IPC keys. Doing this
+ * means that in normal cases we'll assign the same keys on each
+ * "cycle of life", and thereby avoid leaving dead IPC objects
* floating around if the postmaster crashes and is restarted.
*/
IpcInitKeyAssignment(port);
+
/*
* Create or re-create shared memory and semaphores.
*/
@@ -1540,10 +1557,13 @@ static void
reaper(SIGNAL_ARGS)
{
int save_errno = errno;
+
#ifdef HAVE_WAITPID
int status; /* backend exit status */
+
#else
union wait status; /* backend exit status */
+
#endif
int exitstatus;
int pid; /* process id of dead backend */
@@ -1589,7 +1609,7 @@ reaper(SIGNAL_ARGS)
ExitPostmaster(1);
}
StartupPID = 0;
- FatalError = false; /* done with recovery */
+ FatalError = false; /* done with recovery */
if (Shutdown > NoShutdown)
{
if (ShutdownPID > 0)
@@ -1618,7 +1638,8 @@ reaper(SIGNAL_ARGS)
{
/*
- * Wait for all children exit, then reset shmem and StartupDataBase.
+				 * Wait for all children to exit, then reset shmem and
+ * StartupDataBase.
*/
if (DLGetHead(BackendList) || StartupPID > 0 || ShutdownPID > 0)
{
@@ -1735,14 +1756,15 @@ CleanupProc(int pid,
bp = (Backend *) DLE_VAL(curr);
if (bp->pid != pid)
{
+
/*
* This backend is still alive. Unless we did so already,
* tell it to commit hara-kiri.
*
* SIGQUIT is the special signal that says exit without proc_exit
- * and let the user know what's going on. But if SendStop is set
- * (-s on command line), then we send SIGSTOP instead, so that we
- * can get core dumps from all backends by hand.
+ * and let the user know what's going on. But if SendStop is
+ * set (-s on command line), then we send SIGSTOP instead, so
+ * that we can get core dumps from all backends by hand.
*/
if (!FatalError)
{
@@ -1756,12 +1778,13 @@ CleanupProc(int pid,
}
else
{
+
/*
* Found entry for freshly-dead backend, so remove it.
*
- * Don't call ProcRemove() here, since shmem may be corrupted!
- * We are going to reinitialize shmem and semaphores anyway
- * once all the children are dead, so no need for it.
+ * Don't call ProcRemove() here, since shmem may be corrupted! We
+ * are going to reinitialize shmem and semaphores anyway once
+ * all the children are dead, so no need for it.
*/
DLRemove(curr);
free(bp);
@@ -2278,7 +2301,7 @@ static pid_t
SSDataBase(int xlop)
{
pid_t pid;
- Backend *bn;
+ Backend *bn;
fflush(stdout);
fflush(stderr);
@@ -2340,13 +2363,14 @@ SSDataBase(int xlop)
#endif
fprintf(stderr, "%s Data Base: fork failed: %s\n",
- ((xlop == BS_XLOG_STARTUP) ? "Startup" :
+ ((xlop == BS_XLOG_STARTUP) ? "Startup" :
((xlop == BS_XLOG_CHECKPOINT) ? "CheckPoint" :
"Shutdown")),
strerror(errno));
+
/*
- * fork failure is fatal during startup/shutdown, but there's
- * no need to choke if a routine checkpoint fails.
+ * fork failure is fatal during startup/shutdown, but there's no
+ * need to choke if a routine checkpoint fails.
*/
if (xlop == BS_XLOG_CHECKPOINT)
return 0;
@@ -2354,9 +2378,9 @@ SSDataBase(int xlop)
}
/*
- * The startup and shutdown processes are not considered normal backends,
- * but the checkpoint process is. Checkpoint must be added to the list
- * of backends.
+ * The startup and shutdown processes are not considered normal
+ * backends, but the checkpoint process is. Checkpoint must be added
+ * to the list of backends.
*/
if (xlop == BS_XLOG_CHECKPOINT)
{
@@ -2372,9 +2396,9 @@ SSDataBase(int xlop)
DLAddHead(BackendList, DLNewElem(bn));
/*
- * Since this code is executed periodically, it's a fine
- * place to do other actions that should happen every now
- * and then on no particular schedule. Such as...
+ * Since this code is executed periodically, it's a fine place to
+ * do other actions that should happen every now and then on no
+ * particular schedule. Such as...
*/
TouchSocketLockFile();
}
@@ -2389,10 +2413,10 @@ SSDataBase(int xlop)
static bool
CreateOptsFile(int argc, char *argv[])
{
- char fullprogname[MAXPGPATH];
- char *filename;
- FILE *fp;
- unsigned i;
+ char fullprogname[MAXPGPATH];
+ char *filename;
+ FILE *fp;
+ unsigned i;
if (FindExec(fullprogname, argv[0], "postmaster") == -1)
return false;
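
The ServerLoop hunk near the top of the postmaster.c diff computes how long select() may
sleep before the next checkpoint is due: CheckPointTimeout seconds after the `checkpointed`
timestamp. A reduced sketch of just that computation, with checkpoint_timeout standing in
for CheckPointTimeout and the fork of the checkpoint process left to the caller:

#include <stddef.h>
#include <sys/time.h>
#include <time.h>

/*
 * Return a select() timeout that expires when the next checkpoint is due,
 * or NULL when a checkpoint should be started right away.
 */
static struct timeval *
next_checkpoint_timeout(time_t checkpointed, int checkpoint_timeout,
						struct timeval *timeout_tv)
{
	time_t		now = time(NULL);

	if (checkpoint_timeout + checkpointed > now)
	{
		/* not time yet: sleep until the checkpoint becomes due */
		timeout_tv->tv_sec = checkpoint_timeout + checkpointed - now;
		timeout_tv->tv_usec = 0;
		return timeout_tv;
	}
	/* due now: the caller would fork the checkpoint process instead */
	return NULL;
}
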
diff --git a/src/backend/regex/engine.c b/src/backend/regex/engine.c
index 17aa3c8a490..8bf0ee68954 100644
--- a/src/backend/regex/engine.c
+++ b/src/backend/regex/engine.c
@@ -80,25 +80,25 @@ struct match
pg_wchar *endp; /* end of string -- virtual NUL here */
pg_wchar *coldp; /* can be no match starting before here */
pg_wchar **lastpos; /* [nplus+1] */
- STATEVARS;
+ STATEVARS;
states st; /* current states */
states fresh; /* states for a fresh start */
states tmp; /* temporary */
states empty; /* empty set of states */
};
-static int matcher(struct re_guts *g, pg_wchar *string, size_t nmatch,
- regmatch_t *pmatch, int eflags);
-static pg_wchar *dissect(struct match *m, pg_wchar *start, pg_wchar *stop,
- sopno startst, sopno stopst);
-static pg_wchar *backref(struct match *m, pg_wchar *start, pg_wchar *stop,
- sopno startst, sopno stopst, sopno lev);
-static pg_wchar *fast(struct match *m, pg_wchar *start, pg_wchar *stop,
- sopno startst, sopno stopst);
-static pg_wchar *slow(struct match *m, pg_wchar *start, pg_wchar *stop,
- sopno startst, sopno stopst);
-static states step(struct re_guts *g, sopno start,
- sopno stop, states bef, int ch, states aft);
+static int matcher(struct re_guts * g, pg_wchar * string, size_t nmatch,
+ regmatch_t *pmatch, int eflags);
+static pg_wchar *dissect(struct match * m, pg_wchar * start, pg_wchar * stop,
+ sopno startst, sopno stopst);
+static pg_wchar *backref(struct match * m, pg_wchar * start, pg_wchar * stop,
+ sopno startst, sopno stopst, sopno lev);
+static pg_wchar *fast(struct match * m, pg_wchar * start, pg_wchar * stop,
+ sopno startst, sopno stopst);
+static pg_wchar *slow(struct match * m, pg_wchar * start, pg_wchar * stop,
+ sopno startst, sopno stopst);
+static states step(struct re_guts * g, sopno start,
+ sopno stop, states bef, int ch, states aft);
#define BOL (OUT+1)
#define EOL (BOL+1)
@@ -117,12 +117,13 @@ static states step(struct re_guts *g, sopno start,
#endif
#ifdef REDEBUG
-static void print(struct match *m, pg_wchar *caption, states st, int ch,
- FILE *d);
-static void at(struct match *m, pg_wchar *title, pg_wchar *start,
- pg_wchar *stop, sopno startst, sopno stopst);
+static void print(struct match * m, pg_wchar * caption, states st, int ch,
+ FILE *d);
+static void at(struct match * m, pg_wchar * title, pg_wchar * start,
+ pg_wchar * stop, sopno startst, sopno stopst);
static pg_wchar *pchar(int ch);
-static int pg_isprint(int c);
+static int pg_isprint(int c);
+
#endif
#ifdef REDEBUG
@@ -139,7 +140,7 @@ static int pg_isprint(int c);
* matcher - the actual matching engine
*/
static int /* 0 success, REG_NOMATCH failure */
-matcher(struct re_guts *g, pg_wchar *string, size_t nmatch,
+matcher(struct re_guts * g, pg_wchar * string, size_t nmatch,
regmatch_t *pmatch, int eflags)
{
pg_wchar *endp;
@@ -325,7 +326,7 @@ matcher(struct re_guts *g, pg_wchar *string, size_t nmatch,
* dissect - figure out what matched what, no back references
*/
static pg_wchar * /* == stop (success) always */
-dissect(struct match *m, pg_wchar *start, pg_wchar *stop,
+dissect(struct match * m, pg_wchar * start, pg_wchar * stop,
sopno startst, sopno stopst)
{
int i;
@@ -521,7 +522,7 @@ dissect(struct match *m, pg_wchar *start, pg_wchar *stop,
* lev is PLUS nesting level
*/
static pg_wchar * /* == stop (success) or NULL (failure) */
-backref(struct match *m, pg_wchar *start, pg_wchar *stop,
+backref(struct match * m, pg_wchar * start, pg_wchar * stop,
sopno startst, sopno stopst, sopno lev)
{
int i;
@@ -728,7 +729,7 @@ backref(struct match *m, pg_wchar *start, pg_wchar *stop,
* fast - step through the string at top speed
*/
static pg_wchar * /* where tentative match ended, or NULL */
-fast(struct match *m, pg_wchar *start, pg_wchar *stop,
+fast(struct match * m, pg_wchar * start, pg_wchar * stop,
sopno startst, sopno stopst)
{
states st = m->st;
@@ -817,7 +818,7 @@ fast(struct match *m, pg_wchar *start, pg_wchar *stop,
* slow - step through the string more deliberately
*/
static pg_wchar * /* where it ended */
-slow(struct match *m, pg_wchar *start, pg_wchar *stop,
+slow(struct match * m, pg_wchar * start, pg_wchar * stop,
sopno startst, sopno stopst)
{
states st = m->st;
@@ -901,7 +902,7 @@ slow(struct match *m, pg_wchar *start, pg_wchar *stop,
* step - map set of states reachable before char to set reachable after
*/
static states
-step(struct re_guts *g,
+step(struct re_guts * g,
sopno start, /* start state within strip */
sopno stop, /* state after stop state within strip */
states bef, /* states reachable before */
@@ -1023,7 +1024,7 @@ step(struct re_guts *g,
* print - print a set of states
*/
static void
-print(struct match *m, pg_wchar *caption, states st,
+print(struct match * m, pg_wchar * caption, states st,
int ch, FILE *d)
{
struct re_guts *g = m->g;
@@ -1049,7 +1050,7 @@ print(struct match *m, pg_wchar *caption, states st,
* at - print current situation
*/
static void
-at(struct match *m, pg_wchar *title, pg_wchar *start, pg_wchar *stop,
+at(struct match * m, pg_wchar * title, pg_wchar * start, pg_wchar * stop,
sopno startst, sopno stopst)
{
if (!(m->eflags & REG_TRACE))
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index b45a3c52375..837a12c27ce 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -82,10 +82,13 @@ static void p_b_cclass(struct parse * p, cset *cs);
static void p_b_eclass(struct parse * p, cset *cs);
static pg_wchar p_b_symbol(struct parse * p);
static char p_b_coll_elem(struct parse * p, int endc);
+
#ifdef MULTIBYTE
static unsigned char othercase(int ch);
+
#else
static char othercase(int ch);
+
#endif
static void bothcases(struct parse * p, int ch);
static void ordinary(struct parse * p, int ch);
@@ -295,8 +298,8 @@ pg95_regcomp(regex_t *preg, const char *pattern, int cflags)
* p_ere - ERE parser top level, concatenation and alternation
*/
static void
-p_ere(struct parse *p,
- int stop) /* character this ERE should end at */
+p_ere(struct parse * p,
+ int stop) /* character this ERE should end at */
{
char c;
sopno prevback = 0;
@@ -342,7 +345,7 @@ p_ere(struct parse *p,
* p_ere_exp - parse one subERE, an atom possibly followed by a repetition op
*/
static void
-p_ere_exp(struct parse *p)
+p_ere_exp(struct parse * p)
{
pg_wchar c;
sopno pos;
@@ -501,7 +504,7 @@ p_ere_exp(struct parse *p)
* p_str - string (no metacharacters) "parser"
*/
static void
-p_str(struct parse *p)
+p_str(struct parse * p)
{
REQUIRE(MORE(), REG_EMPTY);
while (MORE())
@@ -520,7 +523,7 @@ p_str(struct parse *p)
* The amount of lookahead needed to avoid this kludge is excessive.
*/
static void
-p_bre(struct parse *p,
+p_bre(struct parse * p,
int end1, /* first terminating character */
int end2) /* second terminating character */
{
@@ -554,7 +557,7 @@ p_bre(struct parse *p,
* p_simp_re - parse a simple RE, an atom possibly followed by a repetition
*/
static int /* was the simple RE an unbackslashed $? */
-p_simp_re(struct parse *p,
+p_simp_re(struct parse * p,
int starordinary) /* is a leading * an ordinary character? */
{
int c;
@@ -691,7 +694,7 @@ p_simp_re(struct parse *p,
* p_count - parse a repetition count
*/
static int /* the value */
-p_count(struct parse *p)
+p_count(struct parse * p)
{
int count = 0;
int ndigits = 0;
@@ -713,7 +716,7 @@ p_count(struct parse *p)
* no set operations are done.
*/
static void
-p_bracket(struct parse *p)
+p_bracket(struct parse * p)
{
cset *cs = allocset(p);
int invert = 0;
@@ -806,7 +809,7 @@ p_bracket(struct parse *p)
* p_b_term - parse one term of a bracketed character list
*/
static void
-p_b_term(struct parse *p, cset *cs)
+p_b_term(struct parse * p, cset *cs)
{
pg_wchar c;
pg_wchar start,
@@ -878,7 +881,7 @@ p_b_term(struct parse *p, cset *cs)
* p_b_cclass - parse a character-class name and deal with it
*/
static void
-p_b_cclass(struct parse *p, cset *cs)
+p_b_cclass(struct parse * p, cset *cs)
{
pg_wchar *sp = p->next;
struct cclass *cp;
@@ -916,7 +919,7 @@ p_b_cclass(struct parse *p, cset *cs)
* This implementation is incomplete. xxx
*/
static void
-p_b_eclass(struct parse *p, cset *cs)
+p_b_eclass(struct parse * p, cset *cs)
{
char c;
@@ -928,7 +931,7 @@ p_b_eclass(struct parse *p, cset *cs)
* p_b_symbol - parse a character or [..]ed multicharacter collating symbol
*/
static pg_wchar /* value of symbol */
-p_b_symbol(struct parse *p)
+p_b_symbol(struct parse * p)
{
pg_wchar value;
@@ -946,7 +949,7 @@ p_b_symbol(struct parse *p)
* p_b_coll_elem - parse a collating-element name and look it up
*/
static char /* value of collating element */
-p_b_coll_elem(struct parse *p, int endc)
+p_b_coll_elem(struct parse * p, int endc)
{
pg_wchar *sp = p->next;
struct cname *cp;
@@ -1011,7 +1014,7 @@ othercase(int ch)
* Boy, is this implementation ever a kludge...
*/
static void
-bothcases(struct parse *p, int ch)
+bothcases(struct parse * p, int ch)
{
pg_wchar *oldnext = p->next;
pg_wchar *oldend = p->end;
@@ -1033,7 +1036,7 @@ bothcases(struct parse *p, int ch)
* ordinary - emit an ordinary character
*/
static void
-ordinary(struct parse *p, int ch)
+ordinary(struct parse * p, int ch)
{
cat_t *cap = p->g->categories;
@@ -1057,7 +1060,7 @@ ordinary(struct parse *p, int ch)
* Boy, is this implementation ever a kludge...
*/
static void
-nonnewline(struct parse *p)
+nonnewline(struct parse * p)
{
pg_wchar *oldnext = p->next;
pg_wchar *oldend = p->end;
@@ -1079,7 +1082,7 @@ nonnewline(struct parse *p)
* repeat - generate code for a bounded repetition, recursively if needed
*/
static void
-repeat(struct parse *p,
+repeat(struct parse * p,
sopno start, /* operand from here to end of strip */
int from, /* repeated from this number */
int to) /* to this number of times (maybe
@@ -1152,7 +1155,7 @@ repeat(struct parse *p,
* seterr - set an error condition
*/
static int /* useless but makes type checking happy */
-seterr(struct parse *p, int e)
+seterr(struct parse * p, int e)
{
if (p->error == 0) /* keep earliest error condition */
p->error = e;
@@ -1165,7 +1168,7 @@ seterr(struct parse *p, int e)
* allocset - allocate a set of characters for []
*/
static cset *
-allocset(struct parse *p)
+allocset(struct parse * p)
{
int no = p->g->ncsets++;
size_t nc;
@@ -1221,7 +1224,7 @@ allocset(struct parse *p)
* freeset - free a now-unused set
*/
static void
-freeset(struct parse *p, cset *cs)
+freeset(struct parse * p, cset *cs)
{
int i;
cset *top = &p->g->sets[p->g->ncsets];
@@ -1243,7 +1246,7 @@ freeset(struct parse *p, cset *cs)
* the same value!
*/
static int /* set number */
-freezeset(struct parse *p, cset *cs)
+freezeset(struct parse * p, cset *cs)
{
uch h = cs->hash;
int i;
@@ -1276,7 +1279,7 @@ freezeset(struct parse *p, cset *cs)
* firstch - return first character in a set (which must have at least one)
*/
static int /* character; there is no "none" value */
-firstch(struct parse *p, cset *cs)
+firstch(struct parse * p, cset *cs)
{
int i;
size_t css = (size_t) p->g->csetsize;
@@ -1292,7 +1295,7 @@ firstch(struct parse *p, cset *cs)
* nch - number of characters in a set
*/
static int
-nch(struct parse *p, cset *cs)
+nch(struct parse * p, cset *cs)
{
int i;
size_t css = (size_t) p->g->csetsize;
@@ -1308,7 +1311,7 @@ nch(struct parse *p, cset *cs)
* mcadd - add a collating element to a cset
*/
static void
-mcadd(struct parse *p, cset *cs, char *cp)
+mcadd(struct parse * p, cset *cs, char *cp)
{
size_t oldend = cs->smultis;
@@ -1334,7 +1337,7 @@ mcadd(struct parse *p, cset *cs, char *cp)
* is deferred.
*/
static void
-mcinvert(struct parse *p, cset *cs)
+mcinvert(struct parse * p, cset *cs)
{
assert(cs->multis == NULL); /* xxx */
}
@@ -1346,7 +1349,7 @@ mcinvert(struct parse *p, cset *cs)
* is deferred.
*/
static void
-mccase(struct parse *p, cset *cs)
+mccase(struct parse * p, cset *cs)
{
assert(cs->multis == NULL); /* xxx */
}
@@ -1355,7 +1358,7 @@ mccase(struct parse *p, cset *cs)
* isinsets - is this character in any sets?
*/
static int /* predicate */
-isinsets(struct re_guts *g, int c)
+isinsets(struct re_guts * g, int c)
{
uch *col;
int i;
@@ -1372,7 +1375,7 @@ isinsets(struct re_guts *g, int c)
* samesets - are these two characters in exactly the same sets?
*/
static int /* predicate */
-samesets(struct re_guts *g, int c1, int c2)
+samesets(struct re_guts * g, int c1, int c2)
{
uch *col;
int i;
@@ -1390,7 +1393,7 @@ samesets(struct re_guts *g, int c1, int c2)
* categorize - sort out character categories
*/
static void
-categorize(struct parse *p, struct re_guts *g)
+categorize(struct parse * p, struct re_guts * g)
{
cat_t *cats = g->categories;
int c;
@@ -1416,7 +1419,7 @@ categorize(struct parse *p, struct re_guts *g)
* dupl - emit a duplicate of a bunch of sops
*/
static sopno /* start of duplicate */
-dupl(struct parse *p,
+dupl(struct parse * p,
sopno start, /* from here */
sopno finish) /* to this less one */
{
@@ -1442,7 +1445,7 @@ dupl(struct parse *p,
* some changes to the data structures. Maybe later.
*/
static void
-doemit(struct parse *p, sop op, size_t opnd)
+doemit(struct parse * p, sop op, size_t opnd)
{
/* avoid making error situations worse */
if (p->error != 0)
@@ -1464,7 +1467,7 @@ doemit(struct parse *p, sop op, size_t opnd)
* doinsert - insert a sop into the strip
*/
static void
-doinsert(struct parse *p, sop op, size_t opnd, sopno pos)
+doinsert(struct parse * p, sop op, size_t opnd, sopno pos)
{
sopno sn;
sop s;
@@ -1498,7 +1501,7 @@ doinsert(struct parse *p, sop op, size_t opnd, sopno pos)
* dofwd - complete a forward reference
*/
static void
-dofwd(struct parse *p, sopno pos, sop value)
+dofwd(struct parse * p, sopno pos, sop value)
{
/* avoid making error situations worse */
if (p->error != 0)
@@ -1512,7 +1515,7 @@ dofwd(struct parse *p, sopno pos, sop value)
* enlarge - enlarge the strip
*/
static void
-enlarge(struct parse *p, sopno size)
+enlarge(struct parse * p, sopno size)
{
sop *sp;
@@ -1533,7 +1536,7 @@ enlarge(struct parse *p, sopno size)
* stripsnug - compact the strip
*/
static void
-stripsnug(struct parse *p, struct re_guts *g)
+stripsnug(struct parse * p, struct re_guts * g)
{
g->nstates = p->slen;
g->strip = (sop *) realloc((char *) p->strip, p->slen * sizeof(sop));
@@ -1554,7 +1557,7 @@ stripsnug(struct parse *p, struct re_guts *g)
* Note that must and mlen got initialized during setup.
*/
static void
-findmust(struct parse *p, struct re_guts *g)
+findmust(struct parse * p, struct re_guts * g)
{
sop *scan;
sop *start = 0;
@@ -1643,7 +1646,7 @@ findmust(struct parse *p, struct re_guts *g)
* pluscount - count + nesting
*/
static sopno /* nesting depth */
-pluscount(struct parse *p, struct re_guts *g)
+pluscount(struct parse * p, struct re_guts * g)
{
sop *scan;
sop s;
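
Several of the regcomp.c helpers reindented above rely on a latched-error convention: seterr() keeps only the earliest error condition, and doemit() refuses to do further work once an error is pending. A self-contained sketch of that pattern, using invented toy_* names rather than the real parse machinery:

	#include <stdio.h>

	struct toy_parse
	{
		int			error;			/* 0 = OK, otherwise first error code seen */
	};

	static int
	toy_seterr(struct toy_parse *p, int e)
	{
		if (p->error == 0)			/* keep earliest error condition */
			p->error = e;
		return 0;
	}

	static void
	toy_emit(struct toy_parse *p, int op)
	{
		if (p->error != 0)			/* avoid making error situations worse */
			return;
		printf("emit op %d\n", op);
	}

	int
	main(void)
	{
		struct toy_parse p = {0};

		toy_emit(&p, 1);
		toy_seterr(&p, 42);			/* first error wins ... */
		toy_seterr(&p, 7);			/* ... later ones are ignored */
		toy_emit(&p, 2);			/* nothing emitted after an error */
		printf("error = %d\n", p.error);	/* prints 42 */
		return 0;
	}
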
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 2d87d249bfd..cb52ad9afe4 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -73,7 +73,7 @@ static int nope = 0; /* for use in asserts; shuts lint up */
#define onestate long
#define INIT(o, n) ((o) = (1L << (n)))
#define INC(o) ((o) <<= 1)
-#define ISSTATEIN(v, o) ((v) & (o))
+#define ISSTATEIN(v, o) ((v) & (o))
/* some abbreviations; note that some of these know variable names! */
/* do "if I'm here, I can also be there" etc without branches */
#define FWD(dst, src, n) ((dst) |= ((src) & (here)) << (n))
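
The ISSTATEIN line touched above belongs to the regexec.c variant in which a whole NFA state set fits in a single long: INIT() points a one-bit cursor at a state number, INC() advances it, and ISSTATEIN() tests membership. A tiny illustration using those same macro definitions; the state numbers and driver below are invented for the example.

	#include <stdio.h>

	#define onestate	long
	#define INIT(o, n)	((o) = (1L << (n)))
	#define INC(o)		((o) <<= 1)
	#define ISSTATEIN(v, o) ((v) & (o))

	int
	main(void)
	{
		long		states = 0;		/* the whole state set, one bit per state */
		onestate	cursor;

		INIT(cursor, 2);			/* point at state number 2 */
		states |= cursor;			/* add state 2 to the set */
		INC(cursor);				/* advance the cursor to state 3 */

		printf("state 2 in set: %d\n", ISSTATEIN(states, 1L << 2) != 0);	/* 1 */
		printf("state 3 in set: %d\n", ISSTATEIN(states, cursor) != 0);		/* 0 */
		return 0;
	}
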
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index d470cb9fe77..316b18316c6 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteDefine.c,v 1.58 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteDefine.c,v 1.59 2001/03/22 03:59:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -175,9 +175,10 @@ DefineQueryRewrite(RuleStmt *stmt)
/*
* If we are installing an ON SELECT rule, we had better grab
* AccessExclusiveLock to ensure no SELECTs are currently running on
- * the event relation. For other types of rules, it might be sufficient
- * to grab ShareLock to lock out insert/update/delete actions. But
- * for now, let's just grab AccessExclusiveLock all the time.
+ * the event relation. For other types of rules, it might be
+ * sufficient to grab ShareLock to lock out insert/update/delete
+ * actions. But for now, let's just grab AccessExclusiveLock all the
+ * time.
*/
event_relation = heap_openr(event_obj->relname, AccessExclusiveLock);
ev_relid = RelationGetRelid(event_relation);
@@ -226,7 +227,7 @@ DefineQueryRewrite(RuleStmt *stmt)
{
List *tllist;
int i;
- char *expected_name;
+ char *expected_name;
/*
* So there cannot be INSTEAD NOTHING, ...
@@ -285,9 +286,10 @@ DefineQueryRewrite(RuleStmt *stmt)
/*
* Allow typmods to be different only if one of them is -1,
- * ie, "unspecified". This is necessary for cases like "numeric",
- * where the table will have a filled-in default length but the
- * select rule's expression will probably have typmod = -1.
+ * ie, "unspecified". This is necessary for cases like
+ * "numeric", where the table will have a filled-in default
+ * length but the select rule's expression will probably have
+ * typmod = -1.
*/
if (attr->atttypmod != resdom->restypmod &&
attr->atttypmod != -1 && resdom->restypmod != -1)
@@ -327,13 +329,13 @@ DefineQueryRewrite(RuleStmt *stmt)
/*
* Are we converting a relation to a view?
*
- * If so, check that the relation is empty because the storage
- * for the relation is going to be deleted.
+ * If so, check that the relation is empty because the storage for
+ * the relation is going to be deleted.
*/
if (event_relation->rd_rel->relkind != RELKIND_VIEW)
{
- HeapScanDesc scanDesc;
- HeapTuple tuple;
+ HeapScanDesc scanDesc;
+ HeapTuple tuple;
scanDesc = heap_beginscan(event_relation, 0, SnapshotNow, 0, NULL);
tuple = heap_getnext(scanDesc, 0);
@@ -341,7 +343,10 @@ DefineQueryRewrite(RuleStmt *stmt)
elog(ERROR, "Relation \"%s\" is not empty. Cannot convert it to view",
event_obj->relname);
- /* don't need heap_freetuple because we never got a valid tuple */
+ /*
+ * don't need heap_freetuple because we never got a valid
+ * tuple
+ */
heap_endscan(scanDesc);
RelisBecomingView = true;
@@ -368,10 +373,10 @@ DefineQueryRewrite(RuleStmt *stmt)
is_instead, event_attype);
/*
- * We want the rule's table references to be checked as though by
- * the rule owner, not the user referencing the rule. Therefore,
- * scan through the rule's rtables and set the checkAsUser field
- * on all rtable entries (except *OLD* and *NEW*).
+ * We want the rule's table references to be checked as though by the
+ * rule owner, not the user referencing the rule. Therefore, scan
+ * through the rule's rtables and set the checkAsUser field on all
+ * rtable entries (except *OLD* and *NEW*).
*/
foreach(l, action)
{
@@ -394,21 +399,21 @@ DefineQueryRewrite(RuleStmt *stmt)
actionP);
/*
- * Set pg_class 'relhasrules' field TRUE for event relation.
- * If appropriate, also modify the 'relkind' field to show that
- * the relation is now a view.
+ * Set pg_class 'relhasrules' field TRUE for event relation. If
+ * appropriate, also modify the 'relkind' field to show that the
+ * relation is now a view.
*
* Important side effect: an SI notice is broadcast to force all
- * backends (including me!) to update relcache entries with the new
- * rule.
+ * backends (including me!) to update relcache entries with the
+ * new rule.
*/
SetRelationRuleStatus(ev_relid, true, RelisBecomingView);
}
/*
- * IF the relation is becoming a view, delete the storage
- * files associated with it. NB: we had better have AccessExclusiveLock
- * to do this ...
+ * IF the relation is becoming a view, delete the storage files
+ * associated with it. NB: we had better have AccessExclusiveLock to
+ * do this ...
*/
if (RelisBecomingView)
smgrunlink(DEFAULT_SMGR, event_relation);
@@ -439,21 +444,20 @@ setRuleCheckAsUser(Query *qry, Oid userid)
if (rte->subquery)
{
+
/*
* Recurse into subquery in FROM
*/
setRuleCheckAsUser(rte->subquery, userid);
}
else
- {
rte->checkAsUser = userid;
- }
}
/* If there are sublinks, search for them and process their RTEs */
if (qry->hasSubLinks)
query_tree_walker(qry, setRuleCheckAsUser_walker, (void *) &userid,
- false /* already did the ones in rtable */);
+ false /* already did the ones in rtable */ );
}
/*
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 505d5f4350b..6ece2ae938f 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteHandler.c,v 1.89 2001/01/27 04:40:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteHandler.c,v 1.90 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@ static RewriteInfo *gatherRewriteMeta(Query *parsetree,
static List *adjustJoinTreeList(Query *parsetree, bool removert, int rt_index);
static void markQueryForUpdate(Query *qry, bool skipOldNew);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
- int varno, Query *parsetree);
+ int varno, Query *parsetree);
static Query *fireRIRrules(Query *parsetree);
@@ -84,9 +84,9 @@ gatherRewriteMeta(Query *parsetree,
* Adjust rule action and qual to offset its varnos, so that we can
* merge its rtable into the main parsetree's rtable.
*
- * If the rule action is an INSERT...SELECT, the OLD/NEW rtable
- * entries will be in the SELECT part, and we have to modify that
- * rather than the top-level INSERT (kluge!).
+ * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries
+ * will be in the SELECT part, and we have to modify that rather than
+ * the top-level INSERT (kluge!).
*/
sub_action = getInsertSelectQuery(info->rule_action, &sub_action_ptr);
@@ -101,14 +101,14 @@ gatherRewriteMeta(Query *parsetree,
/*
* We want the main parsetree's rtable to end up as the concatenation
* of its original contents plus those of all the relevant rule
- * actions. Also store same into all the rule_action rtables.
- * Some of the entries may be unused after we finish rewriting, but
- * if we tried to clean those out we'd have a much harder job to
- * adjust RT indexes in the query's Vars. It's OK to have unused
- * RT entries, since planner will ignore them.
+ * actions. Also store same into all the rule_action rtables. Some of
+ * the entries may be unused after we finish rewriting, but if we
+ * tried to clean those out we'd have a much harder job to adjust RT
+ * indexes in the query's Vars. It's OK to have unused RT entries,
+ * since planner will ignore them.
*
- * NOTE KLUGY HACK: we assume the parsetree rtable had at least one
- * entry to begin with (OK enough, else where'd the rule come from?).
+ * NOTE KLUGY HACK: we assume the parsetree rtable had at least one entry
+ * to begin with (OK enough, else where'd the rule come from?).
* Because of this, if multiple rules nconc() their rtable additions
* onto parsetree->rtable, they'll all see the same rtable because
* they all have the same list head pointer.
@@ -119,24 +119,25 @@ gatherRewriteMeta(Query *parsetree,
/*
* Each rule action's jointree should be the main parsetree's jointree
- * plus that rule's jointree, but usually *without* the original rtindex
- * that we're replacing (if present, which it won't be for INSERT).
- * Note that if the rule action refers to OLD, its jointree will add
- * a reference to rt_index. If the rule action doesn't refer to OLD,
- * but either the rule_qual or the user query quals do, then we need to
- * keep the original rtindex in the jointree to provide data for the
- * quals. We don't want the original rtindex to be joined twice,
- * however, so avoid keeping it if the rule action mentions it.
+ * plus that rule's jointree, but usually *without* the original
+ * rtindex that we're replacing (if present, which it won't be for
+ * INSERT). Note that if the rule action refers to OLD, its jointree
+ * will add a reference to rt_index. If the rule action doesn't refer
+ * to OLD, but either the rule_qual or the user query quals do, then
+ * we need to keep the original rtindex in the jointree to provide
+ * data for the quals. We don't want the original rtindex to be
+ * joined twice, however, so avoid keeping it if the rule action
+ * mentions it.
*/
if (sub_action->jointree != NULL)
{
- bool keeporig;
- List *newjointree;
+ bool keeporig;
+ List *newjointree;
- keeporig = (! rangeTableEntry_used((Node *) sub_action->jointree,
- rt_index, 0)) &&
+ keeporig = (!rangeTableEntry_used((Node *) sub_action->jointree,
+ rt_index, 0)) &&
(rangeTableEntry_used(info->rule_qual, rt_index, 0) ||
- rangeTableEntry_used(parsetree->jointree->quals, rt_index, 0));
+ rangeTableEntry_used(parsetree->jointree->quals, rt_index, 0));
newjointree = adjustJoinTreeList(parsetree, !keeporig, rt_index);
sub_action->jointree->fromlist =
nconc(newjointree, sub_action->jointree->fromlist);
@@ -154,17 +155,17 @@ gatherRewriteMeta(Query *parsetree,
parsetree->hasSubLinks = TRUE;
/*
- * Event Qualification forces copying of parsetree and
- * splitting into two queries one w/rule_qual, one w/NOT
- * rule_qual. Also add user query qual onto rule action
+ * Event Qualification forces copying of parsetree and splitting into
+ * two queries one w/rule_qual, one w/NOT rule_qual. Also add user
+ * query qual onto rule action
*/
AddQual(sub_action, info->rule_qual);
AddQual(sub_action, parsetree->jointree->quals);
/*
- * Rewrite new.attribute w/ right hand side of target-list
- * entry for appropriate field name in insert/update.
+ * Rewrite new.attribute w/ right hand side of target-list entry for
+ * appropriate field name in insert/update.
*
* KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just
* apply it to sub_action; we have to remember to update the sublink
@@ -207,7 +208,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
{
RangeTblRef *rtr = lfirst(jjt);
- if (IsA(rtr, RangeTblRef) && rtr->rtindex == rt_index)
+ if (IsA(rtr, RangeTblRef) &&rtr->rtindex == rt_index)
{
newjointree = lremove(rtr, newjointree);
break;
@@ -278,7 +279,7 @@ ApplyRetrieveRule(Query *parsetree,
elog(ERROR, "ApplyRetrieveRule: expected just one rule action");
if (rule->qual != NULL)
elog(ERROR, "ApplyRetrieveRule: can't handle qualified ON SELECT rule");
- if (! relation_level)
+ if (!relation_level)
elog(ERROR, "ApplyRetrieveRule: can't handle per-attribute ON SELECT rule");
/*
@@ -290,8 +291,8 @@ ApplyRetrieveRule(Query *parsetree,
rule_action = fireRIRrules(rule_action);
/*
- * VIEWs are really easy --- just plug the view query in as a subselect,
- * replacing the relation's original RTE.
+ * VIEWs are really easy --- just plug the view query in as a
+ * subselect, replacing the relation's original RTE.
*/
rte = rt_fetch(rt_index, parsetree->rtable);
@@ -317,6 +318,7 @@ ApplyRetrieveRule(Query *parsetree,
*/
if (intMember(rt_index, parsetree->rowMarks))
{
+
/*
* Remove the view from the list of rels that will actually be
* marked FOR UPDATE by the executor. It will still be access-
@@ -399,6 +401,7 @@ fireRIRonSubLink(Node *node, void *context)
sub->subselect = (Node *) fireRIRrules((Query *) (sub->subselect));
/* Fall through to process lefthand args of SubLink */
}
+
/*
* Do NOT recurse into Query nodes, because fireRIRrules already
* processed subselects of subselects for us.
@@ -462,17 +465,17 @@ fireRIRrules(Query *parsetree)
continue;
/*
- * This may well be the first access to the relation during
- * the current statement (it will be, if this Query was extracted
- * from a rule or somehow got here other than via the parser).
- * Therefore, grab the appropriate lock type for the relation,
- * and do not release it until end of transaction. This protects
- * the rewriter and planner against schema changes mid-query.
+ * This may well be the first access to the relation during the
+ * current statement (it will be, if this Query was extracted from
+ * a rule or somehow got here other than via the parser).
+ * Therefore, grab the appropriate lock type for the relation, and
+ * do not release it until end of transaction. This protects the
+ * rewriter and planner against schema changes mid-query.
*
- * If the relation is the query's result relation, then RewriteQuery()
- * already got the right lock on it, so we need no additional lock.
- * Otherwise, check to see if the relation is accessed FOR UPDATE
- * or not.
+ * If the relation is the query's result relation, then
+ * RewriteQuery() already got the right lock on it, so we need no
+ * additional lock. Otherwise, check to see if the relation is
+ * accessed FOR UPDATE or not.
*/
if (rt_index == parsetree->resultRelation)
lockmode = NoLock;
@@ -534,14 +537,14 @@ fireRIRrules(Query *parsetree)
*/
if (parsetree->hasSubLinks)
query_tree_walker(parsetree, fireRIRonSubLink, NULL,
- false /* already handled the ones in rtable */);
+ false /* already handled the ones in rtable */ );
/*
- * If the query was marked having aggregates, check if this is
- * still true after rewriting. Ditto for sublinks. Note there
- * should be no aggs in the qual at this point. (Does this code
- * still do anything useful? The view-becomes-subselect-in-FROM
- * approach doesn't look like it could remove aggs or sublinks...)
+ * If the query was marked having aggregates, check if this is still
+ * true after rewriting. Ditto for sublinks. Note there should be no
+ * aggs in the qual at this point. (Does this code still do anything
+ * useful? The view-becomes-subselect-in-FROM approach doesn't look
+ * like it could remove aggs or sublinks...)
*/
if (parsetree->hasAggs)
{
@@ -551,9 +554,7 @@ fireRIRrules(Query *parsetree)
elog(ERROR, "fireRIRrules: failed to remove aggs from qual");
}
if (parsetree->hasSubLinks)
- {
parsetree->hasSubLinks = checkExprHasSubLink((Node *) parsetree);
- }
return parsetree;
}
@@ -594,7 +595,7 @@ orderRules(List *locks)
* This is used to generate suitable "else clauses" for conditional INSTEAD
* rules.
*
- * The rule_qual may contain references to OLD or NEW. OLD references are
+ * The rule_qual may contain references to OLD or NEW. OLD references are
* replaced by references to the specified rt_index (the relation that the
* rule applies to). NEW references are only possible for INSERT and UPDATE
* queries on the relation itself, and so they should be replaced by copies
@@ -769,12 +770,12 @@ RewriteQuery(Query *parsetree, bool *instead_flag, List **qual_products)
rt_entry = rt_fetch(result_relation, parsetree->rtable);
/*
- * This may well be the first access to the result relation during
- * the current statement (it will be, if this Query was extracted
- * from a rule or somehow got here other than via the parser).
- * Therefore, grab the appropriate lock type for a result relation,
- * and do not release it until end of transaction. This protects the
- * rewriter and planner against schema changes mid-query.
+ * This may well be the first access to the result relation during the
+ * current statement (it will be, if this Query was extracted from a
+ * rule or somehow got here other than via the parser). Therefore,
+ * grab the appropriate lock type for a result relation, and do not
+ * release it until end of transaction. This protects the rewriter
+ * and planner against schema changes mid-query.
*/
rt_entry_relation = heap_openr(rt_entry->relname, RowExclusiveLock);
@@ -793,7 +794,7 @@ RewriteQuery(Query *parsetree, bool *instead_flag, List **qual_products)
qual_products);
}
- heap_close(rt_entry_relation, NoLock); /* keep lock! */
+ heap_close(rt_entry_relation, NoLock); /* keep lock! */
return product_queries;
}
@@ -912,7 +913,7 @@ QueryRewrite(Query *parsetree)
*/
foreach(l, querylist)
{
- Query *query = (Query *) lfirst(l);
+ Query *query = (Query *) lfirst(l);
query = fireRIRrules(query);
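
The *_walker callbacks in the hunks above (setRuleCheckAsUser_walker, fireRIRonSubLink, checkExprHasSubLink_walker, and friends) follow the usual tree-walker convention: the callback returns true to abort the walk early and otherwise hands its node back to the walker driver so the children get visited. A rough, self-contained sketch of that convention with invented node types, not the real Node/Query machinery:

	#include <stdbool.h>
	#include <stdio.h>

	typedef enum { T_Const, T_SubLink, T_OpExpr } ToyTag;

	typedef struct ToyNode
	{
		ToyTag		tag;
		struct ToyNode *left;
		struct ToyNode *right;
	} ToyNode;

	/* Driver: apply 'walker' to each child; a true result aborts the walk. */
	static bool
	toy_tree_walker(ToyNode *node, bool (*walker) (ToyNode *, void *), void *ctx)
	{
		if (node == NULL)
			return false;
		return walker(node->left, ctx) || walker(node->right, ctx);
	}

	/* Walker: stop as soon as a SubLink is found, recurse everywhere else. */
	static bool
	has_sublink_walker(ToyNode *node, void *ctx)
	{
		if (node == NULL)
			return false;
		if (node->tag == T_SubLink)
			return true;			/* abort: found what we were looking for */
		return toy_tree_walker(node, has_sublink_walker, ctx);
	}

	int
	main(void)
	{
		ToyNode		leaf = {T_SubLink, NULL, NULL};
		ToyNode		root = {T_OpExpr, &leaf, NULL};

		printf("has sublink: %d\n", has_sublink_walker(&root, NULL));	/* 1 */
		return 0;
	}
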
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index d83dafa3c63..663b67708ee 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteManip.c,v 1.55 2001/01/27 01:44:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteManip.c,v 1.56 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,9 +43,10 @@ static bool checkExprHasSubLink_walker(Node *node, void *context);
bool
checkExprHasAggs(Node *node)
{
+
/*
- * If a Query is passed, examine it --- but we will not recurse
- * into sub-Queries.
+ * If a Query is passed, examine it --- but we will not recurse into
+ * sub-Queries.
*/
if (node && IsA(node, Query))
return query_tree_walker((Query *) node, checkExprHasAggs_walker,
@@ -73,9 +74,10 @@ checkExprHasAggs_walker(Node *node, void *context)
bool
checkExprHasSubLink(Node *node)
{
+
/*
- * If a Query is passed, examine it --- but we will not recurse
- * into sub-Queries.
+ * If a Query is passed, examine it --- but we will not recurse into
+ * sub-Queries.
*/
if (node && IsA(node, Query))
return query_tree_walker((Query *) node, checkExprHasSubLink_walker,
@@ -101,7 +103,7 @@ checkExprHasSubLink_walker(Node *node, void *context)
*
* Find all Var nodes in the given tree with varlevelsup == sublevels_up,
* and increment their varno fields (rangetable indexes) by 'offset'.
- * The varnoold fields are adjusted similarly. Also, RangeTblRef nodes
+ * The varnoold fields are adjusted similarly. Also, RangeTblRef nodes
* in join trees and setOp trees are adjusted.
*
* NOTE: although this has the form of a walker, we cheat and modify the
@@ -133,7 +135,7 @@ OffsetVarNodes_walker(Node *node, OffsetVarNodes_context *context)
}
if (IsA(node, RangeTblRef))
{
- RangeTblRef *rtr = (RangeTblRef *) node;
+ RangeTblRef *rtr = (RangeTblRef *) node;
if (context->sublevels_up == 0)
rtr->rtindex += context->offset;
@@ -170,24 +172,22 @@ OffsetVarNodes(Node *node, int offset, int sublevels_up)
*/
if (node && IsA(node, Query))
{
- Query *qry = (Query *) node;
- List *l;
+ Query *qry = (Query *) node;
+ List *l;
/*
- * If we are starting at a Query, and sublevels_up is zero, then we
- * must also fix rangetable indexes in the Query itself --- namely
- * resultRelation and rowMarks entries. sublevels_up cannot be zero
- * when recursing into a subquery, so there's no need to have the
- * same logic inside OffsetVarNodes_walker.
+ * If we are starting at a Query, and sublevels_up is zero, then
+ * we must also fix rangetable indexes in the Query itself ---
+ * namely resultRelation and rowMarks entries. sublevels_up
+ * cannot be zero when recursing into a subquery, so there's no
+ * need to have the same logic inside OffsetVarNodes_walker.
*/
if (sublevels_up == 0)
{
if (qry->resultRelation)
qry->resultRelation += offset;
foreach(l, qry->rowMarks)
- {
lfirsti(l) += offset;
- }
}
query_tree_walker(qry, OffsetVarNodes_walker,
(void *) &context, true);
@@ -235,7 +235,7 @@ ChangeVarNodes_walker(Node *node, ChangeVarNodes_context *context)
}
if (IsA(node, RangeTblRef))
{
- RangeTblRef *rtr = (RangeTblRef *) node;
+ RangeTblRef *rtr = (RangeTblRef *) node;
if (context->sublevels_up == 0 &&
rtr->rtindex == context->rt_index)
@@ -274,15 +274,15 @@ ChangeVarNodes(Node *node, int rt_index, int new_index, int sublevels_up)
*/
if (node && IsA(node, Query))
{
- Query *qry = (Query *) node;
- List *l;
+ Query *qry = (Query *) node;
+ List *l;
/*
- * If we are starting at a Query, and sublevels_up is zero, then we
- * must also fix rangetable indexes in the Query itself --- namely
- * resultRelation and rowMarks entries. sublevels_up cannot be zero
- * when recursing into a subquery, so there's no need to have the
- * same logic inside ChangeVarNodes_walker.
+ * If we are starting at a Query, and sublevels_up is zero, then
+ * we must also fix rangetable indexes in the Query itself ---
+ * namely resultRelation and rowMarks entries. sublevels_up
+ * cannot be zero when recursing into a subquery, so there's no
+ * need to have the same logic inside ChangeVarNodes_walker.
*/
if (sublevels_up == 0)
{
@@ -541,11 +541,12 @@ getInsertSelectQuery(Query *parsetree, Query ***subquery_ptr)
return parsetree;
if (parsetree->commandType != CMD_INSERT)
return parsetree;
+
/*
- * Currently, this is ONLY applied to rule-action queries, and so
- * we expect to find the *OLD* and *NEW* placeholder entries in the
- * given query. If they're not there, it must be an INSERT/SELECT
- * in which they've been pushed down to the SELECT.
+ * Currently, this is ONLY applied to rule-action queries, and so we
+ * expect to find the *OLD* and *NEW* placeholder entries in the given
+ * query. If they're not there, it must be an INSERT/SELECT in which
+ * they've been pushed down to the SELECT.
*/
if (length(parsetree->rtable) >= 2 &&
strcmp(rt_fetch(PRS2_OLD_VARNO, parsetree->rtable)->eref->relname,
@@ -560,17 +561,17 @@ getInsertSelectQuery(Query *parsetree, Query ***subquery_ptr)
Assert(IsA(rtr, RangeTblRef));
selectrte = rt_fetch(rtr->rtindex, parsetree->rtable);
selectquery = selectrte->subquery;
- if (! (selectquery && IsA(selectquery, Query) &&
- selectquery->commandType == CMD_SELECT))
+ if (!(selectquery && IsA(selectquery, Query) &&
+ selectquery->commandType == CMD_SELECT))
elog(ERROR, "getInsertSelectQuery: expected to find SELECT subquery");
if (length(selectquery->rtable) >= 2 &&
- strcmp(rt_fetch(PRS2_OLD_VARNO, selectquery->rtable)->eref->relname,
- "*OLD*") == 0 &&
- strcmp(rt_fetch(PRS2_NEW_VARNO, selectquery->rtable)->eref->relname,
- "*NEW*") == 0)
+ strcmp(rt_fetch(PRS2_OLD_VARNO, selectquery->rtable)->eref->relname,
+ "*OLD*") == 0 &&
+ strcmp(rt_fetch(PRS2_NEW_VARNO, selectquery->rtable)->eref->relname,
+ "*NEW*") == 0)
{
if (subquery_ptr)
- *subquery_ptr = & (selectrte->subquery);
+ *subquery_ptr = &(selectrte->subquery);
return selectquery;
}
elog(ERROR, "getInsertSelectQuery: can't find rule placeholders");
@@ -591,11 +592,12 @@ AddQual(Query *parsetree, Node *qual)
if (parsetree->commandType == CMD_UTILITY)
{
+
/*
* Noplace to put the qual on a utility statement.
*
- * For now, we expect utility stmt to be a NOTIFY, so give a
- * specific error message for that case.
+ * For now, we expect utility stmt to be a NOTIFY, so give a specific
+ * error message for that case.
*/
if (parsetree->utilityStmt && IsA(parsetree->utilityStmt, NotifyStmt))
elog(ERROR, "Conditional NOTIFY is not implemented");
@@ -632,11 +634,12 @@ AddHavingQual(Query *parsetree, Node *havingQual)
if (parsetree->commandType == CMD_UTILITY)
{
+
/*
* Noplace to put the qual on a utility statement.
*
- * For now, we expect utility stmt to be a NOTIFY, so give a
- * specific error message for that case.
+ * For now, we expect utility stmt to be a NOTIFY, so give a specific
+ * error message for that case.
*/
if (parsetree->utilityStmt && IsA(parsetree->utilityStmt, NotifyStmt))
elog(ERROR, "Conditional NOTIFY is not implemented");
@@ -839,8 +842,8 @@ ResolveNew(Node *node, int target_varno, int sublevels_up,
/*
* Must be prepared to start with a Query or a bare expression tree;
- * if it's a Query, go straight to query_tree_mutator to make sure that
- * sublevels_up doesn't get incremented prematurely.
+ * if it's a Query, go straight to query_tree_mutator to make sure
+ * that sublevels_up doesn't get incremented prematurely.
*/
if (node && IsA(node, Query))
{
@@ -876,11 +879,11 @@ typedef struct
int *modified;
int *badsql;
int sublevels_up;
-} HandleRIRAttributeRule_context;
+} HandleRIRAttributeRule_context;
static Node *
HandleRIRAttributeRule_mutator(Node *node,
- HandleRIRAttributeRule_context *context)
+ HandleRIRAttributeRule_context * context)
{
if (node == NULL)
return NULL;
@@ -988,4 +991,4 @@ HandleRIRAttributeRule(Query *parsetree,
(void *) &context, true);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
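
OffsetVarNodes(), whose comment above says it finds Var nodes at the right varlevelsup and increments their varno fields (rangetable indexes) by 'offset', boils down to shifting stored indexes by the length of the range table they are appended after, so the merged rtable stays consistent. A toy version with invented types and a made-up rtable length:

	#include <stdio.h>

	#define OLD_RTABLE_LEN 3		/* pretend the main query already had 3 RTEs */

	typedef struct
	{
		int			varno;			/* 1-based index into the range table */
	} ToyVar;

	static void
	offset_varnos(ToyVar *vars, int nvars, int offset)
	{
		for (int i = 0; i < nvars; i++)
			vars[i].varno += offset;
	}

	int
	main(void)
	{
		ToyVar		rule_vars[] = {{1}, {2}};	/* indexes into the rule's own rtable */

		/*
		 * After appending the rule's rtable to the main one, its entries start
		 * at position OLD_RTABLE_LEN + 1, so the Vars must be shifted to match.
		 */
		offset_varnos(rule_vars, 2, OLD_RTABLE_LEN);
		printf("%d %d\n", rule_vars[0].varno, rule_vars[1].varno);	/* 4 5 */
		return 0;
	}
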
diff --git a/src/backend/rewrite/rewriteRemove.c b/src/backend/rewrite/rewriteRemove.c
index 7063b505b9f..15e3434721a 100644
--- a/src/backend/rewrite/rewriteRemove.c
+++ b/src/backend/rewrite/rewriteRemove.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteRemove.c,v 1.43 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteRemove.c,v 1.44 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@ RewriteGetRuleEventRel(char *rulename)
0, 0, 0);
if (!HeapTupleIsValid(htup))
elog(ERROR, "Rule or view \"%s\" not found",
- ((strncmp(rulename, "_RET", 4) == 0) ? (rulename + 4) : rulename));
+ ((strncmp(rulename, "_RET", 4) == 0) ? (rulename + 4) : rulename));
eventrel = ((Form_pg_rewrite) GETSTRUCT(htup))->ev_class;
ReleaseSysCache(htup);
@@ -102,15 +102,15 @@ RemoveRewriteRule(char *ruleName)
/*
* We had better grab AccessExclusiveLock so that we know no other
- * rule additions/deletions are going on for this relation. Else
- * we cannot set relhasrules correctly. Besides, we don't want to
- * be changing the ruleset while queries are executing on the rel.
+ * rule additions/deletions are going on for this relation. Else we
+ * cannot set relhasrules correctly. Besides, we don't want to be
+ * changing the ruleset while queries are executing on the rel.
*/
event_relation = heap_open(eventRelationOid, AccessExclusiveLock);
/* do not allow the removal of a view's SELECT rule */
if (event_relation->rd_rel->relkind == RELKIND_VIEW &&
- ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_type == '1' )
+ ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_type == '1')
elog(ERROR, "Cannot remove a view's SELECT rule");
hasMoreRules = event_relation->rd_rules != NULL &&
@@ -133,10 +133,9 @@ RemoveRewriteRule(char *ruleName)
/*
* Set pg_class 'relhasrules' field correctly for event relation.
*
- * Important side effect: an SI notice is broadcast to force all
- * backends (including me!) to update relcache entries with the
- * new rule set. Therefore, must do this even if relhasrules is
- * still true!
+ * Important side effect: an SI notice is broadcast to force all backends
+ * (including me!) to update relcache entries with the new rule set.
+ * Therefore, must do this even if relhasrules is still true!
*/
SetRelationRuleStatus(eventRelationOid, hasMoreRules, false);
diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c
index 0a171bb897c..7578acbcedd 100644
--- a/src/backend/rewrite/rewriteSupport.c
+++ b/src/backend/rewrite/rewriteSupport.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteSupport.c,v 1.47 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteSupport.c,v 1.48 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,8 @@ SetRelationRuleStatus(Oid relationId, bool relHasRules,
Relation idescs[Num_pg_class_indices];
/*
- * Find the tuple to update in pg_class, using syscache for the lookup.
+ * Find the tuple to update in pg_class, using syscache for the
+ * lookup.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
diff --git a/src/backend/storage/buffer/buf_init.c b/src/backend/storage/buffer/buf_init.c
index ff3d43fe8c0..03d6504db86 100644
--- a/src/backend/storage/buffer/buf_init.c
+++ b/src/backend/storage/buffer/buf_init.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.41 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_init.c,v 1.42 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,8 +63,8 @@ long *PrivateRefCount; /* also used in freelist.c */
bits8 *BufferLocks; /* flag bits showing locks I have set */
BufferTag *BufferTagLastDirtied; /* tag buffer had when last
* dirtied by me */
-BufferBlindId *BufferBlindLastDirtied;
-bool *BufferDirtiedByMe; /* T if buf has been dirtied in cur xact */
+BufferBlindId *BufferBlindLastDirtied;
+bool *BufferDirtiedByMe; /* T if buf has been dirtied in cur xact */
/*
@@ -149,7 +149,8 @@ InitBufferPool(void)
/*
* It's probably not really necessary to grab the lock --- if there's
- * anyone else attached to the shmem at this point, we've got problems.
+ * anyone else attached to the shmem at this point, we've got
+ * problems.
*/
SpinAcquire(BufMgrLock);
@@ -240,13 +241,11 @@ InitBufferPoolAccess(void)
BufferDirtiedByMe = (bool *) calloc(NBuffers, sizeof(bool));
/*
- * Convert shmem offsets into addresses as seen by this process.
- * This is just to speed up the BufferGetBlock() macro.
+ * Convert shmem offsets into addresses as seen by this process. This
+ * is just to speed up the BufferGetBlock() macro.
*/
for (i = 0; i < NBuffers; i++)
- {
BufferBlockPointers[i] = (Block) MAKE_PTR(BufferDescriptors[i].data);
- }
/*
* Now that buffer access is initialized, set up a callback to shut it
diff --git a/src/backend/storage/buffer/buf_table.c b/src/backend/storage/buffer/buf_table.c
index f1512e0563e..9e8f1647786 100644
--- a/src/backend/storage/buffer/buf_table.c
+++ b/src/backend/storage/buffer/buf_table.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.20 2001/01/24 19:43:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.21 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -123,8 +123,8 @@ BufTableDelete(BufferDesc *buf)
/*
* Clear the buffer's tag. This doesn't matter for the hash table,
* since the buffer is already removed from it, but it ensures that
- * sequential searches through the buffer table won't think the
- * buffer is still valid for its old page.
+ * sequential searches through the buffer table won't think the buffer
+ * is still valid for its old page.
*/
buf->tag.rnode.relNode = InvalidOid;
buf->tag.rnode.tblNode = InvalidOid;
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index b247248a80c..45dcdaed6a9 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.108 2001/03/21 10:13:29 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.109 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -401,7 +401,7 @@ BufferAlloc(Relation reln,
bool smok;
/*
- * skip write error buffers
+ * skip write error buffers
*/
if ((buf->flags & BM_IO_ERROR) != 0)
{
@@ -409,6 +409,7 @@ BufferAlloc(Relation reln,
buf = (BufferDesc *) NULL;
continue;
}
+
/*
* Set BM_IO_IN_PROGRESS to keep anyone from doing anything
* with the contents of the buffer while we write it out. We
@@ -453,6 +454,7 @@ BufferAlloc(Relation reln,
}
else
{
+
/*
* BM_JUST_DIRTIED cleared by BufferReplace and shouldn't
* be setted by anyone. - vadim 01/17/97
@@ -689,9 +691,7 @@ ReleaseAndReadBuffer(Buffer buffer,
bufHdr = &BufferDescriptors[buffer - 1];
Assert(PrivateRefCount[buffer - 1] > 0);
if (PrivateRefCount[buffer - 1] > 1)
- {
PrivateRefCount[buffer - 1]--;
- }
else
{
SpinAcquire(BufMgrLock);
@@ -724,7 +724,7 @@ BufferSync()
BufferDesc *bufHdr;
Buffer buffer;
int status;
- RelFileNode rnode;
+ RelFileNode rnode;
XLogRecPtr recptr;
Relation reln = NULL;
@@ -754,8 +754,8 @@ BufferSync()
}
/*
- * IO synchronization. Note that we do it with unpinned buffer
- * to avoid conflicts with FlushRelationBuffers.
+ * IO synchronization. Note that we do it with unpinned buffer to
+ * avoid conflicts with FlushRelationBuffers.
*/
if (bufHdr->flags & BM_IO_IN_PROGRESS)
{
@@ -769,12 +769,12 @@ BufferSync()
}
/*
- * Here: no one doing IO for this buffer and it's dirty.
- * Pin buffer now and set IO state for it *before* acquiring
- * shlock to avoid conflicts with FlushRelationBuffers.
+ * Here: no one doing IO for this buffer and it's dirty. Pin
+ * buffer now and set IO state for it *before* acquiring shlock to
+ * avoid conflicts with FlushRelationBuffers.
*/
PinBuffer(bufHdr);
- StartBufferIO(bufHdr, false); /* output IO start */
+ StartBufferIO(bufHdr, false); /* output IO start */
buffer = BufferDescriptorGetBuffer(bufHdr);
rnode = bufHdr->tag.rnode;
@@ -810,16 +810,16 @@ BufferSync()
if (reln == (Relation) NULL)
{
status = smgrblindwrt(DEFAULT_SMGR,
- bufHdr->tag.rnode,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data),
- true); /* must fsync */
+ bufHdr->tag.rnode,
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data),
+ true); /* must fsync */
}
else
{
status = smgrwrite(DEFAULT_SMGR, reln,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data));
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data));
}
if (status == SM_FAIL) /* disk failure ?! */
@@ -827,9 +827,9 @@ BufferSync()
bufHdr->tag.blockNum, bufHdr->blind.relname);
/*
- * Note that it's safe to change cntxDirty here because of
- * we protect it from upper writers by share lock and from
- * other bufmgr routines by BM_IO_IN_PROGRESS
+ * Note that it's safe to change cntxDirty here because of we
+ * protect it from upper writers by share lock and from other
+ * bufmgr routines by BM_IO_IN_PROGRESS
*/
bufHdr->cntxDirty = false;
@@ -842,12 +842,11 @@ BufferSync()
SpinAcquire(BufMgrLock);
bufHdr->flags &= ~BM_IO_IN_PROGRESS; /* mark IO finished */
- TerminateBufferIO(bufHdr); /* Sync IO finished */
+ TerminateBufferIO(bufHdr); /* Sync IO finished */
/*
- * If this buffer was marked by someone as DIRTY while
- * we were flushing it out we must not clear DIRTY
- * flag - vadim 01/17/97
+ * If this buffer was marked by someone as DIRTY while we were
+ * flushing it out we must not clear DIRTY flag - vadim 01/17/97
*/
if (!(bufHdr->flags & BM_JUST_DIRTIED))
bufHdr->flags &= ~BM_DIRTY;
@@ -1020,6 +1019,7 @@ void
BufmgrCommit(void)
{
LocalBufferSync();
+
/*
* All files created in current transaction will be fsync-ed
*/
@@ -1065,8 +1065,8 @@ BufferReplace(BufferDesc *bufHdr)
SpinRelease(BufMgrLock);
/*
- * No need to lock buffer context - no one should be able to
- * end ReadBuffer
+ * No need to lock buffer context - no one should be able to end
+ * ReadBuffer
*/
recptr = BufferGetLSN(bufHdr);
XLogFlush(recptr);
@@ -1113,8 +1113,8 @@ BlockNumber
RelationGetNumberOfBlocks(Relation relation)
{
return ((relation->rd_myxactonly) ? relation->rd_nblocks :
- ((relation->rd_rel->relkind == RELKIND_VIEW) ? 0 :
- smgrnblocks(DEFAULT_SMGR, relation)));
+ ((relation->rd_rel->relkind == RELKIND_VIEW) ? 0 :
+ smgrnblocks(DEFAULT_SMGR, relation)));
}
/* ---------------------------------------------------------------------
@@ -1122,7 +1122,7 @@ RelationGetNumberOfBlocks(Relation relation)
*
* This function removes all the buffered pages for a relation
* from the buffer pool. Dirty pages are simply dropped, without
- * bothering to write them out first. This is NOT rollback-able,
+ * bothering to write them out first. This is NOT rollback-able,
* and so should be used only with extreme caution!
*
* We assume that the caller holds an exclusive lock on the relation,
@@ -1196,6 +1196,7 @@ recheck:
bufHdr->refcount == 1);
ReleaseBufferWithBufferLock(i);
}
+
/*
* And mark the buffer as no longer occupied by this rel.
*/
@@ -1212,7 +1213,7 @@ recheck:
* This is the same as DropRelationBuffers, except that the target
* relation is specified by RelFileNode.
*
- * This is NOT rollback-able. One legitimate use is to clear the
+ * This is NOT rollback-able. One legitimate use is to clear the
* buffer cache of buffers for a relation that is being deleted
* during transaction abort.
* --------------------------------------------------------------------
@@ -1278,6 +1279,7 @@ recheck:
bufHdr->refcount == 1);
ReleaseBufferWithBufferLock(i);
}
+
/*
* And mark the buffer as no longer occupied by this rel.
*/
@@ -1293,7 +1295,7 @@ recheck:
*
* This function removes all the buffers in the buffer cache for a
* particular database. Dirty pages are simply dropped, without
- * bothering to write them out first. This is used when we destroy a
+ * bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
* tree no longer exists. Implementation is pretty similar to
* DropRelationBuffers() which is for destroying just one relation.
@@ -1310,10 +1312,11 @@ DropBuffers(Oid dbid)
{
bufHdr = &BufferDescriptors[i - 1];
recheck:
+
/*
- * We know that currently database OID is tblNode but
- * this probably will be changed in future and this
- * func will be used to drop tablespace buffers.
+ * We know that currently database OID is tblNode but this
+ * probably will be changed in future and this func will be used
+ * to drop tablespace buffers.
*/
if (bufHdr->tag.rnode.tblNode == dbid)
{
@@ -1342,6 +1345,7 @@ recheck:
* backends are running in that database.
*/
Assert(bufHdr->flags & BM_FREE);
+
/*
* And mark the buffer as no longer occupied by this page.
*/
@@ -1383,8 +1387,8 @@ blockNum=%d, flags=0x%x, refcount=%d %ld)",
for (i = 0; i < NBuffers; ++i, ++buf)
{
printf("[%-2d] (%s, %d) flags=0x%x, refcnt=%d %ld)\n",
- i, buf->blind.relname, buf->tag.blockNum,
- buf->flags, buf->refcount, PrivateRefCount[i]);
+ i, buf->blind.relname, buf->tag.blockNum,
+ buf->flags, buf->refcount, PrivateRefCount[i]);
}
}
}
@@ -1441,7 +1445,7 @@ BufferPoolBlowaway()
*
* This function writes all dirty pages of a relation out to disk.
* Furthermore, pages that have blocknumber >= firstDelBlock are
- * actually removed from the buffer pool. An error code is returned
+ * actually removed from the buffer pool. An error code is returned
* if we fail to dump a dirty buffer or if we find one of
* the target pages is pinned into the cache.
*
@@ -1495,15 +1499,15 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
{
if (bufHdr->flags & BM_DIRTY || bufHdr->cntxDirty)
{
- status = smgrwrite(DEFAULT_SMGR, rel,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data));
+ status = smgrwrite(DEFAULT_SMGR, rel,
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data));
if (status == SM_FAIL)
{
elog(NOTICE, "FlushRelationBuffers(%s (local), %u): block %u is dirty, could not flush it",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum);
- return(-1);
+ return (-1);
}
bufHdr->flags &= ~(BM_DIRTY | BM_JUST_DIRTIED);
bufHdr->cntxDirty = false;
@@ -1513,12 +1517,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
elog(NOTICE, "FlushRelationBuffers(%s (local), %u): block %u is referenced (%ld)",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum, LocalRefCount[i]);
- return(-2);
+ return (-2);
}
if (bufHdr->tag.blockNum >= firstDelBlock)
- {
bufHdr->tag.rnode.relNode = InvalidOid;
- }
}
}
return 0;
@@ -1559,10 +1561,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
SpinRelease(BufMgrLock);
status = smgrwrite(DEFAULT_SMGR, rel,
- bufHdr->tag.blockNum,
- (char *) MAKE_PTR(bufHdr->data));
+ bufHdr->tag.blockNum,
+ (char *) MAKE_PTR(bufHdr->data));
- if (status == SM_FAIL) /* disk failure ?! */
+ if (status == SM_FAIL) /* disk failure ?! */
elog(STOP, "FlushRelationBuffers: cannot write %u for %s",
bufHdr->tag.blockNum, bufHdr->blind.relname);
@@ -1573,9 +1575,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
TerminateBufferIO(bufHdr);
Assert(!(bufHdr->flags & BM_JUST_DIRTIED));
bufHdr->flags &= ~BM_DIRTY;
+
/*
- * Note that it's safe to change cntxDirty here because
- * of we protect it from upper writers by
+ * Note that it's safe to change cntxDirty here
+ * because of we protect it from upper writers by
* AccessExclusiveLock and from other bufmgr routines
* by BM_IO_IN_PROGRESS
*/
@@ -1593,9 +1596,7 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
return -2;
}
if (bufHdr->tag.blockNum >= firstDelBlock)
- {
BufTableDelete(bufHdr);
- }
}
}
SpinRelease(BufMgrLock);
@@ -1628,9 +1629,7 @@ ReleaseBuffer(Buffer buffer)
Assert(PrivateRefCount[buffer - 1] > 0);
if (PrivateRefCount[buffer - 1] > 1)
- {
PrivateRefCount[buffer - 1]--;
- }
else
{
SpinAcquire(BufMgrLock);
@@ -1671,9 +1670,7 @@ ReleaseBufferWithBufferLock(Buffer buffer)
Assert(PrivateRefCount[buffer - 1] > 0);
if (PrivateRefCount[buffer - 1] > 1)
- {
PrivateRefCount[buffer - 1]--;
- }
else
{
PrivateRefCount[buffer - 1] = 0;
@@ -2084,8 +2081,8 @@ LockBuffer(Buffer buffer, int mode)
*buflock |= BL_W_LOCK;
/*
- * This is not the best place to set cntxDirty flag (eg indices
- * do not always change buffer they lock in excl mode). But please
+ * This is not the best place to set cntxDirty flag (eg indices do
+ * not always change buffer they lock in excl mode). But please
* remember that it's critical to set cntxDirty *before* logging
* changes with XLogInsert() - see comments in BufferSync().
*/
@@ -2200,6 +2197,7 @@ InitBufferIO(void)
{
InProgressBuf = (BufferDesc *) 0;
}
+
#endif
/*
@@ -2245,7 +2243,7 @@ AbortBufferIO(void)
* NOTE: buffer must be excl locked.
*/
void
-MarkBufferForCleanup(Buffer buffer, void (*CleanupFunc)(Buffer))
+MarkBufferForCleanup(Buffer buffer, void (*CleanupFunc) (Buffer))
{
BufferDesc *bufHdr = &BufferDescriptors[buffer - 1];
@@ -2301,5 +2299,5 @@ BufferGetFileNode(Buffer buffer)
else
bufHdr = &BufferDescriptors[buffer - 1];
- return(bufHdr->tag.rnode);
+ return (bufHdr->tag.rnode);
}
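
The BufferSync() and FlushRelationBuffers() hunks above both hinge on BM_JUST_DIRTIED: the idea suggested by the comments is that the flag is cleared when a write begins and re-set by anyone who dirties the page meanwhile, so the writer clears BM_DIRTY afterwards only if the flag is still clear. A toy model of that bookkeeping; the struct and functions are simplified stand-ins, not the real buffer manager.

	#include <stdio.h>

	#define BM_DIRTY		(1 << 0)
	#define BM_JUST_DIRTIED	(1 << 1)

	typedef struct
	{
		int			flags;
	} ToyBuffer;

	/* Someone modifies the page while (or after) we are flushing it. */
	static void
	toy_mark_dirty(ToyBuffer *buf)
	{
		buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
	}

	/*
	 * Flush: clear JUST_DIRTIED before the write; afterwards, clear DIRTY only
	 * if nobody re-dirtied the buffer while the write was in progress.
	 */
	static void
	toy_flush(ToyBuffer *buf, int redirtied_during_write)
	{
		buf->flags &= ~BM_JUST_DIRTIED;

		/* ... the actual write would happen here ... */
		if (redirtied_during_write)
			toy_mark_dirty(buf);

		if (!(buf->flags & BM_JUST_DIRTIED))
			buf->flags &= ~BM_DIRTY;
	}

	int
	main(void)
	{
		ToyBuffer	buf = {BM_DIRTY};

		toy_flush(&buf, 1);
		printf("still dirty: %d\n", (buf.flags & BM_DIRTY) != 0);	/* 1 */
		toy_flush(&buf, 0);
		printf("still dirty: %d\n", (buf.flags & BM_DIRTY) != 0);	/* 0 */
		return 0;
	}
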
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index a6d1b95f4fa..6e3cd756411 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.39 2001/01/24 19:43:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.40 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
/* a low tech search for now -- not optimized for scans */
for (i = 0; i < NLocBuffer; i++)
{
- if (LocalBufferDescriptors[i].tag.rnode.relNode ==
+ if (LocalBufferDescriptors[i].tag.rnode.relNode ==
reln->rd_node.relNode &&
LocalBufferDescriptors[i].tag.blockNum == blockNum)
{
@@ -125,8 +125,8 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
/*
* it's all ours now.
*
- * We need not in tblNode currently but will in future I think,
- * when we'll give up rel->rd_fd to fmgr cache.
+ * We need not in tblNode currently but will in future I think, when
+ * we'll give up rel->rd_fd to fmgr cache.
*/
bufHdr->tag.rnode = reln->rd_node;
bufHdr->tag.blockNum = blockNum;
@@ -142,12 +142,14 @@ LocalBufferAlloc(Relation reln, BlockNumber blockNum, bool *foundPtr)
if (data == NULL)
elog(FATAL, "Out of memory in LocalBufferAlloc");
+
/*
- * This is a bit of a hack: bufHdr->data needs to be a shmem offset
- * for consistency with the shared-buffer case, so make it one
- * even though it's not really a valid shmem offset.
+ * This is a bit of a hack: bufHdr->data needs to be a shmem
+ * offset for consistency with the shared-buffer case, so make it
+ * one even though it's not really a valid shmem offset.
*/
bufHdr->data = MAKE_OFFSET(data);
+
/*
* Set pointer for use by BufferGetBlock() macro.
*/
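
Aside: the "bit of a hack" comment in the LocalBufferAlloc() hunk refers to storing a malloc'd local block in the same offset form that shared buffers use. A rough illustration of that offset/pointer convention, assuming a single base address; the MY_-prefixed macros are stand-ins for the backend's MAKE_OFFSET/MAKE_PTR:

#include <stdlib.h>

typedef unsigned long shmem_offset;

static char *shmem_base;            /* set when the backend attaches the segment */

#define MY_MAKE_OFFSET(p)  ((shmem_offset) ((char *) (p) - shmem_base))
#define MY_MAKE_PTR(off)   ((void *) (shmem_base + (off)))

shmem_offset
register_local_block(size_t size)
{
    char *data = malloc(size);      /* backend-local, not shared, memory */

    if (data == NULL)
        abort();                    /* the real code elog(FATAL)s instead */

    /*
     * Store the block in offset form so code written for shared buffers
     * works unchanged, even though this is not a valid shmem offset.
     */
    return MY_MAKE_OFFSET(data);
}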
diff --git a/src/backend/storage/buffer/s_lock.c b/src/backend/storage/buffer/s_lock.c
index 6bb76c2cda8..647802a19ea 100644
--- a/src/backend/storage/buffer/s_lock.c
+++ b/src/backend/storage/buffer/s_lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/Attic/s_lock.c,v 1.34 2001/02/24 22:42:45 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/Attic/s_lock.c,v 1.35 2001/03/22 03:59:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,16 +43,16 @@
#define S_NSPINCYCLE 20
int s_spincycle[S_NSPINCYCLE] =
-{ 1, 10, 100, 1000,
- 10000, 1000, 1000, 1000,
- 10000, 1000, 1000, 10000,
- 1000, 1000, 10000, 1000,
- 10000, 1000, 10000, 30000
+{1, 10, 100, 1000,
+ 10000, 1000, 1000, 1000,
+ 10000, 1000, 1000, 10000,
+ 1000, 1000, 10000, 1000,
+ 10000, 1000, 10000, 30000
};
#define AVG_SPINCYCLE 5000 /* average entry in microsec: 100ms / 20 */
-#define DEFAULT_TIMEOUT (100*1000000) /* default timeout: 100 sec */
+#define DEFAULT_TIMEOUT (100*1000000) /* default timeout: 100 sec */
/*
@@ -74,10 +74,10 @@ s_lock_stuck(volatile slock_t *lock, const char *file, const int line)
/*
* s_lock_sleep() - sleep a pseudo-random amount of time, check for timeout
*
- * The 'timeout' is given in microsec, or may be 0 for "infinity". Note that
+ * The 'timeout' is given in microsec, or may be 0 for "infinity". Note that
* this will be a lower bound (a fairly loose lower bound, on most platforms).
*
- * 'microsec' is the number of microsec to delay per loop. Normally
+ * 'microsec' is the number of microsec to delay per loop. Normally
* 'microsec' is 0, specifying to use the next s_spincycle[] value.
* Some callers may pass a nonzero interval, specifying to use exactly that
* delay value rather than a pseudo-random delay.
@@ -98,7 +98,7 @@ s_lock_sleep(unsigned spins, int timeout, int microsec,
{
delay.tv_sec = 0;
delay.tv_usec = s_spincycle[spins % S_NSPINCYCLE];
- microsec = AVG_SPINCYCLE; /* use average to figure timeout */
+ microsec = AVG_SPINCYCLE; /* use average to figure timeout */
}
if (timeout > 0)
@@ -125,10 +125,11 @@ s_lock(volatile slock_t *lock, const char *file, const int line)
* If you are thinking of changing this code, be careful. This same
* loop logic is used in other places that call TAS() directly.
*
- * While waiting for a lock, we check for cancel/die interrupts (which
- * is a no-op if we are inside a critical section). The interrupt check
- * can be omitted in places that know they are inside a critical section.
- * Note that an interrupt must NOT be accepted after acquiring the lock.
+ * While waiting for a lock, we check for cancel/die interrupts (which is
+ * a no-op if we are inside a critical section). The interrupt check
+ * can be omitted in places that know they are inside a critical
+ * section. Note that an interrupt must NOT be accepted after
+ * acquiring the lock.
*/
while (TAS(lock))
{
@@ -155,8 +156,8 @@ static void
tas_dummy() /* really means: extern int tas(slock_t
* **lock); */
{
- __asm__ __volatile__(
-"\
+ __asm__ __volatile__(
+ "\
.global _tas \n\
_tas: \n\
movel sp@(0x4),a0 \n\
@@ -180,8 +181,8 @@ _success: \n\
static void
tas_dummy()
{
- __asm__ __volatile__(
-"\
+ __asm__ __volatile__(
+ "\
.globl tas \n\
.globl _tas \n\
_tas: \n\
@@ -200,15 +201,15 @@ success: \n\
");
}
-#endif /* __APPLE__ && __ppc__ */
+#endif /* __APPLE__ && __ppc__ */
#if defined(__powerpc__)
/* Note: need a nice gcc constrained asm version so it can be inlined */
static void
tas_dummy()
{
- __asm__ __volatile__(
-"\
+ __asm__ __volatile__(
+ "\
.global tas \n\
tas: \n\
lwarx 5,0,3 \n\
@@ -231,8 +232,8 @@ success: \n\
static void
tas_dummy()
{
- __asm__ _volatile__(
-"\
+ __asm__ _volatile__(
+ "\
.global tas \n\
tas: \n\
.frame $sp, 0, $31 \n\
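
Aside: most of the s_lock.c churn above is whitespace, but the surrounding code implements a spin-then-sleep loop driven by the s_spincycle[] delay table and bounded by a timeout. A compact sketch of that loop, assuming a non-atomic test_and_set() stand-in and the table values shown in the hunk, with a return code where the real code calls s_lock_stuck():

#include <unistd.h>

#define NSPINCYCLE     20
#define AVG_CYCLE_USEC 5000             /* average table entry, as in the hunk */
#define TIMEOUT_USEC   (100 * 1000000)  /* give up after roughly 100 seconds */

static const int spincycle[NSPINCYCLE] = {
    1, 10, 100, 1000, 10000, 1000, 1000, 1000, 10000, 1000,
    1000, 10000, 1000, 1000, 10000, 1000, 10000, 1000, 10000, 30000
};

static int
test_and_set(volatile int *lock)        /* non-atomic stand-in for the TAS() asm */
{
    int old = *lock;

    *lock = 1;
    return old;
}

int
spin_lock(volatile int *lock)
{
    unsigned spins = 0;

    while (test_and_set(lock))
    {
        usleep(spincycle[spins % NSPINCYCLE]);  /* pseudo-random backoff */
        spins++;
        if (spins * (unsigned) AVG_CYCLE_USEC > (unsigned) TIMEOUT_USEC)
            return -1;                  /* timed out; real code calls s_lock_stuck() */
    }
    return 0;
}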
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 7944ee8e3af..2ce6d31c38a 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.9 2001/01/24 19:43:06 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.10 2001/03/22 03:59:45 momjian Exp $
*
* NOTES:
*
@@ -163,6 +163,7 @@ BufFileCreate(File file)
{
return makeBufFile(file);
}
+
#endif
/*
@@ -574,5 +575,5 @@ BufFileTellBlock(BufFile *file)
blknum += file->curFile * RELSEG_SIZE;
return blknum;
}
-#endif
+#endif
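
Aside, for context on the BufFileTellBlock() hunk: a BufFile is split across segment files, so a logical block number is the block within the current segment plus RELSEG_SIZE blocks for every earlier segment. A one-function sketch of that arithmetic, assuming the usual 8 kB block size and 1 GB segments:

#define BLCKSZ      8192                    /* assumed disk block size */
#define RELSEG_SIZE (0x40000000 / BLCKSZ)   /* blocks per 1 GB segment file */

long
tell_block(int cur_file, long cur_offset)
{
    /* block within the current segment, plus whole earlier segments */
    return cur_offset / BLCKSZ + (long) cur_file * RELSEG_SIZE;
}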
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index c6a72b8f25d..137dd769e3f 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.73 2001/02/18 04:39:42 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.74 2001/03/22 03:59:45 momjian Exp $
*
* NOTES:
*
@@ -243,7 +243,7 @@ pg_fdatasync(int fd)
int
BasicOpenFile(FileName fileName, int fileFlags, int fileMode)
{
- int fd;
+ int fd;
tryAgain:
fd = open(fileName, fileFlags, fileMode);
@@ -253,7 +253,7 @@ tryAgain:
if (errno == EMFILE || errno == ENFILE)
{
- int save_errno = errno;
+ int save_errno = errno;
DO_DB(elog(DEBUG, "BasicOpenFile: not enough descs, retry, er= %d",
errno));
@@ -414,7 +414,7 @@ LruInsert(File file)
{
while (nfile + numAllocatedFiles >= pg_nofile())
{
- if (! ReleaseLruFile())
+ if (!ReleaseLruFile())
break;
}
@@ -460,6 +460,7 @@ ReleaseLruFile(void)
if (nfile > 0)
{
+
/*
* There are opened files and so there should be at least one used
* vfd in the ring.
@@ -660,7 +661,7 @@ fileNameOpenFile(FileName fileName,
while (nfile + numAllocatedFiles >= pg_nofile())
{
- if (! ReleaseLruFile())
+ if (!ReleaseLruFile())
break;
}
@@ -683,9 +684,10 @@ fileNameOpenFile(FileName fileName,
vfdP->fileFlags = fileFlags & ~(O_TRUNC | O_EXCL);
vfdP->fileMode = fileMode;
vfdP->seekPos = 0;
+
/*
- * Have to fsync file on commit. Alternative way - log
- * file creation and fsync log before actual file creation.
+ * Have to fsync file on commit. Alternative way - log file creation
+ * and fsync log before actual file creation.
*/
if (fileFlags & O_CREAT)
vfdP->fdstate = FD_DIRTY;
@@ -1083,7 +1085,7 @@ TryAgain:
if (errno == EMFILE || errno == ENFILE)
{
- int save_errno = errno;
+ int save_errno = errno;
DO_DB(elog(DEBUG, "AllocateFile: not enough descs, retry, er= %d",
errno));
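
Aside: the fd.c hunks repeatedly show the same recovery idea — when open() runs out of descriptors (EMFILE/ENFILE), release a least-recently-used cached descriptor and retry. A simplified sketch of that retry loop; release_lru_file() is a placeholder for the backend's ReleaseLruFile() and does nothing here:

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>

static bool
release_lru_file(void)      /* placeholder: would close one cached descriptor */
{
    return false;           /* nothing cached in this sketch */
}

int
open_with_retry(const char *name, int flags, int mode)
{
    for (;;)
    {
        int fd = open(name, flags, mode);

        if (fd >= 0)
            return fd;
        if ((errno == EMFILE || errno == ENFILE) && release_lru_file())
            continue;       /* freed a descriptor; retry the open() */
        return -1;          /* give up and let the caller inspect errno */
    }
}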
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index eb8d488bdd3..375376abf83 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.63 2001/03/13 01:17:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.64 2001/03/22 03:59:45 momjian Exp $
*
* NOTES
*
@@ -71,7 +71,7 @@ static IpcSemaphoreId InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey,
int semStartValue, bool removeOnExit);
static void CallbackSemaphoreKill(int status, Datum semId);
static void *InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size,
- int permission);
+ int permission);
static void IpcMemoryDetach(int status, Datum shmaddr);
static void IpcMemoryDelete(int status, Datum shmId);
static void *PrivateMemoryCreate(uint32 size);
@@ -101,6 +101,7 @@ static struct ONEXIT
void (*function) ();
Datum arg;
} on_proc_exit_list[MAX_ON_EXITS],
+
on_shmem_exit_list[MAX_ON_EXITS];
static int on_proc_exit_index,
@@ -127,9 +128,9 @@ proc_exit(int code)
proc_exit_inprogress = true;
/*
- * Forget any pending cancel or die requests; we're doing our best
- * to close up shop already. Note that the signal handlers will not
- * set these flags again, now that proc_exit_inprogress is set.
+ * Forget any pending cancel or die requests; we're doing our best to
+ * close up shop already. Note that the signal handlers will not set
+ * these flags again, now that proc_exit_inprogress is set.
*/
InterruptPending = false;
ProcDiePending = false;
@@ -198,7 +199,7 @@ shmem_exit(int code)
* ----------------------------------------------------------------
*/
void
-on_proc_exit(void (*function) (), Datum arg)
+ on_proc_exit(void (*function) (), Datum arg)
{
if (on_proc_exit_index >= MAX_ON_EXITS)
elog(FATAL, "Out of on_proc_exit slots");
@@ -217,7 +218,7 @@ on_proc_exit(void (*function) (), Datum arg)
* ----------------------------------------------------------------
*/
void
-on_shmem_exit(void (*function) (), Datum arg)
+ on_shmem_exit(void (*function) (), Datum arg)
{
if (on_shmem_exit_index >= MAX_ON_EXITS)
elog(FATAL, "Out of on_shmem_exit slots");
@@ -282,11 +283,13 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey,
if (semId < 0)
{
+
/*
* Fail quietly if error indicates a collision with existing set.
- * One would expect EEXIST, given that we said IPC_EXCL, but perhaps
- * we could get a permission violation instead? Also, EIDRM might
- * occur if an old set is slated for destruction but not gone yet.
+ * One would expect EEXIST, given that we said IPC_EXCL, but
+ * perhaps we could get a permission violation instead? Also,
+ * EIDRM might occur if an old set is slated for destruction but
+ * not gone yet.
*/
if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
@@ -294,11 +297,12 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey,
#endif
)
return -1;
+
/*
* Else complain and abort
*/
fprintf(stderr, "IpcSemaphoreCreate: semget(key=%d, num=%d, 0%o) failed: %s\n",
- (int) semKey, numSems, (IPC_CREAT|IPC_EXCL|permission),
+ (int) semKey, numSems, (IPC_CREAT | IPC_EXCL | permission),
strerror(errno));
if (errno == ENOSPC)
@@ -325,7 +329,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey,
if (errno == ERANGE)
fprintf(stderr,
"You possibly need to raise your kernel's SEMVMX value to be at least\n"
- "%d. Look into the PostgreSQL documentation for details.\n",
+ "%d. Look into the PostgreSQL documentation for details.\n",
semStartValue);
IpcSemaphoreKill(semId);
@@ -348,12 +352,14 @@ IpcSemaphoreKill(IpcSemaphoreId semId)
{
union semun semun;
- semun.val = 0; /* unused, but keep compiler quiet */
+ semun.val = 0; /* unused, but keep compiler quiet */
if (semctl(semId, 0, IPC_RMID, semun) < 0)
fprintf(stderr, "IpcSemaphoreKill: semctl(%d, 0, IPC_RMID, ...) failed: %s\n",
semId, strerror(errno));
- /* We used to report a failure via elog(NOTICE), but that's pretty
+
+ /*
+ * We used to report a failure via elog(NOTICE), but that's pretty
* pointless considering any client has long since disconnected ...
*/
}
@@ -393,13 +399,13 @@ IpcSemaphoreLock(IpcSemaphoreId semId, int sem, bool interruptOK)
* section already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
- * returning. The caller needs to be able to record ownership of
+ * returning. The caller needs to be able to record ownership of
* the lock before any interrupt can be accepted.
*
* There is a window of a few instructions between CHECK_FOR_INTERRUPTS
- * and entering the semop() call. If a cancel/die interrupt occurs in
+ * and entering the semop() call. If a cancel/die interrupt occurs in
* that window, we would fail to notice it until after we acquire the
- * lock (or get another interrupt to escape the semop()). We can avoid
+ * lock (or get another interrupt to escape the semop()). We can avoid
* this problem by temporarily setting ImmediateInterruptOK = true
* before we do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this
* interval will execute directly. However, there is a huge pitfall:
@@ -426,7 +432,7 @@ IpcSemaphoreLock(IpcSemaphoreId semId, int sem, bool interruptOK)
if (errStatus == -1)
{
- fprintf(stderr, "IpcSemaphoreLock: semop(id=%d) failed: %s\n",
+ fprintf(stderr, "IpcSemaphoreLock: semop(id=%d) failed: %s\n",
semId, strerror(errno));
proc_exit(255);
}
@@ -503,7 +509,7 @@ IpcSemaphoreTryLock(IpcSemaphoreId semId, int sem)
return false; /* failed to lock it */
#endif
/* Otherwise we got trouble */
- fprintf(stderr, "IpcSemaphoreTryLock: semop(id=%d) failed: %s\n",
+ fprintf(stderr, "IpcSemaphoreTryLock: semop(id=%d) failed: %s\n",
semId, strerror(errno));
proc_exit(255);
}
@@ -516,7 +522,8 @@ int
IpcSemaphoreGetValue(IpcSemaphoreId semId, int sem)
{
union semun dummy; /* for Solaris */
- dummy.val = 0; /* unused */
+
+ dummy.val = 0; /* unused */
return semctl(semId, sem, GETVAL, dummy);
}
@@ -526,7 +533,8 @@ static pid_t
IpcSemaphoreGetLastPID(IpcSemaphoreId semId, int sem)
{
union semun dummy; /* for Solaris */
- dummy.val = 0; /* unused */
+
+ dummy.val = 0; /* unused */
return semctl(semId, sem, GETPID, dummy);
}
@@ -563,11 +571,13 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
if (shmid < 0)
{
+
/*
- * Fail quietly if error indicates a collision with existing segment.
- * One would expect EEXIST, given that we said IPC_EXCL, but perhaps
- * we could get a permission violation instead? Also, EIDRM might
- * occur if an old seg is slated for destruction but not gone yet.
+ * Fail quietly if error indicates a collision with existing
+ * segment. One would expect EEXIST, given that we said IPC_EXCL,
+ * but perhaps we could get a permission violation instead? Also,
+ * EIDRM might occur if an old seg is slated for destruction but
+ * not gone yet.
*/
if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
@@ -575,6 +585,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
#endif
)
return NULL;
+
/*
* Else complain and abort
*/
@@ -584,7 +595,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
if (errno == EINVAL)
fprintf(stderr,
- "\nThis error can be caused by one of three things:\n\n"
+ "\nThis error can be caused by one of three things:\n\n"
"1. The maximum size for shared memory segments on your system was\n"
" exceeded. You need to raise the SHMMAX parameter in your kernel\n"
" to be at least %u bytes.\n\n"
@@ -618,7 +629,7 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size, int permission)
if (memAddress == (void *) -1)
{
- fprintf(stderr, "IpcMemoryCreate: shmat(id=%d) failed: %s\n",
+ fprintf(stderr, "IpcMemoryCreate: shmat(id=%d) failed: %s\n",
shmid, strerror(errno));
proc_exit(1);
}
@@ -643,7 +654,9 @@ IpcMemoryDetach(int status, Datum shmaddr)
if (shmdt(DatumGetPointer(shmaddr)) < 0)
fprintf(stderr, "IpcMemoryDetach: shmdt(%p) failed: %s\n",
DatumGetPointer(shmaddr), strerror(errno));
- /* We used to report a failure via elog(NOTICE), but that's pretty
+
+ /*
+ * We used to report a failure via elog(NOTICE), but that's pretty
* pointless considering any client has long since disconnected ...
*/
}
@@ -658,7 +671,9 @@ IpcMemoryDelete(int status, Datum shmId)
if (shmctl(DatumGetInt32(shmId), IPC_RMID, (struct shmid_ds *) NULL) < 0)
fprintf(stderr, "IpcMemoryDelete: shmctl(%d, %d, 0) failed: %s\n",
DatumGetInt32(shmId), IPC_RMID, strerror(errno));
- /* We used to report a failure via elog(NOTICE), but that's pretty
+
+ /*
+ * We used to report a failure via elog(NOTICE), but that's pretty
* pointless considering any client has long since disconnected ...
*/
}
@@ -669,22 +684,23 @@ IpcMemoryDelete(int status, Datum shmId)
bool
SharedMemoryIsInUse(IpcMemoryKey shmKey, IpcMemoryId shmId)
{
- struct shmid_ds shmStat;
+ struct shmid_ds shmStat;
/*
- * We detect whether a shared memory segment is in use by seeing whether
- * it (a) exists and (b) has any processes are attached to it.
+ * We detect whether a shared memory segment is in use by seeing
+ * whether it (a) exists and (b) has any processes are attached to it.
*
* If we are unable to perform the stat operation for a reason other than
- * nonexistence of the segment (most likely, because it doesn't belong to
- * our userid), assume it is in use.
+ * nonexistence of the segment (most likely, because it doesn't belong
+ * to our userid), assume it is in use.
*/
if (shmctl(shmId, IPC_STAT, &shmStat) < 0)
{
+
/*
* EINVAL actually has multiple possible causes documented in the
- * shmctl man page, but we assume it must mean the segment no longer
- * exists.
+ * shmctl man page, but we assume it must mean the segment no
+ * longer exists.
*/
if (errno == EINVAL)
return false;
@@ -718,7 +734,7 @@ PrivateMemoryCreate(uint32 size)
fprintf(stderr, "PrivateMemoryCreate: malloc(%u) failed\n", size);
proc_exit(1);
}
- MemSet(memAddress, 0, size); /* keep Purify quiet */
+ MemSet(memAddress, 0, size);/* keep Purify quiet */
/* Register on-exit routine to release storage */
on_shmem_exit(PrivateMemoryDelete, PointerGetDatum(memAddress));
@@ -763,14 +779,14 @@ IpcInitKeyAssignment(int port)
PGShmemHeader *
IpcMemoryCreate(uint32 size, bool makePrivate, int permission)
{
- void *memAddress;
+ void *memAddress;
PGShmemHeader *hdr;
/* Room for a header? */
Assert(size > MAXALIGN(sizeof(PGShmemHeader)));
/* Loop till we find a free IPC key */
- for (NextShmemSegID++ ; ; NextShmemSegID++)
+ for (NextShmemSegID++;; NextShmemSegID++)
{
IpcMemoryId shmid;
@@ -799,6 +815,7 @@ IpcMemoryCreate(uint32 size, bool makePrivate, int permission)
shmdt(memAddress);
continue; /* segment belongs to a non-Postgres app */
}
+
/*
* If the creator PID is my own PID or does not belong to any
* extant process, it's safe to zap it.
@@ -812,28 +829,32 @@ IpcMemoryCreate(uint32 size, bool makePrivate, int permission)
continue; /* segment belongs to a live process */
}
}
+
/*
- * The segment appears to be from a dead Postgres process, or
- * from a previous cycle of life in this same process. Zap it,
- * if possible. This probably shouldn't fail, but if it does,
- * assume the segment belongs to someone else after all,
- * and continue quietly.
+ * The segment appears to be from a dead Postgres process, or from
+ * a previous cycle of life in this same process. Zap it, if
+ * possible. This probably shouldn't fail, but if it does, assume
+ * the segment belongs to someone else after all, and continue
+ * quietly.
*/
shmdt(memAddress);
if (shmctl(shmid, IPC_RMID, (struct shmid_ds *) NULL) < 0)
continue;
+
/*
* Now try again to create the segment.
*/
memAddress = InternalIpcMemoryCreate(NextShmemSegID, size, permission);
if (memAddress)
break; /* successful create and attach */
+
/*
* Can only get here if some other process managed to create the
- * same shmem key before we did. Let him have that one,
- * loop around to try next key.
+ * same shmem key before we did. Let him have that one, loop
+ * around to try next key.
*/
}
+
/*
* OK, we created a new segment. Mark it as created by this process.
* The order of assignments here is critical so that another Postgres
@@ -843,6 +864,7 @@ IpcMemoryCreate(uint32 size, bool makePrivate, int permission)
hdr = (PGShmemHeader *) memAddress;
hdr->creatorPID = getpid();
hdr->magic = PGShmemMagic;
+
/*
* Initialize space allocation status for segment.
*/
@@ -862,27 +884,28 @@ IpcSemaphoreId
IpcSemaphoreCreate(int numSems, int permission,
int semStartValue, bool removeOnExit)
{
- IpcSemaphoreId semId;
+ IpcSemaphoreId semId;
union semun semun;
/* Loop till we find a free IPC key */
- for (NextSemaID++ ; ; NextSemaID++)
+ for (NextSemaID++;; NextSemaID++)
{
- pid_t creatorPID;
+ pid_t creatorPID;
/* Try to create new semaphore set */
- semId = InternalIpcSemaphoreCreate(NextSemaID, numSems+1,
+ semId = InternalIpcSemaphoreCreate(NextSemaID, numSems + 1,
permission, semStartValue,
removeOnExit);
if (semId >= 0)
break; /* successful create */
/* See if it looks to be leftover from a dead Postgres process */
- semId = semget(NextSemaID, numSems+1, 0);
+ semId = semget(NextSemaID, numSems + 1, 0);
if (semId < 0)
continue; /* failed: must be some other app's */
if (IpcSemaphoreGetValue(semId, numSems) != PGSemaMagic)
continue; /* sema belongs to a non-Postgres app */
+
/*
* If the creator PID is my own PID or does not belong to any
* extant process, it's safe to zap it.
@@ -896,46 +919,50 @@ IpcSemaphoreCreate(int numSems, int permission,
errno != ESRCH)
continue; /* sema belongs to a live process */
}
+
/*
* The sema set appears to be from a dead Postgres process, or
- * from a previous cycle of life in this same process. Zap it,
- * if possible. This probably shouldn't fail, but if it does,
- * assume the sema set belongs to someone else after all,
- * and continue quietly.
+ * from a previous cycle of life in this same process. Zap it, if
+ * possible. This probably shouldn't fail, but if it does, assume
+ * the sema set belongs to someone else after all, and continue
+ * quietly.
*/
semun.val = 0; /* unused, but keep compiler quiet */
if (semctl(semId, 0, IPC_RMID, semun) < 0)
continue;
+
/*
* Now try again to create the sema set.
*/
- semId = InternalIpcSemaphoreCreate(NextSemaID, numSems+1,
+ semId = InternalIpcSemaphoreCreate(NextSemaID, numSems + 1,
permission, semStartValue,
removeOnExit);
if (semId >= 0)
break; /* successful create */
+
/*
* Can only get here if some other process managed to create the
- * same sema key before we did. Let him have that one,
- * loop around to try next key.
+ * same sema key before we did. Let him have that one, loop
+ * around to try next key.
*/
}
+
/*
* OK, we created a new sema set. Mark it as created by this process.
* We do this by setting the spare semaphore to PGSemaMagic-1 and then
- * incrementing it with semop(). That leaves it with value PGSemaMagic
- * and sempid referencing this process.
+ * incrementing it with semop(). That leaves it with value
+ * PGSemaMagic and sempid referencing this process.
*/
- semun.val = PGSemaMagic-1;
+ semun.val = PGSemaMagic - 1;
if (semctl(semId, numSems, SETVAL, semun) < 0)
{
fprintf(stderr, "IpcSemaphoreCreate: semctl(id=%d, %d, SETVAL, %d) failed: %s\n",
- semId, numSems, PGSemaMagic-1, strerror(errno));
+ semId, numSems, PGSemaMagic - 1, strerror(errno));
if (errno == ERANGE)
fprintf(stderr,
"You possibly need to raise your kernel's SEMVMX value to be at least\n"
- "%d. Look into the PostgreSQL documentation for details.\n",
+ "%d. Look into the PostgreSQL documentation for details.\n",
PGSemaMagic);
proc_exit(1);
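
Aside: both InternalIpcMemoryCreate() and IpcSemaphoreCreate() above follow the same probing strategy — ask for each candidate key with IPC_CREAT | IPC_EXCL, treat a collision as "try the next key", and only complain on a real failure. A stripped-down sketch of the shared-memory side of that loop; the leftover-segment checks on magic and creator PID are elided, and size/permission are whatever the caller passes:

#include <errno.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int
create_segment(key_t first_key, size_t size, int permission)
{
    key_t key;

    for (key = first_key;; key++)
    {
        int shmid = shmget(key, size, IPC_CREAT | IPC_EXCL | permission);

        if (shmid >= 0)
            return shmid;           /* created a fresh segment under this key */

        if (errno == EEXIST || errno == EACCES
#ifdef EIDRM
            || errno == EIDRM
#endif
            )
            continue;               /* key already in use: try the next one */

        return -1;                  /* real failure; the caller reports it */
    }
}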
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 471e4298c4d..ed42e51a925 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.39 2001/01/24 19:43:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.40 2001/03/22 03:59:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
* for such a backend, the shared memory is already ready-to-go.
*
* If "makePrivate" is true then we only need private memory, not shared
- * memory. This is true for a standalone backend, false for a postmaster.
+ * memory. This is true for a standalone backend, false for a postmaster.
*/
void
CreateSharedMemoryAndSemaphores(bool makePrivate, int maxBackends)
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index cc41a36888c..caf94bda46c 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -8,14 +8,14 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.56 2001/01/24 19:43:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.57 2001/03/22 03:59:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* POSTGRES processes share one or more regions of shared memory.
* The shared memory is created by a postmaster and is inherited
- * by each backend via fork(). The routines in this file are used for
+ * by each backend via fork(). The routines in this file are used for
* allocating and binding to shared memory data structures.
*
* NOTES:
@@ -65,7 +65,7 @@
/* shared memory global variables */
-static PGShmemHeader *ShmemSegHdr; /* shared mem segment header */
+static PGShmemHeader *ShmemSegHdr; /* shared mem segment header */
SHMEM_OFFSET ShmemBase; /* start address of shared memory */
@@ -75,9 +75,9 @@ SPINLOCK ShmemLock; /* lock for shared memory allocation */
SPINLOCK ShmemIndexLock; /* lock for shmem index access */
-static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
+static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
-static bool ShmemBootstrap = false; /* bootstrapping shmem index? */
+static bool ShmemBootstrap = false; /* bootstrapping shmem index? */
/*
@@ -99,9 +99,9 @@ InitShmemAllocation(PGShmemHeader *seghdr)
/*
* Since ShmemInitHash calls ShmemInitStruct, which expects the
- * ShmemIndex hashtable to exist already, we have a bit of a circularity
- * problem in initializing the ShmemIndex itself. We set ShmemBootstrap
- * to tell ShmemInitStruct to fake it.
+ * ShmemIndex hashtable to exist already, we have a bit of a
+ * circularity problem in initializing the ShmemIndex itself. We set
+ * ShmemBootstrap to tell ShmemInitStruct to fake it.
*/
ShmemIndex = (HTAB *) NULL;
ShmemBootstrap = true;
@@ -373,6 +373,7 @@ ShmemInitStruct(char *name, Size size, bool *foundPtr)
if (!ShmemIndex)
{
+
/*
* If the shmem index doesn't exist, we are bootstrapping: we must
* be trying to init the shmem index itself.
diff --git a/src/backend/storage/ipc/shmqueue.c b/src/backend/storage/ipc/shmqueue.c
index 7fa6ac84b92..b840596a6ff 100644
--- a/src/backend/storage/ipc/shmqueue.c
+++ b/src/backend/storage/ipc/shmqueue.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmqueue.c,v 1.15 2001/01/24 19:43:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmqueue.c,v 1.16 2001/03/22 03:59:45 momjian Exp $
*
* NOTES
*
@@ -152,7 +152,8 @@ SHMQueueInsertAfter(SHM_QUEUE *queue, SHM_QUEUE *elem)
dumpQ(queue, "in SHMQueueInsertAfter: end");
#endif
}
-#endif /* NOT_USED */
+
+#endif /* NOT_USED */
/*--------------------
* SHMQueueNext -- Get the next element from a queue
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index fb37e428cb7..526923593f9 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.27 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.28 2001/03/22 03:59:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -411,6 +411,7 @@ GetUndoRecPtr(void)
if (pOffset != INVALID_OFFSET)
{
PROC *proc = (PROC *) MAKE_PTR(pOffset);
+
tempr = proc->logRec;
if (tempr.xrecoff == 0)
continue;
@@ -422,5 +423,5 @@ GetUndoRecPtr(void)
SpinRelease(SInvalLock);
- return(urec);
+ return (urec);
}
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index 959b70de5f1..06ba354d94a 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.37 2001/01/24 19:43:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.38 2001/03/22 03:59:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,6 +35,7 @@ static void SISetProcStateInvalid(SISeg *segP);
int
SInvalShmemSize(int maxBackends)
{
+
/*
* Figure space needed. Note sizeof(SISeg) includes the first
* ProcState entry.
@@ -91,7 +92,7 @@ SIBackendInit(SISeg *segP)
/* Look for a free entry in the procState array */
for (index = 0; index < segP->lastBackend; index++)
{
- if (segP->procState[index].nextMsgNum < 0) /* inactive slot? */
+ if (segP->procState[index].nextMsgNum < 0) /* inactive slot? */
{
stateP = &segP->procState[index];
break;
@@ -108,9 +109,10 @@ SIBackendInit(SISeg *segP)
}
else
{
+
/*
- * elog() with spinlock held is probably not too cool, but this
- * condition should never happen anyway.
+ * elog() with spinlock held is probably not too cool, but
+ * this condition should never happen anyway.
*/
elog(NOTICE, "SIBackendInit: no free procState slot available");
MyBackendId = InvalidBackendId;
diff --git a/src/backend/storage/ipc/spin.c b/src/backend/storage/ipc/spin.c
index 479e0b27662..33308f0cc1f 100644
--- a/src/backend/storage/ipc/spin.c
+++ b/src/backend/storage/ipc/spin.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/Attic/spin.c,v 1.31 2001/01/24 19:43:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/Attic/spin.c,v 1.32 2001/03/22 03:59:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,17 +83,18 @@ typedef struct slock
} SLock;
#ifdef LOCK_DEBUG
-bool Trace_spinlocks = false;
+bool Trace_spinlocks = false;
inline static void
-PRINT_SLDEBUG(const char * where, SPINLOCK lockid, const SLock * lock)
+PRINT_SLDEBUG(const char *where, SPINLOCK lockid, const SLock *lock)
{
- if (Trace_spinlocks)
- elog(DEBUG, "%s: id=%d", where, lockid);
+ if (Trace_spinlocks)
+ elog(DEBUG, "%s: id=%d", where, lockid);
}
-#else /* not LOCK_DEBUG */
+
+#else /* not LOCK_DEBUG */
#define PRINT_SLDEBUG(a,b,c)
-#endif /* not LOCK_DEBUG */
+#endif /* not LOCK_DEBUG */
static SLock *SLockArray = NULL;
@@ -146,15 +147,17 @@ SpinAcquire(SPINLOCK lockid)
SLock *slckP = &(SLockArray[lockid]);
PRINT_SLDEBUG("SpinAcquire", lockid, slckP);
+
/*
- * Acquire the lock, then record that we have done so (for recovery
- * in case of elog(ERROR) while holding the lock). Note we assume
- * here that S_LOCK will not accept cancel/die interrupts once it has
+ * Acquire the lock, then record that we have done so (for recovery in
+ * case of elog(ERROR) while holding the lock). Note we assume here
+ * that S_LOCK will not accept cancel/die interrupts once it has
* acquired the lock. However, interrupts should be accepted while
* waiting, if InterruptHoldoffCount is zero.
*/
S_LOCK(&(slckP->shlock));
PROC_INCR_SLOCK(lockid);
+
/*
* Lock out cancel/die interrupts until we exit the code section
* protected by the spinlock. This ensures that interrupts will not
@@ -162,7 +165,7 @@ SpinAcquire(SPINLOCK lockid)
*/
HOLD_INTERRUPTS();
- PRINT_SLDEBUG("SpinAcquire/done", lockid, slckP);
+ PRINT_SLDEBUG("SpinAcquire/done", lockid, slckP);
}
void
@@ -170,26 +173,29 @@ SpinRelease(SPINLOCK lockid)
{
SLock *slckP = &(SLockArray[lockid]);
- PRINT_SLDEBUG("SpinRelease", lockid, slckP);
+ PRINT_SLDEBUG("SpinRelease", lockid, slckP);
+
/*
* Check that we are actually holding the lock we are releasing. This
* can be done only after MyProc has been initialized.
*/
- Assert(!MyProc || MyProc->sLocks[lockid] > 0);
+ Assert(!MyProc || MyProc->sLocks[lockid] > 0);
+
/*
* Record that we no longer hold the spinlock, and release it.
*/
PROC_DECR_SLOCK(lockid);
S_UNLOCK(&(slckP->shlock));
+
/*
* Exit the interrupt holdoff entered in SpinAcquire().
*/
RESUME_INTERRUPTS();
- PRINT_SLDEBUG("SpinRelease/done", lockid, slckP);
+ PRINT_SLDEBUG("SpinRelease/done", lockid, slckP);
}
-#else /* !HAS_TEST_AND_SET */
+#else /* !HAS_TEST_AND_SET */
/*
* No TAS, so spinlocks are implemented using SysV semaphores.
@@ -217,9 +223,9 @@ SpinRelease(SPINLOCK lockid)
static IpcSemaphoreId *SpinLockIds = NULL;
-static int numSpinSets = 0; /* number of sema sets used */
-static int numSpinLocks = 0; /* total number of semas allocated */
-static int nextSpinLock = 0; /* next free spinlock index */
+static int numSpinSets = 0; /* number of sema sets used */
+static int numSpinLocks = 0; /* total number of semas allocated */
+static int nextSpinLock = 0; /* next free spinlock index */
static void SpinFreeAllSemaphores(void);
@@ -238,17 +244,18 @@ SLockShmemSize(void)
void
CreateSpinlocks(PGShmemHeader *seghdr)
{
- int i;
+ int i;
if (SpinLockIds == NULL)
{
+
/*
- * Compute number of spinlocks needed. If this logic gets any more
- * complicated, it should be distributed into the affected modules,
- * similar to the way shmem space estimation is handled.
+ * Compute number of spinlocks needed. If this logic gets any
+ * more complicated, it should be distributed into the affected
+ * modules, similar to the way shmem space estimation is handled.
*
- * For now, though, we just need the fixed spinlocks (MAX_SPINS),
- * two spinlocks per shared disk buffer, and four spinlocks for XLOG.
+ * For now, though, we just need the fixed spinlocks (MAX_SPINS), two
+ * spinlocks per shared disk buffer, and four spinlocks for XLOG.
*/
numSpinLocks = (int) MAX_SPINS + 2 * NBuffers + 4;
@@ -265,11 +272,11 @@ CreateSpinlocks(PGShmemHeader *seghdr)
SpinLockIds[i] = -1;
/*
- * Arrange to delete semas on exit --- set this up now so that we
- * will clean up if allocation fails. We use our own freeproc,
- * rather than IpcSemaphoreCreate's removeOnExit option, because
- * we don't want to fill up the on_shmem_exit list with a separate
- * entry for each semaphore set.
+ * Arrange to delete semas on exit --- set this up now so that we will
+ * clean up if allocation fails. We use our own freeproc, rather than
+ * IpcSemaphoreCreate's removeOnExit option, because we don't want to
+ * fill up the on_shmem_exit list with a separate entry for each
+ * semaphore set.
*/
on_shmem_exit(SpinFreeAllSemaphores, 0);
@@ -320,12 +327,13 @@ SpinFreeAllSemaphores(void)
void
SpinAcquire(SPINLOCK lock)
{
+
/*
* See the TAS() version of this routine for primary commentary.
*
* NOTE we must pass interruptOK = false to IpcSemaphoreLock, to ensure
- * that a cancel/die interrupt cannot prevent us from recording ownership
- * of a lock we have just acquired.
+ * that a cancel/die interrupt cannot prevent us from recording
+ * ownership of a lock we have just acquired.
*/
IpcSemaphoreLock(SpinLockIds[0], lock, false);
PROC_INCR_SLOCK(lock);
@@ -348,7 +356,7 @@ SpinRelease(SPINLOCK lock)
semval = IpcSemaphoreGetValue(SpinLockIds[0], lock);
Assert(semval < 1);
#endif
- Assert(!MyProc || MyProc->sLocks[lockid] > 0);
+ Assert(!MyProc || MyProc->sLocks[lockid] > 0);
PROC_DECR_SLOCK(lock);
IpcSemaphoreUnlock(SpinLockIds[0], lock);
RESUME_INTERRUPTS();
@@ -384,7 +392,7 @@ int
tas_sema(volatile slock_t *lock)
{
/* Note that TAS macros return 0 if *success* */
- return ! IpcSemaphoreTryLock(lock->semId, lock->sem);
+ return !IpcSemaphoreTryLock(lock->semId, lock->sem);
}
-#endif /* !HAS_TEST_AND_SET */
+#endif /* !HAS_TEST_AND_SET */
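
Aside: the SpinAcquire()/SpinRelease() hunks stress two ordering rules — ownership is recorded immediately after the lock is obtained, and cancel/die interrupts stay held off from acquire until the matching release. A small sketch of that discipline, using GCC's __sync builtins as a stand-in for the platform TAS()/S_UNLOCK() macros and plain counters for MyProc->sLocks[] and InterruptHoldoffCount:

#include <assert.h>

static volatile int lock_word;      /* 0 = free, 1 = held */
static int held_count;              /* like MyProc->sLocks[lockid] */
static int interrupt_holdoff;       /* like InterruptHoldoffCount */

void
spin_acquire(void)
{
    while (__sync_lock_test_and_set(&lock_word, 1))
        ;                           /* the real loop sleeps between attempts */
    held_count++;                   /* record ownership before anything can fail */
    interrupt_holdoff++;            /* HOLD_INTERRUPTS(): no cancel/die inside */
}

void
spin_release(void)
{
    assert(held_count > 0);         /* must actually hold the lock */
    held_count--;
    __sync_lock_release(&lock_word);    /* S_UNLOCK() */
    interrupt_holdoff--;            /* RESUME_INTERRUPTS(): interrupts legal again */
}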
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index bcf65f6004b..dba45d6590d 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.85 2001/02/10 02:31:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.86 2001/03/22 03:59:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@
static int32
getbytealen(bytea *data)
{
- Assert(! VARATT_IS_EXTENDED(data));
+ Assert(!VARATT_IS_EXTENDED(data));
if (VARSIZE(data) < VARHDRSZ)
elog(ERROR, "getbytealen: VARSIZE(data) < VARHDRSZ. This is internal error.");
return (VARSIZE(data) - VARHDRSZ);
@@ -75,7 +75,8 @@ inv_create(int flags)
elog(ERROR, "inv_create: large object %u already exists. This is internal error.", file_oid);
/*
- * Create the LO by writing an empty first page for it in pg_largeobject
+ * Create the LO by writing an empty first page for it in
+ * pg_largeobject
*/
(void) LargeObjectCreate(file_oid);
@@ -93,13 +94,17 @@ inv_create(int flags)
retval->id = file_oid;
retval->offset = 0;
- if (flags & INV_WRITE) {
+ if (flags & INV_WRITE)
+ {
retval->flags = IFS_WRLOCK | IFS_RDLOCK;
retval->heap_r = heap_openr(LargeObjectRelationName, RowExclusiveLock);
- } else if (flags & INV_READ) {
+ }
+ else if (flags & INV_READ)
+ {
retval->flags = IFS_RDLOCK;
retval->heap_r = heap_openr(LargeObjectRelationName, AccessShareLock);
- } else
+ }
+ else
elog(ERROR, "inv_create: invalid flags: %d", flags);
retval->index_r = index_openr(LargeObjectLOidPNIndex);
@@ -118,21 +123,25 @@ inv_open(Oid lobjId, int flags)
{
LargeObjectDesc *retval;
- if (! LargeObjectExists(lobjId))
+ if (!LargeObjectExists(lobjId))
elog(ERROR, "inv_open: large object %u not found", lobjId);
-
+
retval = (LargeObjectDesc *) palloc(sizeof(LargeObjectDesc));
retval->id = lobjId;
retval->offset = 0;
- if (flags & INV_WRITE) {
+ if (flags & INV_WRITE)
+ {
retval->flags = IFS_WRLOCK | IFS_RDLOCK;
retval->heap_r = heap_openr(LargeObjectRelationName, RowExclusiveLock);
- } else if (flags & INV_READ) {
+ }
+ else if (flags & INV_READ)
+ {
retval->flags = IFS_RDLOCK;
retval->heap_r = heap_openr(LargeObjectRelationName, AccessShareLock);
- } else
+ }
+ else
elog(ERROR, "inv_open: invalid flags: %d", flags);
retval->index_r = index_openr(LargeObjectLOidPNIndex);
@@ -185,16 +194,16 @@ inv_drop(Oid lobjId)
static uint32
inv_getsize(LargeObjectDesc *obj_desc)
{
- bool found = false;
- uint32 lastbyte = 0;
- ScanKeyData skey[1];
- IndexScanDesc sd;
- RetrieveIndexResult indexRes;
- HeapTupleData tuple;
- Buffer buffer;
- Form_pg_largeobject data;
- bytea *datafield;
- bool pfreeit;
+ bool found = false;
+ uint32 lastbyte = 0;
+ ScanKeyData skey[1];
+ IndexScanDesc sd;
+ RetrieveIndexResult indexRes;
+ HeapTupleData tuple;
+ Buffer buffer;
+ Form_pg_largeobject data;
+ bytea *datafield;
+ bool pfreeit;
Assert(PointerIsValid(obj_desc));
@@ -210,10 +219,10 @@ inv_getsize(LargeObjectDesc *obj_desc)
tuple.t_data = NULL;
/*
- * Because the pg_largeobject index is on both loid and pageno,
- * but we constrain only loid, a backwards scan should visit all
- * pages of the large object in reverse pageno order. So, it's
- * sufficient to examine the first valid tuple (== last valid page).
+ * Because the pg_largeobject index is on both loid and pageno, but we
+ * constrain only loid, a backwards scan should visit all pages of the
+ * large object in reverse pageno order. So, it's sufficient to
+ * examine the first valid tuple (== last valid page).
*/
while ((indexRes = index_getnext(sd, BackwardScanDirection)))
{
@@ -238,7 +247,7 @@ inv_getsize(LargeObjectDesc *obj_desc)
ReleaseBuffer(buffer);
break;
}
-
+
index_endscan(sd);
if (!found)
@@ -259,15 +268,15 @@ inv_seek(LargeObjectDesc *obj_desc, int offset, int whence)
obj_desc->offset = offset;
break;
case SEEK_CUR:
- if (offset < 0 && obj_desc->offset < ((uint32) (- offset)))
+ if (offset < 0 && obj_desc->offset < ((uint32) (-offset)))
elog(ERROR, "inv_seek: invalid offset: %d", offset);
obj_desc->offset += offset;
break;
case SEEK_END:
{
- uint32 size = inv_getsize(obj_desc);
+ uint32 size = inv_getsize(obj_desc);
- if (offset < 0 && size < ((uint32) (- offset)))
+ if (offset < 0 && size < ((uint32) (-offset)))
elog(ERROR, "inv_seek: invalid offset: %d", offset);
obj_desc->offset = size + offset;
}
@@ -289,20 +298,20 @@ inv_tell(LargeObjectDesc *obj_desc)
int
inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes)
{
- int nread = 0;
- int n;
- int off;
- int len;
- int32 pageno = (int32) (obj_desc->offset / LOBLKSIZE);
- uint32 pageoff;
- ScanKeyData skey[2];
- IndexScanDesc sd;
- RetrieveIndexResult indexRes;
- HeapTupleData tuple;
- Buffer buffer;
- Form_pg_largeobject data;
- bytea *datafield;
- bool pfreeit;
+ int nread = 0;
+ int n;
+ int off;
+ int len;
+ int32 pageno = (int32) (obj_desc->offset / LOBLKSIZE);
+ uint32 pageoff;
+ ScanKeyData skey[2];
+ IndexScanDesc sd;
+ RetrieveIndexResult indexRes;
+ HeapTupleData tuple;
+ Buffer buffer;
+ Form_pg_largeobject data;
+ bytea *datafield;
+ bool pfreeit;
Assert(PointerIsValid(obj_desc));
Assert(buf != NULL);
@@ -335,13 +344,13 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes)
if (tuple.t_data == NULL)
continue;
-
+
data = (Form_pg_largeobject) GETSTRUCT(&tuple);
/*
* We assume the indexscan will deliver pages in order. However,
- * there may be missing pages if the LO contains unwritten "holes".
- * We want missing sections to read out as zeroes.
+ * there may be missing pages if the LO contains unwritten
+ * "holes". We want missing sections to read out as zeroes.
*/
pageoff = ((uint32) data->pageno) * LOBLKSIZE;
if (pageoff > obj_desc->offset)
@@ -393,28 +402,28 @@ inv_read(LargeObjectDesc *obj_desc, char *buf, int nbytes)
int
inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
{
- int nwritten = 0;
- int n;
- int off;
- int len;
- int32 pageno = (int32) (obj_desc->offset / LOBLKSIZE);
- ScanKeyData skey[2];
- IndexScanDesc sd;
- RetrieveIndexResult indexRes;
- HeapTupleData oldtuple;
- Buffer buffer;
- Form_pg_largeobject olddata;
- bool neednextpage;
- bytea *datafield;
- bool pfreeit;
- char workbuf[LOBLKSIZE + VARHDRSZ];
- char *workb = VARATT_DATA(workbuf);
- HeapTuple newtup;
- Datum values[Natts_pg_largeobject];
- char nulls[Natts_pg_largeobject];
- char replace[Natts_pg_largeobject];
- bool write_indices;
- Relation idescs[Num_pg_largeobject_indices];
+ int nwritten = 0;
+ int n;
+ int off;
+ int len;
+ int32 pageno = (int32) (obj_desc->offset / LOBLKSIZE);
+ ScanKeyData skey[2];
+ IndexScanDesc sd;
+ RetrieveIndexResult indexRes;
+ HeapTupleData oldtuple;
+ Buffer buffer;
+ Form_pg_largeobject olddata;
+ bool neednextpage;
+ bytea *datafield;
+ bool pfreeit;
+ char workbuf[LOBLKSIZE + VARHDRSZ];
+ char *workb = VARATT_DATA(workbuf);
+ HeapTuple newtup;
+ Datum values[Natts_pg_largeobject];
+ char nulls[Natts_pg_largeobject];
+ char replace[Natts_pg_largeobject];
+ bool write_indices;
+ Relation idescs[Num_pg_largeobject_indices];
Assert(PointerIsValid(obj_desc));
Assert(buf != NULL);
@@ -422,7 +431,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
if (nbytes <= 0)
return 0;
- write_indices = ! IsIgnoringSystemIndexes();
+ write_indices = !IsIgnoringSystemIndexes();
if (write_indices)
CatalogOpenIndices(Num_pg_largeobject_indices,
Name_pg_largeobject_indices,
@@ -450,6 +459,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
while (nwritten < nbytes)
{
+
/*
* If possible, get next pre-existing page of the LO. We assume
* the indexscan will deliver these in order --- but there may be
@@ -471,12 +481,14 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
}
neednextpage = false;
}
+
/*
* If we have a pre-existing page, see if it is the page we want
* to write, or a later one.
*/
if (olddata != NULL && olddata->pageno == pageno)
{
+
/*
* Update an existing page with fresh data.
*
@@ -495,12 +507,14 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
memcpy(workb, VARDATA(datafield), len);
if (pfreeit)
pfree(datafield);
+
/*
* Fill any hole
*/
off = (int) (obj_desc->offset % LOBLKSIZE);
if (off > len)
MemSet(workb + len, 0, off - len);
+
/*
* Insert appropriate portion of new data
*/
@@ -513,6 +527,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
/* compute valid length of new page */
len = (len >= off) ? len : off;
VARATT_SIZEP(workbuf) = len + VARHDRSZ;
+
/*
* Form and insert updated tuple
*/
@@ -528,6 +543,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
CatalogIndexInsert(idescs, Num_pg_largeobject_indices,
obj_desc->heap_r, newtup);
heap_freetuple(newtup);
+
/*
* We're done with this old page.
*/
@@ -539,6 +555,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
}
else
{
+
/*
* Write a brand new page.
*
@@ -547,6 +564,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
off = (int) (obj_desc->offset % LOBLKSIZE);
if (off > 0)
MemSet(workb, 0, off);
+
/*
* Insert appropriate portion of new data
*/
@@ -558,6 +576,7 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
/* compute valid length of new page */
len = off + n;
VARATT_SIZEP(workbuf) = len + VARHDRSZ;
+
/*
* Form and insert updated tuple
*/
@@ -585,8 +604,8 @@ inv_write(LargeObjectDesc *obj_desc, char *buf, int nbytes)
CatalogCloseIndices(Num_pg_largeobject_indices, idescs);
/*
- * Advance command counter so that my tuple updates will be seen by later
- * large-object operations in this transaction.
+ * Advance command counter so that my tuple updates will be seen by
+ * later large-object operations in this transaction.
*/
CommandCounterIncrement();
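
Aside: the inv_seek() hunk is mostly re-wrapping, but the negative-offset checks it reformats are the interesting part — because the stored position is unsigned, a negative relative offset is only legal when its magnitude does not exceed the current position (or the object size for SEEK_END). A simplified stand-alone version, returning -1 where the backend would elog(ERROR):

#include <stdio.h>      /* SEEK_SET, SEEK_CUR, SEEK_END */

int
lo_seek(unsigned int *pos, unsigned int size, int offset, int whence)
{
    switch (whence)
    {
        case SEEK_SET:
            if (offset < 0)
                return -1;
            *pos = (unsigned int) offset;
            break;
        case SEEK_CUR:
            if (offset < 0 && *pos < (unsigned int) (-offset))
                return -1;              /* would land before byte 0 */
            *pos += offset;
            break;
        case SEEK_END:
            if (offset < 0 && size < (unsigned int) (-offset))
                return -1;
            *pos = size + offset;
            break;
        default:
            return -1;                  /* invalid whence */
    }
    return 0;
}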
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 31db44e74b0..160fc64fb24 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.2 2001/01/25 03:45:50 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.3 2001/03/22 03:59:46 momjian Exp $
*
* Interface:
*
@@ -29,32 +29,36 @@
/* One edge in the waits-for graph */
-typedef struct {
- PROC *waiter; /* the waiting process */
- PROC *blocker; /* the process it is waiting for */
- int pred; /* workspace for TopoSort */
- int link; /* workspace for TopoSort */
+typedef struct
+{
+ PROC *waiter; /* the waiting process */
+ PROC *blocker; /* the process it is waiting for */
+ int pred; /* workspace for TopoSort */
+ int link; /* workspace for TopoSort */
} EDGE;
/* One potential reordering of a lock's wait queue */
-typedef struct {
- LOCK *lock; /* the lock whose wait queue is described */
- PROC **procs; /* array of PROC *'s in new wait order */
- int nProcs;
+typedef struct
+{
+ LOCK *lock; /* the lock whose wait queue is described */
+ PROC **procs; /* array of PROC *'s in new wait order */
+ int nProcs;
} WAIT_ORDER;
static bool DeadLockCheckRecurse(PROC *proc);
static bool TestConfiguration(PROC *startProc);
static bool FindLockCycle(PROC *checkProc,
- EDGE *softEdges, int *nSoftEdges);
+ EDGE *softEdges, int *nSoftEdges);
static bool FindLockCycleRecurse(PROC *checkProc,
- EDGE *softEdges, int *nSoftEdges);
+ EDGE *softEdges, int *nSoftEdges);
static bool ExpandConstraints(EDGE *constraints, int nConstraints);
static bool TopoSort(LOCK *lock, EDGE *constraints, int nConstraints,
- PROC **ordering);
+ PROC **ordering);
+
#ifdef DEBUG_DEADLOCK
static void PrintLockQueue(LOCK *lock, const char *info);
+
#endif
@@ -64,30 +68,34 @@ static void PrintLockQueue(LOCK *lock, const char *info);
/* Workspace for FindLockCycle */
static PROC **visitedProcs; /* Array of visited procs */
-static int nVisitedProcs;
+static int nVisitedProcs;
+
/* Workspace for TopoSort */
static PROC **topoProcs; /* Array of not-yet-output procs */
static int *beforeConstraints; /* Counts of remaining before-constraints */
static int *afterConstraints; /* List head for after-constraints */
+
/* Output area for ExpandConstraints */
static WAIT_ORDER *waitOrders; /* Array of proposed queue rearrangements */
-static int nWaitOrders;
+static int nWaitOrders;
static PROC **waitOrderProcs; /* Space for waitOrders queue contents */
+
/* Current list of constraints being considered */
static EDGE *curConstraints;
-static int nCurConstraints;
-static int maxCurConstraints;
+static int nCurConstraints;
+static int maxCurConstraints;
+
/* Storage space for results from FindLockCycle */
static EDGE *possibleConstraints;
-static int nPossibleConstraints;
-static int maxPossibleConstraints;
+static int nPossibleConstraints;
+static int maxPossibleConstraints;
/*
* InitDeadLockChecking -- initialize deadlock checker during backend startup
*
* This does per-backend initialization of the deadlock checker; primarily,
- * allocation of working memory for DeadLockCheck. We do this per-backend
+ * allocation of working memory for DeadLockCheck. We do this per-backend
* since there's no percentage in making the kernel do copy-on-write
* inheritance of workspace from the postmaster. We want to allocate the
* space at startup because the deadlock checker might be invoked when there's
@@ -96,7 +104,7 @@ static int maxPossibleConstraints;
void
InitDeadLockChecking(void)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
/* Make sure allocations are permanent */
oldcxt = MemoryContextSwitchTo(TopMemoryContext);
@@ -116,20 +124,21 @@ InitDeadLockChecking(void)
/*
* We need to consider rearranging at most MaxBackends/2 wait queues
- * (since it takes at least two waiters in a queue to create a soft edge),
- * and the expanded form of the wait queues can't involve more than
- * MaxBackends total waiters.
+ * (since it takes at least two waiters in a queue to create a soft
+ * edge), and the expanded form of the wait queues can't involve more
+ * than MaxBackends total waiters.
*/
- waitOrders = (WAIT_ORDER *) palloc((MaxBackends/2) * sizeof(WAIT_ORDER));
+ waitOrders = (WAIT_ORDER *) palloc((MaxBackends / 2) * sizeof(WAIT_ORDER));
waitOrderProcs = (PROC **) palloc(MaxBackends * sizeof(PROC *));
/*
* Allow at most MaxBackends distinct constraints in a configuration.
- * (Is this enough? In practice it seems it should be, but I don't quite
- * see how to prove it. If we run out, we might fail to find a workable
- * wait queue rearrangement even though one exists.) NOTE that this
- * number limits the maximum recursion depth of DeadLockCheckRecurse.
- * Making it really big might potentially allow a stack-overflow problem.
+ * (Is this enough? In practice it seems it should be, but I don't
+ * quite see how to prove it. If we run out, we might fail to find a
+ * workable wait queue rearrangement even though one exists.) NOTE
+ * that this number limits the maximum recursion depth of
+ * DeadLockCheckRecurse. Making it really big might potentially allow
+ * a stack-overflow problem.
*/
maxCurConstraints = MaxBackends;
curConstraints = (EDGE *) palloc(maxCurConstraints * sizeof(EDGE));
@@ -139,8 +148,8 @@ InitDeadLockChecking(void)
* re-run TestConfiguration. (This is probably more than enough, but
* we can survive if we run low on space by doing excess runs of
* TestConfiguration to re-compute constraint lists each time needed.)
- * The last MaxBackends entries in possibleConstraints[] are reserved as
- * output workspace for FindLockCycle.
+ * The last MaxBackends entries in possibleConstraints[] are reserved
+ * as output workspace for FindLockCycle.
*/
maxPossibleConstraints = MaxBackends * 4;
possibleConstraints =
@@ -185,9 +194,9 @@ DeadLockCheck(PROC *proc)
/* Apply any needed rearrangements of wait queues */
for (i = 0; i < nWaitOrders; i++)
{
- LOCK *lock = waitOrders[i].lock;
- PROC **procs = waitOrders[i].procs;
- int nProcs = waitOrders[i].nProcs;
+ LOCK *lock = waitOrders[i].lock;
+ PROC **procs = waitOrders[i].procs;
+ int nProcs = waitOrders[i].nProcs;
PROC_QUEUE *waitQueue = &(lock->waitProcs);
Assert(nProcs == waitQueue->size);
@@ -218,10 +227,10 @@ DeadLockCheck(PROC *proc)
* DeadLockCheckRecurse -- recursively search for valid orderings
*
* curConstraints[] holds the current set of constraints being considered
- * by an outer level of recursion. Add to this each possible solution
+ * by an outer level of recursion. Add to this each possible solution
* constraint for any cycle detected at this level.
*
- * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
+ * Returns TRUE if no solution exists. Returns FALSE if a deadlock-free
* state is attainable, in which case waitOrders[] shows the required
* rearrangements of lock wait queues (if any).
*/
@@ -252,6 +261,7 @@ DeadLockCheckRecurse(PROC *proc)
/* Not room; will need to regenerate the edges on-the-fly */
savedList = false;
}
+
/*
* Try each available soft edge as an addition to the configuration.
*/
@@ -264,7 +274,7 @@ DeadLockCheckRecurse(PROC *proc)
elog(FATAL, "DeadLockCheckRecurse: inconsistent results");
}
curConstraints[nCurConstraints] =
- possibleConstraints[oldPossibleConstraints+i];
+ possibleConstraints[oldPossibleConstraints + i];
nCurConstraints++;
if (!DeadLockCheckRecurse(proc))
return false; /* found a valid solution! */
@@ -293,25 +303,27 @@ DeadLockCheckRecurse(PROC *proc)
static bool
TestConfiguration(PROC *startProc)
{
- int softFound = 0;
- EDGE *softEdges = possibleConstraints + nPossibleConstraints;
- int nSoftEdges;
- int i;
+ int softFound = 0;
+ EDGE *softEdges = possibleConstraints + nPossibleConstraints;
+ int nSoftEdges;
+ int i;
/*
* Make sure we have room for FindLockCycle's output.
*/
if (nPossibleConstraints + MaxBackends > maxPossibleConstraints)
return -1;
+
/*
* Expand current constraint set into wait orderings. Fail if the
* constraint set is not self-consistent.
*/
if (!ExpandConstraints(curConstraints, nCurConstraints))
return -1;
+
/*
* Check for cycles involving startProc or any of the procs mentioned
- * in constraints. We check startProc last because if it has a soft
+ * in constraints. We check startProc last because if it has a soft
* cycle still to be dealt with, we want to deal with that first.
*/
for (i = 0; i < nCurConstraints; i++)
@@ -350,7 +362,7 @@ TestConfiguration(PROC *startProc)
*
* Since we need to be able to check hypothetical configurations that would
* exist after wait queue rearrangement, the routine pays attention to the
- * table of hypothetical queue orders in waitOrders[]. These orders will
+ * table of hypothetical queue orders in waitOrders[]. These orders will
* be believed in preference to the actual ordering seen in the locktable.
*/
static bool
@@ -391,9 +403,10 @@ FindLockCycleRecurse(PROC *checkProc,
/* If we return to starting point, we have a deadlock cycle */
if (i == 0)
return true;
+
/*
- * Otherwise, we have a cycle but it does not include the start
- * point, so say "no deadlock".
+ * Otherwise, we have a cycle but it does not include the
+ * start point, so say "no deadlock".
*/
return false;
}
@@ -401,6 +414,7 @@ FindLockCycleRecurse(PROC *checkProc,
/* Mark proc as seen */
Assert(nVisitedProcs < MaxBackends);
visitedProcs[nVisitedProcs++] = checkProc;
+
/*
* If the proc is not waiting, we have no outgoing waits-for edges.
*/
@@ -413,8 +427,9 @@ FindLockCycleRecurse(PROC *checkProc,
lockctl = lockMethodTable->ctl;
numLockModes = lockctl->numLockModes;
conflictMask = lockctl->conflictTab[checkProc->waitLockMode];
+
/*
- * Scan for procs that already hold conflicting locks. These are
+ * Scan for procs that already hold conflicting locks. These are
* "hard" edges in the waits-for graph.
*/
lockHolders = &(lock->lockHolders);
@@ -449,12 +464,13 @@ FindLockCycleRecurse(PROC *checkProc,
/*
* Scan for procs that are ahead of this one in the lock's wait queue.
- * Those that have conflicting requests soft-block this one. This must
- * be done after the hard-block search, since if another proc both
- * hard- and soft-blocks this one, we want to call it a hard edge.
+ * Those that have conflicting requests soft-block this one. This
+ * must be done after the hard-block search, since if another proc
+ * both hard- and soft-blocks this one, we want to call it a hard
+ * edge.
*
- * If there is a proposed re-ordering of the lock's wait order,
- * use that rather than the current wait order.
+ * If there is a proposed re-ordering of the lock's wait order, use that
+ * rather than the current wait order.
*/
for (i = 0; i < nWaitOrders; i++)
{
@@ -465,7 +481,7 @@ FindLockCycleRecurse(PROC *checkProc,
if (i < nWaitOrders)
{
/* Use the given hypothetical wait queue order */
- PROC **procs = waitOrders[i].procs;
+ PROC **procs = waitOrders[i].procs;
queue_size = waitOrders[i].nProcs;
@@ -483,7 +499,11 @@ FindLockCycleRecurse(PROC *checkProc,
/* This proc soft-blocks checkProc */
if (FindLockCycleRecurse(proc, softEdges, nSoftEdges))
{
- /* Add this edge to the list of soft edges in the cycle */
+
+ /*
+ * Add this edge to the list of soft edges in the
+ * cycle
+ */
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
softEdges[*nSoftEdges].blocker = proc;
@@ -513,7 +533,11 @@ FindLockCycleRecurse(PROC *checkProc,
/* This proc soft-blocks checkProc */
if (FindLockCycleRecurse(proc, softEdges, nSoftEdges))
{
- /* Add this edge to the list of soft edges in the cycle */
+
+ /*
+ * Add this edge to the list of soft edges in the
+ * cycle
+ */
Assert(*nSoftEdges < MaxBackends);
softEdges[*nSoftEdges].waiter = checkProc;
softEdges[*nSoftEdges].blocker = proc;
@@ -553,18 +577,19 @@ ExpandConstraints(EDGE *constraints,
j;
nWaitOrders = 0;
+
/*
- * Scan constraint list backwards. This is because the last-added
+ * Scan constraint list backwards. This is because the last-added
* constraint is the only one that could fail, and so we want to test
* it for inconsistency first.
*/
- for (i = nConstraints; --i >= 0; )
+ for (i = nConstraints; --i >= 0;)
{
- PROC *proc = constraints[i].waiter;
- LOCK *lock = proc->waitLock;
+ PROC *proc = constraints[i].waiter;
+ LOCK *lock = proc->waitLock;
/* Did we already make a list for this lock? */
- for (j = nWaitOrders; --j >= 0; )
+ for (j = nWaitOrders; --j >= 0;)
{
if (waitOrders[j].lock == lock)
break;
@@ -577,11 +602,12 @@ ExpandConstraints(EDGE *constraints,
waitOrders[nWaitOrders].nProcs = lock->waitProcs.size;
nWaitOrderProcs += lock->waitProcs.size;
Assert(nWaitOrderProcs <= MaxBackends);
+
/*
* Do the topo sort. TopoSort need not examine constraints after
* this one, since they must be for different locks.
*/
- if (!TopoSort(lock, constraints, i+1,
+ if (!TopoSort(lock, constraints, i + 1,
waitOrders[nWaitOrders].procs))
return false;
nWaitOrders++;
@@ -607,7 +633,7 @@ ExpandConstraints(EDGE *constraints,
* The initial queue ordering is taken directly from the lock's wait queue.
* The output is an array of PROC pointers, of length equal to the lock's
* wait queue length (the caller is responsible for providing this space).
- * The partial order is specified by an array of EDGE structs. Each EDGE
+ * The partial order is specified by an array of EDGE structs. Each EDGE
* is one that we need to reverse, therefore the "waiter" must appear before
* the "blocker" in the output array. The EDGE array may well contain
* edges associated with other locks; these should be ignored.
@@ -638,14 +664,15 @@ TopoSort(LOCK *lock,
}
/*
- * Scan the constraints, and for each proc in the array, generate a count
- * of the number of constraints that say it must be before something else,
- * plus a list of the constraints that say it must be after something else.
- * The count for the j'th proc is stored in beforeConstraints[j], and the
- * head of its list in afterConstraints[j]. Each constraint stores its
- * list link in constraints[i].link (note any constraint will be in
- * just one list). The array index for the before-proc of the i'th
- * constraint is remembered in constraints[i].pred.
+ * Scan the constraints, and for each proc in the array, generate a
+ * count of the number of constraints that say it must be before
+ * something else, plus a list of the constraints that say it must be
+ * after something else. The count for the j'th proc is stored in
+ * beforeConstraints[j], and the head of its list in
+ * afterConstraints[j]. Each constraint stores its list link in
+ * constraints[i].link (note any constraint will be in just one list).
+ * The array index for the before-proc of the i'th constraint is
+ * remembered in constraints[i].pred.
*/
MemSet(beforeConstraints, 0, queue_size * sizeof(int));
MemSet(afterConstraints, 0, queue_size * sizeof(int));
@@ -656,7 +683,7 @@ TopoSort(LOCK *lock,
if (proc->waitLock != lock)
continue;
/* Find the waiter proc in the array */
- for (j = queue_size; --j >= 0; )
+ for (j = queue_size; --j >= 0;)
{
if (topoProcs[j] == proc)
break;
@@ -664,20 +691,20 @@ TopoSort(LOCK *lock,
Assert(j >= 0); /* should have found a match */
/* Find the blocker proc in the array */
proc = constraints[i].blocker;
- for (k = queue_size; --k >= 0; )
+ for (k = queue_size; --k >= 0;)
{
if (topoProcs[k] == proc)
break;
}
Assert(k >= 0); /* should have found a match */
- beforeConstraints[j]++; /* waiter must come before */
+ beforeConstraints[j]++; /* waiter must come before */
/* add this constraint to list of after-constraints for blocker */
constraints[i].pred = j;
constraints[i].link = afterConstraints[k];
- afterConstraints[k] = i+1;
+ afterConstraints[k] = i + 1;
}
/*--------------------
- * Now scan the topoProcs array backwards. At each step, output the
+ * Now scan the topoProcs array backwards. At each step, output the
* last proc that has no remaining before-constraints, and decrease
* the beforeConstraints count of each of the procs it was constrained
* against.
@@ -687,8 +714,8 @@ TopoSort(LOCK *lock,
* last = last non-null index in topoProcs (avoid redundant searches)
*--------------------
*/
- last = queue_size-1;
- for (i = queue_size; --i >= 0; )
+ last = queue_size - 1;
+ for (i = queue_size; --i >= 0;)
{
/* Find next candidate to output */
while (topoProcs[last] == NULL)
@@ -705,10 +732,8 @@ TopoSort(LOCK *lock,
ordering[i] = topoProcs[j];
topoProcs[j] = NULL;
/* Update beforeConstraints counts of its predecessors */
- for (k = afterConstraints[j]; k > 0; k = constraints[k-1].link)
- {
- beforeConstraints[constraints[k-1].pred]--;
- }
+ for (k = afterConstraints[j]; k > 0; k = constraints[k - 1].link)
+ beforeConstraints[constraints[k - 1].pred]--;
}
/* Done */
@@ -734,4 +759,5 @@ PrintLockQueue(LOCK *lock, const char *info)
printf("\n");
fflush(stdout);
}
+
#endif
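
The TopoSort comments above describe a constraint-counting topological sort: for each queue slot, count how many reordering edges say it must precede something, chain each edge under its blocker, then fill the output array from the back, emitting slots whose remaining count has dropped to zero and crediting their waiters. The standalone sketch below goes through the same bookkeeping; it is illustrative only, with plain ints in place of the PROC and EDGE structures, fixed-size arrays, and names local to the example.

/*
 * Constraint-counting topological sort, as described in the TopoSort
 * comments: constraint (w, b) means "w must appear before b".
 */
#include <stdio.h>
#include <stdbool.h>

#define MAXITEMS 16

typedef struct
{
    int         waiter;         /* index that must come first */
    int         blocker;        /* index that must come later */
    int         link;           /* next constraint in blocker's list, or 0 */
} Edge;

static bool
topo_sort(int nitems, Edge *constraints, int nconstraints, int *ordering)
{
    int         beforeConstraints[MAXITEMS];
    int         afterConstraints[MAXITEMS];
    bool        emitted[MAXITEMS];
    int         i, j, k;

    for (i = 0; i < nitems; i++)
    {
        beforeConstraints[i] = 0;
        afterConstraints[i] = 0;
        emitted[i] = false;
    }

    /* Count "must be before" obligations; link each edge under its blocker */
    for (i = 0; i < nconstraints; i++)
    {
        beforeConstraints[constraints[i].waiter]++;
        constraints[i].link = afterConstraints[constraints[i].blocker];
        afterConstraints[constraints[i].blocker] = i + 1;   /* 0 means empty */
    }

    /* Fill the output from the back: emit items with no remaining obligations */
    for (i = nitems; --i >= 0;)
    {
        for (j = nitems; --j >= 0;)
        {
            if (!emitted[j] && beforeConstraints[j] == 0)
                break;
        }
        if (j < 0)
            return false;       /* constraints are contradictory */
        ordering[i] = j;
        emitted[j] = true;
        /* j now follows everything left, so its waiters are satisfied */
        for (k = afterConstraints[j]; k > 0; k = constraints[k - 1].link)
            beforeConstraints[constraints[k - 1].waiter]--;
    }
    return true;
}

int
main(void)
{
    /* items 0..3; require 2 before 0 and 3 before 1 */
    Edge        edges[] = {{2, 0, 0}, {3, 1, 0}};
    int         ordering[4];
    int         i;

    if (topo_sort(4, edges, 2, ordering))
    {
        for (i = 0; i < 4; i++)
            printf("%d ", ordering[i]);
        printf("\n");
    }
    return 0;
}

On the two example edges this prints "2 0 3 1", an ordering in which both reversed edges are honored.
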
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index a042bbe3ee6..c06f76bfcdb 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.44 2001/01/24 19:43:07 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.45 2001/03/22 03:59:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -174,7 +174,7 @@ UnlockRelation(Relation relation, LOCKMODE lockmode)
/*
* LockRelationForSession
*
- * This routine grabs a session-level lock on the target relation. The
+ * This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
* when UnlockRelationForSession() is called, or if an elog(ERROR) occurs,
* or if the backend exits.
@@ -291,6 +291,7 @@ XactLockTableDelete(TransactionId xid)
LockRelease(LockTableId, &tag, xid, ExclusiveLock);
}
+
#endif
void
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index b3c630b79c0..912a25ff229 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.87 2001/03/18 20:13:13 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.88 2001/03/22 03:59:46 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
@@ -40,10 +40,10 @@
#include "utils/memutils.h"
#include "utils/ps_status.h"
-static int WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
- LOCK *lock, HOLDER *holder);
+static int WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
+ LOCK *lock, HOLDER *holder);
static void LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc,
- int *myHolding);
+ int *myHolding);
static char *lock_mode_names[] =
{
@@ -65,40 +65,40 @@ static char *DeadLockMessage = "Deadlock detected.\n\tSee the lock(l) manual pag
/*------
* The following configuration options are available for lock debugging:
*
- * TRACE_LOCKS -- give a bunch of output what's going on in this file
- * TRACE_USERLOCKS -- same but for user locks
- * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
- * (use to avoid output on system tables)
- * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
- * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
+ * TRACE_LOCKS -- give a bunch of output what's going on in this file
+ * TRACE_USERLOCKS -- same but for user locks
+ * TRACE_LOCK_OIDMIN-- do not trace locks for tables below this oid
+ * (use to avoid output on system tables)
+ * TRACE_LOCK_TABLE -- trace locks on this table (oid) unconditionally
+ * DEBUG_DEADLOCKS -- currently dumps locks at untimely occasions ;)
*
* Furthermore, but in storage/ipc/spin.c:
- * TRACE_SPINLOCKS -- trace spinlocks (pretty useless)
+ * TRACE_SPINLOCKS -- trace spinlocks (pretty useless)
*
* Define LOCK_DEBUG at compile time to get all these enabled.
* --------
*/
-int Trace_lock_oidmin = BootstrapObjectIdData;
-bool Trace_locks = false;
-bool Trace_userlocks = false;
-int Trace_lock_table = 0;
-bool Debug_deadlocks = false;
+int Trace_lock_oidmin = BootstrapObjectIdData;
+bool Trace_locks = false;
+bool Trace_userlocks = false;
+int Trace_lock_table = 0;
+bool Debug_deadlocks = false;
inline static bool
-LOCK_DEBUG_ENABLED(const LOCK * lock)
+LOCK_DEBUG_ENABLED(const LOCK *lock)
{
return
- (((LOCK_LOCKMETHOD(*lock) == DEFAULT_LOCKMETHOD && Trace_locks)
- || (LOCK_LOCKMETHOD(*lock) == USER_LOCKMETHOD && Trace_userlocks))
- && (lock->tag.relId >= (Oid) Trace_lock_oidmin))
- || (Trace_lock_table && (lock->tag.relId == Trace_lock_table));
+ (((LOCK_LOCKMETHOD(*lock) == DEFAULT_LOCKMETHOD && Trace_locks)
+ || (LOCK_LOCKMETHOD(*lock) == USER_LOCKMETHOD && Trace_userlocks))
+ && (lock->tag.relId >= (Oid) Trace_lock_oidmin))
+ || (Trace_lock_table && (lock->tag.relId == Trace_lock_table));
}
inline static void
-LOCK_PRINT(const char * where, const LOCK * lock, LOCKMODE type)
+LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
if (LOCK_DEBUG_ENABLED(lock))
elog(DEBUG,
@@ -119,30 +119,30 @@ LOCK_PRINT(const char * where, const LOCK * lock, LOCKMODE type)
inline static void
-HOLDER_PRINT(const char * where, const HOLDER * holderP)
+HOLDER_PRINT(const char *where, const HOLDER *holderP)
{
if (
- (((HOLDER_LOCKMETHOD(*holderP) == DEFAULT_LOCKMETHOD && Trace_locks)
- || (HOLDER_LOCKMETHOD(*holderP) == USER_LOCKMETHOD && Trace_userlocks))
- && (((LOCK *)MAKE_PTR(holderP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
- || (Trace_lock_table && (((LOCK *)MAKE_PTR(holderP->tag.lock))->tag.relId == Trace_lock_table))
- )
+ (((HOLDER_LOCKMETHOD(*holderP) == DEFAULT_LOCKMETHOD && Trace_locks)
+ || (HOLDER_LOCKMETHOD(*holderP) == USER_LOCKMETHOD && Trace_userlocks))
+ && (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
+ || (Trace_lock_table && (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId == Trace_lock_table))
+ )
elog(DEBUG,
"%s: holder(%lx) lock(%lx) tbl(%d) proc(%lx) xid(%u) hold(%d,%d,%d,%d,%d,%d,%d)=%d",
where, MAKE_OFFSET(holderP), holderP->tag.lock,
HOLDER_LOCKMETHOD(*(holderP)),
holderP->tag.proc, holderP->tag.xid,
- holderP->holding[1], holderP->holding[2], holderP->holding[3],
- holderP->holding[4], holderP->holding[5], holderP->holding[6],
+ holderP->holding[1], holderP->holding[2], holderP->holding[3],
+ holderP->holding[4], holderP->holding[5], holderP->holding[6],
holderP->holding[7], holderP->nHolding);
}
-#else /* not LOCK_DEBUG */
+#else /* not LOCK_DEBUG */
#define LOCK_PRINT(where, lock, type)
#define HOLDER_PRINT(where, holderP)
-#endif /* not LOCK_DEBUG */
+#endif /* not LOCK_DEBUG */
@@ -218,7 +218,7 @@ LockingDisabled(void)
LOCKMETHODTABLE *
GetLocksMethodTable(LOCK *lock)
{
- LOCKMETHOD lockmethod = LOCK_LOCKMETHOD(*lock);
+ LOCKMETHOD lockmethod = LOCK_LOCKMETHOD(*lock);
Assert(lockmethod > 0 && lockmethod < NumLockMethods);
return LockMethodTable[lockmethod];
@@ -258,7 +258,7 @@ LockMethodInit(LOCKMETHODTABLE *lockMethodTable,
* is wasteful, in this case, but not much space is involved.
*
* NOTE: data structures allocated here are allocated permanently, using
- * TopMemoryContext and shared memory. We don't ever release them anyway,
+ * TopMemoryContext and shared memory. We don't ever release them anyway,
* and in normal multi-backend operation the lock table structures set up
* by the postmaster are inherited by each backend, so they must be in
* TopMemoryContext.
@@ -304,8 +304,8 @@ LockMethodTableInit(char *tabName,
SpinAcquire(LockMgrLock);
/*
- * allocate a control structure from shared memory or attach to it
- * if it already exists.
+ * allocate a control structure from shared memory or attach to it if
+ * it already exists.
*
*/
sprintf(shmemName, "%s (ctl)", tabName);
@@ -341,8 +341,8 @@ LockMethodTableInit(char *tabName,
Assert(NumLockMethods <= MAX_LOCK_METHODS);
/*
- * allocate a hash table for LOCK structs. This is used
- * to store per-locked-object information.
+ * allocate a hash table for LOCK structs. This is used to store
+ * per-locked-object information.
*
*/
info.keysize = SHMEM_LOCKTAB_KEYSIZE;
@@ -362,8 +362,8 @@ LockMethodTableInit(char *tabName,
Assert(lockMethodTable->lockHash->hash == tag_hash);
/*
- * allocate a hash table for HOLDER structs. This is used
- * to store per-lock-holder information.
+ * allocate a hash table for HOLDER structs. This is used to store
+ * per-lock-holder information.
*
*/
info.keysize = SHMEM_HOLDERTAB_KEYSIZE;
@@ -558,7 +558,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
* Create the hash key for the holder table.
*
*/
- MemSet(&holdertag, 0, sizeof(HOLDERTAG)); /* must clear padding, needed */
+ MemSet(&holdertag, 0, sizeof(HOLDERTAG)); /* must clear padding,
+ * needed */
holdertag.lock = MAKE_OFFSET(lock);
holdertag.proc = MAKE_OFFSET(MyProc);
TransactionIdStore(xid, &holdertag.xid);
@@ -595,6 +596,7 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
Assert(holder->nHolding <= lock->nGranted);
#ifdef CHECK_DEADLOCK_RISK
+
/*
* Issue warning if we already hold a lower-level lock on this
* object and do not hold a lock of the requested level or higher.
@@ -602,12 +604,13 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
* a deadlock if another backend were following the same code path
* at about the same time).
*
- * This is not enabled by default, because it may generate log entries
- * about user-level coding practices that are in fact safe in context.
- * It can be enabled to help find system-level problems.
+ * This is not enabled by default, because it may generate log
+ * entries about user-level coding practices that are in fact safe
+ * in context. It can be enabled to help find system-level
+ * problems.
*
- * XXX Doing numeric comparison on the lockmodes is a hack;
- * it'd be better to use a table. For now, though, this works.
+ * XXX Doing numeric comparison on the lockmodes is a hack; it'd be
+ * better to use a table. For now, though, this works.
*/
for (i = lockMethodTable->ctl->numLockModes; i > 0; i--)
{
@@ -618,17 +621,17 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
elog(DEBUG, "Deadlock risk: raising lock level"
" from %s to %s on object %u/%u/%u",
lock_mode_names[i], lock_mode_names[lockmode],
- lock->tag.relId, lock->tag.dbId, lock->tag.objId.blkno);
+ lock->tag.relId, lock->tag.dbId, lock->tag.objId.blkno);
break;
}
}
-#endif /* CHECK_DEADLOCK_RISK */
+#endif /* CHECK_DEADLOCK_RISK */
}
/*
* lock->nRequested and lock->requested[] count the total number of
- * requests, whether granted or waiting, so increment those immediately.
- * The other counts don't increment till we get the lock.
+ * requests, whether granted or waiting, so increment those
+ * immediately. The other counts don't increment till we get the lock.
*
*/
lock->nRequested++;
@@ -636,8 +639,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
/*
- * If I already hold one or more locks of the requested type,
- * just grant myself another one without blocking.
+ * If I already hold one or more locks of the requested type, just
+ * grant myself another one without blocking.
*
*/
if (holder->holding[lockmode] > 0)
@@ -649,8 +652,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
}
/*
- * If this process (under any XID) is a holder of the lock,
- * also grant myself another one without blocking.
+ * If this process (under any XID) is a holder of the lock, also grant
+ * myself another one without blocking.
*
*/
LockCountMyLocks(holder->tag.lock, MyProc, myHolding);
@@ -663,9 +666,9 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
}
/*
- * If lock requested conflicts with locks requested by waiters,
- * must join wait queue. Otherwise, check for conflict with
- * already-held locks. (That's last because most complex check.)
+ * If lock requested conflicts with locks requested by waiters, must
+ * join wait queue. Otherwise, check for conflict with already-held
+ * locks. (That's last because most complex check.)
*
*/
if (lockMethodTable->ctl->conflictTab[lockmode] & lock->waitMask)
@@ -711,7 +714,7 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
SpinRelease(masterLock);
return FALSE;
}
-#endif /* USER_LOCKS */
+#endif /* USER_LOCKS */
/*
* Construct bitmask of locks this process holds on this object.
@@ -737,8 +740,9 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
/*
* NOTE: do not do any material change of state between here and
- * return. All required changes in locktable state must have been
- * done when the lock was granted to us --- see notes in WaitOnLock.
+ * return. All required changes in locktable state must have been
+ * done when the lock was granted to us --- see notes in
+ * WaitOnLock.
*/
/*
@@ -795,13 +799,13 @@ LockCheckConflicts(LOCKMETHODTABLE *lockMethodTable,
int localHolding[MAX_LOCKMODES];
/*
- * first check for global conflicts: If no locks conflict
- * with my request, then I get the lock.
+ * first check for global conflicts: If no locks conflict with my
+ * request, then I get the lock.
*
* Checking for conflict: lock->grantMask represents the types of
- * currently held locks. conflictTable[lockmode] has a bit
- * set for each type of lock that conflicts with request. Bitwise
- * compare tells if there is a conflict.
+ * currently held locks. conflictTable[lockmode] has a bit set for
+ * each type of lock that conflicts with request. Bitwise compare
+ * tells if there is a conflict.
*
*/
if (!(lockctl->conflictTab[lockmode] & lock->grantMask))
@@ -811,10 +815,10 @@ LockCheckConflicts(LOCKMETHODTABLE *lockMethodTable,
}
/*
- * Rats. Something conflicts. But it could still be my own
- * lock. We have to construct a conflict mask
- * that does not reflect our own locks. Locks held by the current
- * process under another XID also count as "our own locks".
+ * Rats. Something conflicts. But it could still be my own lock. We
+ * have to construct a conflict mask that does not reflect our own
+ * locks. Locks held by the current process under another XID also
+ * count as "our own locks".
*
*/
if (myHolding == NULL)
@@ -834,10 +838,9 @@ LockCheckConflicts(LOCKMETHODTABLE *lockMethodTable,
}
/*
- * now check again for conflicts. 'bitmask' describes the types
- * of locks held by other processes. If one of these
- * conflicts with the kind of lock that I want, there is a
- * conflict and I have to sleep.
+ * now check again for conflicts. 'bitmask' describes the types of
+ * locks held by other processes. If one of these conflicts with the
+ * kind of lock that I want, there is a conflict and I have to sleep.
*
*/
if (!(lockctl->conflictTab[lockmode] & bitmask))
@@ -878,9 +881,7 @@ LockCountMyLocks(SHMEM_OFFSET lockOffset, PROC *proc, int *myHolding)
if (lockOffset == holder->tag.lock)
{
for (i = 1; i < MAX_LOCKMODES; i++)
- {
myHolding[i] += holder->holding[i];
- }
}
holder = (HOLDER *) SHMQueueNext(procHolders, &holder->procLink,
@@ -947,8 +948,8 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
* Hence, after granting, the locktable state must fully reflect the
* fact that we own the lock; we can't do additional work on return.
* Contrariwise, if we fail, any cleanup must happen in xact abort
- * processing, not here, to ensure it will also happen in the cancel/die
- * case.
+ * processing, not here, to ensure it will also happen in the
+ * cancel/die case.
*/
if (ProcSleep(lockMethodTable,
@@ -956,9 +957,10 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
lock,
holder) != STATUS_OK)
{
+
/*
- * We failed as a result of a deadlock, see HandleDeadLock().
- * Quit now. Removal of the holder and lock objects, if no longer
+ * We failed as a result of a deadlock, see HandleDeadLock(). Quit
+ * now. Removal of the holder and lock objects, if no longer
* needed, will happen in xact cleanup (see above for motivation).
*/
LOCK_PRINT("WaitOnLock: aborting on lock", lock, lockmode);
@@ -984,15 +986,15 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
* NB: this does not remove the process' holder object, nor the lock object,
* even though their counts might now have gone to zero. That will happen
* during a subsequent LockReleaseAll call, which we expect will happen
- * during transaction cleanup. (Removal of a proc from its wait queue by
+ * during transaction cleanup. (Removal of a proc from its wait queue by
* this routine can only happen if we are aborting the transaction.)
*--------------------
*/
void
RemoveFromWaitQueue(PROC *proc)
{
- LOCK *waitLock = proc->waitLock;
- LOCKMODE lockmode = proc->waitLockMode;
+ LOCK *waitLock = proc->waitLock;
+ LOCKMODE lockmode = proc->waitLockMode;
/* Make sure proc is waiting */
Assert(proc->links.next != INVALID_OFFSET);
@@ -1095,7 +1097,8 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
/*
* Find the holder entry for this holder.
*/
- MemSet(&holdertag, 0, sizeof(HOLDERTAG)); /* must clear padding, needed */
+ MemSet(&holdertag, 0, sizeof(HOLDERTAG)); /* must clear padding,
+ * needed */
holdertag.lock = MAKE_OFFSET(lock);
holdertag.proc = MAKE_OFFSET(MyProc);
TransactionIdStore(xid, &holdertag.xid);
@@ -1156,11 +1159,11 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
/*
* We need only run ProcLockWakeup if the released lock conflicts with
* at least one of the lock types requested by waiter(s). Otherwise
- * whatever conflict made them wait must still exist. NOTE: before MVCC,
- * we could skip wakeup if lock->granted[lockmode] was still positive.
- * But that's not true anymore, because the remaining granted locks might
- * belong to some waiter, who could now be awakened because he doesn't
- * conflict with his own locks.
+ * whatever conflict made them wait must still exist. NOTE: before
+ * MVCC, we could skip wakeup if lock->granted[lockmode] was still
+ * positive. But that's not true anymore, because the remaining
+ * granted locks might belong to some waiter, who could now be
+ * awakened because he doesn't conflict with his own locks.
*
*/
if (lockMethodTable->ctl->conflictTab[lockmode] & lock->waitMask)
@@ -1168,10 +1171,10 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
if (lock->nRequested == 0)
{
+
/*
- * if there's no one waiting in the queue,
- * we just released the last lock on this object.
- * Delete it from the lock table.
+ * if there's no one waiting in the queue, we just released the
+ * last lock on this object. Delete it from the lock table.
*
*/
Assert(lockMethodTable->lockHash->hash == tag_hash);
@@ -1197,8 +1200,8 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
Assert((holder->nHolding >= 0) && (holder->holding[lockmode] >= 0));
/*
- * If this was my last hold on this lock, delete my entry in the holder
- * table.
+ * If this was my last hold on this lock, delete my entry in the
+ * holder table.
*/
if (holder->nHolding == 0)
{
@@ -1316,11 +1319,12 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
Assert(lock->requested[i] >= 0 && lock->granted[i] >= 0);
if (lock->granted[i] == 0)
lock->grantMask &= BITS_OFF[i];
+
/*
* Read comments in LockRelease
*/
if (!wakeupNeeded &&
- lockMethodTable->ctl->conflictTab[i] & lock->waitMask)
+ lockMethodTable->ctl->conflictTab[i] & lock->waitMask)
wakeupNeeded = true;
}
}
@@ -1331,9 +1335,10 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
}
else
{
+
/*
- * This holder accounts for all the requested locks on the object,
- * so we can be lazy and just zero things out.
+ * This holder accounts for all the requested locks on the
+ * object, so we can be lazy and just zero things out.
*
*/
lock->nRequested = 0;
@@ -1371,6 +1376,7 @@ LockReleaseAll(LOCKMETHOD lockmethod, PROC *proc,
if (lock->nRequested == 0)
{
+
/*
* We've just released the last lock, so garbage-collect the
* lock object.
@@ -1412,7 +1418,8 @@ LockShmemSize(int maxBackends)
size += MAXALIGN(sizeof(PROC_HDR)); /* ProcGlobal */
size += maxBackends * MAXALIGN(sizeof(PROC)); /* each MyProc */
- size += MAX_LOCK_METHODS * MAXALIGN(sizeof(LOCKMETHODCTL)); /* each lockMethodTable->ctl */
+ size += MAX_LOCK_METHODS * MAXALIGN(sizeof(LOCKMETHODCTL)); /* each
+ * lockMethodTable->ctl */
/* lockHash table */
size += hash_estimate_size(NLOCKENTS(maxBackends),
@@ -1534,4 +1541,4 @@ DumpAllLocks(void)
}
}
-#endif /* LOCK_DEBUG */
+#endif /* LOCK_DEBUG */
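
The LockCheckConflicts comments above reduce the conflict test to two bitmask compares: first against everything granted on the object, then, if that hits, against a mask from which the current process's own holdings have been removed. A minimal sketch of that test follows. It is simplified to presence bits and a made-up three-mode conflict table; the real code rebuilds the second mask from per-mode hold counts, so a mode held both by us and by somebody else still registers as a conflict.

/*
 * Two-stage bitmask conflict test, in the spirit of LockCheckConflicts.
 * The mode numbers and conflict table are invented for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define MODE_SHARE      1
#define MODE_EXCLUSIVE  2

static const int conflict_tab[3] = {
    0,                                          /* unused slot 0 */
    (1 << MODE_EXCLUSIVE),                      /* SHARE conflicts with EXCLUSIVE */
    (1 << MODE_SHARE) | (1 << MODE_EXCLUSIVE)   /* EXCLUSIVE conflicts with both */
};

/*
 * Return true if a request of 'mode' must wait, given the modes already
 * granted on the object (grant_mask) and the modes this same process
 * already holds (my_mask).  Locks we hold ourselves never block us.
 */
static bool
must_wait(int mode, int grant_mask, int my_mask)
{
    int         other_mask;

    /* Fast path: nothing granted conflicts with the request at all. */
    if (!(conflict_tab[mode] & grant_mask))
        return false;

    /* Something conflicts; see whether it is only our own locks. */
    other_mask = grant_mask & ~my_mask;
    return (conflict_tab[mode] & other_mask) != 0;
}

int
main(void)
{
    int         granted = (1 << MODE_SHARE);

    /* Another backend holds SHARE: our EXCLUSIVE request must wait... */
    printf("%d\n", must_wait(MODE_EXCLUSIVE, granted, 0));
    /* ...but not if the SHARE lock is our own. */
    printf("%d\n", must_wait(MODE_EXCLUSIVE, granted, 1 << MODE_SHARE));
    return 0;
}
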
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 463af1fa5e3..ee2d6751c5e 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.98 2001/01/26 18:23:12 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.99 2001/03/22 03:59:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,7 +76,7 @@
#include "storage/proc.h"
-int DeadlockTimeout = 1000;
+int DeadlockTimeout = 1000;
/* --------------------
* Spin lock for manipulating the shared process data structure:
@@ -147,10 +147,10 @@ InitProcGlobal(int maxBackends)
/*
* Arrange to delete semas on exit --- set this up now so that we
- * will clean up if pre-allocation fails. We use our own freeproc,
- * rather than IpcSemaphoreCreate's removeOnExit option, because
- * we don't want to fill up the on_shmem_exit list with a separate
- * entry for each semaphore set.
+ * will clean up if pre-allocation fails. We use our own
+ * freeproc, rather than IpcSemaphoreCreate's removeOnExit option,
+ * because we don't want to fill up the on_shmem_exit list with a
+ * separate entry for each semaphore set.
*/
on_shmem_exit(ProcFreeAllSemaphores, 0);
@@ -159,9 +159,9 @@ InitProcGlobal(int maxBackends)
*/
Assert(maxBackends > 0 && maxBackends <= MAXBACKENDS);
- for (i = 0; i < ((maxBackends-1)/PROC_NSEMS_PER_SET+1); i++)
+ for (i = 0; i < ((maxBackends - 1) / PROC_NSEMS_PER_SET + 1); i++)
{
- IpcSemaphoreId semId;
+ IpcSemaphoreId semId;
semId = IpcSemaphoreCreate(PROC_NSEMS_PER_SET,
IPCProtection,
@@ -242,6 +242,7 @@ InitProcess(void)
if (IsUnderPostmaster)
{
ProcGetNewSemIdAndNum(&MyProc->sem.semId, &MyProc->sem.semNum);
+
/*
* we might be reusing a semaphore that belongs to a dead backend.
* So be careful and reinitialize its value here.
@@ -288,8 +289,8 @@ InitProcess(void)
on_shmem_exit(ProcKill, 0);
/*
- * Now that we have a PROC, we could try to acquire locks,
- * so initialize the deadlock checker.
+ * Now that we have a PROC, we could try to acquire locks, so
+ * initialize the deadlock checker.
*/
InitDeadLockChecking();
}
@@ -300,7 +301,7 @@ InitProcess(void)
static void
ZeroProcSemaphore(PROC *proc)
{
- union semun semun;
+ union semun semun;
semun.val = 0;
if (semctl(proc->sem.semId, proc->sem.semNum, SETVAL, semun) < 0)
@@ -333,15 +334,15 @@ LockWaitCancel(void)
#ifndef __BEOS__
{
struct itimerval timeval,
- dummy;
+ dummy;
MemSet(&timeval, 0, sizeof(struct itimerval));
setitimer(ITIMER_REAL, &timeval, &dummy);
}
#else
/* BeOS doesn't have setitimer, but has set_alarm */
- set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM);
-#endif /* __BEOS__ */
+ set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM);
+#endif /* __BEOS__ */
/* Unlink myself from the wait queue, if on it (might not be anymore!) */
LockLockTable();
@@ -352,17 +353,17 @@ LockWaitCancel(void)
/*
* Reset the proc wait semaphore to zero. This is necessary in the
* scenario where someone else granted us the lock we wanted before we
- * were able to remove ourselves from the wait-list. The semaphore will
- * have been bumped to 1 by the would-be grantor, and since we are no
- * longer going to wait on the sema, we have to force it back to zero.
- * Otherwise, our next attempt to wait for a lock will fall through
- * prematurely.
+ * were able to remove ourselves from the wait-list. The semaphore
+ * will have been bumped to 1 by the would-be grantor, and since we
+ * are no longer going to wait on the sema, we have to force it back
+ * to zero. Otherwise, our next attempt to wait for a lock will fall
+ * through prematurely.
*/
ZeroProcSemaphore(MyProc);
/*
- * Return true even if we were kicked off the lock before we were
- * able to remove ourselves.
+ * Return true even if we were kicked off the lock before we were able
+ * to remove ourselves.
*/
return true;
}
@@ -467,7 +468,7 @@ ProcQueueAlloc(char *name)
{
bool found;
PROC_QUEUE *queue = (PROC_QUEUE *)
- ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
+ ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
if (!queue)
return NULL;
@@ -520,11 +521,14 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
int myHeldLocks = MyProc->heldLocks;
PROC *proc;
int i;
+
#ifndef __BEOS__
struct itimerval timeval,
dummy;
+
#else
- bigtime_t time_interval;
+ bigtime_t time_interval;
+
#endif
/* ----------------------
@@ -582,6 +586,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
aheadRequests |= (1 << proc->waitLockMode);
proc = (PROC *) MAKE_PTR(proc->links.next);
}
+
/*
* If we fall out of loop normally, proc points to waitQueue head,
* so we will insert at tail of queue as desired.
@@ -607,7 +612,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
MyProc->waitHolder = holder;
MyProc->waitLockMode = lockmode;
- MyProc->errType = STATUS_OK; /* initialize result for success */
+ MyProc->errType = STATUS_OK;/* initialize result for success */
/* mark that we are waiting for a lock */
waitingForLock = true;
@@ -643,7 +648,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
if (setitimer(ITIMER_REAL, &timeval, &dummy))
elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#else
- time_interval = DeadlockTimeout * 1000000; /* usecs */
+ time_interval = DeadlockTimeout * 1000000; /* usecs */
if (set_alarm(time_interval, B_ONE_SHOT_RELATIVE_ALARM) < 0)
elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
#endif
@@ -674,7 +679,7 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
if (setitimer(ITIMER_REAL, &timeval, &dummy))
elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
#else
- if (set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM) < 0)
+ if (set_alarm(B_INFINITE_TIMEOUT, B_PERIODIC_ALARM) < 0)
elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
#endif
@@ -759,7 +764,7 @@ ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
while (queue_size-- > 0)
{
- LOCKMODE lockmode = proc->waitLockMode;
+ LOCKMODE lockmode = proc->waitLockMode;
/*
* Waken if (a) doesn't conflict with requests of earlier waiters,
@@ -776,15 +781,20 @@ ProcLockWakeup(LOCKMETHODTABLE *lockMethodTable, LOCK *lock)
/* OK to waken */
GrantLock(lock, proc->waitHolder, lockmode);
proc = ProcWakeup(proc, STATUS_OK);
+
/*
- * ProcWakeup removes proc from the lock's waiting process queue
- * and returns the next proc in chain; don't use proc's next-link,
- * because it's been cleared.
+ * ProcWakeup removes proc from the lock's waiting process
+ * queue and returns the next proc in chain; don't use proc's
+ * next-link, because it's been cleared.
*/
}
else
{
- /* Cannot wake this guy. Remember his request for later checks. */
+
+ /*
+ * Cannot wake this guy. Remember his request for later
+ * checks.
+ */
aheadRequests |= (1 << lockmode);
proc = (PROC *) MAKE_PTR(proc->links.next);
}
@@ -807,11 +817,11 @@ HandleDeadLock(SIGNAL_ARGS)
int save_errno = errno;
/*
- * Acquire locktable lock. Note that the SIGALRM interrupt had better
- * not be enabled anywhere that this process itself holds the locktable
- * lock, else this will wait forever. Also note that this calls
- * SpinAcquire which creates a critical section, so that this routine
- * cannot be interrupted by cancel/die interrupts.
+ * Acquire locktable lock. Note that the SIGALRM interrupt had better
+ * not be enabled anywhere that this process itself holds the
+ * locktable lock, else this will wait forever. Also note that this
+ * calls SpinAcquire which creates a critical section, so that this
+ * routine cannot be interrupted by cancel/die interrupts.
*/
LockLockTable();
@@ -836,8 +846,8 @@ HandleDeadLock(SIGNAL_ARGS)
}
#ifdef LOCK_DEBUG
- if (Debug_deadlocks)
- DumpAllLocks();
+ if (Debug_deadlocks)
+ DumpAllLocks();
#endif
if (!DeadLockCheck(MyProc))
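
ProcSleep() above brackets the semaphore wait with setitimer(ITIMER_REAL, ...) calls: a one-shot alarm of DeadlockTimeout milliseconds is armed before sleeping, and a zeroed itimerval disarms it on wakeup, with HandleDeadLock() as the SIGALRM handler. The sketch below reproduces just that arm/sleep/disarm sequence with the same POSIX calls; the handler only sets a flag, and pause() stands in for sleeping on the process semaphore.

/*
 * One-shot deadlock-timeout alarm: arm, wait, disarm.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>

static volatile sig_atomic_t timer_fired = 0;

static void
handle_alarm(int signo)
{
    (void) signo;
    timer_fired = 1;            /* the real handler runs DeadLockCheck() */
}

int
main(void)
{
    int         deadlock_timeout = 1000;    /* milliseconds, as in proc.c */
    struct itimerval timeval,
                dummy;

    signal(SIGALRM, handle_alarm);

    /* Arm: it_interval stays zero, so the alarm fires exactly once. */
    memset(&timeval, 0, sizeof(struct itimerval));
    timeval.it_value.tv_sec = deadlock_timeout / 1000;
    timeval.it_value.tv_usec = (deadlock_timeout % 1000) * 1000;
    if (setitimer(ITIMER_REAL, &timeval, &dummy))
    {
        perror("setitimer");
        return 1;
    }

    pause();                    /* stand-in for waiting on the proc semaphore */

    /* Disarm: a zeroed itimerval cancels any pending alarm. */
    memset(&timeval, 0, sizeof(struct itimerval));
    if (setitimer(ITIMER_REAL, &timeval, &dummy))
    {
        perror("setitimer");
        return 1;
    }

    printf("timer %s\n", timer_fired ? "fired" : "did not fire");
    return 0;
}
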
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index 02528a30e67..d208a80d5cc 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.36 2001/02/06 06:24:00 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.37 2001/03/22 03:59:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,14 +59,14 @@ PageInit(Page page, Size pageSize, Size specialSize)
void
PageZero(Page page)
{
- MemSet((char*)page + ((PageHeader)page)->pd_lower, 0,
- ((PageHeader)page)->pd_special - ((PageHeader)page)->pd_lower);
+ MemSet((char *) page + ((PageHeader) page)->pd_lower, 0,
+ ((PageHeader) page)->pd_special - ((PageHeader) page)->pd_lower);
}
/* ----------------
* PageAddItem
*
- * Add an item to a page. Return value is offset at which it was
+ * Add an item to a page. Return value is offset at which it was
* inserted, or InvalidOffsetNumber if there's not room to insert.
*
* If offsetNumber is valid and <= current max offset in the page,
@@ -75,7 +75,7 @@ PageZero(Page page)
* If offsetNumber is not valid, then assign one by finding the first
* one that is both unused and deallocated.
*
- * !!! ELOG(ERROR) IS DISALLOWED HERE !!!
+ * !!! ELOG(ERROR) IS DISALLOWED HERE !!!
*
* ----------------
*/
@@ -125,10 +125,12 @@ PageAddItem(Page page,
}
else
{
+
/*
- * Don't actually do the shuffle till we've checked free space!
+ * Don't actually do the shuffle till we've checked free
+ * space!
*/
- needshuffle = true; /* need to increase "lower" */
+ needshuffle = true; /* need to increase "lower" */
}
}
else
@@ -162,7 +164,8 @@ PageAddItem(Page page,
return InvalidOffsetNumber;
/*
- * OK to insert the item. First, shuffle the existing pointers if needed.
+ * OK to insert the item. First, shuffle the existing pointers if
+ * needed.
*/
if (needshuffle)
{
@@ -284,12 +287,12 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
for (i = 0; i < nline; i++)
{
lp = ((PageHeader) page)->pd_linp + i;
- if ((*lp).lp_flags & LP_DELETE) /* marked for deletion */
+ if ((*lp).lp_flags & LP_DELETE) /* marked for deletion */
(*lp).lp_flags &= ~(LP_USED | LP_DELETE);
if ((*lp).lp_flags & LP_USED)
nused++;
else if (unused)
- unused[i - nused] = (OffsetNumber)i;
+ unused[i - nused] = (OffsetNumber) i;
}
if (nused == 0)
@@ -347,7 +350,7 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
pfree(itemidbase);
}
- return(nline - nused);
+ return (nline - nused);
}
/*
@@ -377,16 +380,16 @@ PageGetFreeSpace(Page page)
void
IndexPageCleanup(Buffer buffer)
{
- Page page = (Page) BufferGetPage(buffer);
- ItemId lp;
- OffsetNumber maxoff;
- OffsetNumber i;
+ Page page = (Page) BufferGetPage(buffer);
+ ItemId lp;
+ OffsetNumber maxoff;
+ OffsetNumber i;
maxoff = PageGetMaxOffsetNumber(page);
for (i = 0; i < maxoff; i++)
{
lp = ((PageHeader) page)->pd_linp + i;
- if ((*lp).lp_flags & LP_DELETE) /* marked for deletion */
+ if ((*lp).lp_flags & LP_DELETE) /* marked for deletion */
{
PageIndexTupleDelete(page, i + 1);
maxoff--;
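
PageRepairFragmentation() above first walks the page's line pointers, clearing items marked LP_DELETE, counting those still LP_USED, and recording the reusable slots in the caller's unused[] array. The fragment below repeats that flag walk over a bare array of flag words; the flag values and the array are invented for the example, and only the arithmetic mirrors the loop.

/*
 * Line-pointer flag scan: clear deleted items, count used ones,
 * remember reusable slots.
 */
#include <stdio.h>

#define LP_USED     0x01        /* item is in use (example value) */
#define LP_DELETE   0x02        /* item is marked for deletion (example value) */

int
main(void)
{
    unsigned int linp[] = {LP_USED, LP_USED | LP_DELETE, 0, LP_USED};
    int         nline = 4;
    int         nused = 0;
    int         unused[4];
    int         i;

    for (i = 0; i < nline; i++)
    {
        if (linp[i] & LP_DELETE)                /* marked for deletion */
            linp[i] &= ~(LP_USED | LP_DELETE);
        if (linp[i] & LP_USED)
            nused++;
        else
            unused[i - nused] = i;              /* remember reusable slots */
    }

    printf("%d used, %d reclaimable\n", nused, nline - nused);
    for (i = 0; i < nline - nused; i++)
        printf("slot %d is free\n", unused[i]);
    return 0;
}
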
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index 179be5ab0e9..e4aca5aeee4 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.81 2001/01/24 19:43:08 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.82 2001/03/22 03:59:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,7 +76,7 @@ static int _mdfd_getrelnfd(Relation reln);
static MdfdVec *_mdfd_openseg(Relation reln, int segno, int oflags);
static MdfdVec *_mdfd_getseg(Relation reln, int blkno);
-static int _mdfd_blind_getseg(RelFileNode rnode, int blkno);
+static int _mdfd_blind_getseg(RelFileNode rnode, int blkno);
static int _fdvec_alloc(void);
static void _fdvec_free(int);
@@ -135,13 +135,14 @@ mdcreate(Relation reln)
if (fd < 0)
{
- int save_errno = errno;
+ int save_errno = errno;
/*
- * During bootstrap, there are cases where a system relation will be
- * accessed (by internal backend processes) before the bootstrap
- * script nominally creates it. Therefore, allow the file to exist
- * already, but in bootstrap mode only. (See also mdopen)
+ * During bootstrap, there are cases where a system relation will
+ * be accessed (by internal backend processes) before the
+ * bootstrap script nominally creates it. Therefore, allow the
+ * file to exist already, but in bootstrap mode only. (See also
+ * mdopen)
*/
if (IsBootstrapProcessingMode())
fd = FileNameOpenFile(path, O_RDWR | PG_BINARY, 0600);
@@ -197,7 +198,7 @@ mdunlink(RelFileNode rnode)
char *segpath = (char *) palloc(strlen(path) + 12);
int segno;
- for (segno = 1; ; segno++)
+ for (segno = 1;; segno++)
{
sprintf(segpath, "%s.%d", path, segno);
if (unlink(segpath) < 0)
@@ -293,11 +294,13 @@ mdopen(Relation reln)
if (fd < 0)
{
+
/*
- * During bootstrap, there are cases where a system relation will be
- * accessed (by internal backend processes) before the bootstrap
- * script nominally creates it. Therefore, accept mdopen() as a
- * substitute for mdcreate() in bootstrap mode only. (See mdcreate)
+ * During bootstrap, there are cases where a system relation will
+ * be accessed (by internal backend processes) before the
+ * bootstrap script nominally creates it. Therefore, accept
+ * mdopen() as a substitute for mdcreate() in bootstrap mode only.
+ * (See mdcreate)
*/
if (IsBootstrapProcessingMode())
fd = FileNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY, 0600);
@@ -666,12 +669,13 @@ mdnblocks(Relation reln)
if (v->mdfd_chain == (MdfdVec *) NULL)
{
+
/*
- * Because we pass O_CREAT, we will create the next segment
- * (with zero length) immediately, if the last segment is of
- * length REL_SEGSIZE. This is unnecessary but harmless, and
- * testing for the case would take more cycles than it seems
- * worth.
+ * Because we pass O_CREAT, we will create the next
+ * segment (with zero length) immediately, if the last
+ * segment is of length REL_SEGSIZE. This is unnecessary
+ * but harmless, and testing for the case would take more
+ * cycles than it seems worth.
*/
v->mdfd_chain = _mdfd_openseg(reln, segno, O_CREAT);
if (v->mdfd_chain == (MdfdVec *) NULL)
@@ -700,8 +704,10 @@ mdtruncate(Relation reln, int nblocks)
int curnblk;
int fd;
MdfdVec *v;
+
#ifndef LET_OS_MANAGE_FILESIZE
int priorblocks;
+
#endif
/*
@@ -1004,14 +1010,16 @@ _mdfd_getseg(Relation reln, int blkno)
if (v->mdfd_chain == (MdfdVec *) NULL)
{
+
/*
- * We will create the next segment only if the target block
- * is within it. This prevents Sorcerer's Apprentice syndrome
- * if a bug at higher levels causes us to be handed a ridiculously
- * large blkno --- otherwise we could create many thousands of
- * empty segment files before reaching the "target" block. We
- * should never need to create more than one new segment per call,
- * so this restriction seems reasonable.
+ * We will create the next segment only if the target block is
+ * within it. This prevents Sorcerer's Apprentice syndrome if
+ * a bug at higher levels causes us to be handed a
+ * ridiculously large blkno --- otherwise we could create many
+ * thousands of empty segment files before reaching the
+ * "target" block. We should never need to create more than
+ * one new segment per call, so this restriction seems
+ * reasonable.
*/
v->mdfd_chain = _mdfd_openseg(reln, i, (segno == 1) ? O_CREAT : 0);
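
The md.c comments above revolve around relations being split across segment files of a fixed number of blocks, with extra segments named by appending ".1", ".2", and so on to the base path (as the mdunlink() loop shows). The mapping from a block number onto a segment file is just integer division and remainder, sketched below; SEG_SIZE here is a placeholder for the real blocks-per-segment constant.

/*
 * Block-number to (segment file, block-within-segment) arithmetic.
 */
#include <stdio.h>

#define SEG_SIZE 131072         /* blocks per segment file (placeholder) */

int
main(void)
{
    int         blkno = 300000;
    int         segno = blkno / SEG_SIZE;
    int         segblk = blkno % SEG_SIZE;

    if (segno == 0)
        printf("block %d lives in the base file, block %d\n", blkno, segblk);
    else
        printf("block %d lives in segment file \".%d\", block %d\n",
               blkno, segno, segblk);

    /* A relation of nblocks blocks needs this many segment files: */
    {
        int         nblocks = 300001;
        int         nsegs = (nblocks - 1) / SEG_SIZE + 1;

        printf("%d blocks -> %d segment file(s)\n", nblocks, nsegs);
    }
    return 0;
}
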
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 10d6d6fc480..d19abcd6254 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.47 2001/01/24 19:43:08 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.48 2001/03/22 03:59:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,8 +39,8 @@ typedef struct f_smgr
char *buffer);
int (*smgr_flush) (Relation reln, BlockNumber blocknum,
char *buffer);
- int (*smgr_blindwrt) (RelFileNode rnode, BlockNumber blkno,
- char *buffer, bool dofsync);
+ int (*smgr_blindwrt) (RelFileNode rnode, BlockNumber blkno,
+ char *buffer, bool dofsync);
int (*smgr_markdirty) (Relation reln, BlockNumber blkno);
int (*smgr_blindmarkdirty) (RelFileNode, BlockNumber blkno);
int (*smgr_nblocks) (Relation reln);
@@ -60,7 +60,7 @@ static f_smgr smgrsw[] = {
/* magnetic disk */
{mdinit, NULL, mdcreate, mdunlink, mdextend, mdopen, mdclose,
mdread, mdwrite, mdflush, mdblindwrt, mdmarkdirty, mdblindmarkdirty,
- mdnblocks, mdtruncate, mdcommit, mdabort, mdsync
+ mdnblocks, mdtruncate, mdcommit, mdabort, mdsync
},
#ifdef STABLE_MEMORY_STORAGE
@@ -96,7 +96,7 @@ static int NSmgr = lengthof(smgrsw);
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
- * transaction is aborted. Conversely, a deletion request is NOT
+ * transaction is aborted. Conversely, a deletion request is NOT
* executed immediately, but is just entered in the list. When and if
* the transaction commits, we can delete the physical file.
*
@@ -108,12 +108,12 @@ static int NSmgr = lengthof(smgrsw);
typedef struct PendingRelDelete
{
RelFileNode relnode; /* relation that may need to be deleted */
- int16 which; /* which storage manager? */
- bool atCommit; /* T=delete at commit; F=delete at abort */
- struct PendingRelDelete *next; /* linked-list link */
+ int16 which; /* which storage manager? */
+ bool atCommit; /* T=delete at commit; F=delete at abort */
+ struct PendingRelDelete *next; /* linked-list link */
} PendingRelDelete;
-static PendingRelDelete *pendingDeletes = NULL; /* head of linked list */
+static PendingRelDelete *pendingDeletes = NULL; /* head of linked list */
/*
@@ -133,7 +133,7 @@ smgrinit()
if ((*(smgrsw[i].smgr_init)) () == SM_FAIL)
elog(FATAL, "initialization failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
@@ -155,7 +155,7 @@ smgrshutdown(void)
if ((*(smgrsw[i].smgr_shutdown)) () == SM_FAIL)
elog(FATAL, "shutdown failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
}
@@ -213,11 +213,11 @@ smgrunlink(int16 which, Relation reln)
/*
* NOTE: if the relation was created in this transaction, it will now
- * be present in the pending-delete list twice, once with atCommit true
- * and once with atCommit false. Hence, it will be physically deleted
- * at end of xact in either case (and the other entry will be ignored
- * by smgrDoPendingDeletes, so no error will occur). We could instead
- * remove the existing list entry and delete the physical file
+ * be present in the pending-delete list twice, once with atCommit
+ * true and once with atCommit false. Hence, it will be physically
+ * deleted at end of xact in either case (and the other entry will be
+ * ignored by smgrDoPendingDeletes, so no error will occur). We could
+ * instead remove the existing list entry and delete the physical file
* immediately, but for now I'll keep the logic simple.
*/
@@ -259,7 +259,7 @@ smgropen(int16 which, Relation reln, bool failOK)
if (reln->rd_rel->relkind == RELKIND_VIEW)
return -1;
if ((fd = (*(smgrsw[which].smgr_open)) (reln)) < 0)
- if (! failOK)
+ if (!failOK)
elog(ERROR, "cannot open %s: %m", RelationGetRelationName(reln));
return fd;
@@ -475,17 +475,20 @@ smgrDoPendingDeletes(bool isCommit)
pendingDeletes = pending->next;
if (pending->atCommit == isCommit)
{
+
/*
* Get rid of any leftover buffers for the rel (shouldn't be
- * any in the commit case, but there can be in the abort case).
+ * any in the commit case, but there can be in the abort
+ * case).
*/
DropRelFileNodeBuffers(pending->relnode);
+
/*
* And delete the physical files.
*
* Note: we treat deletion failure as a NOTICE, not an error,
- * because we've already decided to commit or abort the current
- * xact.
+ * because we've already decided to commit or abort the
+ * current xact.
*/
if ((*(smgrsw[pending->which].smgr_unlink)) (pending->relnode) == SM_FAIL)
elog(NOTICE, "cannot unlink %u/%u: %m",
@@ -513,7 +516,7 @@ smgrcommit()
if ((*(smgrsw[i].smgr_commit)) () == SM_FAIL)
elog(FATAL, "transaction commit failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
@@ -532,7 +535,7 @@ smgrabort()
if ((*(smgrsw[i].smgr_abort)) () == SM_FAIL)
elog(FATAL, "transaction abort failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
@@ -551,7 +554,7 @@ smgrsync()
if ((*(smgrsw[i].smgr_sync)) () == SM_FAIL)
elog(STOP, "storage sync failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
- Int16GetDatum(i))));
+ Int16GetDatum(i))));
}
}
@@ -579,8 +582,8 @@ void
smgr_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-
+
void
-smgr_desc(char *buf, uint8 xl_info, char* rec)
+smgr_desc(char *buf, uint8 xl_info, char *rec)
{
}
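
The PendingRelDelete comments above describe the bookkeeping: file creations and deletions inside a transaction are only recorded in a linked list, and smgrDoPendingDeletes() later unlinks exactly those entries whose atCommit flag matches how the transaction ended. A self-contained sketch of that list follows; strings stand in for RelFileNode, a printf stands in for the storage-manager unlink call, and every name is local to the example.

/*
 * Pending-delete list processed at transaction end.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>

typedef struct PendingDelete
{
    char        path[32];       /* stands in for the RelFileNode */
    bool        atCommit;       /* true: delete at commit; false: at abort */
    struct PendingDelete *next;
} PendingDelete;

static PendingDelete *pendingDeletes = NULL;    /* head of linked list */

static void
remember_delete(const char *path, bool atCommit)
{
    PendingDelete *p = malloc(sizeof(PendingDelete));

    if (p == NULL)
    {
        perror("malloc");
        exit(1);
    }
    strncpy(p->path, path, sizeof(p->path) - 1);
    p->path[sizeof(p->path) - 1] = '\0';
    p->atCommit = atCommit;
    p->next = pendingDeletes;
    pendingDeletes = p;
}

static void
do_pending_deletes(bool isCommit)
{
    while (pendingDeletes != NULL)
    {
        PendingDelete *pending = pendingDeletes;

        pendingDeletes = pending->next;
        if (pending->atCommit == isCommit)
            printf("unlink %s\n", pending->path);       /* smgr unlink here */
        free(pending);
    }
}

int
main(void)
{
    /* CREATE TABLE in this xact: delete the new file only if we abort... */
    remember_delete("base/1/2001", false);
    /* ...and DROP TABLE of an old file: delete it only if we commit. */
    remember_delete("base/1/1999", true);

    do_pending_deletes(true);   /* commit: only base/1/1999 is unlinked */
    return 0;
}
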
diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c
index 6b266a244f7..1bd4d2339eb 100644
--- a/src/backend/tcop/dest.c
+++ b/src/backend/tcop/dest.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/dest.c,v 1.42 2001/01/24 19:43:09 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/dest.c,v 1.43 2001/03/22 03:59:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -176,7 +176,7 @@ DestToFunction(CommandDest dest)
{
switch (dest)
{
- case Remote:
+ case Remote:
return printtup_create_DR(false);
case RemoteInternal:
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index 364cef07144..0d83c5104c4 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/fastpath.c,v 1.46 2001/01/24 19:43:09 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/fastpath.c,v 1.47 2001/03/22 03:59:47 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@@ -75,7 +75,7 @@
* ----------------
*/
static void
-SendFunctionResult(Datum retval, /* actual return value */
+SendFunctionResult(Datum retval,/* actual return value */
bool retbyval,
int retlen) /* the length according to the catalogs */
{
@@ -292,26 +292,29 @@ HandleFunctionRequest(void)
* XXX FIXME: This protocol is misdesigned.
*
* We really do not want to elog() before having swallowed all of the
- * frontend's fastpath message; otherwise we will lose sync with the input
- * datastream. What should happen is we absorb all of the input message
- * per protocol syntax, and *then* do error checking (including lookup of
- * the given function ID) and elog if appropriate. Unfortunately, because
- * we cannot even read the message properly without knowing whether the
- * data types are pass-by-ref or pass-by-value, it's not all that easy to
- * do :-(. The protocol should require the client to supply what it
- * thinks is the typbyval and typlen value for each arg, so that we can
- * read the data without having to do any lookups. Then after we've read
- * the message, we should do the lookups, verify agreement of the actual
- * function arg types with what we received, and finally call the function.
+ * frontend's fastpath message; otherwise we will lose sync with the
+ * input datastream. What should happen is we absorb all of the input
+ * message per protocol syntax, and *then* do error checking
+ * (including lookup of the given function ID) and elog if
+ * appropriate. Unfortunately, because we cannot even read the
+ * message properly without knowing whether the data types are
+ * pass-by-ref or pass-by-value, it's not all that easy to do :-(.
+ * The protocol should require the client to supply what it thinks is
+ * the typbyval and typlen value for each arg, so that we can read the
+ * data without having to do any lookups. Then after we've read the
+ * message, we should do the lookups, verify agreement of the actual
+ * function arg types with what we received, and finally call the
+ * function.
*
* As things stand, not only will we lose sync for an invalid message
- * (such as requested function OID doesn't exist), but we may lose sync
- * for a perfectly valid message if we are in transaction-aborted state!
- * This can happen because our database lookup attempts may fail entirely
- * in abort state.
+ * (such as requested function OID doesn't exist), but we may lose
+ * sync for a perfectly valid message if we are in transaction-aborted
+ * state! This can happen because our database lookup attempts may
+ * fail entirely in abort state.
*
* Unfortunately I see no way to fix this without breaking a lot of
- * existing clients. Maybe do it as part of next protocol version change.
+ * existing clients. Maybe do it as part of next protocol version
+ * change.
*/
if (pq_getint(&tmp, 4)) /* function oid */
@@ -323,7 +326,8 @@ HandleFunctionRequest(void)
/*
* This is where the one-back caching is done. If you want to save
* more state, make this a loop around an array. Given the relatively
- * short lifespan of the cache, not clear that there's any win possible.
+ * short lifespan of the cache, not clear that there's any win
+ * possible.
*/
fip = &last_fp;
if (!valid_fp_info(fid, fip))
@@ -365,9 +369,9 @@ HandleFunctionRequest(void)
elog(ERROR, "HandleFunctionRequest: bogus argsize %d",
argsize);
/* I suspect this +1 isn't really needed - tgl 5/2000 */
- p = palloc(argsize + VARHDRSZ + 1); /* Added +1 to solve
- * memory leak - Peter
- * 98 Jan 6 */
+ p = palloc(argsize + VARHDRSZ + 1); /* Added +1 to solve
+ * memory leak - Peter
+ * 98 Jan 6 */
VARATT_SIZEP(p) = argsize + VARHDRSZ;
if (pq_getbytes(VARDATA(p), argsize))
return EOF;
@@ -377,7 +381,7 @@ HandleFunctionRequest(void)
if (argsize != fip->arglen[i])
elog(ERROR, "HandleFunctionRequest: bogus argsize %d, should be %d",
argsize, fip->arglen[i]);
- p = palloc(argsize + 1); /* +1 in case argsize is 0 */
+ p = palloc(argsize + 1); /* +1 in case argsize is 0 */
if (pq_getbytes(p, argsize))
return EOF;
}
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 2aa08a1c634..745de3ce1a8 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.212 2001/03/14 18:24:34 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.213 2001/03/22 03:59:47 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -71,20 +71,20 @@
* ----------------
*/
-extern int optind;
+extern int optind;
extern char *optarg;
/*
* for ps display
*/
-bool HostnameLookup;
-bool ShowPortNumber;
+bool HostnameLookup;
+bool ShowPortNumber;
-bool Log_connections = false;
+bool Log_connections = false;
CommandDest whereToSendOutput = Debug;
-static bool dontExecute = false;
+static bool dontExecute = false;
/* note: these declarations had better match tcopprot.h */
DLLIMPORT sigjmp_buf Warn_restart;
@@ -342,9 +342,9 @@ ReadCommand(StringInfo inBuf)
* but it is still needed for parsing of SQL function bodies.
*/
List *
-pg_parse_and_rewrite(char *query_string, /* string to execute */
- Oid *typev, /* parameter types */
- int nargs) /* number of parameters */
+pg_parse_and_rewrite(char *query_string, /* string to execute */
+ Oid *typev,/* parameter types */
+ int nargs) /* number of parameters */
{
List *raw_parsetree_list;
List *querytree_list;
@@ -363,7 +363,7 @@ pg_parse_and_rewrite(char *query_string, /* string to execute */
querytree_list = NIL;
foreach(list_item, raw_parsetree_list)
{
- Node *parsetree = (Node *) lfirst(list_item);
+ Node *parsetree = (Node *) lfirst(list_item);
querytree_list = nconc(querytree_list,
pg_analyze_and_rewrite(parsetree));
@@ -486,10 +486,14 @@ pg_analyze_and_rewrite(Node *parsetree)
}
#ifdef COPY_PARSE_PLAN_TREES
- /* Optional debugging check: pass querytree output through copyObject() */
+
+ /*
+ * Optional debugging check: pass querytree output through
+ * copyObject()
+ */
new_list = (List *) copyObject(querytree_list);
/* This checks both copyObject() and the equal() routines... */
- if (! equal(new_list, querytree_list))
+ if (!equal(new_list, querytree_list))
elog(NOTICE, "pg_analyze_and_rewrite: copyObject failed on parse tree");
else
querytree_list = new_list;
@@ -547,14 +551,15 @@ pg_plan_query(Query *querytree)
#ifdef COPY_PARSE_PLAN_TREES
/* Optional debugging check: pass plan output through copyObject() */
{
- Plan *new_plan = (Plan *) copyObject(plan);
+ Plan *new_plan = (Plan *) copyObject(plan);
- /* equal() currently does not have routines to compare Plan nodes,
+ /*
+ * equal() currently does not have routines to compare Plan nodes,
* so don't try to test equality here. Perhaps fix someday?
*/
#ifdef NOT_USED
/* This checks both copyObject() and the equal() routines... */
- if (! equal(new_plan, plan))
+ if (!equal(new_plan, plan))
elog(NOTICE, "pg_plan_query: copyObject failed on plan tree");
else
#endif
@@ -593,7 +598,7 @@ pg_plan_query(Query *querytree)
*
* The CurrentMemoryContext after starting a transaction command must be
* appropriate for execution of individual queries (typically this will be
- * TransactionCommandContext). Note that this routine resets that context
+ * TransactionCommandContext). Note that this routine resets that context
* after each individual query, so don't store anything there that
* must outlive the call!
*
@@ -612,9 +617,10 @@ pg_plan_query(Query *querytree)
*/
void
-pg_exec_query_string(char *query_string, /* string to execute */
- CommandDest dest, /* where results should go */
- MemoryContext parse_context) /* context for parsetrees */
+pg_exec_query_string(char *query_string, /* string to execute */
+ CommandDest dest, /* where results should go */
+ MemoryContext parse_context) /* context for
+ * parsetrees */
{
bool xact_started;
MemoryContext oldcontext;
@@ -622,21 +628,21 @@ pg_exec_query_string(char *query_string, /* string to execute */
*parsetree_item;
/*
- * Start up a transaction command. All queries generated by the
- * query_string will be in this same command block, *unless* we find
- * a BEGIN/COMMIT/ABORT statement; we have to force a new xact command
- * after one of those, else bad things will happen in xact.c.
- * (Note that this will possibly change current memory context.)
+ * Start up a transaction command. All queries generated by the
+ * query_string will be in this same command block, *unless* we find a
+ * BEGIN/COMMIT/ABORT statement; we have to force a new xact command
+ * after one of those, else bad things will happen in xact.c. (Note
+ * that this will possibly change current memory context.)
*/
start_xact_command();
xact_started = true;
/*
- * parse_context *must* be different from the execution memory context,
- * else the context reset at the bottom of the loop will destroy the
- * parsetree list. (We really ought to check that parse_context isn't a
- * child of CurrentMemoryContext either, but that would take more cycles
- * than it's likely to be worth.)
+ * parse_context *must* be different from the execution memory
+ * context, else the context reset at the bottom of the loop will
+ * destroy the parsetree list. (We really ought to check that
+ * parse_context isn't a child of CurrentMemoryContext either, but
+ * that would take more cycles than it's likely to be worth.)
*/
Assert(parse_context != CurrentMemoryContext);
@@ -646,8 +652,8 @@ pg_exec_query_string(char *query_string, /* string to execute */
oldcontext = MemoryContextSwitchTo(parse_context);
/*
- * Do basic parsing of the query or queries (this should be safe
- * even if we are in aborted transaction state!)
+ * Do basic parsing of the query or queries (this should be safe even
+ * if we are in aborted transaction state!)
*/
parsetree_list = pg_parse_query(query_string, NULL, 0);
@@ -661,10 +667,10 @@ pg_exec_query_string(char *query_string, /* string to execute */
*/
foreach(parsetree_item, parsetree_list)
{
- Node *parsetree = (Node *) lfirst(parsetree_item);
- bool isTransactionStmt;
- List *querytree_list,
- *querytree_item;
+ Node *parsetree = (Node *) lfirst(parsetree_item);
+ bool isTransactionStmt;
+ List *querytree_list,
+ *querytree_item;
/* Transaction control statements need some special handling */
isTransactionStmt = IsA(parsetree, TransactionStmt);
@@ -673,13 +679,13 @@ pg_exec_query_string(char *query_string, /* string to execute */
* If we are in an aborted transaction, ignore all commands except
* COMMIT/ABORT. It is important that this test occur before we
* try to do parse analysis, rewrite, or planning, since all those
- * phases try to do database accesses, which may fail in abort state.
- * (It might be safe to allow some additional utility commands in
- * this state, but not many...)
+ * phases try to do database accesses, which may fail in abort
+ * state. (It might be safe to allow some additional utility
+ * commands in this state, but not many...)
*/
if (IsAbortedTransactionBlockState())
{
- bool allowit = false;
+ bool allowit = false;
if (isTransactionStmt)
{
@@ -696,7 +702,7 @@ pg_exec_query_string(char *query_string, /* string to execute */
}
}
- if (! allowit)
+ if (!allowit)
{
/* ----------------
* the EndCommand() stuff is to tell the frontend
@@ -720,7 +726,7 @@ pg_exec_query_string(char *query_string, /* string to execute */
}
/* Make sure we are in a transaction command */
- if (! xact_started)
+ if (!xact_started)
{
start_xact_command();
xact_started = true;
@@ -732,8 +738,8 @@ pg_exec_query_string(char *query_string, /* string to execute */
/*
* OK to analyze and rewrite this query.
*
- * Switch to appropriate context for constructing querytrees
- * (again, these must outlive the execution context).
+ * Switch to appropriate context for constructing querytrees (again,
+ * these must outlive the execution context).
*/
oldcontext = MemoryContextSwitchTo(parse_context);
@@ -753,13 +759,16 @@ pg_exec_query_string(char *query_string, /* string to execute */
Query *querytree = (Query *) lfirst(querytree_item);
/* Make sure we are in a transaction command */
- if (! xact_started)
+ if (!xact_started)
{
start_xact_command();
xact_started = true;
}
- /* If we got a cancel signal in analysis or prior command, quit */
+ /*
+ * If we got a cancel signal in analysis or prior command,
+ * quit
+ */
CHECK_FOR_INTERRUPTS();
if (querytree->commandType == CMD_UTILITY)
@@ -819,27 +828,27 @@ pg_exec_query_string(char *query_string, /* string to execute */
/*
* In a query block, we want to increment the command counter
* between queries so that the effects of early queries are
- * visible to subsequent ones. In particular we'd better
- * do so before checking constraints.
+ * visible to subsequent ones. In particular we'd better do
+ * so before checking constraints.
*/
if (!isTransactionStmt)
CommandCounterIncrement();
/*
- * Clear the execution context to recover temporary
- * memory used by the query. NOTE: if query string contains
+ * Clear the execution context to recover temporary memory
+ * used by the query. NOTE: if query string contains
* BEGIN/COMMIT transaction commands, execution context may
- * now be different from what we were originally passed;
- * so be careful to clear current context not "oldcontext".
+ * now be different from what we were originally passed; so be
+ * careful to clear current context not "oldcontext".
*/
Assert(parse_context != CurrentMemoryContext);
MemoryContextResetAndDeleteChildren(CurrentMemoryContext);
/*
- * If this was a transaction control statement, commit it
- * and arrange to start a new xact command for the next
- * command (if any).
+ * If this was a transaction control statement, commit it and
+ * arrange to start a new xact command for the next command
+ * (if any).
*/
if (isTransactionStmt)
{
@@ -847,8 +856,9 @@ pg_exec_query_string(char *query_string, /* string to execute */
xact_started = false;
}
- } /* end loop over queries generated from a parsetree */
- } /* end loop over parsetrees */
+ } /* end loop over queries generated from a
+ * parsetree */
+ } /* end loop over parsetrees */
/*
* Close down transaction statement, if one is open.
@@ -915,10 +925,10 @@ quickdie(SIGNAL_ARGS)
* corrupted, so we don't want to try to clean up our transaction.
* Just nail the windows shut and get out of town.
*
- * Note we do exit(1) not exit(0). This is to force the postmaster
- * into a system reset cycle if some idiot DBA sends a manual SIGQUIT
- * to a random backend. This is necessary precisely because we don't
- * clean up our shared memory state.
+ * Note we do exit(1) not exit(0). This is to force the postmaster into
+ * a system reset cycle if some idiot DBA sends a manual SIGQUIT to a
+ * random backend. This is necessary precisely because we don't clean
+ * up our shared memory state.
*/
exit(1);
@@ -934,13 +944,14 @@ die(SIGNAL_ARGS)
int save_errno = errno;
/* Don't joggle the elbow of proc_exit */
- if (! proc_exit_inprogress)
+ if (!proc_exit_inprogress)
{
InterruptPending = true;
ProcDiePending = true;
+
/*
- * If it's safe to interrupt, and we're waiting for input or a lock,
- * service the interrupt immediately
+ * If it's safe to interrupt, and we're waiting for input or a
+ * lock, service the interrupt immediately
*/
if (ImmediateInterruptOK && InterruptHoldoffCount == 0 &&
CritSectionCount == 0)
@@ -968,15 +979,19 @@ QueryCancelHandler(SIGNAL_ARGS)
{
int save_errno = errno;
- /* Don't joggle the elbow of proc_exit, nor an already-in-progress abort */
+ /*
+ * Don't joggle the elbow of proc_exit, nor an already-in-progress
+ * abort
+ */
if (!proc_exit_inprogress && !InError)
{
InterruptPending = true;
QueryCancelPending = true;
+
/*
* If it's safe to interrupt, and we're waiting for a lock,
- * service the interrupt immediately. No point in interrupting
- * if we're waiting for input, however.
+ * service the interrupt immediately. No point in interrupting if
+ * we're waiting for input, however.
*/
if (ImmediateInterruptOK && InterruptHoldoffCount == 0 &&
CritSectionCount == 0)
@@ -1032,14 +1047,14 @@ ProcessInterrupts(void)
if (ProcDiePending)
{
ProcDiePending = false;
- QueryCancelPending = false; /* ProcDie trumps QueryCancel */
- ImmediateInterruptOK = false; /* not idle anymore */
+ QueryCancelPending = false; /* ProcDie trumps QueryCancel */
+ ImmediateInterruptOK = false; /* not idle anymore */
elog(FATAL, "This connection has been terminated by an administrator");
}
if (QueryCancelPending)
{
QueryCancelPending = false;
- ImmediateInterruptOK = false; /* not idle anymore */
+ ImmediateInterruptOK = false; /* not idle anymore */
elog(ERROR, "Query was cancelled.");
}
/* If we get here, do nothing (probably, QueryCancelPending was reset) */
@@ -1065,7 +1080,7 @@ usage(char *progname)
printf(" -F turn fsync off\n");
printf(" -N do not use newline as interactive query delimiter\n");
printf(" -o FILENAME send stdout and stderr to given file\n");
- printf(" -P disable system indexes\n");
+ printf(" -P disable system indexes\n");
printf(" -s show statistics after each query\n");
printf(" -S SORT-MEM set amount of memory for sorts (in kbytes)\n");
printf("Developer options:\n");
@@ -1082,7 +1097,7 @@ usage(char *progname)
/* ----------------------------------------------------------------
* PostgresMain
- * postgres main loop -- all backends, interactive or otherwise start here
+ * postgres main loop -- all backends, interactive or otherwise start here
*
* argc/argv are the command line arguments to be used. When being forked
* by the postmaster, these are not the original argv array of the process.
@@ -1092,11 +1107,11 @@ usage(char *progname)
* ----------------------------------------------------------------
*/
int
-PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const char * username)
+PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const char *username)
{
int flag;
- const char *DBName = NULL;
+ const char *DBName = NULL;
bool secure = true;
int errs = 0;
@@ -1106,25 +1121,25 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
char *remote_host;
unsigned short remote_port;
- char *potential_DataDir = NULL;
+ char *potential_DataDir = NULL;
/*
- * Catch standard options before doing much else. This even works
- * on systems without getopt_long.
+ * Catch standard options before doing much else. This even works on
+ * systems without getopt_long.
*/
if (!IsUnderPostmaster && argc > 1)
{
- if (strcmp(argv[1], "--help")==0 || strcmp(argv[1], "-?")==0)
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
usage(argv[0]);
exit(0);
}
- if (strcmp(argv[1], "--version")==0 || strcmp(argv[1], "-V")==0)
+ if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
puts("postgres (PostgreSQL) " PG_VERSION);
exit(0);
}
- }
+ }
/*
* Fire up essential subsystems: error and memory management
@@ -1174,7 +1189,7 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
optind = 1; /* reset after postmaster's usage */
- while ((flag = getopt(argc, argv, "A:B:c:CD:d:Eef:FiLNOPo:p:S:st:v:W:x:-:")) != EOF)
+ while ((flag = getopt(argc, argv, "A:B:c:CD:d:Eef:FiLNOPo:p:S:st:v:W:x:-:")) != EOF)
switch (flag)
{
case 'A':
@@ -1210,7 +1225,7 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
case 'd': /* debug level */
DebugLvl = atoi(optarg);
 					if (DebugLvl >= 1)
- Log_connections = true;
+ Log_connections = true;
if (DebugLvl >= 2)
Debug_print_query = true;
if (DebugLvl >= 3)
@@ -1438,26 +1453,27 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
case 'c':
case '-':
- {
- char *name, *value;
-
- ParseLongOption(optarg, &name, &value);
- if (!value)
{
- if (flag == '-')
- elog(ERROR, "--%s requires argument", optarg);
- else
- elog(ERROR, "-c %s requires argument", optarg);
- }
+ char *name,
+ *value;
- /* all options are allowed if not under postmaster */
- SetConfigOption(name, value,
- (IsUnderPostmaster) ? PGC_BACKEND : PGC_POSTMASTER);
- free(name);
- if (value)
- free(value);
- break;
- }
+ ParseLongOption(optarg, &name, &value);
+ if (!value)
+ {
+ if (flag == '-')
+ elog(ERROR, "--%s requires argument", optarg);
+ else
+ elog(ERROR, "-c %s requires argument", optarg);
+ }
+
+ /* all options are allowed if not under postmaster */
+ SetConfigOption(name, value,
+ (IsUnderPostmaster) ? PGC_BACKEND : PGC_POSTMASTER);
+ free(name);
+ if (value)
+ free(value);
+ break;
+ }
default:
errs++;
@@ -1482,9 +1498,9 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (!potential_DataDir)
{
fprintf(stderr, "%s does not know where to find the database system "
- "data. You must specify the directory that contains the "
- "database system either by specifying the -D invocation "
- "option or by setting the PGDATA environment variable.\n\n",
+ "data. You must specify the directory that contains the "
+ "database system either by specifying the -D invocation "
+ "option or by setting the PGDATA environment variable.\n\n",
argv[0]);
proc_exit(1);
}
@@ -1496,20 +1512,22 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
* Set up signal handlers and masks.
*
* Note that postmaster blocked all signals before forking child process,
- * so there is no race condition whereby we might receive a signal before
- * we have set up the handler.
+ * so there is no race condition whereby we might receive a signal
+ * before we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in
- * the postmaster. If such a signal arrives before we are able to change
- * the handler to non-SIG_IGN, it'll get dropped. If necessary, make a
- * dummy handler in the postmaster to reserve the signal.
+ * the postmaster. If such a signal arrives before we are able to
+ * change the handler to non-SIG_IGN, it'll get dropped. If
+ * necessary, make a dummy handler in the postmaster to reserve the
+ * signal.
*/
pqsignal(SIGHUP, SigHupHandler); /* set flag to read config file */
- pqsignal(SIGINT, QueryCancelHandler); /* cancel current query */
+ pqsignal(SIGINT, QueryCancelHandler); /* cancel current query */
pqsignal(SIGTERM, die); /* cancel current query and exit */
- pqsignal(SIGQUIT, quickdie); /* hard crash time */
- pqsignal(SIGALRM, HandleDeadLock); /* check for deadlock after timeout */
+ pqsignal(SIGQUIT, quickdie);/* hard crash time */
+ pqsignal(SIGALRM, HandleDeadLock); /* check for deadlock after
+ * timeout */
/*
* Ignore failure to write to frontend. Note: if frontend closes
@@ -1518,13 +1536,15 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
* midst of output during who-knows-what operation...
*/
pqsignal(SIGPIPE, SIG_IGN);
- pqsignal(SIGUSR1, SIG_IGN); /* this signal available for use */
+ pqsignal(SIGUSR1, SIG_IGN); /* this signal available for use */
pqsignal(SIGUSR2, Async_NotifyHandler); /* flush also sinval cache */
pqsignal(SIGFPE, FloatExceptionHandler);
- pqsignal(SIGCHLD, SIG_IGN); /* ignored (may get this in system() calls) */
+ pqsignal(SIGCHLD, SIG_IGN); /* ignored (may get this in system()
+ * calls) */
/*
- * Reset some signals that are accepted by postmaster but not by backend
+ * Reset some signals that are accepted by postmaster but not by
+ * backend
*/
pqsignal(SIGTTIN, SIG_DFL);
pqsignal(SIGTTOU, SIG_DFL);
@@ -1549,7 +1569,8 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (errs || argc != optind || DBName == NULL)
{
fprintf(stderr, "%s: invalid command line arguments\nTry -? for help.\n", argv[0]);
- proc_exit(0); /* not 1, that causes system-wide restart... */
+ proc_exit(0); /* not 1, that causes system-wide
+ * restart... */
}
pq_init(); /* initialize libpq at backend startup */
whereToSendOutput = Remote;
@@ -1576,7 +1597,7 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
/*
* Create lockfile for data directory.
*/
- if (! CreateDataDirLockFile(DataDir, false))
+ if (!CreateDataDirLockFile(DataDir, false))
proc_exit(1);
XLOGPathInit();
@@ -1613,7 +1634,7 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (MyProcPort->raddr.sa.sa_family == AF_INET)
{
struct hostent *host_ent;
- char * host_addr;
+ char *host_addr;
remote_port = ntohs(MyProcPort->raddr.in.sin_port);
host_addr = inet_ntoa(MyProcPort->raddr.in.sin_addr);
@@ -1634,23 +1655,25 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (ShowPortNumber)
{
- char * str = palloc(strlen(remote_host) + 7);
+ char *str = palloc(strlen(remote_host) + 7);
+
sprintf(str, "%s:%hu", remote_host, remote_port);
pfree(remote_host);
remote_host = str;
}
}
- else /* not AF_INET */
+ else
+/* not AF_INET */
remote_host = "[local]";
/*
* Set process parameters for ps
*
- * WARNING: On some platforms the environment will be moved
- * around to make room for the ps display string. So any
- * references to optarg or getenv() from above will be invalid
- * after this call. Better use strdup or something similar.
+ * WARNING: On some platforms the environment will be moved around to
+ * make room for the ps display string. So any references to
+ * optarg or getenv() from above will be invalid after this call.
+ * Better use strdup or something similar.
*/
init_ps_display(real_argc, real_argv, username, DBName, remote_host);
set_ps_display("startup");
@@ -1692,16 +1715,16 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (!IsUnderPostmaster)
{
puts("\nPOSTGRES backend interactive interface ");
- puts("$Revision: 1.212 $ $Date: 2001/03/14 18:24:34 $\n");
+ puts("$Revision: 1.213 $ $Date: 2001/03/22 03:59:47 $\n");
}
/*
* Create the memory context we will use in the main loop.
*
- * QueryContext is reset once per iteration of the main loop,
- * ie, upon completion of processing of each supplied query string.
- * It can therefore be used for any data that should live just as
- * long as the query string --- parse trees, for example.
+ * QueryContext is reset once per iteration of the main loop, ie, upon
+ * completion of processing of each supplied query string. It can
+ * therefore be used for any data that should live just as long as the
+ * query string --- parse trees, for example.
*/
QueryContext = AllocSetContextCreate(TopMemoryContext,
"QueryContext",
@@ -1718,10 +1741,11 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
if (sigsetjmp(Warn_restart, 1) != 0)
{
+
/*
* NOTE: if you are tempted to add more code in this if-block,
- * consider the probability that it should be in AbortTransaction()
- * instead.
+ * consider the probability that it should be in
+ * AbortTransaction() instead.
*
* Make sure we're not interrupted while cleaning up. Also forget
* any pending QueryCancel request, since we're aborting anyway.
@@ -1776,9 +1800,10 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
for (;;)
{
+
/*
- * Release storage left over from prior query cycle, and
- * create a new query input buffer in the cleared QueryContext.
+ * Release storage left over from prior query cycle, and create a
+ * new query input buffer in the cleared QueryContext.
*/
MemoryContextSwitchTo(QueryContext);
MemoryContextResetAndDeleteChildren(QueryContext);
@@ -1804,7 +1829,8 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
* Then set up other stuff needed before blocking for input.
* ----------------
*/
- QueryCancelPending = false; /* forget any earlier CANCEL signal */
+ QueryCancelPending = false; /* forget any earlier CANCEL
+ * signal */
EnableNotifyInterrupt();
@@ -1825,7 +1851,7 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
* ----------------
*/
ImmediateInterruptOK = false;
- QueryCancelPending = false; /* forget any CANCEL signal */
+ QueryCancelPending = false; /* forget any CANCEL signal */
DisableNotifyInterrupt();
@@ -1912,10 +1938,11 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
*/
case 'X':
case EOF:
+
/*
* NOTE: if you are tempted to add more code here, DON'T!
- * Whatever you had in mind to do should be set up as
- * an on_proc_exit or on_shmem_exit callback, instead.
+ * Whatever you had in mind to do should be set up as an
+ * on_proc_exit or on_shmem_exit callback, instead.
* Otherwise it will fail to be called during other
* backend-shutdown scenarios.
*/
@@ -1926,11 +1953,12 @@ PostgresMain(int argc, char *argv[], int real_argc, char *real_argv[], const cha
}
#ifdef MEMORY_CONTEXT_CHECKING
+
/*
* Check all memory after each backend loop. This is a rather
* weird place to do it, perhaps.
*/
- MemoryContextCheck(TopMemoryContext);
+ MemoryContextCheck(TopMemoryContext);
#endif
} /* end of input-reading loop */
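
Most of the postgres.c hunks above only rewrap comments, but the comments they rewrap describe a concrete discipline: QueryContext is reset at the top of every main-loop iteration, parse trees must live in a context different from the one that is cleared after each individual query, and the Assert(parse_context != CurrentMemoryContext) enforces exactly that. As a rough, self-contained sketch of the pattern -- using an invented toy Arena type rather than the backend's MemoryContext machinery -- the loop structure looks like this:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy arena: every allocation is remembered so reset() can free them all.
 * Capacity and malloc-failure checks are omitted for brevity. */
typedef struct Arena
{
	void   *chunks[256];
	int		nchunks;
} Arena;

static void *arena_alloc(Arena *a, size_t size)
{
	void   *p = malloc(size);

	a->chunks[a->nchunks++] = p;
	return p;
}

static void arena_reset(Arena *a)
{
	for (int i = 0; i < a->nchunks; i++)
		free(a->chunks[i]);
	a->nchunks = 0;
}

int main(void)
{
	Arena		query_ctx = {{0}, 0};	/* lives as long as one query string */
	Arena		exec_ctx = {{0}, 0};	/* reset after every individual query */
	const char *inputs[] = {"select 1; select 2;", "select 3;"};

	for (int i = 0; i < 2; i++)
	{
		/* top of loop: recover everything from the previous cycle */
		arena_reset(&query_ctx);

		/* "parse trees" go into query_ctx ... */
		char	   *parsetree = arena_alloc(&query_ctx, strlen(inputs[i]) + 1);

		strcpy(parsetree, inputs[i]);

		/* ... and must not live in the context the executor clears */
		assert(&query_ctx != &exec_ctx);

		/* per-query execution scratch goes into exec_ctx */
		char	   *scratch = arena_alloc(&exec_ctx, 64);

		snprintf(scratch, 64, "running: %s", parsetree);
		puts(scratch);

		/* after each query, clear execution storage only */
		arena_reset(&exec_ctx);
	}
	arena_reset(&query_ctx);
	return 0;
}

Clearing only the execution arena between queries is what lets a later query generated from the same string still see its parse tree, which is the point the rewrapped comments are making.
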
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index b36b9f6510c..2dfc5a9ff1f 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.42 2001/02/27 22:07:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.43 2001/03/22 03:59:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -144,7 +144,11 @@ PreparePortal(char *portalName)
portal = GetPortalByName(portalName);
if (PortalIsValid(portal))
{
- /* XXX Should we raise an error rather than closing the old portal? */
+
+ /*
+ * XXX Should we raise an error rather than closing the old
+ * portal?
+ */
elog(NOTICE, "Closing pre-existing portal \"%s\"",
portalName);
PortalDrop(&portal);
@@ -226,6 +230,7 @@ ProcessQuery(Query *parsetree,
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
parsetree = copyObject(parsetree);
plan = copyObject(plan);
+
/*
* We stay in portal's memory context for now, so that query desc,
* EState, and plan startup info are also allocated in the portal
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 65b6baf535e..370d2d3bac7 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/utility.c,v 1.107 2001/01/27 10:19:52 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/utility.c,v 1.108 2001/03/22 03:59:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,24 +52,25 @@
* Error-checking support for DROP commands
*/
-struct kindstrings {
- char kind;
- char *indef_article;
- char *name;
- char *command;
+struct kindstrings
+{
+ char kind;
+ char *indef_article;
+ char *name;
+ char *command;
};
static struct kindstrings kindstringarray[] = {
- { RELKIND_RELATION, "a", "table", "TABLE" },
- { RELKIND_SEQUENCE, "a", "sequence", "SEQUENCE" },
- { RELKIND_VIEW, "a", "view", "VIEW" },
- { RELKIND_INDEX, "an", "index", "INDEX" },
- { '\0', "a", "???", "???" }
+ {RELKIND_RELATION, "a", "table", "TABLE"},
+ {RELKIND_SEQUENCE, "a", "sequence", "SEQUENCE"},
+ {RELKIND_VIEW, "a", "view", "VIEW"},
+ {RELKIND_INDEX, "an", "index", "INDEX"},
+ {'\0', "a", "???", "???"}
};
static void
-DropErrorMsg(char* relname, char wrongkind, char rightkind)
+DropErrorMsg(char *relname, char wrongkind, char rightkind)
{
struct kindstrings *rentry;
struct kindstrings *wentry;
@@ -218,12 +219,12 @@ ProcessUtility(Node *parsetree,
DefineRelation((CreateStmt *) parsetree, RELKIND_RELATION);
/*
- * Let AlterTableCreateToastTable decide if this
- * one needs a secondary relation too.
+ * Let AlterTableCreateToastTable decide if this one needs a
+ * secondary relation too.
*/
CommandCounterIncrement();
- AlterTableCreateToastTable(((CreateStmt *)parsetree)->relname,
- true);
+ AlterTableCreateToastTable(((CreateStmt *) parsetree)->relname,
+ true);
break;
case T_DropStmt:
@@ -238,7 +239,7 @@ ProcessUtility(Node *parsetree,
{
relname = strVal(lfirst(arg));
- switch(stmt->removeType)
+ switch (stmt->removeType)
{
case DROP_TABLE:
CheckDropPermissions(relname, RELKIND_RELATION);
@@ -268,8 +269,8 @@ ProcessUtility(Node *parsetree,
relationName = RewriteGetRuleEventRel(rulename);
aclcheck_result = pg_aclcheck(relationName, GetUserId(), ACL_RU);
if (aclcheck_result != ACLCHECK_OK)
- elog(ERROR, "%s: %s", relationName,
- aclcheck_error_strings[aclcheck_result]);
+ elog(ERROR, "%s: %s", relationName,
+ aclcheck_error_strings[aclcheck_result]);
RemoveRewriteRule(rulename);
}
break;
@@ -281,9 +282,9 @@ ProcessUtility(Node *parsetree,
}
/*
- * Make sure subsequent loop iterations will see results
- * of this one; needed if removing multiple rules for
- * same table, for example.
+ * Make sure subsequent loop iterations will see
+ * results of this one; needed if removing multiple
+ * rules for same table, for example.
*/
CommandCounterIncrement();
}
@@ -402,7 +403,7 @@ ProcessUtility(Node *parsetree,
renameatt(relname, /* relname */
stmt->column, /* old att name */
stmt->newname, /* new att name */
- interpretInhOption(stmt->inhOpt)); /* recursive? */
+ interpretInhOption(stmt->inhOpt)); /* recursive? */
}
}
break;
@@ -423,29 +424,29 @@ ProcessUtility(Node *parsetree,
{
case 'A': /* ADD COLUMN */
AlterTableAddColumn(stmt->relname,
- interpretInhOption(stmt->inhOpt),
+ interpretInhOption(stmt->inhOpt),
(ColumnDef *) stmt->def);
break;
case 'T': /* ALTER COLUMN */
AlterTableAlterColumn(stmt->relname,
- interpretInhOption(stmt->inhOpt),
+ interpretInhOption(stmt->inhOpt),
stmt->name,
stmt->def);
break;
case 'D': /* ALTER DROP */
AlterTableDropColumn(stmt->relname,
- interpretInhOption(stmt->inhOpt),
+ interpretInhOption(stmt->inhOpt),
stmt->name,
stmt->behavior);
break;
case 'C': /* ADD CONSTRAINT */
AlterTableAddConstraint(stmt->relname,
- interpretInhOption(stmt->inhOpt),
+ interpretInhOption(stmt->inhOpt),
stmt->def);
break;
case 'X': /* DROP CONSTRAINT */
AlterTableDropConstraint(stmt->relname,
- interpretInhOption(stmt->inhOpt),
+ interpretInhOption(stmt->inhOpt),
stmt->name,
stmt->behavior);
break;
@@ -872,10 +873,10 @@ ProcessUtility(Node *parsetree,
{
if (!allowSystemTableMods && IsSystemRelationName(relname))
elog(ERROR, "\"%s\" is a system index. call REINDEX under standalone postgres with -O -P options",
- relname);
+ relname);
if (!IsIgnoringSystemIndexes())
elog(ERROR, "\"%s\" is a system index. call REINDEX under standalone postgres with -P -O options",
- relname);
+ relname);
}
if (!pg_ownercheck(GetUserId(), relname, RELNAME))
elog(ERROR, "%s: %s", relname, aclcheck_error_strings[ACLCHECK_NOT_OWNER]);
@@ -888,12 +889,12 @@ ProcessUtility(Node *parsetree,
#ifdef OLD_FILE_NAMING
if (!allowSystemTableMods && IsSystemRelationName(relname))
elog(ERROR, "\"%s\" is a system table. call REINDEX under standalone postgres with -O -P options",
- relname);
+ relname);
if (!IsIgnoringSystemIndexes())
elog(ERROR, "\"%s\" is a system table. call REINDEX under standalone postgres with -P -O options",
- relname);
-#endif /* OLD_FILE_NAMING */
+ relname);
+#endif /* OLD_FILE_NAMING */
}
if (!pg_ownercheck(GetUserId(), relname, RELNAME))
elog(ERROR, "%s: %s", relname, aclcheck_error_strings[ACLCHECK_NOT_OWNER]);
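
The kindstrings table near the top of the utility.c diff is what drives the DROP error messages: each row pairs a relkind byte with an indefinite article, a noun, and the command keyword, and a '\0' row terminates the array. A minimal standalone imitation of that lookup and of a DropErrorMsg-style message follows; the relkind letters and the exact wording are illustrative here, not copied from utility.c.

#include <stdio.h>

struct kindstrings
{
	char		kind;
	const char *indef_article;
	const char *name;
	const char *command;
};

/* sentinel entry (kind == '\0') terminates the array */
static const struct kindstrings kindstringarray[] = {
	{'r', "a", "table", "TABLE"},
	{'S', "a", "sequence", "SEQUENCE"},
	{'v', "a", "view", "VIEW"},
	{'i', "an", "index", "INDEX"},
	{'\0', "a", "???", "???"}
};

static const struct kindstrings *lookup_kind(char kind)
{
	const struct kindstrings *entry;

	for (entry = kindstringarray; entry->kind != '\0'; entry++)
		if (entry->kind == kind)
			return entry;
	return entry;				/* fall back to the sentinel */
}

/* Complain that relname is really 'wrongkind', not the expected 'rightkind'. */
static void drop_error_msg(const char *relname, char wrongkind, char rightkind)
{
	const struct kindstrings *wentry = lookup_kind(wrongkind);
	const struct kindstrings *rentry = lookup_kind(rightkind);

	fprintf(stderr, "\"%s\" is not %s %s; use DROP %s instead\n",
			relname, rentry->indef_article, rentry->name, wentry->command);
}

int main(void)
{
	drop_error_msg("my_view", 'v', 'r');	/* tried DROP TABLE on a view */
	return 0;
}

The sentinel row keeps the lookup loop free of a separate length constant, which is why the real table also ends with a '\0' entry.
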
diff --git a/src/backend/tioga/tgRecipe.c b/src/backend/tioga/tgRecipe.c
index d9100c5f10a..0a2dca7dd8a 100644
--- a/src/backend/tioga/tgRecipe.c
+++ b/src/backend/tioga/tgRecipe.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tioga/Attic/tgRecipe.c,v 1.18 2001/01/24 19:43:12 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tioga/Attic/tgRecipe.c,v 1.19 2001/03/22 03:59:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,9 +44,9 @@ static Arr_TgString *TextArray2ArrTgString(char *str);
"select * from Edge e where e.belongsTo = '%s'"
/* static functions only used here */
-static void fillTgElement(TgElement * elem, PortalBuffer *pbuf, int tupno);
-static void fillTgNode(TgRecipe * r, TgNode * node, PortalBuffer *pbuf, int tupno);
-static TgRecipe *fillTgRecipe(PortalBuffer *pbuf, int tupno);
+static void fillTgElement(TgElement * elem, PortalBuffer * pbuf, int tupno);
+static void fillTgNode(TgRecipe * r, TgNode * node, PortalBuffer * pbuf, int tupno);
+static TgRecipe *fillTgRecipe(PortalBuffer * pbuf, int tupno);
static void lookupEdges(TgRecipe * r, char *name);
static void fillAllNodes(TgRecipe * r, char *name);
static void fillAllElements(TgRecipe * r, char *name);
@@ -173,7 +173,7 @@ fillTgNode
-------------------------------------- */
void
-fillTgNode(TgRecipe * r, TgNode * node, PortalBuffer *pbuf, int tupno)
+fillTgNode(TgRecipe * r, TgNode * node, PortalBuffer * pbuf, int tupno)
{
char *nodeType;
char *nodeElem;
@@ -258,7 +258,7 @@ fillTgElement
------------------------------------ */
void
-fillTgElement(TgElement * elem, PortalBuffer *pbuf, int tupno)
+fillTgElement(TgElement * elem, PortalBuffer * pbuf, int tupno)
{
char *srcLang,
*elemType;
@@ -618,7 +618,7 @@ fillTgRecipe
   and converts it to a C TgRecipe structure
------------------------------------ */
TgRecipe *
-fillTgRecipe(PortalBuffer *pbuf, int tupno)
+fillTgRecipe(PortalBuffer * pbuf, int tupno)
{
TgRecipe *r;
int i,
diff --git a/src/backend/tioga/tgRecipe.h b/src/backend/tioga/tgRecipe.h
index c52265d6051..5b190f17254 100644
--- a/src/backend/tioga/tgRecipe.h
+++ b/src/backend/tioga/tgRecipe.h
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: tgRecipe.h,v 1.15 2001/01/24 19:43:12 momjian Exp $
+ * $Id: tgRecipe.h,v 1.16 2001/03/22 03:59:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,6 +40,7 @@ typedef struct
*
*
*
+ *
* geo-decls.h */
#endif /* TIOGA_FRONTEND */
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 5a3b99f64a4..10e2f13bc32 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.57 2001/01/24 19:43:12 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.58 2001/03/22 03:59:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -262,7 +262,7 @@ aclitemin(PG_FUNCTION_ARGS)
Datum
aclitemout(PG_FUNCTION_ARGS)
{
- AclItem *aip = PG_GETARG_ACLITEM_P(0);
+ AclItem *aip = PG_GETARG_ACLITEM_P(0);
char *p;
char *out;
HeapTuple htup;
@@ -281,7 +281,7 @@ aclitemout(PG_FUNCTION_ARGS)
if (HeapTupleIsValid(htup))
{
strncat(p,
- NameStr(((Form_pg_shadow) GETSTRUCT(htup))->usename),
+ NameStr(((Form_pg_shadow) GETSTRUCT(htup))->usename),
NAMEDATALEN);
ReleaseSysCache(htup);
}
@@ -291,7 +291,7 @@ aclitemout(PG_FUNCTION_ARGS)
char *tmp;
tmp = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum((int32) aip->ai_id)));
+ Int32GetDatum((int32) aip->ai_id)));
strcat(p, tmp);
pfree(tmp);
}
@@ -307,7 +307,7 @@ aclitemout(PG_FUNCTION_ARGS)
char *tmp;
tmp = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum((int32) aip->ai_id)));
+ Int32GetDatum((int32) aip->ai_id)));
strcat(p, tmp);
pfree(tmp);
}
@@ -473,9 +473,9 @@ aclinsert3(Acl *old_acl, AclItem *mod_aip, unsigned modechg)
}
/*
- * if the adjusted entry has no permissions, delete it from the
- * list. For example, this helps in removing entries for users who no
- * longer exist. EXCEPTION: never remove the world entry.
+ * if the adjusted entry has no permissions, delete it from the list.
+ * For example, this helps in removing entries for users who no longer
+ * exist. EXCEPTION: never remove the world entry.
*/
if (new_aip[dst].ai_mode == 0 && dst > 0)
{
@@ -502,7 +502,7 @@ Datum
aclinsert(PG_FUNCTION_ARGS)
{
Acl *old_acl = PG_GETARG_ACL_P(0);
- AclItem *mod_aip = PG_GETARG_ACLITEM_P(1);
+ AclItem *mod_aip = PG_GETARG_ACLITEM_P(1);
PG_RETURN_ACL_P(aclinsert3(old_acl, mod_aip, ACL_MODECHG_EQL));
}
@@ -511,7 +511,7 @@ Datum
aclremove(PG_FUNCTION_ARGS)
{
Acl *old_acl = PG_GETARG_ACL_P(0);
- AclItem *mod_aip = PG_GETARG_ACLITEM_P(1);
+ AclItem *mod_aip = PG_GETARG_ACLITEM_P(1);
Acl *new_acl;
AclItem *old_aip,
*new_aip;
@@ -575,7 +575,7 @@ Datum
aclcontains(PG_FUNCTION_ARGS)
{
Acl *acl = PG_GETARG_ACL_P(0);
- AclItem *aip = PG_GETARG_ACLITEM_P(1);
+ AclItem *aip = PG_GETARG_ACLITEM_P(1);
AclItem *aidat;
int i,
num;
@@ -599,7 +599,7 @@ aclcontains(PG_FUNCTION_ARGS)
void
ExecuteChangeACLStmt(ChangeACLStmt *stmt)
{
- AclItem aclitem;
+ AclItem aclitem;
unsigned modechg;
List *i;
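
The rewrapped aclinsert3 comment states the rule the surrounding code implements: once an entry's permission bits drop to zero it is deleted from the ACL -- useful when a user no longer exists -- except that the world entry at index 0 is always kept. A standalone sketch of that compaction step, with an invented AclEntry struct standing in for the real AclItem:

#include <stdio.h>

typedef struct
{
	int			id;				/* user id; slot 0 is the "world" entry here */
	unsigned	mode;			/* permission bits */
} AclEntry;

/*
 * Remove entries whose mode is 0, but never the entry in slot 0.
 * Compaction is done in place; returns the new number of entries.
 */
static int acl_compact(AclEntry *acl, int n)
{
	int			dst = 1;		/* slot 0 is always kept */

	for (int src = 1; src < n; src++)
	{
		if (acl[src].mode != 0)
			acl[dst++] = acl[src];
	}
	return dst;
}

int main(void)
{
	AclEntry	acl[] = {
		{0, 0},					/* world entry with no rights: kept anyway */
		{101, 4},
		{102, 0},				/* e.g. a dropped user: removed */
		{103, 6},
	};
	int			n = acl_compact(acl, 4);

	for (int i = 0; i < n; i++)
		printf("id=%d mode=%u\n", acl[i].id, acl[i].mode);
	return 0;
}
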
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index d76fe6d64fe..84c674071c1 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.69 2001/01/24 19:43:12 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.70 2001/03/22 03:59:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,29 +60,29 @@
static int ArrayCount(char *str, int *dim, int typdelim);
static Datum *ReadArrayStr(char *arrayStr, int nitems, int ndim, int *dim,
- FmgrInfo *inputproc, Oid typelem, int32 typmod,
- char typdelim, int typlen, bool typbyval,
- char typalign, int *nbytes);
+ FmgrInfo *inputproc, Oid typelem, int32 typmod,
+ char typdelim, int typlen, bool typbyval,
+ char typalign, int *nbytes);
static void CopyArrayEls(char *p, Datum *values, int nitems,
- bool typbyval, int typlen, char typalign,
- bool freedata);
+ bool typbyval, int typlen, char typalign,
+ bool freedata);
static void system_cache_lookup(Oid element_type, bool input, int *typlen,
- bool *typbyval, char *typdelim, Oid *typelem,
- Oid *proc, char *typalign);
+ bool *typbyval, char *typdelim, Oid *typelem,
+ Oid *proc, char *typalign);
static Datum ArrayCast(char *value, bool byval, int len);
static int ArrayCastAndSet(Datum src, bool typbyval, int typlen, char *dest);
static int array_nelems_size(char *ptr, int eltsize, int nitems);
static char *array_seek(char *ptr, int eltsize, int nitems);
static int array_copy(char *destptr, int eltsize, int nitems, char *srcptr);
-static int array_slice_size(int ndim, int *dim, int *lb, char *arraydataptr,
- int eltsize, int *st, int *endp);
+static int array_slice_size(int ndim, int *dim, int *lb, char *arraydataptr,
+ int eltsize, int *st, int *endp);
static void array_extract_slice(int ndim, int *dim, int *lb,
- char *arraydataptr, int eltsize,
- int *st, int *endp, char *destPtr);
+ char *arraydataptr, int eltsize,
+ int *st, int *endp, char *destPtr);
static void array_insert_slice(int ndim, int *dim, int *lb,
- char *origPtr, int origdatasize,
- char *destPtr, int eltsize,
- int *st, int *endp, char *srcPtr);
+ char *origPtr, int origdatasize,
+ char *destPtr, int eltsize,
+ int *st, int *endp, char *srcPtr);
/*---------------------------------------------------------------------
@@ -96,9 +96,11 @@ static void array_insert_slice(int ndim, int *dim, int *lb,
Datum
array_in(PG_FUNCTION_ARGS)
{
- char *string = PG_GETARG_CSTRING(0); /* external form */
- Oid element_type = PG_GETARG_OID(1); /* type of an array element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
+ char *string = PG_GETARG_CSTRING(0); /* external form */
+ Oid element_type = PG_GETARG_OID(1); /* type of an array
+ * element */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array
+ * elements */
int typlen;
bool typbyval;
char typdelim;
@@ -132,8 +134,8 @@ array_in(PG_FUNCTION_ARGS)
* Otherwise, we require the input to be in curly-brace style, and we
* prescan the input to determine dimensions.
*
- * Dimension info takes the form of one or more [n] or [m:n] items.
- * The outer loop iterates once per dimension item.
+ * Dimension info takes the form of one or more [n] or [m:n] items. The
+ * outer loop iterates once per dimension item.
*/
p = string_save;
ndim = 0;
@@ -450,6 +452,7 @@ ReadArrayStr(char *arrayStr,
ObjectIdGetDatum(typelem),
Int32GetDatum(typmod));
p = ++q;
+
/*
* if not at the end of the array skip white space
*/
@@ -460,6 +463,7 @@ ReadArrayStr(char *arrayStr,
q++;
}
}
+
/*
* Initialize any unset items and compute total data space needed
*/
@@ -511,7 +515,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*----------
*/
@@ -601,7 +605,7 @@ array_out(PG_FUNCTION_ARGS)
itemvalue = fetch_att(p, typbyval, typlen);
values[i] = DatumGetCString(FunctionCall3(&outputproc,
itemvalue,
- ObjectIdGetDatum(typelem),
+ ObjectIdGetDatum(typelem),
Int32GetDatum(-1)));
if (typlen > 0)
p += typlen;
@@ -706,6 +710,7 @@ array_dims(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
nbytes = ARR_NDIM(v) * 33 + 1;
+
/*
* 33 since we assume 15 digits per number + ':' +'[]'
*
@@ -759,6 +764,7 @@ array_ref(ArrayType *array,
if (arraylen > 0)
{
+
/*
* fixed-length arrays -- these are assumed to be 1-d, 0-based
*/
@@ -809,7 +815,7 @@ array_ref(ArrayType *array,
* lowerIndx[] and upperIndx[]. These are generally just temporaries.
*-----------------------------------------------------------------------------
*/
-ArrayType *
+ArrayType *
array_get_slice(ArrayType *array,
int nSubscripts,
int *upperIndx,
@@ -835,6 +841,7 @@ array_get_slice(ArrayType *array,
if (arraylen > 0)
{
+
/*
* fixed-length arrays -- currently, cannot slice these because
* parser labels output as being of the fixed-length array type!
@@ -866,8 +873,9 @@ array_get_slice(ArrayType *array,
/*
* Check provided subscripts. A slice exceeding the current array
- * limits is silently truncated to the array limits. If we end up with
- * an empty slice, return NULL (should it be an empty array instead?)
+ * limits is silently truncated to the array limits. If we end up
+ * with an empty slice, return NULL (should it be an empty array
+ * instead?)
*/
if (ndim != nSubscripts || ndim <= 0 || ndim > MAXDIM)
RETURN_NULL(ArrayType *);
@@ -918,7 +926,7 @@ array_get_slice(ArrayType *array,
* with NULL, which will probably not make him happy.
*-----------------------------------------------------------------------------
*/
-ArrayType *
+ArrayType *
array_set(ArrayType *array,
int nSubscripts,
int *indx,
@@ -950,9 +958,10 @@ array_set(ArrayType *array,
if (arraylen > 0)
{
+
/*
- * fixed-length arrays -- these are assumed to be 1-d, 0-based.
- * We cannot extend them, either.
+ * fixed-length arrays -- these are assumed to be 1-d, 0-based. We
+ * cannot extend them, either.
*/
if (nSubscripts != 1)
elog(ERROR, "Invalid array subscripts");
@@ -994,9 +1003,7 @@ array_set(ArrayType *array,
extendbefore = true;
}
else
- {
elog(ERROR, "Invalid array subscripts");
- }
}
if (indx[i] >= (dim[i] + lb[i]))
{
@@ -1006,9 +1013,7 @@ array_set(ArrayType *array,
extendafter = true;
}
else
- {
elog(ERROR, "Invalid array subscripts");
- }
}
}
@@ -1085,7 +1090,7 @@ array_set(ArrayType *array,
* with NULL, which will probably not make him happy.
*----------------------------------------------------------------------------
*/
-ArrayType *
+ArrayType *
array_set_slice(ArrayType *array,
int nSubscripts,
int *upperIndx,
@@ -1118,6 +1123,7 @@ array_set_slice(ArrayType *array,
if (arraylen > 0)
{
+
/*
* fixed-length arrays -- not got round to doing this...
*/
@@ -1141,8 +1147,8 @@ array_set_slice(ArrayType *array,
/*
* Check provided subscripts. A slice exceeding the current array
* limits throws an error, *except* in the 1-D case where we will
- * extend the array as long as no hole is created.
- * An empty slice is an error, too.
+ * extend the array as long as no hole is created. An empty slice is
+ * an error, too.
*/
for (i = 0; i < ndim; i++)
{
@@ -1156,26 +1162,20 @@ array_set_slice(ArrayType *array,
lb[i] = lowerIndx[i];
}
else
- {
elog(ERROR, "Invalid array subscripts");
- }
}
if (upperIndx[i] >= (dim[i] + lb[i]))
{
if (ndim == 1 && lowerIndx[i] <= (dim[i] + lb[i]))
- {
dim[i] = upperIndx[i] - lb[i] + 1;
- }
else
- {
elog(ERROR, "Invalid array subscripts");
- }
}
}
/*
- * Make sure source array has enough entries. Note we ignore the shape
- * of the source array and just read entries serially.
+ * Make sure source array has enough entries. Note we ignore the
+ * shape of the source array and just read entries serially.
*/
mda_get_range(ndim, span, lowerIndx, upperIndx);
nsrcitems = ArrayGetNItems(ndim, span);
@@ -1192,24 +1192,27 @@ array_set_slice(ArrayType *array,
olddatasize = ARR_SIZE(array) - overheadlen;
if (ndim > 1)
{
+
/*
- * here we do not need to cope with extension of the array;
- * it would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it
+ * would be a lot more complicated if we had to do so...
*/
olditemsize = array_slice_size(ndim, dim, lb, ARR_DATA_PTR(array),
elmlen, lowerIndx, upperIndx);
- lenbefore = lenafter = 0; /* keep compiler quiet */
+ lenbefore = lenafter = 0; /* keep compiler quiet */
}
else
{
+
/*
- * here we must allow for possibility of slice larger than orig array
+ * here we must allow for possibility of slice larger than orig
+ * array
*/
- int oldlb = ARR_LBOUND(array)[0];
- int oldub = oldlb + ARR_DIMS(array)[0] - 1;
- int slicelb = MAX(oldlb, lowerIndx[0]);
- int sliceub = MIN(oldub, upperIndx[0]);
- char *oldarraydata = ARR_DATA_PTR(array);
+ int oldlb = ARR_LBOUND(array)[0];
+ int oldub = oldlb + ARR_DIMS(array)[0] - 1;
+ int slicelb = MAX(oldlb, lowerIndx[0]);
+ int sliceub = MIN(oldub, upperIndx[0]);
+ char *oldarraydata = ARR_DATA_PTR(array);
lenbefore = array_nelems_size(oldarraydata,
elmlen,
@@ -1234,9 +1237,10 @@ array_set_slice(ArrayType *array,
if (ndim > 1)
{
+
/*
- * here we do not need to cope with extension of the array;
- * it would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it
+ * would be a lot more complicated if we had to do so...
*/
array_insert_slice(ndim, dim, lb, ARR_DATA_PTR(array), olddatasize,
ARR_DATA_PTR(newarray), elmlen,
@@ -1261,7 +1265,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -1269,15 +1273,15 @@ array_set_slice(ArrayType *array,
*
* Parameters are:
* * fcinfo: a function-call data structure pre-constructed by the caller
- * to be ready to call the desired function, with everything except the
- * first argument position filled in. In particular, flinfo identifies
- * the function fn(), and if nargs > 1 then argument positions after the
- * first must be preset to the additional values to be passed. The
- * first argument position initially holds the input array value.
+ * to be ready to call the desired function, with everything except the
+ * first argument position filled in. In particular, flinfo identifies
+ * the function fn(), and if nargs > 1 then argument positions after the
+ * first must be preset to the additional values to be passed. The
+ * first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
- * or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
- * or binary-compatible with, the result type of fn().
+ * or binary-compatible with, the first argument type of fn().
+ * * retType: OID of element type of output array. This must be the same as,
+ * or binary-compatible with, the result type of fn().
*
* NB: caller must assure that input array is not NULL. Currently,
* any additional parameters passed to fn() may not be specified as NULL
@@ -1344,9 +1348,9 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType)
/*
* Apply the given function to source elt and extra args.
*
- * We assume the extra args are non-NULL, so need not check
- * whether fn() is strict. Would need to do more work here
- * to support arrays containing nulls, too.
+ * We assume the extra args are non-NULL, so need not check whether
+ * fn() is strict. Would need to do more work here to support
+ * arrays containing nulls, too.
*/
fcinfo->arg[0] = elt;
fcinfo->argnull[0] = false;
@@ -1374,7 +1378,10 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType)
result->ndim = ndim;
memcpy(ARR_DIMS(result), ARR_DIMS(v), 2 * ndim * sizeof(int));
- /* Note: do not risk trying to pfree the results of the called function */
+ /*
+ * Note: do not risk trying to pfree the results of the called
+ * function
+ */
CopyArrayEls(ARR_DATA_PTR(result), values, nitems,
typbyval, typlen, typalign, false);
pfree(values);
@@ -1383,7 +1390,7 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType)
}
/*----------
- * construct_array --- simple method for constructing an array object
+ * construct_array --- simple method for constructing an array object
*
* elems: array of Datum items to become the array contents
* nelems: number of items
@@ -1394,7 +1401,7 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType)
* NULL element values are not supported.
*----------
*/
-ArrayType *
+ArrayType *
construct_array(Datum *elems, int nelems,
bool elmbyval, int elmlen, char elmalign)
{
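
Several arrayfuncs.c hunks reflow the comment noting that a 1-D array_set_slice must allow a slice larger than the original array. The bookkeeping being reindented there amounts to clamping the requested slice against the old bounds with MAX/MIN and measuring the old data before, inside, and after the overlap. A small sketch of that arithmetic for fixed-size elements (a simplification: the real code measures variable-length elements with array_nelems_size):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int			elmlen = 4;		/* fixed element size in bytes */
	int			oldlb = 1;		/* old array spans indexes 1..5 */
	int			olddims = 5;
	int			oldub = oldlb + olddims - 1;

	int			lowerIndx = 4;	/* requested slice 4..8 extends past oldub */
	int			upperIndx = 8;

	/* portion of the old array that the slice actually overlaps */
	int			slicelb = MAX(oldlb, lowerIndx);	/* 4 */
	int			sliceub = MIN(oldub, upperIndx);	/* 5 */

	/* old data before the overlap, inside it, and after it (in bytes) */
	int			lenbefore = (slicelb - oldlb) * elmlen;		/* 3 elements = 12 */
	int			olditemsize = (sliceub >= slicelb)
		? (sliceub - slicelb + 1) * elmlen : 0;				/* 2 elements = 8 */
	int			lenafter = (oldub - sliceub) * elmlen;		/* 0 elements = 0 */

	/* the new 1-D array simply grows to cover min(lb)..max(ub) */
	int			newlb = MIN(oldlb, lowerIndx);
	int			newub = MAX(oldub, upperIndx);

	printf("keep %d bytes, replace %d bytes, keep %d bytes; new bounds %d..%d\n",
		   lenbefore, olditemsize, lenafter, newlb, newub);
	return 0;
}
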
diff --git a/src/backend/utils/adt/ascii.c b/src/backend/utils/adt/ascii.c
index ddc365f2004..bdcd24e44e0 100644
--- a/src/backend/utils/adt/ascii.c
+++ b/src/backend/utils/adt/ascii.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* ascii.c
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ascii.c,v 1.6 2001/01/24 19:43:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ascii.c,v 1.7 2001/03/22 03:59:49 momjian Exp $
*
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
*
@@ -33,21 +33,21 @@ multibyte_error(void)
elog(ERROR, "Multi-byte support is not enabled");
}
-Datum
+Datum
to_ascii_encname(PG_FUNCTION_ARGS)
{
multibyte_error();
return 0; /* keep compiler quiet */
}
-Datum
+Datum
to_ascii_enc(PG_FUNCTION_ARGS)
{
multibyte_error();
return 0; /* keep compiler quiet */
}
-Datum
+Datum
to_ascii_default(PG_FUNCTION_ARGS)
{
multibyte_error();
@@ -55,29 +55,29 @@ to_ascii_default(PG_FUNCTION_ARGS)
}
-#else /* with MULTIBYTE */
+#else /* with MULTIBYTE */
static text *encode_to_ascii(text *data, int enc);
/* ----------
- * to_ascii
+ * to_ascii
* ----------
*/
char *
pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *desc, int enc)
{
- unsigned char *x = NULL;
- unsigned char *ascii = NULL ;
- int range = 0;
-
- /*
- * relevant start for an encoding
- */
- #define RANGE_128 128
- #define RANGE_160 160
-
-
+ unsigned char *x = NULL;
+ unsigned char *ascii = NULL;
+ int range = 0;
+
+ /*
+ * relevant start for an encoding
+ */
+#define RANGE_128 128
+#define RANGE_160 160
+
+
if (enc == LATIN1)
{
/* ----------
@@ -107,24 +107,24 @@ pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *desc, int
}
else
{
- elog(ERROR, "pg_to_ascii(): unsupported encoding from %s",
- pg_encoding_to_char(enc));
+ elog(ERROR, "pg_to_ascii(): unsupported encoding from %s",
+ pg_encoding_to_char(enc));
}
-
+
/* ----------
* Encode
* ----------
*/
for (x = src; x <= src_end; x++)
{
- if (*x < 128)
+ if (*x < 128)
*desc++ = *x;
else if (*x < range)
- *desc++ = ' '; /* bogus 128 to 'range' */
+ *desc++ = ' '; /* bogus 128 to 'range' */
else
- *desc++ = ascii[*x - range];
- }
-
+ *desc++ = ascii[*x - range];
+ }
+
return desc;
}
@@ -136,11 +136,11 @@ static text *
encode_to_ascii(text *data, int enc)
{
pg_to_ascii(
- (unsigned char *) VARDATA(data), /* src */
- VARDATA(data) + VARSIZE(data), /* src end */
- (unsigned char *) VARDATA(data), /* desc */
- enc); /* encoding */
-
+ (unsigned char *) VARDATA(data), /* src */
+ VARDATA(data) + VARSIZE(data), /* src end */
+ (unsigned char *) VARDATA(data), /* desc */
+ enc); /* encoding */
+
return data;
}
@@ -152,30 +152,30 @@ Datum
to_ascii_encname(PG_FUNCTION_ARGS)
{
PG_RETURN_TEXT_P
- (
- encode_to_ascii
- (
- PG_GETARG_TEXT_P_COPY(0),
- pg_char_to_encoding( NameStr(*PG_GETARG_NAME(1)) )
- )
- );
+ (
+ encode_to_ascii
+ (
+ PG_GETARG_TEXT_P_COPY(0),
+ pg_char_to_encoding(NameStr(*PG_GETARG_NAME(1)))
+ )
+ );
}
/* ----------
* convert to ASCII - enc is set as int4
* ----------
*/
-Datum
+Datum
to_ascii_enc(PG_FUNCTION_ARGS)
{
PG_RETURN_TEXT_P
- (
- encode_to_ascii
- (
- PG_GETARG_TEXT_P_COPY(0),
- PG_GETARG_INT32(1)
- )
- );
+ (
+ encode_to_ascii
+ (
+ PG_GETARG_TEXT_P_COPY(0),
+ PG_GETARG_INT32(1)
+ )
+ );
}
/* ----------
@@ -185,14 +185,14 @@ to_ascii_enc(PG_FUNCTION_ARGS)
Datum
to_ascii_default(PG_FUNCTION_ARGS)
{
- PG_RETURN_TEXT_P
- (
- encode_to_ascii
- (
- PG_GETARG_TEXT_P_COPY(0),
- GetDatabaseEncoding()
- )
+ PG_RETURN_TEXT_P
+ (
+ encode_to_ascii
+ (
+ PG_GETARG_TEXT_P_COPY(0),
+ GetDatabaseEncoding()
+ )
);
}
-#endif /* MULTIBYTE */
+#endif /* MULTIBYTE */
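
pg_to_ascii, whose body is reindented above, passes 7-bit bytes through unchanged, turns bytes between 128 and the encoding's "relevant start" into spaces, and maps everything above that through a per-encoding substitution table. A self-contained sketch of that loop, using a tiny made-up Latin-1 fragment instead of the full tables in ascii.c:

#include <stdio.h>

#define RANGE_160 160			/* "relevant start" for Latin-1 in this sketch */

/* Toy substitution table for codes 160..255; not ascii.c's real table. */
static char subst[256];

static void init_subst(void)
{
	for (int c = RANGE_160; c < 256; c++)
		subst[c] = '?';			/* default for anything this sketch skips */
	subst[0xE0] = subst[0xE1] = subst[0xE2] = 'a';	/* accented 'a' forms */
	subst[0xE8] = subst[0xE9] = subst[0xEA] = 'e';	/* accented 'e' forms */
	subst[0xF1] = 'n';			/* n with tilde */
	subst[0xFC] = 'u';			/* u with umlaut */
}

static void to_ascii(const unsigned char *src, size_t len, char *dst)
{
	for (size_t i = 0; i < len; i++)
	{
		unsigned char c = src[i];

		if (c < 128)
			dst[i] = (char) c;	/* 7-bit characters pass through */
		else if (c < RANGE_160)
			dst[i] = ' ';		/* bytes below the table's start become spaces */
		else
			dst[i] = subst[c];	/* everything else goes through the table */
	}
	dst[len] = '\0';
}

int main(void)
{
	const unsigned char in[] = {'c', 'a', 'f', 0xE9};	/* "café" in Latin-1 */
	char		out[8];

	init_subst();
	to_ascii(in, sizeof(in), out);
	puts(out);					/* prints "cafe" */
	return 0;
}
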
diff --git a/src/backend/utils/adt/bool.c b/src/backend/utils/adt/bool.c
index 4f702b24595..dba0fe61492 100644
--- a/src/backend/utils/adt/bool.c
+++ b/src/backend/utils/adt/bool.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/bool.c,v 1.24 2001/01/24 19:43:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/bool.c,v 1.25 2001/03/22 03:59:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@ boolin(PG_FUNCTION_ARGS)
switch (*b)
{
- case 't':
- case 'T':
+ case 't':
+ case 'T':
if (strncasecmp(b, "true", strlen(b)) == 0)
PG_RETURN_BOOL(true);
break;
@@ -187,7 +187,7 @@ isfalse(PG_FUNCTION_ARGS)
b = PG_GETARG_BOOL(0);
- PG_RETURN_BOOL(! b);
+ PG_RETURN_BOOL(!b);
}
Datum
@@ -200,7 +200,7 @@ isnottrue(PG_FUNCTION_ARGS)
b = PG_GETARG_BOOL(0);
- PG_RETURN_BOOL(! b);
+ PG_RETURN_BOOL(!b);
}
Datum
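
The reindented boolin switch relies on strncasecmp(b, "true", strlen(b)) comparing only as many characters as the input supplies, so 't', 'tr', 'tru' and 'true' all match regardless of case. A quick standalone check of that trick (parse_bool is an invented wrapper handling only the true/false spellings, not the backend function):

#include <stdio.h>
#include <string.h>
#include <strings.h>			/* strncasecmp */

/* Return 1 for a prefix of "true", 0 for a prefix of "false", -1 otherwise. */
static int parse_bool(const char *b)
{
	if (*b != '\0' && strncasecmp(b, "true", strlen(b)) == 0)
		return 1;
	if (*b != '\0' && strncasecmp(b, "false", strlen(b)) == 0)
		return 0;
	return -1;
}

int main(void)
{
	const char *inputs[] = {"t", "TRU", "true", "f", "FaLs", "tr ue"};

	for (int i = 0; i < 6; i++)
		printf("%-6s -> %d\n", inputs[i], parse_bool(inputs[i]));
	/* prints 1, 1, 1, 0, 0, -1 */
	return 0;
}

The *b != '\0' guard matters because an empty string compares equal to any prefix, which the length-limited comparison would otherwise accept.
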
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 7a3a5c11be9..dbcf881bf6c 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -9,7 +9,7 @@
* workings can be found in the book "Software Solutions in C" by
* Dale Schumacher, Academic Press, ISBN: 0-12-632360-7.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/cash.c,v 1.49 2000/12/03 20:45:35 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/cash.c,v 1.50 2001/03/22 03:59:49 momjian Exp $
*/
#include "postgres.h"
@@ -82,20 +82,23 @@ cash_in(PG_FUNCTION_ARGS)
ssymbol,
psymbol,
*nsymbol;
+
#ifdef USE_LOCALE
struct lconv *lconvert = PGLC_localeconv();
+
#endif
#ifdef USE_LOCALE
+
/*
* frac_digits will be CHAR_MAX in some locales, notably C. However,
* just testing for == CHAR_MAX is risky, because of compilers like
* gcc that "helpfully" let you alter the platform-standard definition
* of whether char is signed or not. If we are so unfortunate as to
* get compiled with a nonstandard -fsigned-char or -funsigned-char
- * switch, then our idea of CHAR_MAX will not agree with libc's.
- * The safest course is not to test for CHAR_MAX at all, but to impose
- * a range check for plausible frac_digits values.
+ * switch, then our idea of CHAR_MAX will not agree with libc's. The
+ * safest course is not to test for CHAR_MAX at all, but to impose a
+ * range check for plausible frac_digits values.
*/
fpoint = lconvert->frac_digits;
if (fpoint < 0 || fpoint > 10)
@@ -238,8 +241,10 @@ cash_out(PG_FUNCTION_ARGS)
dsymbol,
*nsymbol;
char convention;
+
#ifdef USE_LOCALE
struct lconv *lconvert = PGLC_localeconv();
+
#endif
#ifdef USE_LOCALE
@@ -249,8 +254,8 @@ cash_out(PG_FUNCTION_ARGS)
points = 2; /* best guess in this case, I think */
/*
- * As with frac_digits, must apply a range check to mon_grouping
- * to avoid being fooled by variant CHAR_MAX values.
+ * As with frac_digits, must apply a range check to mon_grouping to
+ * avoid being fooled by variant CHAR_MAX values.
*/
mon_group = *lconvert->mon_grouping;
if (mon_group <= 0 || mon_group > 6)
@@ -680,7 +685,7 @@ cash_words(PG_FUNCTION_ARGS)
buf[0] = '\0';
m0 = value % 100; /* cents */
- m1 = (value / 100) % 1000; /* hundreds */
+ m1 = (value / 100) % 1000; /* hundreds */
m2 = (value / 100000) % 1000; /* thousands */
m3 = value / 100000000 % 1000; /* millions */
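
The cash_words hunk only realigns the comments, but the arithmetic they label is easier to see with numbers: the money value is stored in cents, so successive divisions peel off the cents and then three-digit groups. For value = 123456789 (that is, $1,234,567.89) the groups come out as below; the sketch mirrors the m0..m3 names from the hunk.

#include <stdio.h>

int main(void)
{
	long		value = 123456789;		/* cents: $1,234,567.89 */

	long		m0 = value % 100;				/* cents           -> 89  */
	long		m1 = (value / 100) % 1000;		/* hundreds group  -> 567 */
	long		m2 = (value / 100000) % 1000;	/* thousands group -> 234 */
	long		m3 = value / 100000000 % 1000;	/* millions group  -> 1   */

	printf("m3=%ld m2=%ld m1=%ld m0=%ld\n", m3, m2, m1, m0);
	printf("%ld,%03ld,%03ld.%02ld\n", m3, m2, m1, m0);		/* 1,234,567.89 */
	return 0;
}
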
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 35031e81873..5e7d3c92f2e 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/date.c,v 1.55 2001/01/24 19:43:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/date.c,v 1.56 2001/03/22 03:59:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,7 +49,7 @@ date_in(PG_FUNCTION_ARGS)
char lowstr[MAXDATELEN + 1];
if ((ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf) != 0)
- || (DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tzp) != 0))
+ || (DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tzp) != 0))
elog(ERROR, "Bad date external representation '%s'", str);
switch (dtype)
@@ -244,15 +244,15 @@ date_timestamp(PG_FUNCTION_ARGS)
if (utime == -1)
elog(ERROR, "Unable to convert date to tm");
- result = utime + ((date2j(1970,1,1)-date2j(2000,1,1))*86400.0);
+ result = utime + ((date2j(1970, 1, 1) - date2j(2000, 1, 1)) * 86400.0);
#else
- result = dateVal*86400.0+CTimeZone;
+ result = dateVal * 86400.0 + CTimeZone;
#endif
}
else
{
/* Outside of range for timezone support, so assume UTC */
- result = dateVal*86400.0;
+ result = dateVal * 86400.0;
}
PG_RETURN_TIMESTAMP(result);
@@ -277,13 +277,9 @@ timestamp_date(PG_FUNCTION_ARGS)
elog(ERROR, "Unable to convert timestamp to date");
if (TIMESTAMP_IS_EPOCH(timestamp))
- {
timestamp2tm(SetTimestamp(timestamp), NULL, tm, &fsec, NULL);
- }
else if (TIMESTAMP_IS_CURRENT(timestamp))
- {
timestamp2tm(SetTimestamp(timestamp), &tz, tm, &fsec, &tzn);
- }
else
{
if (timestamp2tm(timestamp, &tz, tm, &fsec, &tzn) != 0)
@@ -538,8 +534,10 @@ time_smaller(PG_FUNCTION_ARGS)
Datum
overlaps_time(PG_FUNCTION_ARGS)
{
- /* The arguments are TimeADT, but we leave them as generic Datums
- * to avoid dereferencing nulls (TimeADT is pass-by-reference!)
+
+ /*
+ * The arguments are TimeADT, but we leave them as generic Datums to
+ * avoid dereferencing nulls (TimeADT is pass-by-reference!)
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -556,9 +554,9 @@ overlaps_time(PG_FUNCTION_ARGS)
(DatumGetTimeADT(t1) < DatumGetTimeADT(t2))
/*
- * If both endpoints of interval 1 are null, the result is null (unknown).
- * If just one endpoint is null, take ts1 as the non-null one.
- * Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null
+ * (unknown). If just one endpoint is null, take ts1 as the non-null
+ * one. Otherwise, take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -572,7 +570,7 @@ overlaps_time(PG_FUNCTION_ARGS)
{
if (TIMEADT_GT(ts1, te1))
{
- Datum tt = ts1;
+ Datum tt = ts1;
ts1 = te1;
te1 = tt;
@@ -592,7 +590,7 @@ overlaps_time(PG_FUNCTION_ARGS)
{
if (TIMEADT_GT(ts2, te2))
{
- Datum tt = ts2;
+ Datum tt = ts2;
ts2 = te2;
te2 = tt;
@@ -605,7 +603,9 @@ overlaps_time(PG_FUNCTION_ARGS)
*/
if (TIMEADT_GT(ts1, ts2))
{
- /* This case is ts1 < te2 OR te1 < te2, which may look redundant
+
+ /*
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant
* but in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
@@ -614,7 +614,9 @@ overlaps_time(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
if (te1IsNull)
PG_RETURN_NULL();
- /* If te1 is not null then we had ts1 <= te1 above, and we just
+
+ /*
+ * If te1 is not null then we had ts1 <= te1 above, and we just
* found ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
@@ -628,15 +630,20 @@ overlaps_time(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
if (te2IsNull)
PG_RETURN_NULL();
- /* If te2 is not null then we had ts2 <= te2 above, and we just
+
+ /*
+ * If te2 is not null then we had ts2 <= te2 above, and we just
* found ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
else
{
- /* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else null".
+
+ /*
+ * For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
+ * rather silly way of saying "true if both are nonnull, else
+ * null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -690,7 +697,7 @@ datetime_timestamp(PG_FUNCTION_ARGS)
Timestamp result;
result = DatumGetTimestamp(DirectFunctionCall1(date_timestamp,
- DateADTGetDatum(date)));
+ DateADTGetDatum(date)));
result += time;
PG_RETURN_TIMESTAMP(result);
@@ -895,62 +902,62 @@ timetz_out(PG_FUNCTION_ARGS)
Datum
timetz_eq(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
- PG_RETURN_BOOL(((time1->time+time1->zone) == (time2->time+time2->zone)));
+ PG_RETURN_BOOL(((time1->time + time1->zone) == (time2->time + time2->zone)));
}
Datum
timetz_ne(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
- PG_RETURN_BOOL(((time1->time+time1->zone) != (time2->time+time2->zone)));
+ PG_RETURN_BOOL(((time1->time + time1->zone) != (time2->time + time2->zone)));
}
Datum
timetz_lt(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
- PG_RETURN_BOOL(((time1->time+time1->zone) < (time2->time+time2->zone)));
+ PG_RETURN_BOOL(((time1->time + time1->zone) < (time2->time + time2->zone)));
}
Datum
timetz_le(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
- PG_RETURN_BOOL(((time1->time+time1->zone) <= (time2->time+time2->zone)));
+ PG_RETURN_BOOL(((time1->time + time1->zone) <= (time2->time + time2->zone)));
}
Datum
timetz_gt(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
- PG_RETURN_BOOL(((time1->time+time1->zone) > (time2->time+time2->zone)));
+ PG_RETURN_BOOL(((time1->time + time1->zone) > (time2->time + time2->zone)));
}
Datum
timetz_ge(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
- PG_RETURN_BOOL(((time1->time+time1->zone) >= (time2->time+time2->zone)));
+ PG_RETURN_BOOL(((time1->time + time1->zone) >= (time2->time + time2->zone)));
}
Datum
timetz_cmp(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
if (DatumGetBool(DirectFunctionCall2(timetz_lt,
TimeTzADTPGetDatum(time1),
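Every timetz comparison operator in the hunk above reduces to the same normalization: add the zone offset to the local time so both operands are compared on a common scale. A minimal standalone sketch of that idea, assuming a simplified struct whose field names mirror TimeTzADT in the diff (everything else here is illustrative, not part of the patch):

	typedef struct
	{
		double		time;			/* local time of day, in seconds */
		int			zone;			/* zone offset, in seconds */
	} TimeTzDemo;

	/* Compare two time-with-zone values after shifting both by their offsets. */
	static int
	timetz_demo_cmp(const TimeTzDemo *a, const TimeTzDemo *b)
	{
		double		ta = a->time + a->zone;
		double		tb = b->time + b->zone;

		if (ta < tb)
			return -1;
		if (ta > tb)
			return 1;
		return 0;
	}

The six operators (eq/ne/lt/le/gt/ge) in the hunk are this same comparison with a different final test each time.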
@@ -969,7 +976,7 @@ timetz_cmp(PG_FUNCTION_ARGS)
Datum
timetz_hash(PG_FUNCTION_ARGS)
{
- TimeTzADT *key = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *key = PG_GETARG_TIMETZADT_P(0);
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
@@ -982,8 +989,8 @@ timetz_hash(PG_FUNCTION_ARGS)
Datum
timetz_larger(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
if (DatumGetBool(DirectFunctionCall2(timetz_gt,
TimeTzADTPGetDatum(time1),
@@ -995,8 +1002,8 @@ timetz_larger(PG_FUNCTION_ARGS)
Datum
timetz_smaller(PG_FUNCTION_ARGS)
{
- TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
- TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
+ TimeTzADT *time1 = PG_GETARG_TIMETZADT_P(0);
+ TimeTzADT *time2 = PG_GETARG_TIMETZADT_P(1);
if (DatumGetBool(DirectFunctionCall2(timetz_lt,
TimeTzADTPGetDatum(time1),
@@ -1058,7 +1065,9 @@ timetz_mi_interval(PG_FUNCTION_ARGS)
Datum
overlaps_timetz(PG_FUNCTION_ARGS)
{
- /* The arguments are TimeTzADT *, but we leave them as generic Datums
+
+ /*
+ * The arguments are TimeTzADT *, but we leave them as generic Datums
* for convenience of notation --- and to avoid dereferencing nulls.
*/
Datum ts1 = PG_GETARG_DATUM(0);
@@ -1076,9 +1085,9 @@ overlaps_timetz(PG_FUNCTION_ARGS)
DatumGetBool(DirectFunctionCall2(timetz_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null (unknown).
- * If just one endpoint is null, take ts1 as the non-null one.
- * Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null
+ * (unknown). If just one endpoint is null, take ts1 as the non-null
+ * one. Otherwise, take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -1092,7 +1101,7 @@ overlaps_timetz(PG_FUNCTION_ARGS)
{
if (TIMETZ_GT(ts1, te1))
{
- Datum tt = ts1;
+ Datum tt = ts1;
ts1 = te1;
te1 = tt;
@@ -1112,7 +1121,7 @@ overlaps_timetz(PG_FUNCTION_ARGS)
{
if (TIMETZ_GT(ts2, te2))
{
- Datum tt = ts2;
+ Datum tt = ts2;
ts2 = te2;
te2 = tt;
@@ -1125,7 +1134,9 @@ overlaps_timetz(PG_FUNCTION_ARGS)
*/
if (TIMETZ_GT(ts1, ts2))
{
- /* This case is ts1 < te2 OR te1 < te2, which may look redundant
+
+ /*
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant
* but in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
@@ -1134,7 +1145,9 @@ overlaps_timetz(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
if (te1IsNull)
PG_RETURN_NULL();
- /* If te1 is not null then we had ts1 <= te1 above, and we just
+
+ /*
+ * If te1 is not null then we had ts1 <= te1 above, and we just
* found ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
@@ -1148,15 +1161,20 @@ overlaps_timetz(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
if (te2IsNull)
PG_RETURN_NULL();
- /* If te2 is not null then we had ts2 <= te2 above, and we just
+
+ /*
+ * If te2 is not null then we had ts2 <= te2 above, and we just
* found ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
else
{
- /* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else null".
+
+ /*
+ * For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
+ * rather silly way of saying "true if both are nonnull, else
+ * null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
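The re-wrapped comments in overlaps_time()/overlaps_timetz() above describe the SQL OVERLAPS decision tree once both intervals have been normalized (endpoints swapped so ts <= te, a lone non-null endpoint folded into ts). A three-valued sketch of that decision tree, under the assumption that normalization has already happened; the Datum plumbing and special-value handling are left out:

	#include <stdbool.h>

	typedef enum
	{
		TRI_FALSE,
		TRI_TRUE,
		TRI_NULL
	} TriBool;

	static TriBool
	overlaps_demo(double ts1, double te1, bool te1null,
				  double ts2, double te2, bool te2null)
	{
		if (ts1 > ts2)
		{
			if (te2null)
				return TRI_NULL;
			if (ts1 < te2)
				return TRI_TRUE;
			if (te1null)
				return TRI_NULL;
			/* ts1 <= te1 and ts1 >= te2 together imply te1 >= te2 */
			return TRI_FALSE;
		}
		else if (ts1 < ts2)
		{
			if (te1null)
				return TRI_NULL;
			if (ts2 < te1)
				return TRI_TRUE;
			if (te2null)
				return TRI_NULL;
			return TRI_FALSE;
		}

		/* ts1 == ts2: true when both end points are known, else unknown */
		return (te1null || te2null) ? TRI_NULL : TRI_TRUE;
	}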
@@ -1219,7 +1237,7 @@ datetimetz_timestamp(PG_FUNCTION_ARGS)
TimeTzADT *time = PG_GETARG_TIMETZADT_P(1);
Timestamp result;
- result = date*86400.0 + time->time + time->zone;
+ result = date * 86400.0 + time->time + time->zone;
PG_RETURN_TIMESTAMP(result);
}
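The one-line change in datetimetz_timestamp() above is pure seconds arithmetic: the date is a day count scaled by 86400, and the local time of day plus its zone offset is added on top. The same formula as a hypothetical standalone helper:

	static double
	datetimetz_to_seconds_demo(int days, double time_of_day, int zone_offset)
	{
		return days * 86400.0 + time_of_day + zone_offset;
	}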
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 8e9299643ff..8691fa49b91 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/datetime.c,v 1.61 2001/03/14 20:12:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/datetime.c,v 1.62 2001/03/22 03:59:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,16 +25,16 @@
#include "utils/datetime.h"
static int DecodeNumber(int flen, char *field,
- int fmask, int *tmask,
- struct tm * tm, double *fsec, int *is2digits);
+ int fmask, int *tmask,
+ struct tm * tm, double *fsec, int *is2digits);
static int DecodeNumberField(int len, char *str,
- int fmask, int *tmask,
- struct tm * tm, double *fsec, int *is2digits);
+ int fmask, int *tmask,
+ struct tm * tm, double *fsec, int *is2digits);
static int DecodeTime(char *str, int fmask, int *tmask,
- struct tm * tm, double *fsec);
+ struct tm * tm, double *fsec);
static int DecodeTimezone(char *str, int *tzp);
static datetkn *datebsearch(char *key, datetkn *base, unsigned int nel);
-static int DecodeDate(char *str, int fmask, int *tmask, struct tm * tm);
+static int DecodeDate(char *str, int fmask, int *tmask, struct tm * tm);
#define USE_DATE_CACHE 1
#define ROUND_ALL 0
@@ -271,10 +271,13 @@ static datetkn deltatktbl[] = {
{"m", UNITS, DTK_MINUTE}, /* "minute" relative time units */
{"microsecon", UNITS, DTK_MICROSEC}, /* "microsecond" relative
* time units */
- {"mil", UNITS, DTK_MILLENNIUM}, /* "millennium" relative time units */
- {"mils", UNITS, DTK_MILLENNIUM}, /* "millennia" relative time units */
- {"millennia", UNITS, DTK_MILLENNIUM}, /* "millennia" relative time units */
- {DMILLENNIUM, UNITS, DTK_MILLENNIUM}, /* "millennium" relative time units */
+ {"mil", UNITS, DTK_MILLENNIUM}, /* "millennium" relative time
+ * units */
+ {"mils", UNITS, DTK_MILLENNIUM}, /* "millennia" relative time units */
+ {"millennia", UNITS, DTK_MILLENNIUM}, /* "millennia" relative
+ * time units */
+ {DMILLENNIUM, UNITS, DTK_MILLENNIUM}, /* "millennium" relative
+ * time units */
{"millisecon", UNITS, DTK_MILLISEC}, /* relative time units */
{"min", UNITS, DTK_MINUTE}, /* "minute" relative time units */
{"mins", UNITS, DTK_MINUTE},/* "minutes" relative time units */
@@ -876,14 +879,14 @@ DecodeDateTime(char **field, int *ftype, int nf,
tm->tm_year += 1900;
tm->tm_mon += 1;
-# if defined(HAVE_TM_ZONE)
+#if defined(HAVE_TM_ZONE)
*tzp = -(tm->tm_gmtoff); /* tm_gmtoff is
* Sun/DEC-ism */
-# elif defined(HAVE_INT_TIMEZONE)
+#elif defined(HAVE_INT_TIMEZONE)
*tzp = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
-# endif /* HAVE_INT_TIMEZONE */
+#endif /* HAVE_INT_TIMEZONE */
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
*tzp = CTimeZone;
#endif
}
@@ -1121,13 +1124,13 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
mktime(tmp);
tm->tm_isdst = tmp->tm_isdst;
-# if defined(HAVE_TM_ZONE)
+#if defined(HAVE_TM_ZONE)
*tzp = -(tmp->tm_gmtoff); /* tm_gmtoff is Sun/DEC-ism */
-# elif defined(HAVE_INT_TIMEZONE)
+#elif defined(HAVE_INT_TIMEZONE)
*tzp = ((tmp->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
-# endif
+#endif
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
*tzp = CTimeZone;
#endif
}
@@ -1492,7 +1495,7 @@ DecodeNumberField(int len, char *str, int fmask,
return -1;
return 0;
-} /* DecodeNumberField() */
+} /* DecodeNumberField() */
/* DecodeTimezone()
@@ -1674,20 +1677,26 @@ DecodeDateDelta(char **field, int *ftype, int nf, int *dtype, struct tm * tm, do
break;
case DTK_TZ:
+
/*
* Timezone is a token with a leading sign character and
* otherwise the same as a non-signed time field
*/
Assert((*field[i] == '-') || (*field[i] == '+'));
- /* A single signed number ends up here, but will be rejected by DecodeTime().
- * So, work this out to drop through to DTK_NUMBER, which *can* tolerate this.
+
+ /*
+ * A single signed number ends up here, but will be
+ * rejected by DecodeTime(). So, work this out to drop
+ * through to DTK_NUMBER, which *can* tolerate this.
*/
- cp = field[i]+1;
+ cp = field[i] + 1;
while ((*cp != '\0') && (*cp != ':') && (*cp != '.'))
cp++;
if ((*cp == ':')
- && (DecodeTime((field[i]+1), fmask, &tmask, tm, fsec) == 0)) {
- if (*field[i] == '-') {
+ && (DecodeTime((field[i] + 1), fmask, &tmask, tm, fsec) == 0))
+ {
+ if (*field[i] == '-')
+ {
/* flip the sign on all fields */
tm->tm_hour = -tm->tm_hour;
tm->tm_min = -tm->tm_min;
@@ -1695,18 +1704,33 @@ DecodeDateDelta(char **field, int *ftype, int nf, int *dtype, struct tm * tm, do
*fsec = -(*fsec);
}
- /* Set the next type to be a day, if units are not specified.
- * This handles the case of '1 +02:03' since we are reading right to left.
+ /*
+ * Set the next type to be a day, if units are not
+ * specified. This handles the case of '1 +02:03'
+ * since we are reading right to left.
*/
type = DTK_DAY;
tmask = DTK_M(TZ);
break;
- } else if (type == IGNORE) {
- if (*cp == '.') {
- /* Got a decimal point? Then assume some sort of seconds specification */
+ }
+ else if (type == IGNORE)
+ {
+ if (*cp == '.')
+ {
+
+ /*
+ * Got a decimal point? Then assume some sort of
+ * seconds specification
+ */
type = DTK_SECOND;
- } else if (*cp == '\0') {
- /* Only a signed integer? Then must assume a timezone-like usage */
+ }
+ else if (*cp == '\0')
+ {
+
+ /*
+ * Only a signed integer? Then must assume a
+ * timezone-like usage
+ */
type = DTK_HOUR;
}
}
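The rewrapped comments in the DTK_TZ branch above explain the drop-through rule for a leading-signed field: "+hh:mm[:ss]" is decoded as a time whose sign is then flipped onto every field, a value with a decimal point is taken as seconds, and a bare signed integer is treated like an hour (timezone-style) value. A simplified sketch of just that classification step (hypothetical helper; the real code also calls DecodeTime() and falls through to DTK_NUMBER):

	typedef enum
	{
		SIGNED_TIME,				/* "+hh:mm[:ss]" -- decode as a time */
		SIGNED_SECONDS,				/* "+n.nn" -- some sort of seconds */
		SIGNED_HOURS				/* "+n" -- timezone-like, assume hours */
	} SignedFieldKind;

	static SignedFieldKind
	classify_signed_field_demo(const char *field)
	{
		const char *cp = field + 1;	/* skip the leading '-' or '+' */

		while (*cp != '\0' && *cp != ':' && *cp != '.')
			cp++;

		if (*cp == ':')
			return SIGNED_TIME;
		if (*cp == '.')
			return SIGNED_SECONDS;
		return SIGNED_HOURS;
	}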
@@ -1921,7 +1945,7 @@ DecodeUnits(int field, char *lowtoken, int *val)
* Binary search -- from Knuth (6.2.1) Algorithm B. Special case like this
* is WAY faster than the generic bsearch().
*/
-static datetkn *
+static datetkn *
datebsearch(char *key, datetkn *base, unsigned int nel)
{
datetkn *last = base + nel - 1,
@@ -2166,7 +2190,7 @@ EncodeDateTime(struct tm * tm, double fsec, int *tzp, char **tzn, int style, cha
if ((*tzn != NULL) && (tm->tm_isdst >= 0))
{
strcpy((str + 27), " ");
- StrNCpy((str + 28), *tzn, MAXTZLEN+1);
+ StrNCpy((str + 28), *tzn, MAXTZLEN + 1);
}
}
else
@@ -2175,7 +2199,7 @@ EncodeDateTime(struct tm * tm, double fsec, int *tzp, char **tzn, int style, cha
if ((*tzn != NULL) && (tm->tm_isdst >= 0))
{
strcpy((str + 24), " ");
- StrNCpy((str + 25), *tzn, MAXTZLEN+1);
+ StrNCpy((str + 25), *tzn, MAXTZLEN + 1);
}
}
@@ -2207,10 +2231,11 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
int is_nonzero = FALSE;
char *cp = str;
- /* The sign of year and month are guaranteed to match,
- * since they are stored internally as "month".
- * But we'll need to check for is_before and is_nonzero
- * when determining the signs of hour/minute/seconds fields.
+ /*
+ * The sign of year and month are guaranteed to match, since they are
+ * stored internally as "month". But we'll need to check for is_before
+ * and is_nonzero when determining the signs of hour/minute/seconds
+ * fields.
*/
switch (style)
{
@@ -2247,8 +2272,8 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
if ((!is_nonzero) || (tm->tm_hour != 0) || (tm->tm_min != 0)
|| (tm->tm_sec != 0) || (fsec != 0))
{
- int minus = ((tm->tm_hour < 0) || (tm->tm_min < 0)
- || (tm->tm_sec < 0) || (fsec < 0));
+ int minus = ((tm->tm_hour < 0) || (tm->tm_min < 0)
+ || (tm->tm_sec < 0) || (fsec < 0));
sprintf(cp, "%s%s%02d:%02d", (is_nonzero ? " " : ""),
(minus ? "-" : (is_before ? "+" : "")),
@@ -2283,7 +2308,8 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
if (tm->tm_year != 0)
{
- int year = tm->tm_year;
+ int year = tm->tm_year;
+
if (tm->tm_year < 0)
year = -year;
@@ -2296,55 +2322,59 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
if (tm->tm_mon != 0)
{
- int mon = tm->tm_mon;
+ int mon = tm->tm_mon;
+
if (is_before || ((!is_nonzero) && (tm->tm_mon < 0)))
mon = -mon;
sprintf(cp, "%s%d mon%s", (is_nonzero ? " " : ""), mon,
((mon != 1) ? "s" : ""));
cp += strlen(cp);
- if (! is_nonzero)
+ if (!is_nonzero)
is_before = (tm->tm_mon < 0);
is_nonzero = TRUE;
}
if (tm->tm_mday != 0)
{
- int day = tm->tm_mday;
+ int day = tm->tm_mday;
+
if (is_before || ((!is_nonzero) && (tm->tm_mday < 0)))
day = -day;
sprintf(cp, "%s%d day%s", (is_nonzero ? " " : ""), day,
((day != 1) ? "s" : ""));
cp += strlen(cp);
- if (! is_nonzero)
+ if (!is_nonzero)
is_before = (tm->tm_mday < 0);
is_nonzero = TRUE;
}
if (tm->tm_hour != 0)
{
- int hour = tm->tm_hour;
+ int hour = tm->tm_hour;
+
if (is_before || ((!is_nonzero) && (tm->tm_hour < 0)))
hour = -hour;
sprintf(cp, "%s%d hour%s", (is_nonzero ? " " : ""), hour,
((hour != 1) ? "s" : ""));
cp += strlen(cp);
- if (! is_nonzero)
+ if (!is_nonzero)
is_before = (tm->tm_hour < 0);
is_nonzero = TRUE;
}
if (tm->tm_min != 0)
{
- int min = tm->tm_min;
+ int min = tm->tm_min;
+
if (is_before || ((!is_nonzero) && (tm->tm_min < 0)))
min = -min;
sprintf(cp, "%s%d min%s", (is_nonzero ? " " : ""), min,
((min != 1) ? "s" : ""));
cp += strlen(cp);
- if (! is_nonzero)
+ if (!is_nonzero)
is_before = (tm->tm_min < 0);
is_nonzero = TRUE;
}
@@ -2352,7 +2382,8 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
/* fractional seconds? */
if (fsec != 0)
{
- double sec;
+ double sec;
+
fsec += tm->tm_sec;
sec = fsec;
if (is_before || ((!is_nonzero) && (fsec < 0)))
@@ -2360,7 +2391,7 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
sprintf(cp, "%s%.2f secs", (is_nonzero ? " " : ""), sec);
cp += strlen(cp);
- if (! is_nonzero)
+ if (!is_nonzero)
is_before = (fsec < 0);
is_nonzero = TRUE;
@@ -2368,14 +2399,15 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
}
else if (tm->tm_sec != 0)
{
- int sec = tm->tm_sec;
+ int sec = tm->tm_sec;
+
if (is_before || ((!is_nonzero) && (tm->tm_sec < 0)))
sec = -sec;
sprintf(cp, "%s%d sec%s", (is_nonzero ? " " : ""), sec,
((sec != 1) ? "s" : ""));
cp += strlen(cp);
- if (! is_nonzero)
+ if (!is_nonzero)
is_before = (tm->tm_sec < 0);
is_nonzero = TRUE;
}
@@ -2383,7 +2415,7 @@ EncodeTimeSpan(struct tm * tm, double fsec, int style, char *str)
}
/* identically zero? then put in a unitless zero... */
- if (! is_nonzero)
+ if (!is_nonzero)
{
strcat(cp, "0");
cp += strlen(cp);
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 9a1bffb81e4..d0766d15d70 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/datum.c,v 1.19 2001/01/24 19:43:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/datum.c,v 1.20 2001/03/22 03:59:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -153,10 +153,11 @@ datumFree(Datum value, bool typByVal, int typLen)
bool
datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
{
- bool res;
+ bool res;
if (typByVal)
{
+
/*
* just compare the two datums. NOTE: just comparing "len" bytes
* will not do the work, because we do not know how these bytes
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index b02117d3a78..7a83ee6577e 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/float.c,v 1.69 2001/01/24 19:43:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/float.c,v 1.70 2001/03/22 03:59:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,7 +56,7 @@
#include <limits.h>
/* for finite() on Solaris */
#ifdef HAVE_IEEEFP_H
-# include <ieeefp.h>
+#include <ieeefp.h>
#endif
#include "fmgr.h"
@@ -69,6 +69,7 @@
#ifndef atof
extern double atof(const char *p);
+
#endif
#ifndef HAVE_CBRT
@@ -78,8 +79,9 @@ static double cbrt(double x);
#else
#if !defined(nextstep)
extern double cbrt(double x);
+
#endif
-#endif /* HAVE_CBRT */
+#endif /* HAVE_CBRT */
#ifndef HAVE_RINT
#define rint my_rint
@@ -87,9 +89,10 @@ static double rint(double x);
#else
extern double rint(double x);
-#endif /* HAVE_RINT */
-#endif /* NeXT check */
+#endif /* HAVE_RINT */
+
+#endif /* NeXT check */
static void CheckFloat4Val(double val);
@@ -1345,7 +1348,7 @@ setseed(PG_FUNCTION_ARGS)
* float8_accum - accumulate for AVG(), STDDEV(), etc
* float4_accum - same, but input data is float4
* float8_avg - produce final result for float AVG()
- * float8_variance - produce final result for float VARIANCE()
+ * float8_variance - produce final result for float VARIANCE()
* float8_stddev - produce final result for float STDDEV()
*
* The transition datatype for all these aggregates is a 3-element array
@@ -1360,10 +1363,11 @@ setseed(PG_FUNCTION_ARGS)
static float8 *
check_float8_array(ArrayType *transarray, const char *caller)
{
+
/*
- * We expect the input to be a 3-element float array; verify that.
- * We don't need to use deconstruct_array() since the array data
- * is just going to look like a C array of 3 float8 values.
+ * We expect the input to be a 3-element float array; verify that. We
+ * don't need to use deconstruct_array() since the array data is just
+ * going to look like a C array of 3 float8 values.
*/
if (ARR_SIZE(transarray) != (ARR_OVERHEAD(1) + 3 * sizeof(float8)) ||
ARR_NDIM(transarray) != 1 ||
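The header comment above this hunk spells out the contract: float8_accum and friends carry a 3-element transition array {N, sum(X), sum(X*X)}, and the finalizers derive AVG, VARIANCE and STDDEV from it. A hedged sketch of those finalizer formulas, using plain doubles instead of the array handling (the real functions also guard against N = 0 and N = 1):

	#include <math.h>

	static double
	demo_float8_avg(double n, double sumx)
	{
		return sumx / n;			/* caller guarantees n > 0 */
	}

	static double
	demo_float8_variance(double n, double sumx, double sumx2)
	{
		/* sample variance: (N*sum(X^2) - sum(X)^2) / (N*(N-1)), N > 1 assumed */
		return (n * sumx2 - sumx * sumx) / (n * (n - 1.0));
	}

	static double
	demo_float8_stddev(double n, double sumx, double sumx2)
	{
		return sqrt(demo_float8_variance(n, sumx, sumx2));
	}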
@@ -1398,7 +1402,7 @@ float8_accum(PG_FUNCTION_ARGS)
transdatums[2] = Float8GetDatumFast(sumX2);
result = construct_array(transdatums, 3,
- false /* float8 byval */, sizeof(float8), 'd');
+ false /* float8 byval */ , sizeof(float8), 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
@@ -1433,7 +1437,7 @@ float4_accum(PG_FUNCTION_ARGS)
transdatums[2] = Float8GetDatumFast(sumX2);
result = construct_array(transdatums, 3,
- false /* float8 byval */, sizeof(float8), 'd');
+ false /* float8 byval */ , sizeof(float8), 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 4cdaaee765f..b4990245500 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/format_type.c,v 1.9 2001/02/05 17:35:04 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/format_type.c,v 1.10 2001/03/22 03:59:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,10 +31,10 @@ static char *format_type_internal(Oid type_oid, int32 typemod);
static char *
-psnprintf(size_t len, const char * fmt, ...)
+psnprintf(size_t len, const char *fmt,...)
{
- va_list ap;
- char * buf;
+ va_list ap;
+ char *buf;
buf = palloc(len);
@@ -136,7 +136,9 @@ format_type_internal(Oid type_oid, int32 typemod)
break;
case CHAROID:
- /* This char type is the single-byte version. You have to
+
+ /*
+ * This char type is the single-byte version. You have to
* double-quote it to get at it in the parser.
*/
buf = pstrdup("\"char\"");
@@ -252,7 +254,7 @@ type_maximum_size(Oid type_oid, int32 typemod)
/* precision (ie, max # of digits) is in upper bits of typmod */
if (typemod > VARHDRSZ)
{
- int precision = ((typemod - VARHDRSZ) >> 16) & 0xffff;
+ int precision = ((typemod - VARHDRSZ) >> 16) & 0xffff;
/* Numeric stores 2 decimal digits/byte, plus header */
return (precision + 1) / 2 + NUMERIC_HDRSZ;
@@ -262,7 +264,7 @@ type_maximum_size(Oid type_oid, int32 typemod)
case VARBITOID:
case ZPBITOID:
/* typemod is the (max) number of bits */
- return (typemod + (BITS_PER_BYTE-1)) / BITS_PER_BYTE
+ return (typemod + (BITS_PER_BYTE - 1)) / BITS_PER_BYTE
+ 2 * sizeof(int32);
}
@@ -300,10 +302,10 @@ oidvectortypes(PG_FUNCTION_ARGS)
result = palloc(total);
result[0] = '\0';
left = total - 1;
-
+
for (num = 0; num < numargs; num++)
{
- char * typename = format_type_internal(oidArray[num], -1);
+ char *typename = format_type_internal(oidArray[num], -1);
if (left < strlen(typename) + 2)
{
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index bef39d2da53..bebe8240144 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/formatting.c,v 1.33 2001/02/27 08:13:28 ishii Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/formatting.c,v 1.34 2001/03/22 03:59:50 momjian Exp $
*
*
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
@@ -47,14 +47,14 @@
* TODO (7.2):
* - replace some global values by struct that handle it
* - check last used entry in the cache_search
- * - better number building (formatting)
+ * - better number building (formatting)
* - add support for abstime
* - add support for roman number to standard number conversion
* - add support for number spelling
* - add support for string to string formatting (we must be better
- * than Oracle :-),
- * to_char('Hello', 'X X X X X') -> 'H e l l o'
- *
+ * than Oracle :-),
+ * to_char('Hello', 'X X X X X') -> 'H e l l o'
+ *
* -----------------------------------------------------------------------
*/
@@ -116,8 +116,8 @@
* External (defined in PgSQL dt.c (timestamp utils))
* ----------
*/
-extern char *months[], /* month abbreviation */
- *days[]; /* full days */
+extern char *months[], /* month abbreviation */
+ *days[]; /* full days */
/* ----------
* Format parser structs
@@ -125,28 +125,28 @@ extern char *months[], /* month abbreviation */
*/
typedef struct
{
- char *name; /* suffix string */
- int len, /* suffix length */
- id, /* used in node->suffix */
- type; /* prefix / postfix */
+ char *name; /* suffix string */
+ int len, /* suffix length */
+ id, /* used in node->suffix */
+ type; /* prefix / postfix */
} KeySuffix;
typedef struct
{
- char *name; /* keyword */
- /* action for keyword */
- int len, /* keyword length */
- (*action) (),
- id; /* keyword id */
- bool isitdigit; /* is expected output/input digit */
+ char *name; /* keyword */
+ /* action for keyword */
+ int len, /* keyword length */
+ (*action) (),
+ id; /* keyword id */
+ bool isitdigit; /* is expected output/input digit */
} KeyWord;
typedef struct
{
- int type; /* node type */
- KeyWord *key; /* if node type is KEYWORD */
- int character, /* if node type is CHAR */
- suffix; /* keyword suffix */
+ int type; /* node type */
+ KeyWord *key; /* if node type is KEYWORD */
+ int character, /* if node type is CHAR */
+ suffix; /* keyword suffix */
} FormatNode;
#define NODE_TYPE_END 1
@@ -249,7 +249,7 @@ static char *numth[] = {"st", "nd", "rd", "th", NULL};
* Flags for DCH version
* ----------
*/
-static int DCH_global_flag = 0;
+static int DCH_global_flag = 0;
#define DCH_F_FX 0x01
@@ -262,15 +262,15 @@ static int DCH_global_flag = 0;
*/
typedef struct
{
- int pre, /* (count) numbers before decimal */
- post, /* (count) numbers after decimal */
- lsign, /* want locales sign */
- flag, /* number parametrs */
- pre_lsign_num, /* tmp value for lsign */
- multi, /* multiplier for 'V' */
- zero_start, /* position of first zero */
- zero_end, /* position of last zero */
- need_locale; /* needs it locale */
+ int pre, /* (count) numbers before decimal */
+ post, /* (count) numbers after decimal */
+ lsign, /* want locales sign */
+ flag, /* number parametrs */
+ pre_lsign_num, /* tmp value for lsign */
+ multi, /* multiplier for 'V' */
+ zero_start, /* position of first zero */
+ zero_end, /* position of last zero */
+ need_locale; /* needs it locale */
} NUMDesc;
/* ----------
@@ -280,14 +280,14 @@ typedef struct
#define NUM_F_DECIMAL 0x01
#define NUM_F_LDECIMAL 0x02
#define NUM_F_ZERO 0x04
-#define NUM_F_BLANK 0x08
+#define NUM_F_BLANK 0x08
#define NUM_F_FILLMODE 0x10
-#define NUM_F_LSIGN 0x20
+#define NUM_F_LSIGN 0x20
#define NUM_F_BRACKET 0x40
-#define NUM_F_MINUS 0x80
+#define NUM_F_MINUS 0x80
#define NUM_F_PLUS 0x100
-#define NUM_F_ROMAN 0x200
-#define NUM_F_MULTI 0x400
+#define NUM_F_ROMAN 0x200
+#define NUM_F_MULTI 0x400
#define NUM_LSIGN_PRE -1
#define NUM_LSIGN_POST 1
@@ -299,20 +299,20 @@ typedef struct
*/
#define IS_DECIMAL(_f) ((_f)->flag & NUM_F_DECIMAL)
#define IS_LDECIMAL(_f) ((_f)->flag & NUM_F_LDECIMAL)
-#define IS_ZERO(_f) ((_f)->flag & NUM_F_ZERO)
+#define IS_ZERO(_f) ((_f)->flag & NUM_F_ZERO)
#define IS_BLANK(_f) ((_f)->flag & NUM_F_BLANK)
#define IS_FILLMODE(_f) ((_f)->flag & NUM_F_FILLMODE)
#define IS_BRACKET(_f) ((_f)->flag & NUM_F_BRACKET)
#define IS_MINUS(_f) ((_f)->flag & NUM_F_MINUS)
#define IS_LSIGN(_f) ((_f)->flag & NUM_F_LSIGN)
-#define IS_PLUS(_f) ((_f)->flag & NUM_F_PLUS)
+#define IS_PLUS(_f) ((_f)->flag & NUM_F_PLUS)
#define IS_ROMAN(_f) ((_f)->flag & NUM_F_ROMAN)
#define IS_MULTI(_f) ((_f)->flag & NUM_F_MULTI)
/* ----------
* Format picture cache
* (cache size:
- * Number part = NUM_CACHE_SIZE * NUM_CACHE_FIELDS
+ * Number part = NUM_CACHE_SIZE * NUM_CACHE_FIELDS
* Date-time part = DCH_CACHE_SIZE * DCH_CACHE_FIELDS
* )
* ----------
@@ -326,25 +326,25 @@ typedef struct
{
FormatNode format[DCH_CACHE_SIZE + 1];
char str[DCH_CACHE_SIZE + 1];
- int age;
+ int age;
} DCHCacheEntry;
typedef struct
{
FormatNode format[NUM_CACHE_SIZE + 1];
char str[NUM_CACHE_SIZE + 1];
- int age;
+ int age;
NUMDesc Num;
} NUMCacheEntry;
/* global cache for --- date/time part */
-static DCHCacheEntry DCHCache[DCH_CACHE_FIELDS + 1];
+static DCHCacheEntry DCHCache[DCH_CACHE_FIELDS + 1];
static int n_DCHCache = 0; /* number of entries */
static int DCHCounter = 0;
/* global cache for --- number part */
-static NUMCacheEntry NUMCache[NUM_CACHE_FIELDS + 1];
+static NUMCacheEntry NUMCache[NUM_CACHE_FIELDS + 1];
static NUMCacheEntry *last_NUMCacheEntry;
static int n_NUMCache = 0; /* number of entries */
@@ -356,18 +356,38 @@ static int NUMCounter = 0;
* For char->date/time conversion
* ----------
*/
-typedef struct {
- int hh, am, pm, mi, ss, ssss, d, dd, ddd, mm, yyyy, yyy, yy, y,
- bc, iw, ww, w, cc, q, j;
+typedef struct
+{
+ int hh,
+ am,
+ pm,
+ mi,
+ ss,
+ ssss,
+ d,
+ dd,
+ ddd,
+ mm,
+ yyyy,
+ yyy,
+ yy,
+ y,
+ bc,
+ iw,
+ ww,
+ w,
+ cc,
+ q,
+ j;
} TmFromChar;
-#define ZERO_tmfc( _X ) \
+#define ZERO_tmfc( _X ) \
do { \
(_X)->hh= (_X)->am= (_X)->pm= (_X)->mi= (_X)->ss= (_X)->ssss= \
(_X)->d= (_X)->dd= (_X)->ddd= (_X)->mm= (_X)->yyyy= (_X)->yyy= \
(_X)->yy= (_X)->y= (_X)->bc= (_X)->iw= (_X)->ww= (_X)->w= \
(_X)->cc= (_X)->q= (_X)->j= 0; \
- } while(0)
+ } while(0)
#ifdef DEBUG_TO_FROM_CHAR
@@ -396,8 +416,10 @@ typedef struct {
* Private global-modul definitions
* ----------
*/
-static struct tm _tm, *tm = &_tm;
-static TmFromChar _tmfc, *tmfc = &_tmfc;
+static struct tm _tm,
+ *tm = &_tm;
+static TmFromChar _tmfc,
+ *tmfc = &_tmfc;
static char *tzn;
/* ----------
@@ -620,7 +642,7 @@ static KeyWord DCH_keywords[] = {
{"AM", 2, dch_time, DCH_AM, FALSE},
{"B.C.", 4, dch_date, DCH_B_C, FALSE}, /* B */
{"BC", 2, dch_date, DCH_BC, FALSE},
- {"CC", 2, dch_date, DCH_CC, TRUE}, /* C */
+ {"CC", 2, dch_date, DCH_CC, TRUE}, /* C */
{"DAY", 3, dch_date, DCH_DAY, FALSE}, /* D */
{"DDD", 3, dch_date, DCH_DDD, TRUE},
{"DD", 2, dch_date, DCH_DD, TRUE},
@@ -632,8 +654,8 @@ static KeyWord DCH_keywords[] = {
{"HH24", 4, dch_time, DCH_HH24, TRUE}, /* H */
{"HH12", 4, dch_time, DCH_HH12, TRUE},
{"HH", 2, dch_time, DCH_HH, TRUE},
- {"IW", 2, dch_date, DCH_IW, TRUE}, /* I */
- {"J", 1, dch_date, DCH_J, TRUE}, /* J */
+ {"IW", 2, dch_date, DCH_IW, TRUE}, /* I */
+ {"J", 1, dch_date, DCH_J, TRUE}, /* J */
{"MI", 2, dch_time, DCH_MI, TRUE},
{"MM", 2, dch_date, DCH_MM, TRUE},
{"MONTH", 5, dch_date, DCH_MONTH, FALSE},
@@ -642,12 +664,12 @@ static KeyWord DCH_keywords[] = {
{"Mon", 3, dch_date, DCH_Mon, FALSE},
{"P.M.", 4, dch_time, DCH_P_M, FALSE}, /* P */
{"PM", 2, dch_time, DCH_PM, FALSE},
- {"Q", 1, dch_date, DCH_Q, TRUE}, /* Q */
- {"RM", 2, dch_date, DCH_RM, FALSE}, /* R */
- {"SSSS", 4, dch_time, DCH_SSSS, TRUE}, /* S */
+ {"Q", 1, dch_date, DCH_Q, TRUE}, /* Q */
+ {"RM", 2, dch_date, DCH_RM, FALSE}, /* R */
+ {"SSSS", 4, dch_time, DCH_SSSS, TRUE}, /* S */
{"SS", 2, dch_time, DCH_SS, TRUE},
- {"TZ", 2, dch_time, DCH_TZ, FALSE}, /* T */
- {"WW", 2, dch_date, DCH_WW, TRUE}, /* W */
+ {"TZ", 2, dch_time, DCH_TZ, FALSE}, /* T */
+ {"WW", 2, dch_date, DCH_WW, TRUE}, /* W */
{"W", 1, dch_date, DCH_W, TRUE},
{"Y,YYY", 5, dch_date, DCH_Y_YYY, TRUE}, /* Y */
{"YYYY", 4, dch_date, DCH_YYYY, TRUE},
@@ -660,7 +682,7 @@ static KeyWord DCH_keywords[] = {
{"am", 2, dch_time, DCH_am, FALSE},
{"b.c.", 4, dch_date, DCH_b_c, FALSE}, /* b */
{"bc", 2, dch_date, DCH_bc, FALSE},
- {"cc", 2, dch_date, DCH_CC, TRUE}, /* c */
+ {"cc", 2, dch_date, DCH_CC, TRUE}, /* c */
{"day", 3, dch_date, DCH_day, FALSE}, /* d */
{"ddd", 3, dch_date, DCH_DDD, TRUE},
{"dd", 2, dch_date, DCH_DD, TRUE},
@@ -670,20 +692,20 @@ static KeyWord DCH_keywords[] = {
{"hh24", 4, dch_time, DCH_HH24, TRUE}, /* h */
{"hh12", 4, dch_time, DCH_HH12, TRUE},
{"hh", 2, dch_time, DCH_HH, TRUE},
- {"iw", 2, dch_date, DCH_IW, TRUE}, /* i */
- {"j", 1, dch_time, DCH_J, TRUE}, /* j */
- {"mi", 2, dch_time, DCH_MI, TRUE}, /* m */
+ {"iw", 2, dch_date, DCH_IW, TRUE}, /* i */
+ {"j", 1, dch_time, DCH_J, TRUE}, /* j */
+ {"mi", 2, dch_time, DCH_MI, TRUE}, /* m */
{"mm", 2, dch_date, DCH_MM, TRUE},
{"month", 5, dch_date, DCH_month, FALSE},
{"mon", 3, dch_date, DCH_mon, FALSE},
{"p.m.", 4, dch_time, DCH_p_m, FALSE}, /* p */
{"pm", 2, dch_time, DCH_pm, FALSE},
- {"q", 1, dch_date, DCH_Q, TRUE}, /* q */
- {"rm", 2, dch_date, DCH_rm, FALSE}, /* r */
- {"ssss", 4, dch_time, DCH_SSSS, TRUE}, /* s */
+ {"q", 1, dch_date, DCH_Q, TRUE}, /* q */
+ {"rm", 2, dch_date, DCH_rm, FALSE}, /* r */
+ {"ssss", 4, dch_time, DCH_SSSS, TRUE}, /* s */
{"ss", 2, dch_time, DCH_SS, TRUE},
- {"tz", 2, dch_time, DCH_tz, FALSE}, /* t */
- {"ww", 2, dch_date, DCH_WW, TRUE}, /* w */
+ {"tz", 2, dch_time, DCH_tz, FALSE}, /* t */
+ {"ww", 2, dch_date, DCH_WW, TRUE}, /* w */
{"w", 1, dch_date, DCH_W, TRUE},
{"y,yyy", 5, dch_date, DCH_Y_YYY, TRUE}, /* y */
{"yyyy", 4, dch_date, DCH_YYYY, TRUE},
@@ -794,32 +816,33 @@ static int NUM_index[KeyWord_INDEX_SIZE] = {
*/
typedef struct NUMProc
{
- int type; /* FROM_CHAR (TO_NUMBER) or TO_CHAR */
-
- NUMDesc *Num; /* number description */
-
- int sign, /* '-' or '+' */
- sign_wrote, /* was sign write */
- sign_pos, /* pre number sign position */
- num_count, /* number of write digits */
- num_in, /* is inside number */
- num_curr, /* current position in number */
- num_pre, /* space before first number */
-
- read_dec, /* to_number - was read dec. point */
- read_post; /* to_number - number of dec. digit */
-
- char *number, /* string with number */
- *number_p, /* pointer to current number pozition */
- *inout, /* in / out buffer */
- *inout_p, /* pointer to current inout pozition */
- *last_relevant, /* last relevant number after decimal point */
-
- *L_negative_sign,/* Locale */
- *L_positive_sign,
- *decimal,
- *L_thousands_sep,
- *L_currency_symbol;
+ int type; /* FROM_CHAR (TO_NUMBER) or TO_CHAR */
+
+ NUMDesc *Num; /* number description */
+
+ int sign, /* '-' or '+' */
+ sign_wrote, /* was sign write */
+ sign_pos, /* pre number sign position */
+ num_count, /* number of write digits */
+ num_in, /* is inside number */
+ num_curr, /* current position in number */
+ num_pre, /* space before first number */
+
+ read_dec, /* to_number - was read dec. point */
+ read_post; /* to_number - number of dec. digit */
+
+ char *number, /* string with number */
+ *number_p, /* pointer to current number pozition */
+ *inout, /* in / out buffer */
+ *inout_p, /* pointer to current inout pozition */
+ *last_relevant, /* last relevant number after decimal
+ * point */
+
+ *L_negative_sign,/* Locale */
+ *L_positive_sign,
+ *decimal,
+ *L_thousands_sep,
+ *L_currency_symbol;
} NUMProc;
@@ -1093,11 +1116,11 @@ static void
parse_format(FormatNode *node, char *str, KeyWord *kw,
KeySuffix *suf, int *index, int ver, NUMDesc *Num)
{
- KeySuffix *s;
- FormatNode *n;
- int node_set = 0,
- suffix,
- last = 0;
+ KeySuffix *s;
+ FormatNode *n;
+ int node_set = 0,
+ suffix,
+ last = 0;
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "to_char/number(): run parser.");
@@ -1245,7 +1268,7 @@ DCH_processor(FormatNode *node, char *inout, int flag)
{
if (n->type == NODE_TYPE_ACTION)
{
- int len;
+ int len;
/* ----------
* Call node action function
@@ -1274,7 +1297,7 @@ DCH_processor(FormatNode *node, char *inout, int flag)
* Skip blank space in FROM_CHAR's input
* ----------
*/
- if (isspace((unsigned char) n->character) && IS_FX == 0)
+ if (isspace((unsigned char) n->character) && IS_FX == 0)
{
while (*s != '\0' && isspace((unsigned char) *(s + 1)))
++s;
@@ -1341,18 +1364,19 @@ dump_node(FormatNode *node, int max)
static char *
get_th(char *num, int type)
{
- int len = strlen(num),
- last, seclast;
+ int len = strlen(num),
+ last,
+ seclast;
last = *(num + (len - 1));
if (!isdigit((unsigned char) last))
elog(ERROR, "get_th: '%s' is not number.", num);
/*
- * All "teens" (<x>1[0-9]) get 'TH/th',
- * while <x>[02-9][123] still get 'ST/st', 'ND/nd', 'RD/rd', respectively
+ * All "teens" (<x>1[0-9]) get 'TH/th', while <x>[02-9][123] still get
+ * 'ST/st', 'ND/nd', 'RD/rd', respectively
*/
- if ((len > 1) && ((seclast = num[len-2]) == '1'))
+ if ((len > 1) && ((seclast = num[len - 2]) == '1'))
last = 0;
switch (last)
@@ -1442,11 +1466,11 @@ str_tolower(char *buff)
static int
seq_search(char *name, char **array, int type, int max, int *len)
{
- char *p,
- *n,
- **a;
- int last,
- i;
+ char *p,
+ *n,
+ **a;
+ int last,
+ i;
*len = 0;
@@ -1522,9 +1546,9 @@ seq_search(char *name, char **array, int type, int max, int *len)
static void
dump_index(KeyWord *k, int *index)
{
- int i,
- count = 0,
- free_i = 0;
+ int i,
+ count = 0,
+ free_i = 0;
elog(DEBUG_elog_output, "TO-FROM_CHAR: Dump KeyWord Index:");
@@ -1563,9 +1587,9 @@ dch_global(int arg, char *inout, int suf, int flag, FormatNode *node)
{
switch (arg)
{
- case DCH_FX:
- DCH_global_flag |= DCH_F_FX;
- break;
+ case DCH_FX:
+ DCH_global_flag |= DCH_F_FX;
+ break;
}
return -1;
}
@@ -1579,29 +1603,29 @@ is_next_separator(FormatNode *n)
{
if (n->type == NODE_TYPE_END)
return FALSE;
-
+
if (n->type == NODE_TYPE_ACTION && S_THth(n->suffix))
return TRUE;
-
- /*
- * Next node
+
+ /*
+ * Next node
*/
- n++;
-
+ n++;
+
if (n->type == NODE_TYPE_END)
return FALSE;
-
+
if (n->type == NODE_TYPE_ACTION)
{
if (n->key->isitdigit)
return FALSE;
-
- return TRUE;
- }
+
+ return TRUE;
+ }
else if (isdigit((unsigned char) n->character))
return FALSE;
-
- return TRUE; /* some non-digit input (separator) */
+
+ return TRUE; /* some non-digit input (separator) */
}
#define AMPM_ERROR elog(ERROR, "to_timestamp(): bad AM/PM string")
@@ -1619,12 +1643,12 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
switch (arg)
{
- case DCH_A_M:
+ case DCH_A_M:
case DCH_P_M:
if (flag == TO_CHAR)
{
- strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < 24) ? P_M_STR : A_M_STR));
+ strcpy(inout, ((tm->tm_hour > 11
+ && tm->tm_hour < 24) ? P_M_STR : A_M_STR));
return 3;
}
else if (flag == FROM_CHAR)
@@ -1632,9 +1656,9 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
if (strncmp(inout, P_M_STR, 4) == 0)
tmfc->pm = TRUE;
else if (strncmp(inout, A_M_STR, 4) == 0)
- tmfc->am = TRUE;
+ tmfc->am = TRUE;
else
- AMPM_ERROR;
+ AMPM_ERROR;
return 3;
}
break;
@@ -1642,8 +1666,8 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
case DCH_PM:
if (flag == TO_CHAR)
{
- strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < 24) ? PM_STR : AM_STR));
+ strcpy(inout, ((tm->tm_hour > 11
+ && tm->tm_hour < 24) ? PM_STR : AM_STR));
return 1;
}
else if (flag == FROM_CHAR)
@@ -1653,7 +1677,7 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
else if (strncmp(inout, AM_STR, 2) == 0)
tmfc->am = TRUE;
else
- AMPM_ERROR;
+ AMPM_ERROR;
return 1;
}
break;
@@ -1661,8 +1685,8 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
case DCH_p_m:
if (flag == TO_CHAR)
{
- strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < 24) ? p_m_STR : a_m_STR));
+ strcpy(inout, ((tm->tm_hour > 11
+ && tm->tm_hour < 24) ? p_m_STR : a_m_STR));
return 3;
}
else if (flag == FROM_CHAR)
@@ -1672,7 +1696,7 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
else if (strncmp(inout, a_m_STR, 4) == 0)
tmfc->am = TRUE;
else
- AMPM_ERROR;
+ AMPM_ERROR;
return 3;
}
break;
@@ -1681,7 +1705,7 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
if (flag == TO_CHAR)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < 24) ? pm_STR : am_STR));
+ && tm->tm_hour < 24) ? pm_STR : am_STR));
return 1;
}
else if (flag == FROM_CHAR)
@@ -1691,7 +1715,7 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
else if (strncmp(inout, am_STR, 2) == 0)
tmfc->am = TRUE;
else
- AMPM_ERROR;
+ AMPM_ERROR;
return 1;
}
break;
@@ -1812,12 +1836,12 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout) - 1;
}
- else if (flag == FROM_CHAR)
+ else if (flag == FROM_CHAR)
{
if (is_next_separator(node))
sscanf(inout, "%d", &tmfc->ssss);
else
- sscanf(inout, "%05d", &tmfc->ssss);
+ sscanf(inout, "%05d", &tmfc->ssss);
return int4len((int4) tmfc->ssss) - 1 + SKIP_THth(suf);
}
break;
@@ -1825,24 +1849,22 @@ dch_time(int arg, char *inout, int suf, int flag, FormatNode *node)
case DCH_TZ:
if (flag == TO_CHAR && tzn)
{
- int siz = strlen(tzn);
-
+ int siz = strlen(tzn);
+
if (arg == DCH_TZ)
strcpy(inout, tzn);
- else
+ else
{
- char *p = palloc(siz);
-
+ char *p = palloc(siz);
+
strcpy(p, tzn);
strcpy(inout, str_tolower(p));
pfree(p);
}
return siz - 1;
- }
- else if (flag == FROM_CHAR)
- {
+ }
+ else if (flag == FROM_CHAR)
elog(ERROR, "to_timestamp(): TZ/tz not supported.");
- }
}
return -1;
}
@@ -1864,10 +1886,10 @@ do { \
static int
dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
{
- char buff[DCH_CACHE_SIZE],
- *p_inout;
- int i,
- len;
+ char buff[DCH_CACHE_SIZE],
+ *p_inout;
+ int i,
+ len;
p_inout = inout;
@@ -1881,7 +1903,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
{
if (arg == DCH_MONTH || arg == DCH_Month || arg == DCH_month)
{
- tmfc->mm = seq_search(inout, months_full, ONE_UPPER, FULL_SIZ, &len) +1;
+ tmfc->mm = seq_search(inout, months_full, ONE_UPPER, FULL_SIZ, &len) + 1;
CHECK_SEQ_SEARCH(len, "MONTH/Month/month");
if (S_FM(suf))
return len - 1;
@@ -1891,7 +1913,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
}
else if (arg == DCH_MON || arg == DCH_Mon || arg == DCH_mon)
{
- tmfc->mm = seq_search(inout, months, ONE_UPPER, MAX_MON_LEN, &len) +1;
+ tmfc->mm = seq_search(inout, months, ONE_UPPER, MAX_MON_LEN, &len) + 1;
CHECK_SEQ_SEARCH(len, "MON/Mon/mon");
return 2;
}
@@ -1983,14 +2005,14 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
return strlen(p_inout) - 1;
else
return 8;
-
+
case DCH_Month:
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, months_full[tm->tm_mon - 1]);
if (S_FM(suf))
return strlen(p_inout) - 1;
else
return 8;
-
+
case DCH_month:
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, months_full[tm->tm_mon - 1]);
*inout = tolower((unsigned char) *inout);
@@ -1998,12 +2020,12 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
return strlen(p_inout) - 1;
else
return 8;
-
+
case DCH_MON:
strcpy(inout, months[tm->tm_mon - 1]);
inout = str_toupper(inout);
return 2;
-
+
case DCH_Mon:
strcpy(inout, months[tm->tm_mon - 1]);
return 2;
@@ -2149,7 +2171,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
if (flag == TO_CHAR)
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
- (tm->tm_yday-1) / 7 + 1);
+ (tm->tm_yday - 1) / 7 + 1);
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
if (S_FM(suf) || S_THth(suf))
@@ -2158,7 +2180,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
return 1;
}
- else if (flag == FROM_CHAR)
+ else if (flag == FROM_CHAR)
{
if (S_FM(suf))
{
@@ -2176,7 +2198,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
if (flag == TO_CHAR)
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
- date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
+ date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
if (S_FM(suf) || S_THth(suf))
@@ -2185,7 +2207,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
return 1;
}
- else if (flag == FROM_CHAR)
+ else if (flag == FROM_CHAR)
{
if (S_FM(suf))
{
@@ -2198,7 +2220,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
return 1 + SKIP_THth(suf);
}
}
- break;
+ break;
case DCH_Q:
if (flag == TO_CHAR)
{
@@ -2233,7 +2255,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
else if (flag == FROM_CHAR)
{
sscanf(inout, "%d", &tmfc->cc);
- return int4len((int4) tmfc->cc) + SKIP_THth(suf) -1;
+ return int4len((int4) tmfc->cc) + SKIP_THth(suf) - 1;
}
break;
case DCH_Y_YYY:
@@ -2277,7 +2299,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
sscanf(inout, "%d", &tmfc->yyyy);
else
sscanf(inout, "%04d", &tmfc->yyyy);
-
+
if (!S_FM(suf) && tmfc->yyyy <= 9999 && tmfc->yyyy >= -9999)
len = 4;
else
@@ -2391,7 +2413,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
case DCH_W:
if (flag == TO_CHAR)
{
- sprintf(inout, "%d", (tm->tm_mday-1) / 7 + 1);
+ sprintf(inout, "%d", (tm->tm_mday - 1) / 7 + 1);
if (S_THth(suf))
{
str_numth(p_inout, inout, S_TH_TYPE(suf));
@@ -2416,7 +2438,7 @@ dch_date(int arg, char *inout, int suf, int flag, FormatNode *node)
else if (flag == FROM_CHAR)
{
sscanf(inout, "%d", &tmfc->j);
- return int4len((int4) tmfc->j) + SKIP_THth(suf) -1;
+ return int4len((int4) tmfc->j) + SKIP_THth(suf) - 1;
}
break;
}
@@ -2522,24 +2544,24 @@ Datum
timestamp_to_char(PG_FUNCTION_ARGS)
{
Timestamp dt = PG_GETARG_TIMESTAMP(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- text *result,
- *result_tmp;
-
- FormatNode *format;
- char *str;
+ text *fmt = PG_GETARG_TEXT_P(1);
+ text *result,
+ *result_tmp;
+
+ FormatNode *format;
+ char *str;
double fsec;
- int len = 0,
- tz,
- flag = 0,
- x = 0;
+ int len = 0,
+ tz,
+ flag = 0,
+ x = 0;
len = VARSIZE(fmt) - VARHDRSZ;
if (len <= 0 || TIMESTAMP_NOT_FINITE(dt))
PG_RETURN_NULL();
- ZERO_tm(tm);
+ ZERO_tm(tm);
tzn = NULL;
if (TIMESTAMP_IS_EPOCH(dt))
@@ -2613,7 +2635,7 @@ timestamp_to_char(PG_FUNCTION_ARGS)
* ----------
*/
parse_format(ent->format, str, DCH_keywords,
- DCH_suff, DCH_index, DCH_TYPE, NULL);
+ DCH_suff, DCH_index, DCH_TYPE, NULL);
(ent->format + len)->type = NODE_TYPE_END; /* Paranoa? */
@@ -2642,7 +2664,7 @@ timestamp_to_char(PG_FUNCTION_ARGS)
pfree(result);
PG_RETURN_NULL();
}
-
+
result_tmp = result;
result = (text *) palloc(len + 1 + VARHDRSZ);
@@ -2664,13 +2686,13 @@ timestamp_to_char(PG_FUNCTION_ARGS)
Datum
to_timestamp(PG_FUNCTION_ARGS)
{
- text *date_txt = PG_GETARG_TEXT_P(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- FormatNode *format;
- int flag = 0;
+ text *date_txt = PG_GETARG_TEXT_P(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ FormatNode *format;
+ int flag = 0;
Timestamp result;
- char *str;
- char *date_str;
+ char *str;
+ char *date_str;
int len,
date_len,
fsec = 0,
@@ -2766,16 +2788,16 @@ to_timestamp(PG_FUNCTION_ARGS)
}
/* --------------------------------------------------------------
- * Convert values that user define for FROM_CHAR (to_date/to_timestamp)
+ * Convert values that user define for FROM_CHAR (to_date/to_timestamp)
* to standard 'tm'
* ----------
- */
+ */
#ifdef DEBUG_TO_FROM_CHAR
NOTICE_TMFC;
-#endif
- if (tmfc->ssss)
+#endif
+ if (tmfc->ssss)
{
- int x = tmfc->ssss;
+ int x = tmfc->ssss;
tm->tm_hour = x / 3600;
x %= 3600;
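The ssss branch that starts above (and continues past this hunk) unpacks the SSSS field, seconds past midnight, into hour/minute/second; presumably the remainder after the hour is split the same way. A minimal sketch of the full conversion:

	static void
	ssss_to_hms_demo(int ssss, int *hour, int *min, int *sec)
	{
		*hour = ssss / 3600;
		ssss %= 3600;
		*min = ssss / 60;
		*sec = ssss % 60;
	}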
@@ -2785,128 +2807,148 @@ to_timestamp(PG_FUNCTION_ARGS)
}
if (tmfc->cc)
- tm->tm_year = (tmfc->cc-1) * 100;
+ tm->tm_year = (tmfc->cc - 1) * 100;
- if (tmfc->ww)
+ if (tmfc->ww)
tmfc->ddd = (tmfc->ww - 1) * 7 + 1;
- if (tmfc->w)
+ if (tmfc->w)
tmfc->dd = (tmfc->w - 1) * 7 + 1;
- if (tmfc->ss) tm->tm_sec = tmfc->ss;
- if (tmfc->mi) tm->tm_min = tmfc->mi;
- if (tmfc->hh) tm->tm_hour = tmfc->hh;
-
- if (tmfc->pm || tmfc->am)
- {
- if (tm->tm_hour < 1 || tm->tm_hour > 12)
- elog(ERROR, "to_timestamp(): AM/PM hour must be between 1 and 12");
-
+ if (tmfc->ss)
+ tm->tm_sec = tmfc->ss;
+ if (tmfc->mi)
+ tm->tm_min = tmfc->mi;
+ if (tmfc->hh)
+ tm->tm_hour = tmfc->hh;
+
+ if (tmfc->pm || tmfc->am)
+ {
+ if (tm->tm_hour < 1 || tm->tm_hour > 12)
+ elog(ERROR, "to_timestamp(): AM/PM hour must be between 1 and 12");
+
if (tmfc->pm && tm->tm_hour < 12)
tm->tm_hour += 12;
-
+
else if (tmfc->am && tm->tm_hour == 12)
- tm->tm_hour = 0;
- }
+ tm->tm_hour = 0;
+ }
- switch (tmfc->q)
+ switch (tmfc->q)
{
- case 1: tm->tm_mday = 1; tm->tm_mon = 1; break;
- case 2: tm->tm_mday = 1; tm->tm_mon = 4; break;
- case 3: tm->tm_mday = 1; tm->tm_mon = 7; break;
- case 4: tm->tm_mday = 1; tm->tm_mon = 10; break;
+ case 1:
+ tm->tm_mday = 1;
+ tm->tm_mon = 1;
+ break;
+ case 2:
+ tm->tm_mday = 1;
+ tm->tm_mon = 4;
+ break;
+ case 3:
+ tm->tm_mday = 1;
+ tm->tm_mon = 7;
+ break;
+ case 4:
+ tm->tm_mday = 1;
+ tm->tm_mon = 10;
+ break;
}
-
- if (tmfc->yyyy)
+
+ if (tmfc->yyyy)
tm->tm_year = tmfc->yyyy;
else if (tmfc->y)
{
+
/*
- * 1-digit year:
- * always +2000
+ * 1-digit year: always +2000
*/
- tm->tm_year = tmfc->y + 2000;
- }
+ tm->tm_year = tmfc->y + 2000;
+ }
else if (tmfc->yy)
{
+
/*
- * 2-digit year:
- * '00' ... '69' = 2000 ... 2069
- * '70' ... '99' = 1970 ... 1999
- */
+ * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ... '99' =
+ * 1970 ... 1999
+ */
tm->tm_year = tmfc->yy;
-
+
if (tm->tm_year < 70)
tm->tm_year += 2000;
- else
+ else
tm->tm_year += 1900;
}
else if (tmfc->yyy)
{
+
/*
- * 3-digit year:
- * '100' ... '999' = 1100 ... 1999
- * '000' ... '099' = 2000 ... 2099
+ * 3-digit year: '100' ... '999' = 1100 ... 1999 '000' ... '099' =
+ * 2000 ... 2099
*/
tm->tm_year = tmfc->yyy;
-
+
if (tm->tm_year >= 100)
tm->tm_year += 1000;
- else
+ else
tm->tm_year += 2000;
}
-
+
if (tmfc->bc)
{
if (tm->tm_year > 0)
tm->tm_year = -(tm->tm_year - 1);
else
elog(ERROR, "Inconsistant use of year %04d and 'BC'", tm->tm_year);
- }
-
+ }
+
if (tmfc->j)
j2date(tmfc->j, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
-
+
if (tmfc->iw)
isoweek2date(tmfc->iw, &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
-
- if (tmfc->d) tm->tm_wday = tmfc->d;
- if (tmfc->dd) tm->tm_mday = tmfc->dd;
- if (tmfc->ddd) tm->tm_yday = tmfc->ddd;
- if (tmfc->mm) tm->tm_mon = tmfc->mm;
+
+ if (tmfc->d)
+ tm->tm_wday = tmfc->d;
+ if (tmfc->dd)
+ tm->tm_mday = tmfc->dd;
+ if (tmfc->ddd)
+ tm->tm_yday = tmfc->ddd;
+ if (tmfc->mm)
+ tm->tm_mon = tmfc->mm;
/*
* we not ignore DDD
*/
- if (tmfc->ddd && (tm->tm_mon <=1 || tm->tm_mday <=1))
+ if (tmfc->ddd && (tm->tm_mon <= 1 || tm->tm_mday <= 1))
{
/* count mday and mon from yday */
- int *y, i;
-
- int ysum[2][13] = {
- { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365, 0 },
- { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366, 0 }};
-
- if (!tm->tm_year)
- elog(ERROR, "to_timestamp() cat't convert yday without year information");
-
- y = ysum[ isleap(tm->tm_year) ];
-
- for (i=0; i <= 11; i++)
- {
- if (tm->tm_yday < y[i])
+ int *y,
+ i;
+
+ int ysum[2][13] = {
+ {31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365, 0},
+ {31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366, 0}};
+
+ if (!tm->tm_year)
+ elog(ERROR, "to_timestamp() cat't convert yday without year information");
+
+ y = ysum[isleap(tm->tm_year)];
+
+ for (i = 0; i <= 11; i++)
+ {
+ if (tm->tm_yday < y[i])
break;
- }
- if (tm->tm_mon <=1)
- tm->tm_mon = i+1;
-
- if (tm->tm_mday <=1)
- tm->tm_mday = i == 0 ? tm->tm_yday :
- tm->tm_yday - y[i-1];
+ }
+ if (tm->tm_mon <= 1)
+ tm->tm_mon = i + 1;
+
+ if (tm->tm_mday <= 1)
+ tm->tm_mday = i == 0 ? tm->tm_yday :
+ tm->tm_yday - y[i - 1];
}
-
+
/* -------------------------------------------------------------- */
#ifdef DEBUG_TO_FROM_CHAR
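The rewrapped comments above document two conversions performed by to_timestamp(): the short-year rules (1-digit years land in 2000-2009, 2-digit years split at 70 between 2000-2069 and 1970-1999, 3-digit years split at 100 between 1100-1999 and 2000-2099) and the day-of-year lookup against a cumulative month-length table. A hedged sketch of both as standalone helpers; it assumes a valid 1-based day of year and uses a <= test at the month boundary rather than copying the original loop verbatim:

	static int
	normalize_year_demo(int value, int ndigits)
	{
		switch (ndigits)
		{
			case 1:
				return value + 2000;
			case 2:
				return (value < 70) ? value + 2000 : value + 1900;
			case 3:
				return (value >= 100) ? value + 1000 : value + 2000;
			default:
				return value;		/* 4+ digits are taken as-is */
		}
	}

	static void
	yday_to_month_day_demo(int yday, int leap, int *mon, int *mday)
	{
		static const int ysum[2][13] = {
			{31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365, 0},
			{31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366, 0}
		};
		const int  *y = ysum[leap ? 1 : 0];
		int			i;

		for (i = 0; i < 12; i++)
		{
			if (yday <= y[i])
				break;
		}
		*mon = i + 1;
		*mday = (i == 0) ? yday : yday - y[i - 1];
	}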
@@ -2920,21 +2962,21 @@ to_timestamp(PG_FUNCTION_ARGS)
tm->tm_year -= 1900;
tm->tm_mon -= 1;
-# ifdef DEBUG_TO_FROM_CHAR
+#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "TO-FROM_CHAR: Call mktime()");
NOTICE_TM;
-# endif
+#endif
mktime(tm);
tm->tm_year += 1900;
tm->tm_mon += 1;
-# if defined(HAVE_TM_ZONE)
+#if defined(HAVE_TM_ZONE)
tz = -(tm->tm_gmtoff); /* tm_gmtoff is Sun/DEC-ism */
-# elif defined(HAVE_INT_TIMEZONE)
+#elif defined(HAVE_INT_TIMEZONE)
tz = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
-# endif
+#endif
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
tz = CTimeZone;
#endif
}
@@ -2960,8 +3002,10 @@ to_timestamp(PG_FUNCTION_ARGS)
Datum
to_date(PG_FUNCTION_ARGS)
{
- /* Quick hack: since our inputs are just like to_timestamp,
- * hand over the whole input info struct...
+
+ /*
+ * Quick hack: since our inputs are just like to_timestamp, hand over
+ * the whole input info struct...
*/
return DirectFunctionCall1(timestamp_date, to_timestamp(fcinfo));
}
@@ -2985,7 +3029,7 @@ do { \
(_n)->lsign = 0; \
(_n)->pre = 0; \
(_n)->post = 0; \
- (_n)->pre_lsign_num = 0; \
+ (_n)->pre_lsign_num = 0; \
(_n)->need_locale = 0; \
(_n)->multi = 0; \
(_n)->zero_start = 0; \
@@ -3021,10 +3065,11 @@ NUM_cache_getnew(char *str)
for (ent = NUMCache; ent <= (NUMCache + NUM_CACHE_FIELDS); ent++)
{
- /* entry removed via NUM_cache_remove()
- * can be used here
+
+ /*
+ * entry removed via NUM_cache_remove() can be used here
*/
- if (*ent->str == '\0')
+ if (*ent->str == '\0')
{
old = ent;
break;
@@ -3063,7 +3108,7 @@ NUM_cache_getnew(char *str)
static NUMCacheEntry *
NUM_cache_search(char *str)
{
- int i = 0;
+ int i = 0;
NUMCacheEntry *ent;
/* counter overload check - paranoa? */
@@ -3196,12 +3241,12 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, int *flag)
static char *
int_to_roman(int number)
{
- int len = 0,
- num = 0,
- set = 0;
- char *p = NULL,
- *result,
- numstr[5];
+ int len = 0,
+ num = 0,
+ set = 0;
+ char *p = NULL,
+ *result,
+ numstr[5];
result = (char *) palloc(16);
*result = '\0';
@@ -3336,7 +3381,7 @@ static char *
get_last_relevant_decnum(char *num)
{
char *result,
- *p = strchr(num, '.');
+ *p = strchr(num, '.');
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "CALL: get_last_relevant_decnum()");
@@ -3483,7 +3528,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
}
else
{
- int x = strlen(Np->decimal);
+ int x = strlen(Np->decimal);
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale point (%c).", *Np->inout_p);
@@ -3716,8 +3761,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
int plen, int sign, int type)
{
FormatNode *n;
- NUMProc _Np,
- *Np = &_Np;
+ NUMProc _Np,
+ *Np = &_Np;
Np->Num = Num;
Np->type = type;
@@ -3802,9 +3847,9 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
{
if (IS_DECIMAL(Np->Num))
Np->last_relevant = get_last_relevant_decnum(
- Np->number +
- ((Np->Num->zero_end - Np->num_pre > 0) ?
- Np->Num->zero_end - Np->num_pre : 0));
+ Np->number +
+ ((Np->Num->zero_end - Np->num_pre > 0) ?
+ Np->Num->zero_end - Np->num_pre : 0));
}
if (!Np->sign_wrote && Np->num_pre == 0)
@@ -4168,7 +4213,7 @@ do { \
if (flag) \
pfree(format); \
\
- /* ---------- \
+ /* ---------- \
* for result is allocated max memory, which current format-picture\
* needs, now it must be re-allocate to result real size \
* ---------- \
@@ -4194,16 +4239,16 @@ do { \
Datum
numeric_to_number(PG_FUNCTION_ARGS)
{
- text *value = PG_GETARG_TEXT_P(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- NUMDesc Num;
- Datum result;
+ text *value = PG_GETARG_TEXT_P(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ NUMDesc Num;
+ Datum result;
FormatNode *format;
- char *numstr;
- int flag = 0;
- int len = 0;
- int scale,
- precision;
+ char *numstr;
+ int flag = 0;
+ int len = 0;
+ int scale,
+ precision;
len = VARSIZE(fmt) - VARHDRSZ;
@@ -4224,9 +4269,9 @@ numeric_to_number(PG_FUNCTION_ARGS)
pfree(format);
result = DirectFunctionCall3(numeric_in,
- CStringGetDatum(numstr),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
+ CStringGetDatum(numstr),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
pfree(numstr);
return result;
}
@@ -4238,20 +4283,20 @@ numeric_to_number(PG_FUNCTION_ARGS)
Datum
numeric_to_char(PG_FUNCTION_ARGS)
{
- Numeric value = PG_GETARG_NUMERIC(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- NUMDesc Num;
+ Numeric value = PG_GETARG_NUMERIC(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ NUMDesc Num;
FormatNode *format;
- text *result,
- *result_tmp;
- int flag = 0;
- int len = 0,
- plen = 0,
- sign = 0;
- char *numstr,
- *orgnum,
- *p;
- Numeric x;
+ text *result,
+ *result_tmp;
+ int flag = 0;
+ int len = 0,
+ plen = 0,
+ sign = 0;
+ char *numstr,
+ *orgnum,
+ *p;
+ Numeric x;
NUM_TOCHAR_prepare;
@@ -4262,11 +4307,11 @@ numeric_to_char(PG_FUNCTION_ARGS)
if (IS_ROMAN(&Num))
{
x = DatumGetNumeric(DirectFunctionCall2(numeric_round,
- NumericGetDatum(value),
- Int32GetDatum(0)));
+ NumericGetDatum(value),
+ Int32GetDatum(0)));
numstr = orgnum =
int_to_roman(DatumGetInt32(DirectFunctionCall1(numeric_int4,
- NumericGetDatum(x))));
+ NumericGetDatum(x))));
pfree(x);
}
else
@@ -4276,16 +4321,16 @@ numeric_to_char(PG_FUNCTION_ARGS)
if (IS_MULTI(&Num))
{
Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(10)));
+ Int32GetDatum(10)));
Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(Num.multi)));
+ Int32GetDatum(Num.multi)));
x = DatumGetNumeric(DirectFunctionCall2(numeric_power,
NumericGetDatum(a),
NumericGetDatum(b)));
val = DatumGetNumeric(DirectFunctionCall2(numeric_mul,
- NumericGetDatum(value),
- NumericGetDatum(x)));
+ NumericGetDatum(value),
+ NumericGetDatum(x)));
pfree(x);
pfree(a);
pfree(b);
@@ -4293,10 +4338,10 @@ numeric_to_char(PG_FUNCTION_ARGS)
}
x = DatumGetNumeric(DirectFunctionCall2(numeric_round,
- NumericGetDatum(val),
- Int32GetDatum(Num.post)));
+ NumericGetDatum(val),
+ Int32GetDatum(Num.post)));
orgnum = DatumGetCString(DirectFunctionCall1(numeric_out,
- NumericGetDatum(x)));
+ NumericGetDatum(x)));
pfree(x);
if (*orgnum == '-')
@@ -4339,18 +4384,18 @@ numeric_to_char(PG_FUNCTION_ARGS)
Datum
int4_to_char(PG_FUNCTION_ARGS)
{
- int32 value = PG_GETARG_INT32(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- NUMDesc Num;
+ int32 value = PG_GETARG_INT32(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ NUMDesc Num;
FormatNode *format;
- text *result,
- *result_tmp;
- int flag = 0;
- int len = 0,
- plen = 0,
- sign = 0;
- char *numstr,
- *orgnum;
+ text *result,
+ *result_tmp;
+ int flag = 0;
+ int len = 0,
+ plen = 0,
+ sign = 0;
+ char *numstr,
+ *orgnum;
NUM_TOCHAR_prepare;
@@ -4359,21 +4404,19 @@ int4_to_char(PG_FUNCTION_ARGS)
* ----------
*/
if (IS_ROMAN(&Num))
- {
numstr = orgnum = int_to_roman(value);
- }
else
{
if (IS_MULTI(&Num))
{
orgnum = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum(value * ((int32) pow((double) 10, (double) Num.multi)))));
+ Int32GetDatum(value * ((int32) pow((double) 10, (double) Num.multi)))));
Num.pre += Num.multi;
}
else
{
orgnum = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum(value)));
+ Int32GetDatum(value)));
}
len = strlen(orgnum);
@@ -4387,7 +4430,7 @@ int4_to_char(PG_FUNCTION_ARGS)
if (Num.post)
{
- int i;
+ int i;
numstr = (char *) palloc(len + Num.post + 2);
strcpy(numstr, orgnum + (*orgnum == '-' ? 1 : 0));
@@ -4423,18 +4466,18 @@ int4_to_char(PG_FUNCTION_ARGS)
Datum
int8_to_char(PG_FUNCTION_ARGS)
{
- int64 value = PG_GETARG_INT64(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- NUMDesc Num;
+ int64 value = PG_GETARG_INT64(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ NUMDesc Num;
FormatNode *format;
- text *result,
- *result_tmp;
- int flag = 0;
- int len = 0,
- plen = 0,
- sign = 0;
- char *numstr,
- *orgnum;
+ text *result,
+ *result_tmp;
+ int flag = 0;
+ int len = 0,
+ plen = 0,
+ sign = 0;
+ char *numstr,
+ *orgnum;
NUM_TOCHAR_prepare;
@@ -4446,7 +4489,7 @@ int8_to_char(PG_FUNCTION_ARGS)
{
/* Currently don't support int8 conversion to roman... */
numstr = orgnum = int_to_roman(DatumGetInt32(
- DirectFunctionCall1(int84, Int64GetDatum(value))));
+ DirectFunctionCall1(int84, Int64GetDatum(value))));
}
else
{
@@ -4455,14 +4498,14 @@ int8_to_char(PG_FUNCTION_ARGS)
double multi = pow((double) 10, (double) Num.multi);
value = DatumGetInt64(DirectFunctionCall2(int8mul,
- Int64GetDatum(value),
- DirectFunctionCall1(dtoi8,
- Float8GetDatum(multi))));
+ Int64GetDatum(value),
+ DirectFunctionCall1(dtoi8,
+ Float8GetDatum(multi))));
Num.pre += Num.multi;
}
orgnum = DatumGetCString(DirectFunctionCall1(int8out,
- Int64GetDatum(value)));
+ Int64GetDatum(value)));
len = strlen(orgnum);
if (*orgnum == '-')
@@ -4475,7 +4518,7 @@ int8_to_char(PG_FUNCTION_ARGS)
if (Num.post)
{
- int i;
+ int i;
numstr = (char *) palloc(len + Num.post + 2);
strcpy(numstr, orgnum + (*orgnum == '-' ? 1 : 0));
@@ -4511,19 +4554,19 @@ int8_to_char(PG_FUNCTION_ARGS)
Datum
float4_to_char(PG_FUNCTION_ARGS)
{
- float4 value = PG_GETARG_FLOAT4(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- NUMDesc Num;
+ float4 value = PG_GETARG_FLOAT4(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ NUMDesc Num;
FormatNode *format;
- text *result,
- *result_tmp;
- int flag = 0;
- int len = 0,
- plen = 0,
- sign = 0;
- char *numstr,
- *orgnum,
- *p;
+ text *result,
+ *result_tmp;
+ int flag = 0;
+ int len = 0,
+ plen = 0,
+ sign = 0;
+ char *numstr,
+ *orgnum,
+ *p;
NUM_TOCHAR_prepare;
@@ -4538,7 +4581,7 @@ float4_to_char(PG_FUNCTION_ARGS)
if (IS_MULTI(&Num))
{
- float multi = pow((double) 10, (double) Num.multi);
+ float multi = pow((double) 10, (double) Num.multi);
val = value * multi;
Num.pre += Num.multi;
@@ -4591,19 +4634,19 @@ float4_to_char(PG_FUNCTION_ARGS)
Datum
float8_to_char(PG_FUNCTION_ARGS)
{
- float8 value = PG_GETARG_FLOAT8(0);
- text *fmt = PG_GETARG_TEXT_P(1);
- NUMDesc Num;
+ float8 value = PG_GETARG_FLOAT8(0);
+ text *fmt = PG_GETARG_TEXT_P(1);
+ NUMDesc Num;
FormatNode *format;
- text *result,
- *result_tmp;
- int flag = 0;
- int len = 0,
- plen = 0,
- sign = 0;
- char *numstr,
- *orgnum,
- *p;
+ text *result,
+ *result_tmp;
+ int flag = 0;
+ int len = 0,
+ plen = 0,
+ sign = 0;
+ char *numstr,
+ *orgnum,
+ *p;
NUM_TOCHAR_prepare;
@@ -4618,7 +4661,7 @@ float8_to_char(PG_FUNCTION_ARGS)
if (IS_MULTI(&Num))
{
- double multi = pow((double) 10, (double) Num.multi);
+ double multi = pow((double) 10, (double) Num.multi);
val = value * multi;
Num.pre += Num.multi;
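(The MULTI hunks above are indentation-only, but the expression they rewrap is simply "scale the value by 10^n before formatting", which is what the 'V' template character does. A minimal sketch of that step — a hypothetical helper, not PostgreSQL code, and it assumes the scaled value still fits in int32:)

#include <math.h>
#include <stdint.h>

/* Sketch: scale an int32 the way the 'V' (multi) format does before
 * the digits are handed to the template filler. */
static int32_t
scale_for_multi(int32_t value, int multi)
{
    /* same expression the diff shows inside int4_to_char() */
    return value * (int32_t) pow(10.0, (double) multi);
}

(For example, with two digits after V, to_char(12, '999V99') should render ' 1200', i.e. 12 scaled by 10^2.)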
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index ef3e28fc61d..aac191b3778 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/geo_ops.c,v 1.57 2001/01/24 19:43:13 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/geo_ops.c,v 1.58 2001/03/22 03:59:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -484,11 +484,11 @@ box_ov(BOX *box1, BOX *box2)
FPle(box1->low.x, box2->high.x)) ||
(FPge(box2->high.x, box1->high.x) &&
FPle(box2->low.x, box1->high.x)))
- &&
- ((FPge(box1->high.y, box2->high.y) &&
- FPle(box1->low.y, box2->high.y)) ||
- (FPge(box2->high.y, box1->high.y) &&
- FPle(box2->low.y, box1->high.y)));
+ &&
+ ((FPge(box1->high.y, box2->high.y) &&
+ FPle(box1->low.y, box2->high.y)) ||
+ (FPge(box2->high.y, box1->high.y) &&
+ FPle(box2->low.y, box1->high.y)));
}
/* box_overleft - is the right edge of box1 to the left of
@@ -811,8 +811,10 @@ line_in(PG_FUNCTION_ARGS)
{
#ifdef ENABLE_LINE_TYPE
char *str = PG_GETARG_CSTRING(0);
+
#endif
LINE *line;
+
#ifdef ENABLE_LINE_TYPE
LSEG lseg;
int isopen;
@@ -838,8 +840,10 @@ line_out(PG_FUNCTION_ARGS)
{
#ifdef ENABLE_LINE_TYPE
LINE *line = PG_GETARG_LINE_P(0);
+
#endif
char *result;
+
#ifdef ENABLE_LINE_TYPE
LSEG lseg;
@@ -996,9 +1000,9 @@ line_intersect(PG_FUNCTION_ARGS)
LINE *l1 = PG_GETARG_LINE_P(0);
LINE *l2 = PG_GETARG_LINE_P(1);
- PG_RETURN_BOOL(! DatumGetBool(DirectFunctionCall2(line_parallel,
- LinePGetDatum(l1),
- LinePGetDatum(l2))));
+ PG_RETURN_BOOL(!DatumGetBool(DirectFunctionCall2(line_parallel,
+ LinePGetDatum(l1),
+ LinePGetDatum(l2))));
}
Datum
@@ -1089,9 +1093,9 @@ line_distance(PG_FUNCTION_ARGS)
float8 result;
Point *tmp;
- if (! DatumGetBool(DirectFunctionCall2(line_parallel,
- LinePGetDatum(l1),
- LinePGetDatum(l2))))
+ if (!DatumGetBool(DirectFunctionCall2(line_parallel,
+ LinePGetDatum(l1),
+ LinePGetDatum(l2))))
PG_RETURN_FLOAT8(0.0);
if (FPzero(l1->B)) /* vertical? */
PG_RETURN_FLOAT8(fabs(l1->C - l2->C));
@@ -1131,9 +1135,10 @@ line_interpt_internal(LINE *l1, LINE *l2)
y;
/*
- * NOTE: if the lines are identical then we will find they are parallel
- * and report "no intersection". This is a little weird, but since
- * there's no *unique* intersection, maybe it's appropriate behavior.
+ * NOTE: if the lines are identical then we will find they are
+ * parallel and report "no intersection". This is a little weird, but
+ * since there's no *unique* intersection, maybe it's appropriate
+ * behavior.
*/
if (DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
@@ -1226,7 +1231,7 @@ path_in(PG_FUNCTION_ARGS)
depth++;
}
- size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * npts;
+ size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
path->size = size;
@@ -1321,7 +1326,7 @@ path_isopen(PG_FUNCTION_ARGS)
{
PATH *path = PG_GETARG_PATH_P(0);
- PG_RETURN_BOOL(! path->closed);
+ PG_RETURN_BOOL(!path->closed);
}
Datum
@@ -1434,7 +1439,7 @@ path_distance(PG_FUNCTION_ARGS)
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
- LsegPGetDatum(&seg2)));
+ LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
@@ -1443,7 +1448,7 @@ path_distance(PG_FUNCTION_ARGS)
}
}
- if (! have_min)
+ if (!have_min)
PG_RETURN_NULL();
PG_RETURN_FLOAT8(min);
@@ -1992,9 +1997,10 @@ lseg_interpt(PG_FUNCTION_ARGS)
result = line_interpt_internal(&tmp1, &tmp2);
if (!PointerIsValid(result))
PG_RETURN_NULL();
+
/*
- * If the line intersection point isn't within l1 (or equivalently l2),
- * there is no valid segment intersection point at all.
+ * If the line intersection point isn't within l1 (or equivalently
+ * l2), there is no valid segment intersection point at all.
*/
if (!on_ps_internal(result, l1) ||
!on_ps_internal(result, l2))
@@ -2002,10 +2008,11 @@ lseg_interpt(PG_FUNCTION_ARGS)
pfree(result);
PG_RETURN_NULL();
}
+
/*
* If there is an intersection, then check explicitly for matching
- * endpoints since there may be rounding effects with annoying
- * lsb residue. - tgl 1997-07-09
+ * endpoints since there may be rounding effects with annoying lsb
+ * residue. - tgl 1997-07-09
*/
if ((FPeq(l1->p[0].x, l2->p[0].x) && FPeq(l1->p[0].y, l2->p[0].y)) ||
(FPeq(l1->p[0].x, l2->p[1].x) && FPeq(l1->p[0].y, l2->p[1].y)))
@@ -2014,7 +2021,7 @@ lseg_interpt(PG_FUNCTION_ARGS)
result->y = l1->p[0].y;
}
else if ((FPeq(l1->p[1].x, l2->p[0].x) && FPeq(l1->p[1].y, l2->p[0].y)) ||
- (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
+ (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
{
result->x = l1->p[1].x;
result->y = l1->p[1].y;
@@ -2048,7 +2055,7 @@ static double
dist_pl_internal(Point *pt, LINE *line)
{
return (line->A * pt->x + line->B * pt->y + line->C) /
- HYPOT(line->A, line->B);
+ HYPOT(line->A, line->B);
}
Datum
@@ -2080,9 +2087,7 @@ dist_ps_internal(Point *pt, LSEG *lseg)
m = (double) DBL_MAX;
}
else
- {
m = ((lseg->p[0].y - lseg->p[1].y) / (lseg->p[1].x - lseg->p[0].x));
- }
ln = line_construct_pm(pt, m);
#ifdef GEODEBUG
@@ -2188,9 +2193,7 @@ dist_sl(PG_FUNCTION_ARGS)
d2;
if (has_interpt_sl(lseg, line))
- {
result = 0.0;
- }
else
{
result = dist_pl_internal(&lseg->p[0], line);
@@ -2230,6 +2233,7 @@ dist_lb(PG_FUNCTION_ARGS)
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
+
#endif
/* think about this one for a while */
@@ -2243,7 +2247,7 @@ Datum
dist_cpoly(PG_FUNCTION_ARGS)
{
CIRCLE *circle = PG_GETARG_CIRCLE_P(0);
- POLYGON *poly = PG_GETARG_POLYGON_P(1);
+ POLYGON *poly = PG_GETARG_POLYGON_P(1);
float8 result;
float8 d;
int i;
@@ -2430,7 +2434,7 @@ close_ps(PG_FUNCTION_ARGS)
xh = lseg->p[0].x < lseg->p[1].x;
yh = lseg->p[0].y < lseg->p[1].y;
- if (FPeq(lseg->p[0].x, lseg->p[1].x)) /* vertical? */
+ if (FPeq(lseg->p[0].x, lseg->p[1].x)) /* vertical? */
{
#ifdef GEODEBUG
printf("close_ps- segment is vertical\n");
@@ -2450,7 +2454,7 @@ close_ps(PG_FUNCTION_ARGS)
result->y = pt->y;
PG_RETURN_POINT_P(result);
}
- else if (FPeq(lseg->p[0].y, lseg->p[1].y)) /* horizontal? */
+ else if (FPeq(lseg->p[0].y, lseg->p[1].y)) /* horizontal? */
{
#ifdef GEODEBUG
printf("close_ps- segment is horizontal\n");
@@ -2484,7 +2488,7 @@ close_ps(PG_FUNCTION_ARGS)
* lower end pt */
#ifdef GEODEBUG
printf("close_ps below: tmp A %f B %f C %f m %f\n",
- tmp->A,tmp->B,tmp->C, tmp->m);
+ tmp->A, tmp->B, tmp->C, tmp->m);
#endif
PG_RETURN_POINT_P(result);
}
@@ -2496,7 +2500,7 @@ close_ps(PG_FUNCTION_ARGS)
* higher end pt */
#ifdef GEODEBUG
printf("close_ps above: tmp A %f B %f C %f m %f\n",
- tmp->A,tmp->B,tmp->C, tmp->m);
+ tmp->A, tmp->B, tmp->C, tmp->m);
#endif
PG_RETURN_POINT_P(result);
}
@@ -2508,7 +2512,7 @@ close_ps(PG_FUNCTION_ARGS)
tmp = line_construct_pm(pt, invm);
#ifdef GEODEBUG
printf("close_ps- tmp A %f B %f C %f m %f\n",
- tmp->A,tmp->B,tmp->C, tmp->m);
+ tmp->A, tmp->B, tmp->C, tmp->m);
#endif
result = interpt_sl(lseg, tmp);
Assert(result != NULL);
@@ -2545,12 +2549,12 @@ close_lseg(PG_FUNCTION_ARGS)
if ((d = dist_ps_internal(&l2->p[0], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[0]),
+ PointPGetDatum(&l2->p[0]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
pfree(result);
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
@@ -2560,12 +2564,12 @@ close_lseg(PG_FUNCTION_ARGS)
pfree(result);
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[1]),
+ PointPGetDatum(&l2->p[1]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
pfree(result);
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
@@ -2752,6 +2756,7 @@ close_lb(PG_FUNCTION_ARGS)
#ifdef NOT_USED
LINE *line = PG_GETARG_LINE_P(0);
BOX *box = PG_GETARG_BOX_P(1);
+
#endif
/* think about this one for a while */
@@ -2858,11 +2863,11 @@ on_sl(PG_FUNCTION_ARGS)
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[0]),
- LinePGetDatum(line))) &&
+ PointPGetDatum(&lseg->p[0]),
+ LinePGetDatum(line))) &&
DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[1]),
- LinePGetDatum(line))));
+ PointPGetDatum(&lseg->p[1]),
+ LinePGetDatum(line))));
}
Datum
@@ -2872,11 +2877,11 @@ on_sb(PG_FUNCTION_ARGS)
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[0]),
- BoxPGetDatum(box))) &&
+ PointPGetDatum(&lseg->p[0]),
+ BoxPGetDatum(box))) &&
DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[1]),
- BoxPGetDatum(box))));
+ PointPGetDatum(&lseg->p[1]),
+ BoxPGetDatum(box))));
}
/*---------------------------------------------------------------------
@@ -3058,7 +3063,7 @@ poly_in(PG_FUNCTION_ARGS)
if ((npts = pair_count(str, ',')) <= 0)
elog(ERROR, "Bad polygon external representation '%s'", str);
- size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * npts;
+ size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc(size);
MemSet((char *) poly, 0, size); /* zero any holes */
@@ -3081,7 +3086,7 @@ poly_in(PG_FUNCTION_ARGS)
Datum
poly_out(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
PG_RETURN_CSTRING(path_encode(TRUE, poly->npts, poly->p));
}
@@ -3095,13 +3100,16 @@ poly_out(PG_FUNCTION_ARGS)
Datum
poly_left(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.x < polyb->boundbox.low.x;
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3116,13 +3124,16 @@ poly_left(PG_FUNCTION_ARGS)
Datum
poly_overleft(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.x <= polyb->boundbox.high.x;
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3137,13 +3148,16 @@ poly_overleft(PG_FUNCTION_ARGS)
Datum
poly_right(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.low.x > polyb->boundbox.high.x;
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3158,13 +3172,16 @@ poly_right(PG_FUNCTION_ARGS)
Datum
poly_overright(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = polya->boundbox.high.x > polyb->boundbox.low.x;
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3181,8 +3198,8 @@ poly_overright(PG_FUNCTION_ARGS)
Datum
poly_same(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
if (polya->npts != polyb->npts)
@@ -3190,7 +3207,10 @@ poly_same(PG_FUNCTION_ARGS)
else
result = plist_same(polya->npts, polya->p, polyb->p);
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3206,13 +3226,16 @@ poly_same(PG_FUNCTION_ARGS)
Datum
poly_overlap(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
result = box_ov(&polya->boundbox, &polyb->boundbox);
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3226,8 +3249,8 @@ poly_overlap(PG_FUNCTION_ARGS)
Datum
poly_contain(PG_FUNCTION_ARGS)
{
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
bool result;
int i;
@@ -3275,7 +3298,10 @@ poly_contain(PG_FUNCTION_ARGS)
result = false;
}
- /* Avoid leaking memory for toasted inputs ... needed for rtree indexes */
+ /*
+ * Avoid leaking memory for toasted inputs ... needed for rtree
+ * indexes
+ */
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3310,7 +3336,7 @@ poly_contained(PG_FUNCTION_ARGS)
Datum
poly_contain_pt(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
Point *p = PG_GETARG_POINT_P(1);
PG_RETURN_BOOL(point_inside(p, poly->npts, poly->p) != 0);
@@ -3320,7 +3346,7 @@ Datum
pt_contained_poly(PG_FUNCTION_ARGS)
{
Point *p = PG_GETARG_POINT_P(0);
- POLYGON *poly = PG_GETARG_POLYGON_P(1);
+ POLYGON *poly = PG_GETARG_POLYGON_P(1);
PG_RETURN_BOOL(point_inside(p, poly->npts, poly->p) != 0);
}
@@ -3330,8 +3356,9 @@ Datum
poly_distance(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
- POLYGON *polya = PG_GETARG_POLYGON_P(0);
- POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+ POLYGON *polya = PG_GETARG_POLYGON_P(0);
+ POLYGON *polyb = PG_GETARG_POLYGON_P(1);
+
#endif
elog(ERROR, "poly_distance not implemented");
@@ -3531,7 +3558,7 @@ path_add(PG_FUNCTION_ARGS)
if (p1->closed || p2->closed)
PG_RETURN_NULL();
- size = offsetof(PATH, p[0]) + sizeof(p1->p[0]) * (p1->npts + p2->npts);
+ size = offsetof(PATH, p[0]) +sizeof(p1->p[0]) * (p1->npts + p2->npts);
result = (PATH *) palloc(size);
result->size = size;
@@ -3601,7 +3628,7 @@ path_mul_pt(PG_FUNCTION_ARGS)
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
@@ -3622,7 +3649,7 @@ path_div_pt(PG_FUNCTION_ARGS)
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
@@ -3638,6 +3665,7 @@ path_center(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
PATH *path = PG_GETARG_PATH_P(0);
+
#endif
elog(ERROR, "path_center not implemented");
@@ -3657,7 +3685,7 @@ path_poly(PG_FUNCTION_ARGS)
if (!path->closed)
elog(ERROR, "Open path cannot be converted to polygon");
- size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * path->npts;
+ size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * path->npts;
poly = (POLYGON *) palloc(size);
poly->size = size;
@@ -3684,7 +3712,7 @@ path_poly(PG_FUNCTION_ARGS)
Datum
poly_npoints(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
PG_RETURN_INT32(poly->npts);
}
@@ -3693,7 +3721,7 @@ poly_npoints(PG_FUNCTION_ARGS)
Datum
poly_center(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
Datum result;
CIRCLE *circle;
@@ -3710,7 +3738,7 @@ poly_center(PG_FUNCTION_ARGS)
Datum
poly_box(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
BOX *box;
if (poly->npts < 1)
@@ -3733,7 +3761,7 @@ box_poly(PG_FUNCTION_ARGS)
int size;
/* map four corners of the box to a polygon */
- size = offsetof(POLYGON, p[0]) + sizeof(poly->p[0]) * 4;
+ size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * 4;
poly = (POLYGON *) palloc(size);
poly->size = size;
@@ -3758,12 +3786,12 @@ box_poly(PG_FUNCTION_ARGS)
Datum
poly_path(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
PATH *path;
int size;
int i;
- size = offsetof(PATH, p[0]) + sizeof(path->p[0]) * poly->npts;
+ size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * poly->npts;
path = (PATH *) palloc(size);
path->size = size;
@@ -4133,7 +4161,7 @@ circle_mul_pt(PG_FUNCTION_ARGS)
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
@@ -4154,7 +4182,7 @@ circle_div_pt(PG_FUNCTION_ARGS)
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
@@ -4381,7 +4409,7 @@ circle_poly(PG_FUNCTION_ARGS)
Datum
poly_circle(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
CIRCLE *circle;
int i;
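(The box_ov() hunk near the top of this geo_ops.c diff is reindented only, but the logic it wraps is a plain interval-overlap check done per axis with fuzzy comparisons. A self-contained sketch of that idea, with the tolerance and FP macros written out as assumptions — the real definitions live in geo_decls.h:)

#include <stdbool.h>

#define EPSILON 1.0e-06                     /* assumed fuzz tolerance */
#define FPge(a, b) ((a) >= (b) - EPSILON)   /* fuzzy >= */
#define FPle(a, b) ((a) <= (b) + EPSILON)   /* fuzzy <= */

/* One axis of the box_ov() test: do [lo1,hi1] and [lo2,hi2] overlap? */
static bool
interval_overlap(double lo1, double hi1, double lo2, double hi2)
{
    return (FPge(hi1, hi2) && FPle(lo1, hi2)) ||
           (FPge(hi2, hi1) && FPle(lo2, hi1));
}

(box_ov() is then just interval_overlap() applied to the x coordinates AND-ed with the same test on the y coordinates, which is why the reindented body is two symmetric clauses joined by &&.)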
diff --git a/src/backend/utils/adt/inet_net_ntop.c b/src/backend/utils/adt/inet_net_ntop.c
index ecc83cab814..73329300956 100644
--- a/src/backend/utils/adt/inet_net_ntop.c
+++ b/src/backend/utils/adt/inet_net_ntop.c
@@ -16,7 +16,7 @@
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static const char rcsid[] = "$Id: inet_net_ntop.c,v 1.9 2000/11/10 20:13:25 tgl Exp $";
+static const char rcsid[] = "$Id: inet_net_ntop.c,v 1.10 2001/03/22 03:59:51 momjian Exp $";
#endif
@@ -56,7 +56,7 @@ inet_cidr_ntop(int af, const void *src, int bits, char *dst, size_t size)
{
switch (af)
{
- case AF_INET:
+ case AF_INET:
return (inet_cidr_ntop_ipv4(src, bits, dst, size));
default:
errno = EAFNOSUPPORT;
@@ -157,7 +157,7 @@ inet_net_ntop(int af, const void *src, int bits, char *dst, size_t size)
{
switch (af)
{
- case AF_INET:
+ case AF_INET:
return (inet_net_ntop_ipv4(src, bits, dst, size));
default:
errno = EAFNOSUPPORT;
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 189cd384162..00c99805c9e 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/int.c,v 1.45 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/int.c,v 1.46 2001/03/22 03:59:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ Datum
int2out(PG_FUNCTION_ARGS)
{
int16 arg1 = PG_GETARG_INT16(0);
- char *result = (char *) palloc(7); /* sign, 5 digits, '\0' */
+ char *result = (char *) palloc(7); /* sign, 5 digits, '\0' */
pg_itoa(arg1, result);
PG_RETURN_CSTRING(result);
@@ -180,7 +180,8 @@ Datum
int44out(PG_FUNCTION_ARGS)
{
int32 *an_array = (int32 *) PG_GETARG_POINTER(0);
- char *result = (char *) palloc(16 * 4); /* Allow 14 digits + sign */
+ char *result = (char *) palloc(16 * 4); /* Allow 14 digits +
+ * sign */
int i;
char *walk;
@@ -219,7 +220,7 @@ Datum
int4out(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
- char *result = (char *) palloc(12); /* sign, 10 digits, '\0' */
+ char *result = (char *) palloc(12); /* sign, 10 digits, '\0' */
pg_ltoa(arg1, result);
PG_RETURN_CSTRING(result);
@@ -257,7 +258,7 @@ Datum
int2_text(PG_FUNCTION_ARGS)
{
int16 arg1 = PG_GETARG_INT16(0);
- text *result = (text *) palloc(7+VARHDRSZ); /* sign,5 digits, '\0' */
+ text *result = (text *) palloc(7 + VARHDRSZ); /* sign,5 digits, '\0' */
pg_itoa(arg1, VARDATA(result));
VARATT_SIZEP(result) = strlen(VARDATA(result)) + VARHDRSZ;
@@ -288,7 +289,7 @@ Datum
int4_text(PG_FUNCTION_ARGS)
{
int32 arg1 = PG_GETARG_INT32(0);
- text *result = (text *) palloc(12+VARHDRSZ); /* sign,10 digits,'\0' */
+ text *result = (text *) palloc(12 + VARHDRSZ); /* sign,10 digits,'\0' */
pg_ltoa(arg1, VARDATA(result));
VARATT_SIZEP(result) = strlen(VARDATA(result)) + VARHDRSZ;
@@ -960,4 +961,3 @@ int2shr(PG_FUNCTION_ARGS)
PG_RETURN_INT16(arg1 >> arg2);
}
-
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index a7df878c65b..3f286069b7d 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/int8.c,v 1.28 2001/01/26 22:50:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/int8.c,v 1.29 2001/03/22 03:59:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,15 +67,15 @@ int8in(PG_FUNCTION_ARGS)
* Do our own scan, rather than relying on sscanf which might be
* broken for long long.
*/
- while (*ptr && isspace((unsigned char) *ptr)) /* skip leading spaces */
+ while (*ptr && isspace((unsigned char) *ptr)) /* skip leading spaces */
ptr++;
if (*ptr == '-') /* handle sign */
sign = -1, ptr++;
else if (*ptr == '+')
ptr++;
- if (!isdigit((unsigned char) *ptr)) /* require at least one digit */
+ if (!isdigit((unsigned char) *ptr)) /* require at least one digit */
elog(ERROR, "Bad int8 external representation \"%s\"", str);
- while (*ptr && isdigit((unsigned char) *ptr)) /* process digits */
+ while (*ptr && isdigit((unsigned char) *ptr)) /* process digits */
{
int64 newtmp = tmp * 10 + (*ptr++ - '0');
@@ -409,7 +409,7 @@ int8um(PG_FUNCTION_ARGS)
{
int64 val = PG_GETARG_INT64(0);
- PG_RETURN_INT64(- val);
+ PG_RETURN_INT64(-val);
}
Datum
@@ -702,10 +702,11 @@ dtoi8(PG_FUNCTION_ARGS)
/* Round val to nearest integer (but it's still in float form) */
val = rint(val);
+
/*
- * Does it fit in an int64? Avoid assuming that we have handy constants
- * defined for the range boundaries, instead test for overflow by
- * reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy
+ * constants defined for the range boundaries, instead test for
+ * overflow by reverse-conversion.
*/
result = (int64) val;
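(The rewrapped dtoi8() comment above describes the overflow check: round, cast to int64, convert back to double, and treat any mismatch as out of range. A stripped-down sketch of that pattern, with the error path reduced to a boolean return instead of elog():)

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

/* Convert a double to int64, detecting overflow by reverse conversion. */
static bool
double_to_int64(double val, int64_t *result)
{
    val = rint(val);                /* round to nearest integer first */
    *result = (int64_t) val;
    /* if the round trip changes the value, val was outside int64 range */
    return (double) *result == val;
}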
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 7bac7793fba..f27bc067d6a 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/like.c,v 1.44 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/like.c,v 1.45 2001/03/22 03:59:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,10 +30,10 @@
#define LIKE_ABORT (-1)
-static int MatchText(unsigned char * t, int tlen,
- unsigned char * p, int plen);
-static int MatchTextIC(unsigned char * t, int tlen,
- unsigned char * p, int plen);
+static int MatchText(unsigned char *t, int tlen,
+ unsigned char *p, int plen);
+static int MatchTextIC(unsigned char *t, int tlen,
+ unsigned char *p, int plen);
#ifdef MULTIBYTE
@@ -42,19 +42,20 @@ static int MatchTextIC(unsigned char * t, int tlen,
* as wide characters. If they match, returns 1 otherwise returns 0.
*--------------------
*/
-static int wchareq(unsigned char *p1, unsigned char *p2)
+static int
+wchareq(unsigned char *p1, unsigned char *p2)
{
- int l;
+ int l;
l = pg_mblen(p1);
- if (pg_mblen(p2) != l) {
- return(0);
- }
- while (l--) {
+ if (pg_mblen(p2) != l)
+ return (0);
+ while (l--)
+ {
if (*p1++ != *p2++)
- return(0);
+ return (0);
}
- return(1);
+ return (1);
}
/*--------------------
@@ -65,32 +66,38 @@ static int wchareq(unsigned char *p1, unsigned char *p2)
*/
#define CHARMAX 0x80
-static int iwchareq(unsigned char *p1, unsigned char *p2)
+static int
+iwchareq(unsigned char *p1, unsigned char *p2)
{
- int c1[2], c2[2];
- int l;
+ int c1[2],
+ c2[2];
+ int l;
- /* short cut. if *p1 and *p2 is lower than CHARMAX, then
- we could assume they are ASCII */
+ /*
+ * short cut. if *p1 and *p2 is lower than CHARMAX, then we could
+ * assume they are ASCII
+ */
if (*p1 < CHARMAX && *p2 < CHARMAX)
- return(tolower(*p1) == tolower(*p2));
+ return (tolower(*p1) == tolower(*p2));
- /* if one of them is an ASCII while the other is not, then
- they must be different characters
- */
+ /*
+ * if one of them is an ASCII while the other is not, then they must
+ * be different characters
+ */
else if (*p1 < CHARMAX || *p2 < CHARMAX)
- return(0);
+ return (0);
- /* ok, p1 and p2 are both > CHARMAX, then they must be multi-byte
- characters
- */
+ /*
+ * ok, p1 and p2 are both > CHARMAX, then they must be multi-byte
+ * characters
+ */
l = pg_mblen(p1);
- (void)pg_mb2wchar_with_len(p1, (pg_wchar *)c1, l);
+ (void) pg_mb2wchar_with_len(p1, (pg_wchar *) c1, l);
c1[0] = tolower(c1[0]);
l = pg_mblen(p2);
- (void)pg_mb2wchar_with_len(p2, (pg_wchar *)c2, l);
+ (void) pg_mb2wchar_with_len(p2, (pg_wchar *) c2, l);
c2[0] = tolower(c2[0]);
- return(c1[0] == c2[0]);
+ return (c1[0] == c2[0]);
}
#endif
@@ -124,13 +131,15 @@ namelike(PG_FUNCTION_ARGS)
Name str = PG_GETARG_NAME(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = NameStr(*str);
slen = strlen(s);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchText(s, slen, p, plen) == LIKE_TRUE);
@@ -143,13 +152,15 @@ namenlike(PG_FUNCTION_ARGS)
Name str = PG_GETARG_NAME(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = NameStr(*str);
slen = strlen(s);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchText(s, slen, p, plen) != LIKE_TRUE);
@@ -162,13 +173,15 @@ textlike(PG_FUNCTION_ARGS)
text *str = PG_GETARG_TEXT_P(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = VARDATA(str);
- slen = (VARSIZE(str)-VARHDRSZ);
+ slen = (VARSIZE(str) - VARHDRSZ);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchText(s, slen, p, plen) == LIKE_TRUE);
@@ -181,13 +194,15 @@ textnlike(PG_FUNCTION_ARGS)
text *str = PG_GETARG_TEXT_P(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = VARDATA(str);
- slen = (VARSIZE(str)-VARHDRSZ);
+ slen = (VARSIZE(str) - VARHDRSZ);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchText(s, slen, p, plen) != LIKE_TRUE);
@@ -204,13 +219,15 @@ nameiclike(PG_FUNCTION_ARGS)
Name str = PG_GETARG_NAME(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = NameStr(*str);
slen = strlen(s);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchTextIC(s, slen, p, plen) == LIKE_TRUE);
@@ -223,13 +240,15 @@ nameicnlike(PG_FUNCTION_ARGS)
Name str = PG_GETARG_NAME(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = NameStr(*str);
slen = strlen(s);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchTextIC(s, slen, p, plen) != LIKE_TRUE);
@@ -242,13 +261,15 @@ texticlike(PG_FUNCTION_ARGS)
text *str = PG_GETARG_TEXT_P(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = VARDATA(str);
- slen = (VARSIZE(str)-VARHDRSZ);
+ slen = (VARSIZE(str) - VARHDRSZ);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchTextIC(s, slen, p, plen) == LIKE_TRUE);
@@ -261,13 +282,15 @@ texticnlike(PG_FUNCTION_ARGS)
text *str = PG_GETARG_TEXT_P(0);
text *pat = PG_GETARG_TEXT_P(1);
bool result;
- unsigned char *s, *p;
- int slen, plen;
+ unsigned char *s,
+ *p;
+ int slen,
+ plen;
s = VARDATA(str);
- slen = (VARSIZE(str)-VARHDRSZ);
+ slen = (VARSIZE(str) - VARHDRSZ);
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
result = (MatchTextIC(s, slen, p, plen) != LIKE_TRUE);
@@ -284,14 +307,17 @@ like_escape(PG_FUNCTION_ARGS)
text *pat = PG_GETARG_TEXT_P(0);
text *esc = PG_GETARG_TEXT_P(1);
text *result;
- unsigned char *p, *e, *r;
- int plen, elen;
+ unsigned char *p,
+ *e,
+ *r;
+ int plen,
+ elen;
bool afterescape;
p = VARDATA(pat);
- plen = (VARSIZE(pat)-VARHDRSZ);
+ plen = (VARSIZE(pat) - VARHDRSZ);
e = VARDATA(esc);
- elen = (VARSIZE(esc)-VARHDRSZ);
+ elen = (VARSIZE(esc) - VARHDRSZ);
/*
* Worst-case pattern growth is 2x --- unlikely, but it's hardly worth
@@ -302,6 +328,7 @@ like_escape(PG_FUNCTION_ARGS)
if (elen == 0)
{
+
/*
* No escape character is wanted. Double any backslashes in the
* pattern to make them act like ordinary characters.
@@ -315,6 +342,7 @@ like_escape(PG_FUNCTION_ARGS)
}
else
{
+
/*
* The specified escape must be only a single character.
*/
@@ -322,6 +350,7 @@ like_escape(PG_FUNCTION_ARGS)
if (elen != 0)
elog(ERROR, "ESCAPE string must be empty or one character");
e = VARDATA(esc);
+
/*
* If specified escape is '\', just copy the pattern as-is.
*/
@@ -330,15 +359,16 @@ like_escape(PG_FUNCTION_ARGS)
memcpy(result, pat, VARSIZE(pat));
PG_RETURN_TEXT_P(result);
}
+
/*
- * Otherwise, convert occurrences of the specified escape character
- * to '\', and double occurrences of '\' --- unless they immediately
- * follow an escape character!
+ * Otherwise, convert occurrences of the specified escape
+ * character to '\', and double occurrences of '\' --- unless they
+ * immediately follow an escape character!
*/
afterescape = false;
while (plen > 0)
{
- if (CHAREQ(p,e) && !afterescape)
+ if (CHAREQ(p, e) && !afterescape)
{
*r++ = '\\';
NextChar(p, plen);
@@ -347,7 +377,7 @@ like_escape(PG_FUNCTION_ARGS)
else if (*p == '\\')
{
*r++ = '\\';
- if (! afterescape)
+ if (!afterescape)
*r++ = '\\';
NextChar(p, plen);
afterescape = false;
@@ -413,7 +443,7 @@ like_escape(PG_FUNCTION_ARGS)
*/
static int
-MatchText(unsigned char * t, int tlen, unsigned char * p, int plen)
+MatchText(unsigned char *t, int tlen, unsigned char *p, int plen)
{
/* Fast path for match-everything pattern */
if ((plen == 1) && (*p == '%'))
@@ -425,7 +455,7 @@ MatchText(unsigned char * t, int tlen, unsigned char * p, int plen)
{
/* Next pattern char must match literally, whatever it is */
NextChar(p, plen);
- if ((plen <= 0) || !CHAREQ(t,p))
+ if ((plen <= 0) || !CHAREQ(t, p))
return LIKE_FALSE;
}
else if (*p == '%')
@@ -439,22 +469,22 @@ MatchText(unsigned char * t, int tlen, unsigned char * p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can
- * match the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match
+ * the rest of the pattern.
*/
while (tlen > 0)
{
+
/*
- * Optimization to prevent most recursion: don't
- * recurse unless first pattern char might match this
- * text char.
+ * Optimization to prevent most recursion: don't recurse
+ * unless first pattern char might match this text char.
*/
- if (CHAREQ(t,p) || (*p == '\\') || (*p == '_'))
+ if (CHAREQ(t, p) || (*p == '\\') || (*p == '_'))
{
- int matched = MatchText(t, tlen, p, plen);
+ int matched = MatchText(t, tlen, p, plen);
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
@@ -466,9 +496,11 @@ MatchText(unsigned char * t, int tlen, unsigned char * p, int plen)
*/
return LIKE_ABORT;
}
- else if ((*p != '_') && !CHAREQ(t,p))
+ else if ((*p != '_') && !CHAREQ(t, p))
{
- /* Not the single-character wildcard and no explicit match?
+
+ /*
+ * Not the single-character wildcard and no explicit match?
* Then time to quit...
*/
return LIKE_FALSE;
@@ -482,7 +514,8 @@ MatchText(unsigned char * t, int tlen, unsigned char * p, int plen)
return LIKE_FALSE; /* end of pattern, but not of text */
/* End of input string. Do we have matching pattern remaining? */
- while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of pattern */
+ while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of
+ * pattern */
NextChar(p, plen);
if (plen <= 0)
return LIKE_TRUE;
@@ -492,13 +525,13 @@ MatchText(unsigned char * t, int tlen, unsigned char * p, int plen)
* start matching this pattern.
*/
return LIKE_ABORT;
-} /* MatchText() */
+} /* MatchText() */
/*
* Same as above, but ignore case
*/
static int
-MatchTextIC(unsigned char * t, int tlen, unsigned char * p, int plen)
+MatchTextIC(unsigned char *t, int tlen, unsigned char *p, int plen)
{
/* Fast path for match-everything pattern */
if ((plen == 1) && (*p == '%'))
@@ -510,7 +543,7 @@ MatchTextIC(unsigned char * t, int tlen, unsigned char * p, int plen)
{
/* Next pattern char must match literally, whatever it is */
NextChar(p, plen);
- if ((plen <= 0) || !ICHAREQ(t,p))
+ if ((plen <= 0) || !ICHAREQ(t, p))
return LIKE_FALSE;
}
else if (*p == '%')
@@ -524,22 +557,22 @@ MatchTextIC(unsigned char * t, int tlen, unsigned char * p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can
- * match the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match
+ * the rest of the pattern.
*/
while (tlen > 0)
{
+
/*
- * Optimization to prevent most recursion: don't
- * recurse unless first pattern char might match this
- * text char.
+ * Optimization to prevent most recursion: don't recurse
+ * unless first pattern char might match this text char.
*/
- if (ICHAREQ(t,p) || (*p == '\\') || (*p == '_'))
+ if (ICHAREQ(t, p) || (*p == '\\') || (*p == '_'))
{
- int matched = MatchTextIC(t, tlen, p, plen);
+ int matched = MatchTextIC(t, tlen, p, plen);
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
@@ -551,9 +584,11 @@ MatchTextIC(unsigned char * t, int tlen, unsigned char * p, int plen)
*/
return LIKE_ABORT;
}
- else if ((*p != '_') && !ICHAREQ(t,p))
+ else if ((*p != '_') && !ICHAREQ(t, p))
{
- /* Not the single-character wildcard and no explicit match?
+
+ /*
+ * Not the single-character wildcard and no explicit match?
* Then time to quit...
*/
return LIKE_FALSE;
@@ -567,7 +602,8 @@ MatchTextIC(unsigned char * t, int tlen, unsigned char * p, int plen)
return LIKE_FALSE; /* end of pattern, but not of text */
/* End of input string. Do we have matching pattern remaining? */
- while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of pattern */
+ while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of
+ * pattern */
NextChar(p, plen);
if (plen <= 0)
return LIKE_TRUE;
@@ -577,4 +613,4 @@ MatchTextIC(unsigned char * t, int tlen, unsigned char * p, int plen)
* start matching this pattern.
*/
return LIKE_ABORT;
-} /* MatchTextIC() */
+} /* MatchTextIC() */
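(The MatchText()/MatchTextIC() hunks above are whitespace-only, but the comments they rewrap describe the matcher's one real trick: on '%', consume the wildcard and then try the rest of the pattern at each remaining text position, recursing only when the next pattern character could plausibly match. A stripped-down single-byte sketch of that recursion — ASCII only, no escapes, no TRUE/FALSE/ABORT distinction, not the PostgreSQL implementation:)

#include <stdbool.h>

/* Minimal LIKE-style matcher: '%' = any run of chars, '_' = any one char. */
static bool
like_match(const char *t, const char *p)
{
    for (; *p; t++, p++)
    {
        if (*p == '%')
        {
            while (*p == '%')           /* collapse consecutive %'s */
                p++;
            if (*p == '\0')
                return true;            /* trailing % matches the rest */
            for (; *t; t++)
                if ((*t == *p || *p == '_') && like_match(t, p))
                    return true;        /* recurse only on a plausible start */
            return false;
        }
        if (*t == '\0')
            return false;               /* text exhausted, pattern is not */
        if (*p != '_' && *p != *t)
            return false;
    }
    return *t == '\0';                  /* both must end together */
}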
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 17754588e97..0724abec5b2 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for MAC addresses.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/mac.c,v 1.19 2000/12/08 23:57:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/mac.c,v 1.20 2001/03/22 03:59:51 momjian Exp $
*/
#include "postgres.h"
@@ -82,7 +82,7 @@ macaddr_in(PG_FUNCTION_ARGS)
Datum
macaddr_out(PG_FUNCTION_ARGS)
{
- macaddr *addr = PG_GETARG_MACADDR_P(0);
+ macaddr *addr = PG_GETARG_MACADDR_P(0);
char *result;
result = (char *) palloc(32);
@@ -139,16 +139,16 @@ text_macaddr(PG_FUNCTION_ARGS)
char str[18];
int len;
- len = (VARSIZE(addr)-VARHDRSZ);
+ len = (VARSIZE(addr) - VARHDRSZ);
if (len >= 18)
elog(ERROR, "Text is too long to convert to MAC address");
memmove(str, VARDATA(addr), len);
- *(str+len) = '\0';
+ *(str + len) = '\0';
- result = DirectFunctionCall1(macaddr_in, CStringGetDatum(str));
+ result = DirectFunctionCall1(macaddr_in, CStringGetDatum(str));
- return(result);
+ return (result);
}
/*
@@ -173,8 +173,8 @@ macaddr_cmp_internal(macaddr *a1, macaddr *a2)
Datum
macaddr_cmp(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_INT32(macaddr_cmp_internal(a1, a2));
}
@@ -186,8 +186,8 @@ macaddr_cmp(PG_FUNCTION_ARGS)
Datum
macaddr_lt(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_BOOL(macaddr_cmp_internal(a1, a2) < 0);
}
@@ -195,8 +195,8 @@ macaddr_lt(PG_FUNCTION_ARGS)
Datum
macaddr_le(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_BOOL(macaddr_cmp_internal(a1, a2) <= 0);
}
@@ -204,8 +204,8 @@ macaddr_le(PG_FUNCTION_ARGS)
Datum
macaddr_eq(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_BOOL(macaddr_cmp_internal(a1, a2) == 0);
}
@@ -213,8 +213,8 @@ macaddr_eq(PG_FUNCTION_ARGS)
Datum
macaddr_ge(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_BOOL(macaddr_cmp_internal(a1, a2) >= 0);
}
@@ -222,8 +222,8 @@ macaddr_ge(PG_FUNCTION_ARGS)
Datum
macaddr_gt(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_BOOL(macaddr_cmp_internal(a1, a2) > 0);
}
@@ -231,8 +231,8 @@ macaddr_gt(PG_FUNCTION_ARGS)
Datum
macaddr_ne(PG_FUNCTION_ARGS)
{
- macaddr *a1 = PG_GETARG_MACADDR_P(0);
- macaddr *a2 = PG_GETARG_MACADDR_P(1);
+ macaddr *a1 = PG_GETARG_MACADDR_P(0);
+ macaddr *a2 = PG_GETARG_MACADDR_P(1);
PG_RETURN_BOOL(macaddr_cmp_internal(a1, a2) != 0);
}
@@ -243,7 +243,7 @@ macaddr_ne(PG_FUNCTION_ARGS)
Datum
hashmacaddr(PG_FUNCTION_ARGS)
{
- macaddr *key = PG_GETARG_MACADDR_P(0);
+ macaddr *key = PG_GETARG_MACADDR_P(0);
return hash_any((char *) key, sizeof(macaddr));
}
@@ -255,8 +255,8 @@ hashmacaddr(PG_FUNCTION_ARGS)
Datum
macaddr_trunc(PG_FUNCTION_ARGS)
{
- macaddr *result;
- macaddr *addr = PG_GETARG_MACADDR_P(0);
+ macaddr *result;
+ macaddr *addr = PG_GETARG_MACADDR_P(0);
result = (macaddr *) palloc(sizeof(macaddr));
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index d3a82eaf8a1..af08bda8789 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/misc.c,v 1.21 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/misc.c,v 1.22 2001/03/22 03:59:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,7 +53,7 @@ nonnullvalue(PG_FUNCTION_ARGS)
* select * from TEMP where oidrand(TEMP.oid, 10)
* will return about 1/10 of the tuples in TEMP
*
- * NOTE: the OID input is not used at all. It is there just because of
+ * NOTE: the OID input is not used at all. It is there just because of
* an old optimizer bug: a qual expression containing no variables was
* mistakenly assumed to be a constant. Pretending to access the row's OID
* prevented the optimizer from treating the oidrand() result as constant.
@@ -66,6 +66,7 @@ oidrand(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED
Oid o = PG_GETARG_OID(0);
+
#endif
int32 X = PG_GETARG_INT32(1);
bool result;
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 99dc95d4663..972ca7a67fa 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/nabstime.c,v 1.82 2001/02/21 22:03:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/nabstime.c,v 1.83 2001/03/22 03:59:52 momjian Exp $
*
* NOTES
*
@@ -162,7 +162,8 @@ GetCurrentAbsoluteTime(void)
CDayLight = tm->tm_isdst;
CTimeZone = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
strcpy(CTZName, tzname[tm->tm_isdst]);
-#else /* neither HAVE_TM_ZONE nor HAVE_INT_TIMEZONE */
+#else /* neither HAVE_TM_ZONE nor
+ * HAVE_INT_TIMEZONE */
CTimeZone = tb.timezone * 60;
CDayLight = (tb.dstflag != 0);
@@ -192,7 +193,8 @@ GetCurrentTime(struct tm * tm)
void
abstime2tm(AbsoluteTime _time, int *tzp, struct tm * tm, char *tzn)
{
- time_t time = (time_t) _time;
+ time_t time = (time_t) _time;
+
#if defined(HAVE_TM_ZONE) || defined(HAVE_INT_TIMEZONE)
struct tm *tx;
@@ -207,14 +209,14 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct tm * tm, char *tzn)
if (tzp != NULL)
{
tx = localtime((time_t *) &time);
-# ifdef NO_MKTIME_BEFORE_1970
+#ifdef NO_MKTIME_BEFORE_1970
if (tx->tm_year < 70 && tx->tm_isdst == 1)
{
time -= 3600;
tx = localtime((time_t *) &time);
tx->tm_isdst = 0;
}
-# endif
+#endif
}
else
{
@@ -229,7 +231,7 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct tm * tm, char *tzn)
tm->tm_sec = tx->tm_sec;
tm->tm_isdst = tx->tm_isdst;
-# if defined(HAVE_TM_ZONE)
+#if defined(HAVE_TM_ZONE)
tm->tm_gmtoff = tx->tm_gmtoff;
tm->tm_zone = tx->tm_zone;
@@ -243,11 +245,11 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct tm * tm, char *tzn)
* Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
* contains an error message, which doesn't fit in the buffer
*/
- StrNCpy(tzn, tm->tm_zone, MAXTZLEN+1);
+ StrNCpy(tzn, tm->tm_zone, MAXTZLEN + 1);
if (strlen(tm->tm_zone) > MAXTZLEN)
elog(NOTICE, "Invalid timezone \'%s\'", tm->tm_zone);
}
-# elif defined(HAVE_INT_TIMEZONE)
+#elif defined(HAVE_INT_TIMEZONE)
if (tzp != NULL)
*tzp = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
@@ -258,12 +260,12 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct tm * tm, char *tzn)
* Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
* contains an error message, which doesn't fit in the buffer
*/
- StrNCpy(tzn, tzname[tm->tm_isdst], MAXTZLEN+1);
+ StrNCpy(tzn, tzname[tm->tm_isdst], MAXTZLEN + 1);
if (strlen(tzname[tm->tm_isdst]) > MAXTZLEN)
elog(NOTICE, "Invalid timezone \'%s\'", tzname[tm->tm_isdst]);
}
-# endif
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#endif
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
if (tzp != NULL)
*tzp = tb.timezone * 60;
@@ -470,7 +472,7 @@ AbsoluteTimeIsAfter(AbsoluteTime time1, AbsoluteTime time2)
Datum
abstime_finite(PG_FUNCTION_ARGS)
{
- AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
PG_RETURN_BOOL((abstime != INVALID_ABSTIME) &&
(abstime != NOSTART_ABSTIME) &&
@@ -489,8 +491,8 @@ abstime_finite(PG_FUNCTION_ARGS)
Datum
abstimeeq(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
if (t1 == INVALID_ABSTIME || t2 == INVALID_ABSTIME)
PG_RETURN_BOOL(false);
@@ -505,8 +507,8 @@ abstimeeq(PG_FUNCTION_ARGS)
Datum
abstimene(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
if (t1 == INVALID_ABSTIME || t2 == INVALID_ABSTIME)
PG_RETURN_BOOL(false);
@@ -521,8 +523,8 @@ abstimene(PG_FUNCTION_ARGS)
Datum
abstimelt(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
if (t1 == INVALID_ABSTIME || t2 == INVALID_ABSTIME)
PG_RETURN_BOOL(false);
@@ -537,8 +539,8 @@ abstimelt(PG_FUNCTION_ARGS)
Datum
abstimegt(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
if (t1 == INVALID_ABSTIME || t2 == INVALID_ABSTIME)
PG_RETURN_BOOL(false);
@@ -553,8 +555,8 @@ abstimegt(PG_FUNCTION_ARGS)
Datum
abstimele(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
if (t1 == INVALID_ABSTIME || t2 == INVALID_ABSTIME)
PG_RETURN_BOOL(false);
@@ -569,8 +571,8 @@ abstimele(PG_FUNCTION_ARGS)
Datum
abstimege(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime t2 = PG_GETARG_ABSOLUTETIME(1);
if (t1 == INVALID_ABSTIME || t2 == INVALID_ABSTIME)
PG_RETURN_BOOL(false);
@@ -596,17 +598,11 @@ timestamp_abstime(PG_FUNCTION_ARGS)
*tm = &tt;
if (TIMESTAMP_IS_INVALID(timestamp))
- {
result = INVALID_ABSTIME;
- }
else if (TIMESTAMP_IS_NOBEGIN(timestamp))
- {
result = NOSTART_ABSTIME;
- }
else if (TIMESTAMP_IS_NOEND(timestamp))
- {
result = NOEND_ABSTIME;
- }
else
{
if (TIMESTAMP_IS_RELATIVE(timestamp))
@@ -615,13 +611,9 @@ timestamp_abstime(PG_FUNCTION_ARGS)
result = tm2abstime(tm, 0);
}
else if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL) == 0)
- {
result = tm2abstime(tm, 0);
- }
else
- {
result = INVALID_ABSTIME;
- }
}
PG_RETURN_ABSOLUTETIME(result);
@@ -633,7 +625,7 @@ timestamp_abstime(PG_FUNCTION_ARGS)
Datum
abstime_timestamp(PG_FUNCTION_ARGS)
{
- AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
Timestamp result;
switch (abstime)
@@ -677,7 +669,7 @@ abstime_timestamp(PG_FUNCTION_ARGS)
Datum
reltimein(PG_FUNCTION_ARGS)
{
- char *str = PG_GETARG_CSTRING(0);
+ char *str = PG_GETARG_CSTRING(0);
RelativeTime result;
struct tm tt,
*tm = &tt;
@@ -724,9 +716,7 @@ reltimeout(PG_FUNCTION_ARGS)
char buf[MAXDATELEN + 1];
if (time == INVALID_RELTIME)
- {
strcpy(buf, INVALID_RELTIME_STR);
- }
else
{
reltime2tm(time, tm);
@@ -840,12 +830,12 @@ tintervalout(PG_FUNCTION_ARGS)
else
{
p = DatumGetCString(DirectFunctionCall1(nabstimeout,
- AbsoluteTimeGetDatum(interval->data[0])));
+ AbsoluteTimeGetDatum(interval->data[0])));
strcat(i_str, p);
pfree(p);
strcat(i_str, "\" \"");
p = DatumGetCString(DirectFunctionCall1(nabstimeout,
- AbsoluteTimeGetDatum(interval->data[1])));
+ AbsoluteTimeGetDatum(interval->data[1])));
strcat(i_str, p);
pfree(p);
}
@@ -868,9 +858,7 @@ interval_reltime(PG_FUNCTION_ARGS)
double span;
if (INTERVAL_IS_INVALID(*interval))
- {
time = INVALID_RELTIME;
- }
else
{
if (interval->month == 0)
@@ -965,8 +953,8 @@ mktinterval(PG_FUNCTION_ARGS)
Datum
timepl(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == CURRENT_ABSTIME)
t1 = GetCurrentTransactionStartTime();
@@ -987,8 +975,8 @@ timepl(PG_FUNCTION_ARGS)
Datum
timemi(PG_FUNCTION_ARGS)
{
- AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ AbsoluteTime t1 = PG_GETARG_ABSOLUTETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == CURRENT_ABSTIME)
t1 = GetCurrentTransactionStartTime();
@@ -1030,17 +1018,17 @@ abstimemi(AbsoluteTime t1, AbsoluteTime t2)
Datum
intinterval(PG_FUNCTION_ARGS)
{
- AbsoluteTime t = PG_GETARG_ABSOLUTETIME(0);
- TimeInterval interval = PG_GETARG_TIMEINTERVAL(1);
+ AbsoluteTime t = PG_GETARG_ABSOLUTETIME(0);
+ TimeInterval interval = PG_GETARG_TIMEINTERVAL(1);
if (interval->status == T_INTERVAL_VALID && t != INVALID_ABSTIME)
{
if (DatumGetBool(DirectFunctionCall2(abstimege,
- AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(interval->data[0]))) &&
+ AbsoluteTimeGetDatum(t),
+ AbsoluteTimeGetDatum(interval->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimele,
- AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(interval->data[1]))))
+ AbsoluteTimeGetDatum(t),
+ AbsoluteTimeGetDatum(interval->data[1]))))
PG_RETURN_BOOL(true);
}
PG_RETURN_BOOL(false);
@@ -1086,8 +1074,8 @@ timenow(PG_FUNCTION_ARGS)
Datum
reltimeeq(PG_FUNCTION_ARGS)
{
- RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == INVALID_RELTIME || t2 == INVALID_RELTIME)
PG_RETURN_BOOL(false);
@@ -1097,8 +1085,8 @@ reltimeeq(PG_FUNCTION_ARGS)
Datum
reltimene(PG_FUNCTION_ARGS)
{
- RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == INVALID_RELTIME || t2 == INVALID_RELTIME)
PG_RETURN_BOOL(false);
@@ -1108,8 +1096,8 @@ reltimene(PG_FUNCTION_ARGS)
Datum
reltimelt(PG_FUNCTION_ARGS)
{
- RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == INVALID_RELTIME || t2 == INVALID_RELTIME)
PG_RETURN_BOOL(false);
@@ -1119,8 +1107,8 @@ reltimelt(PG_FUNCTION_ARGS)
Datum
reltimegt(PG_FUNCTION_ARGS)
{
- RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == INVALID_RELTIME || t2 == INVALID_RELTIME)
PG_RETURN_BOOL(false);
@@ -1130,8 +1118,8 @@ reltimegt(PG_FUNCTION_ARGS)
Datum
reltimele(PG_FUNCTION_ARGS)
{
- RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == INVALID_RELTIME || t2 == INVALID_RELTIME)
PG_RETURN_BOOL(false);
@@ -1141,8 +1129,8 @@ reltimele(PG_FUNCTION_ARGS)
Datum
reltimege(PG_FUNCTION_ARGS)
{
- RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
- RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
+ RelativeTime t1 = PG_GETARG_RELATIVETIME(0);
+ RelativeTime t2 = PG_GETARG_RELATIVETIME(1);
if (t1 == INVALID_RELTIME || t2 == INVALID_RELTIME)
PG_RETURN_BOOL(false);
@@ -1157,18 +1145,18 @@ reltimege(PG_FUNCTION_ARGS)
Datum
tintervalsame(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
@@ -1181,8 +1169,8 @@ tintervalsame(PG_FUNCTION_ARGS)
Datum
tintervaleq(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
AbsoluteTime t10,
t11,
t20,
@@ -1215,8 +1203,8 @@ tintervaleq(PG_FUNCTION_ARGS)
Datum
tintervalne(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
AbsoluteTime t10,
t11,
t20,
@@ -1249,8 +1237,8 @@ tintervalne(PG_FUNCTION_ARGS)
Datum
tintervallt(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
AbsoluteTime t10,
t11,
t20,
@@ -1283,8 +1271,8 @@ tintervallt(PG_FUNCTION_ARGS)
Datum
tintervalle(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
AbsoluteTime t10,
t11,
t20,
@@ -1317,8 +1305,8 @@ tintervalle(PG_FUNCTION_ARGS)
Datum
tintervalgt(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
AbsoluteTime t10,
t11,
t20,
@@ -1351,8 +1339,8 @@ tintervalgt(PG_FUNCTION_ARGS)
Datum
tintervalge(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
AbsoluteTime t10,
t11,
t20,
@@ -1407,7 +1395,7 @@ tintervalleneq(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt == t);
}
@@ -1421,7 +1409,7 @@ tintervallenne(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt != t);
}
@@ -1435,7 +1423,7 @@ tintervallenlt(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt < t);
}
@@ -1449,7 +1437,7 @@ tintervallengt(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt > t);
}
@@ -1463,7 +1451,7 @@ tintervallenle(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt <= t);
}
@@ -1477,7 +1465,7 @@ tintervallenge(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt >= t);
}
@@ -1487,17 +1475,17 @@ tintervallenge(PG_FUNCTION_ARGS)
Datum
tintervalct(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimele,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimege,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
@@ -1508,17 +1496,17 @@ tintervalct(PG_FUNCTION_ARGS)
Datum
tintervalov(PG_FUNCTION_ARGS)
{
- TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
- TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
+ TimeInterval i1 = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i2 = PG_GETARG_TIMEINTERVAL(1);
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimelt,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[0]))) ||
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[0]))) ||
DatumGetBool(DirectFunctionCall2(abstimegt,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(false);
PG_RETURN_BOOL(true);
}
@@ -1529,7 +1517,7 @@ tintervalov(PG_FUNCTION_ARGS)
Datum
tintervalstart(PG_FUNCTION_ARGS)
{
- TimeInterval i = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i = PG_GETARG_TIMEINTERVAL(0);
if (i->status == T_INTERVAL_INVAL)
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
@@ -1542,7 +1530,7 @@ tintervalstart(PG_FUNCTION_ARGS)
Datum
tintervalend(PG_FUNCTION_ARGS)
{
- TimeInterval i = PG_GETARG_TIMEINTERVAL(0);
+ TimeInterval i = PG_GETARG_TIMEINTERVAL(0);
if (i->status == T_INTERVAL_INVAL)
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
@@ -1835,7 +1823,7 @@ istinterval(char *i_string,
}
/* get the first date */
*i_start = DatumGetAbsoluteTime(DirectFunctionCall1(nabstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* change the NULL at the end of the first date back to a '"' */
*p1 = '"';
p = ++p1;
@@ -1863,7 +1851,7 @@ istinterval(char *i_string,
}
/* get the second date */
*i_end = DatumGetAbsoluteTime(DirectFunctionCall1(nabstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* change the NULL at the end of the second date back to a '"' */
*p1 = '"';
p = ++p1;
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 97e0825adf8..56bc534c230 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -3,7 +3,7 @@
* is for IP V4 CIDR notation, but prepared for V6: just
* add the necessary bits where the comments indicate.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/network.c,v 1.28 2000/12/22 18:00:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/network.c,v 1.29 2001/03/22 03:59:52 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
@@ -21,7 +21,7 @@
static int32 network_cmp_internal(inet *a1, inet *a2);
-static int v4bitncmp(unsigned long a1, unsigned long a2, int bits);
+static int v4bitncmp(unsigned long a1, unsigned long a2, int bits);
static bool v4addressOK(unsigned long a1, int bits);
/*
@@ -66,12 +66,12 @@ network_in(char *src, int type)
}
/*
- * Error check: CIDR values must not have any bits set beyond the masklen.
- * XXX this code is not IPV6 ready.
+ * Error check: CIDR values must not have any bits set beyond the
+ * masklen. XXX this code is not IPV6 ready.
*/
if (type)
{
- if (! v4addressOK(ip_v4addr(dst), bits))
+ if (!v4addressOK(ip_v4addr(dst), bits))
elog(ERROR, "invalid CIDR value '%s': has bits set to right of mask", src);
}
@@ -116,7 +116,9 @@ inet_out(PG_FUNCTION_ARGS)
if (ip_family(src) == AF_INET)
{
/* It's an IP V4 address: */
- /* Use inet style for both inet and cidr, since we don't want
+
+ /*
+ * Use inet style for both inet and cidr, since we don't want
* abbreviated CIDR style here.
*/
dst = inet_net_ntop(AF_INET, &ip_v4addr(src), ip_bits(src),
@@ -162,7 +164,7 @@ network_cmp_internal(inet *a1, inet *a2)
{
if (ip_family(a1) == AF_INET && ip_family(a2) == AF_INET)
{
- int order;
+ int order;
order = v4bitncmp(ip_v4addr(a1), ip_v4addr(a2),
Min(ip_bits(a1), ip_bits(a2)));
@@ -260,7 +262,7 @@ network_sub(PG_FUNCTION_ARGS)
if ((ip_family(a1) == AF_INET) && (ip_family(a2) == AF_INET))
{
PG_RETURN_BOOL(ip_bits(a1) > ip_bits(a2)
- && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a2)) == 0);
+ && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a2)) == 0);
}
else
{
@@ -280,7 +282,7 @@ network_subeq(PG_FUNCTION_ARGS)
if ((ip_family(a1) == AF_INET) && (ip_family(a2) == AF_INET))
{
PG_RETURN_BOOL(ip_bits(a1) >= ip_bits(a2)
- && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a2)) == 0);
+ && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a2)) == 0);
}
else
{
@@ -300,7 +302,7 @@ network_sup(PG_FUNCTION_ARGS)
if ((ip_family(a1) == AF_INET) && (ip_family(a2) == AF_INET))
{
PG_RETURN_BOOL(ip_bits(a1) < ip_bits(a2)
- && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a1)) == 0);
+ && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a1)) == 0);
}
else
{
@@ -320,7 +322,7 @@ network_supeq(PG_FUNCTION_ARGS)
if ((ip_family(a1) == AF_INET) && (ip_family(a2) == AF_INET))
{
PG_RETURN_BOOL(ip_bits(a1) <= ip_bits(a2)
- && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a1)) == 0);
+ && v4bitncmp(ip_v4addr(a1), ip_v4addr(a2), ip_bits(a1)) == 0);
}
else
{
@@ -456,8 +458,9 @@ network_broadcast(PG_FUNCTION_ARGS)
/* It's an IP V4 address: */
unsigned long mask = 0xffffffff;
- /* Shifting by 32 or more bits does not yield portable results,
- * so don't try it.
+ /*
+ * Shifting by 32 or more bits does not yield portable results, so
+ * don't try it.
*/
if (ip_bits(ip) < 32)
mask >>= ip_bits(ip);
@@ -495,8 +498,9 @@ network_network(PG_FUNCTION_ARGS)
/* It's an IP V4 address: */
unsigned long mask = 0xffffffff;
- /* Shifting by 32 or more bits does not yield portable results,
- * so don't try it.
+ /*
+ * Shifting by 32 or more bits does not yield portable results, so
+ * don't try it.
*/
if (ip_bits(ip) > 0)
mask <<= (32 - ip_bits(ip));
@@ -534,8 +538,9 @@ network_netmask(PG_FUNCTION_ARGS)
/* It's an IP V4 address: */
unsigned long mask = 0xffffffff;
- /* Shifting by 32 or more bits does not yield portable results,
- * so don't try it.
+ /*
+ * Shifting by 32 or more bits does not yield portable results, so
+ * don't try it.
*/
if (ip_bits(ip) > 0)
mask <<= (32 - ip_bits(ip));
@@ -568,8 +573,9 @@ v4bitncmp(unsigned long a1, unsigned long a2, int bits)
{
unsigned long mask;
- /* Shifting by 32 or more bits does not yield portable results,
- * so don't try it.
+ /*
+ * Shifting by 32 or more bits does not yield portable results, so
+ * don't try it.
*/
if (bits > 0)
mask = (0xFFFFFFFFL << (32 - bits)) & 0xFFFFFFFFL;
@@ -592,8 +598,9 @@ v4addressOK(unsigned long a1, int bits)
{
unsigned long mask;
- /* Shifting by 32 or more bits does not yield portable results,
- * so don't try it.
+ /*
+ * Shifting by 32 or more bits does not yield portable results, so
+ * don't try it.
*/
if (bits > 0)
mask = (0xFFFFFFFFL << (32 - bits)) & 0xFFFFFFFFL;
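The comments re-wrapped throughout network.c above all guard the same pitfall: shifting a 32-bit quantity by 32 or more bits is undefined in C, so a /0 (or /32) prefix has to be special-cased instead of computed with a full-width shift. A minimal standalone sketch of that pattern, using a hypothetical helper name that is not part of this patch:

#include <stdio.h>

/* Hypothetical helper: build an IPv4 netmask for a prefix length 0..32.
 * Shifting a 32-bit value by 32 is undefined, so bits == 0 is handled
 * explicitly rather than relying on "mask <<= (32 - bits)". */
static unsigned long
v4_prefix_mask(int bits)
{
    unsigned long mask = 0xffffffffUL;

    if (bits > 0)
        mask <<= (32 - bits);
    else
        mask = 0;
    return mask & 0xffffffffUL;
}

int
main(void)
{
    printf("/24 -> %08lx\n", v4_prefix_mask(24));   /* ffffff00 */
    printf("/0  -> %08lx\n", v4_prefix_mask(0));    /* 00000000 */
    return 0;
}

The same guarded-shift shape appears in network_broadcast, network_network, network_netmask, v4bitncmp and v4addressOK above; only the comment wrapping changes in this diff.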
diff --git a/src/backend/utils/adt/not_in.c b/src/backend/utils/adt/not_in.c
index 450cfb2484a..ac596b1f123 100644
--- a/src/backend/utils/adt/not_in.c
+++ b/src/backend/utils/adt/not_in.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/not_in.c,v 1.25 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/not_in.c,v 1.26 2001/03/22 03:59:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ int4notin(PG_FUNCTION_ARGS)
/* make a null-terminated copy of text */
strlength = VARSIZE(relation_and_attr) - VARHDRSZ;
if (strlength >= sizeof(my_copy))
- strlength = sizeof(my_copy)-1;
+ strlength = sizeof(my_copy) - 1;
memcpy(my_copy, VARDATA(relation_and_attr), strlength);
my_copy[strlength] = '\0';
@@ -110,8 +110,10 @@ Datum
oidnotin(PG_FUNCTION_ARGS)
{
Oid the_oid = PG_GETARG_OID(0);
+
#ifdef NOT_USED
text *relation_and_attr = PG_GETARG_TEXT_P(1);
+
#endif
if (the_oid == InvalidOid)
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index d18d3003a08..4a16741bb91 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -5,7 +5,7 @@
*
* 1998 Jan Wieck
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/numeric.c,v 1.37 2001/03/14 16:50:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/numeric.c,v 1.38 2001/03/22 03:59:52 momjian Exp $
*
* ----------
*/
@@ -192,8 +192,10 @@ Datum
numeric_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
+
#endif
int32 typmod = PG_GETARG_INT32(2);
NumericVar value;
@@ -1865,8 +1867,8 @@ do_numeric_accum(ArrayType *transarray, Numeric newval)
NumericGetDatum(newval));
sumX2 = DirectFunctionCall2(numeric_add, sumX2,
DirectFunctionCall2(numeric_mul,
- NumericGetDatum(newval),
- NumericGetDatum(newval)));
+ NumericGetDatum(newval),
+ NumericGetDatum(newval)));
transdatums[0] = N;
transdatums[1] = sumX;
@@ -2011,8 +2013,8 @@ numeric_variance(PG_FUNCTION_ARGS)
mul_var(&vsumX, &vsumX, &vsumX); /* now vsumX contains sumX * sumX */
mul_var(&vN, &vsumX2, &vsumX2); /* now vsumX2 contains N * sumX2 */
sub_var(&vsumX2, &vsumX, &vsumX2); /* N * sumX2 - sumX * sumX */
- mul_var(&vN, &vNminus1, &vNminus1); /* N * (N - 1) */
- div_var(&vsumX2, &vNminus1, &vsumX); /* variance */
+ mul_var(&vN, &vNminus1, &vNminus1); /* N * (N - 1) */
+ div_var(&vsumX2, &vNminus1, &vsumX); /* variance */
res = make_result(&vsumX);
@@ -2078,9 +2080,9 @@ numeric_stddev(PG_FUNCTION_ARGS)
mul_var(&vsumX, &vsumX, &vsumX); /* now vsumX contains sumX * sumX */
mul_var(&vN, &vsumX2, &vsumX2); /* now vsumX2 contains N * sumX2 */
sub_var(&vsumX2, &vsumX, &vsumX2); /* N * sumX2 - sumX * sumX */
- mul_var(&vN, &vNminus1, &vNminus1); /* N * (N - 1) */
- div_var(&vsumX2, &vNminus1, &vsumX); /* variance */
- sqrt_var(&vsumX, &vsumX); /* stddev */
+ mul_var(&vN, &vNminus1, &vNminus1); /* N * (N - 1) */
+ div_var(&vsumX2, &vNminus1, &vsumX); /* variance */
+ sqrt_var(&vsumX, &vsumX); /* stddev */
res = make_result(&vsumX);
@@ -2096,9 +2098,9 @@ numeric_stddev(PG_FUNCTION_ARGS)
/*
* SUM transition functions for integer datatypes.
*
- * We use a Numeric accumulator to avoid overflow. Because SQL92 defines
+ * We use a Numeric accumulator to avoid overflow. Because SQL92 defines
* the SUM() of no values to be NULL, not zero, the initial condition of
- * the transition data value needs to be NULL. This means we can't rely
+ * the transition data value needs to be NULL. This means we can't rely
* on ExecAgg to automatically insert the first non-null data value into
* the transition data: it doesn't know how to do the type conversion.
* The upshot is that these routines have to be marked non-strict and
@@ -3563,7 +3565,7 @@ exp_var(NumericVar *arg, NumericVar *result)
set_var_from_var(&const_one, &ifac);
set_var_from_var(&const_one, &ni);
- for (i = 2; ; i++)
+ for (i = 2;; i++)
{
add_var(&ni, &const_one, &ni);
mul_var(&xpow, &x, &xpow);
@@ -3647,7 +3649,7 @@ ln_var(NumericVar *arg, NumericVar *result)
set_var_from_var(&const_one, &ni);
- for (i = 2; ; i++)
+ for (i = 2;; i++)
{
add_var(&ni, &const_two, &ni);
mul_var(&xx, &x, &xx);
@@ -3820,6 +3822,7 @@ add_abs(NumericVar *var1, NumericVar *var2, NumericVar *result)
i1,
i2;
int carry = 0;
+
/* copy these values into local vars for speed in inner loop */
int var1ndigits = var1->ndigits;
int var2ndigits = var2->ndigits;
@@ -3906,6 +3909,7 @@ sub_abs(NumericVar *var1, NumericVar *var2, NumericVar *result)
i1,
i2;
int borrow = 0;
+
/* copy these values into local vars for speed in inner loop */
int var1ndigits = var1->ndigits;
int var2ndigits = var2->ndigits;
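For reference, the numeric_variance/numeric_stddev hunks above only re-align the per-line comments, but those comments spell out the one-pass sample-variance identity the aggregates rely on; in conventional notation, for N values x_i:

    s^2 = \frac{N \sum_i x_i^2 - \left(\sum_i x_i\right)^2}{N(N-1)}, \qquad \text{stddev} = \sqrt{s^2}

This merely restates what the code already computes (vsumX2 ends up holding N*sumX2 - sumX*sumX and vNminus1 holds N*(N-1)); no behaviour beyond the existing comments is implied.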
diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c
index 11e779d45d6..92af2ab56cf 100644
--- a/src/backend/utils/adt/numutils.c
+++ b/src/backend/utils/adt/numutils.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/numutils.c,v 1.44 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/numutils.c,v 1.45 2001/03/22 03:59:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,11 +63,11 @@ pg_atoi(char *s, int size, int c)
l = (long) 0;
else
l = strtol(s, &badp, 10);
+
/*
- * strtol() normally only sets ERANGE. On some systems it also
- * may set EINVAL, which simply means it couldn't parse the
- * input string. This is handled by the second "if" consistent
- * across platforms.
+ * strtol() normally only sets ERANGE. On some systems it also may
+ * set EINVAL, which simply means it couldn't parse the input string.
+ * This is handled by the second "if" consistent across platforms.
*/
if (errno && errno != EINVAL)
elog(ERROR, "pg_atoi: error reading \"%s\": %m", s);
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 0cb4ea1a0a2..91dbf6c54eb 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/oid.c,v 1.44 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/oid.c,v 1.45 2001/03/22 03:59:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,11 +36,11 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
cvt = strtoul(s, &endptr, 10);
/*
- * strtoul() normally only sets ERANGE. On some systems it also
- * may set EINVAL, which simply means it couldn't parse the
- * input string. This is handled by the second "if" consistent
- * across platforms. Note that for historical reasons we accept
- * an empty string as meaning 0.
+ * strtoul() normally only sets ERANGE. On some systems it also may
+ * set EINVAL, which simply means it couldn't parse the input string.
+ * This is handled by the second "if" consistent across platforms.
+ * Note that for historical reasons we accept an empty string as
+ * meaning 0.
*/
if (errno && errno != EINVAL)
elog(ERROR, "%s: error reading \"%s\": %m",
@@ -67,21 +67,20 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
result = (Oid) cvt;
/*
- * Cope with possibility that unsigned long is wider than Oid,
- * in which case strtoul will not raise an error for some values
- * that are out of the range of Oid.
+ * Cope with possibility that unsigned long is wider than Oid, in
+ * which case strtoul will not raise an error for some values that are
+ * out of the range of Oid.
*
- * For backwards compatibility, we want to accept inputs that
- * are given with a minus sign, so allow the input value if it
- * matches after either signed or unsigned extension to long.
+ * For backwards compatibility, we want to accept inputs that are given
+ * with a minus sign, so allow the input value if it matches after
+ * either signed or unsigned extension to long.
*
- * To ensure consistent results on 32-bit and 64-bit platforms,
- * make sure the error message is the same as if strtoul() had
- * returned ERANGE.
+ * To ensure consistent results on 32-bit and 64-bit platforms, make sure
+ * the error message is the same as if strtoul() had returned ERANGE.
*/
#if OID_MAX != ULONG_MAX
if (cvt != (unsigned long) result &&
- cvt != (unsigned long) ((int) result))
+ cvt != (unsigned long) ((int) result))
elog(ERROR, "%s: error reading \"%s\": %s",
funcname, s, strerror(ERANGE));
#endif
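To see what the range check above catches, assume a typical LP64 platform (64-bit unsigned long, 32-bit Oid, two's-complement conversion): for the input "4294967296", strtoul() returns 2^32, the cast to Oid yields 0, and neither the unsigned nor the signed re-extension reproduces 2^32, so the ERANGE-style error is raised; for "-1", strtoul() returns ULONG_MAX, the Oid becomes 0xFFFFFFFF, and signed re-extension of (int) -1 does reproduce ULONG_MAX, so the historical minus-sign input is accepted.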
@@ -235,8 +234,8 @@ oidgt(PG_FUNCTION_ARGS)
Datum
oidvectoreq(PG_FUNCTION_ARGS)
{
- Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
- Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
+ Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
+ Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(memcmp(arg1, arg2, INDEX_MAX_KEYS * sizeof(Oid)) == 0);
}
@@ -244,8 +243,8 @@ oidvectoreq(PG_FUNCTION_ARGS)
Datum
oidvectorne(PG_FUNCTION_ARGS)
{
- Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
- Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
+ Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
+ Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(memcmp(arg1, arg2, INDEX_MAX_KEYS * sizeof(Oid)) != 0);
}
@@ -253,8 +252,8 @@ oidvectorne(PG_FUNCTION_ARGS)
Datum
oidvectorlt(PG_FUNCTION_ARGS)
{
- Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
- Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
+ Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
+ Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
int i;
for (i = 0; i < INDEX_MAX_KEYS; i++)
@@ -266,8 +265,8 @@ oidvectorlt(PG_FUNCTION_ARGS)
Datum
oidvectorle(PG_FUNCTION_ARGS)
{
- Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
- Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
+ Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
+ Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
int i;
for (i = 0; i < INDEX_MAX_KEYS; i++)
@@ -279,8 +278,8 @@ oidvectorle(PG_FUNCTION_ARGS)
Datum
oidvectorge(PG_FUNCTION_ARGS)
{
- Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
- Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
+ Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
+ Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
int i;
for (i = 0; i < INDEX_MAX_KEYS; i++)
@@ -292,8 +291,8 @@ oidvectorge(PG_FUNCTION_ARGS)
Datum
oidvectorgt(PG_FUNCTION_ARGS)
{
- Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
- Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
+ Oid *arg1 = (Oid *) PG_GETARG_POINTER(0);
+ Oid *arg2 = (Oid *) PG_GETARG_POINTER(1);
int i;
for (i = 0; i < INDEX_MAX_KEYS; i++)
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index 5a83c56687f..b91230b1f0a 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -1,7 +1,7 @@
/*
* Edmund Mergl <E.Mergl@bawue.de>
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/oracle_compat.c,v 1.30 2000/12/07 23:22:56 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/oracle_compat.c,v 1.31 2001/03/22 03:59:52 momjian Exp $
*
*/
@@ -596,8 +596,8 @@ ascii(PG_FUNCTION_ARGS)
Datum
chr(PG_FUNCTION_ARGS)
{
- int32 cvalue = PG_GETARG_INT32(0);
- text *result;
+ int32 cvalue = PG_GETARG_INT32(0);
+ text *result;
result = (text *) palloc(VARHDRSZ + 1);
VARATT_SIZEP(result) = VARHDRSZ + 1;
@@ -616,20 +616,20 @@ chr(PG_FUNCTION_ARGS)
*
* Purpose:
*
- * Repeat string by val.
+ * Repeat string by val.
*
********************************************************************/
Datum
repeat(PG_FUNCTION_ARGS)
{
- text *string = PG_GETARG_TEXT_P(0);
- int32 count = PG_GETARG_INT32(1);
- text *result;
- int slen,
- tlen;
- int i;
- char *cp;
+ text *string = PG_GETARG_TEXT_P(0);
+ int32 count = PG_GETARG_INT32(1);
+ text *result;
+ int slen,
+ tlen;
+ int i;
+ char *cp;
if (count < 0)
count = 0;
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 6d30a53edcb..faa81cd09af 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -4,7 +4,7 @@
* The PostgreSQL locale utils.
*
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.8 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.9 2001/03/22 03:59:52 momjian Exp $
*
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
*
@@ -68,7 +68,7 @@ PGLC_debug_lc(PG_LocaleCategories * lc)
lc->lc_collate,
lc->lc_monetary
#ifdef LC_MESSAGES
- , lc->lc_messages
+ ,lc->lc_messages
#endif
);
}
@@ -116,7 +116,7 @@ PGLC_setlocale(PG_LocaleCategories * lc)
/*------
* Return the POSIX lconv struct (contains number/money formatting information)
- * with locale information for all categories. Note that returned lconv
+ * with locale information for all categories. Note that returned lconv
* does not depend on currently active category settings, but on external
* environment variables for locale.
*
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index fc93031b521..f548775ad2f 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -1,7 +1,7 @@
/* ----------
* pg_lzcompress.c -
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.9 2000/10/03 03:11:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.10 2001/03/22 03:59:52 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
@@ -120,7 +120,7 @@
* 8 times the size of the input!).
*
* The compressor creates a table for 8192 lists of positions.
- * For each input position (except the last 3), a hash key is
+ * For each input position (except the last 3), a hash key is
* built from the 4 next input bytes and the position remembered
* in the appropriate list. Thus, the table points to linked
* lists of likely to be at least in the first 4 characters

@@ -152,7 +152,7 @@
*
* - a match >= good_match is found
* - there are no more history entries to look at
- * - the next history entry is already too far back
+ * - the next history entry is already too far back
* to be coded into a tag.
*
* Finally the match algorithm checks that at least a match
@@ -286,18 +286,18 @@ static PGLZ_HistEntry hist_entries[PGLZ_HISTORY_SIZE];
#define pglz_hist_add(_hs,_he,_hn,_s,_e) { \
int __hindex = pglz_hist_idx((_s),(_e)); \
if ((_he)[(_hn)].prev == NULL) { \
- (_hs)[__hindex] = (_he)[(_hn)].next; \
+ (_hs)[__hindex] = (_he)[(_hn)].next; \
} else { \
- (_he)[(_hn)].prev->next = (_he)[(_hn)].next; \
+ (_he)[(_hn)].prev->next = (_he)[(_hn)].next; \
} \
if ((_he)[(_hn)].next != NULL) { \
- (_he)[(_hn)].next->prev = (_he)[(_hn)].prev; \
+ (_he)[(_hn)].next->prev = (_he)[(_hn)].prev; \
} \
(_he)[(_hn)].next = (_hs)[__hindex]; \
(_he)[(_hn)].prev = NULL; \
(_he)[(_hn)].pos = (_s); \
if ((_hs)[__hindex] != NULL) { \
- (_hs)[__hindex]->prev = &((_he)[(_hn)]); \
+ (_hs)[__hindex]->prev = &((_he)[(_hn)]); \
} \
(_hs)[__hindex] = &((_he)[(_hn)]); \
if (++(_hn) >= PGLZ_HISTORY_SIZE) { \
@@ -476,7 +476,7 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
int
pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strategy)
{
- int hist_next = 0;
+ int hist_next = 0;
unsigned char *bp = ((unsigned char *) dest) + sizeof(PGLZ_Header);
unsigned char *bstart = bp;
diff --git a/src/backend/utils/adt/quote.c b/src/backend/utils/adt/quote.c
index 02aaac74134..10999150a21 100644
--- a/src/backend/utils/adt/quote.c
+++ b/src/backend/utils/adt/quote.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/quote.c,v 1.3 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/quote.c,v 1.4 2001/03/22 03:59:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,12 +35,10 @@ quote_ident(PG_FUNCTION_ARGS)
text *result;
if (quote_ident_required(t))
- {
- result = do_quote_ident(t);
- }
+ result = do_quote_ident(t);
else
{
- result = (text *)palloc(VARSIZE(t));
+ result = (text *) palloc(VARSIZE(t));
memcpy(result, t, VARSIZE(t));
}
@@ -79,23 +77,26 @@ quote_literal(PG_FUNCTION_ARGS)
static bool
quote_ident_required(text *iptr)
{
- char *cp;
- char *ep;
+ char *cp;
+ char *ep;
cp = VARDATA(iptr);
- ep = VARDATA(iptr) + VARSIZE(iptr) - VARHDRSZ;
+ ep = VARDATA(iptr) + VARSIZE(iptr) - VARHDRSZ;
if (cp >= ep)
return true;
- if (!(*cp == '_' || (*cp >= 'a' && *cp <= 'z')))
+ if (!(*cp == '_' || (*cp >= 'a' && *cp <= 'z')))
return true;
- while((++cp) < ep)
+ while ((++cp) < ep)
{
- if (*cp >= 'a' && *cp <= 'z') continue;
- if (*cp >= '0' && *cp <= '9') continue;
- if (*cp == '_') continue;
+ if (*cp >= 'a' && *cp <= 'z')
+ continue;
+ if (*cp >= '0' && *cp <= '9')
+ continue;
+ if (*cp == '_')
+ continue;
return true;
}
@@ -107,29 +108,29 @@ quote_ident_required(text *iptr)
static text *
do_quote_ident(text *iptr)
{
- text *result;
- char *cp1;
- char *cp2;
- int len;
+ text *result;
+ char *cp1;
+ char *cp2;
+ int len;
- len = VARSIZE(iptr) - VARHDRSZ;
- result = (text *)palloc(len * 2 + VARHDRSZ + 2);
+ len = VARSIZE(iptr) - VARHDRSZ;
+ result = (text *) palloc(len * 2 + VARHDRSZ + 2);
cp1 = VARDATA(iptr);
cp2 = VARDATA(result);
*cp2++ = '"';
- while(len-- > 0)
+ while (len-- > 0)
{
- if (*cp1 == '"')
+ if (*cp1 == '"')
*cp2++ = '"';
- if (*cp1 == '\\')
+ if (*cp1 == '\\')
*cp2++ = '\\';
- *cp2++ = *cp1++;
+ *cp2++ = *cp1++;
}
*cp2++ = '"';
- VARATT_SIZEP(result) = cp2 - ((char *)result);
+ VARATT_SIZEP(result) = cp2 - ((char *) result);
return result;
}
@@ -138,29 +139,29 @@ do_quote_ident(text *iptr)
static text *
do_quote_literal(text *lptr)
{
- text *result;
- char *cp1;
- char *cp2;
- int len;
+ text *result;
+ char *cp1;
+ char *cp2;
+ int len;
- len = VARSIZE(lptr) - VARHDRSZ;
- result = (text *)palloc(len * 2 + VARHDRSZ + 2);
+ len = VARSIZE(lptr) - VARHDRSZ;
+ result = (text *) palloc(len * 2 + VARHDRSZ + 2);
cp1 = VARDATA(lptr);
cp2 = VARDATA(result);
*cp2++ = '\'';
- while(len-- > 0)
+ while (len-- > 0)
{
- if (*cp1 == '\'')
+ if (*cp1 == '\'')
*cp2++ = '\'';
- if (*cp1 == '\\')
+ if (*cp1 == '\\')
*cp2++ = '\\';
- *cp2++ = *cp1++;
+ *cp2++ = *cp1++;
}
*cp2++ = '\'';
- VARATT_SIZEP(result) = cp2 - ((char *)result);
+ VARATT_SIZEP(result) = cp2 - ((char *) result);
return result;
}
@@ -171,28 +172,31 @@ do_quote_literal(text *lptr)
static bool
quote_ident_required(text *iptr)
{
- char *cp;
- char *ep;
+ char *cp;
+ char *ep;
cp = VARDATA(iptr);
- ep = VARDATA(iptr) + VARSIZE(iptr) - VARHDRSZ;
+ ep = VARDATA(iptr) + VARSIZE(iptr) - VARHDRSZ;
if (cp >= ep)
return true;
- if(pg_mblen(cp) != 1)
+ if (pg_mblen(cp) != 1)
return true;
- if (!(*cp == '_' || (*cp >= 'a' && *cp <= 'z')))
+ if (!(*cp == '_' || (*cp >= 'a' && *cp <= 'z')))
return true;
- while((++cp) < ep)
+ while ((++cp) < ep)
{
if (pg_mblen(cp) != 1)
return true;
- if (*cp >= 'a' && *cp <= 'z') continue;
- if (*cp >= '0' && *cp <= '9') continue;
- if (*cp == '_') continue;
+ if (*cp >= 'a' && *cp <= 'z')
+ continue;
+ if (*cp >= '0' && *cp <= '9')
+ continue;
+ if (*cp == '_')
+ continue;
return true;
}
@@ -204,41 +208,41 @@ quote_ident_required(text *iptr)
static text *
do_quote_ident(text *iptr)
{
- text *result;
- char *cp1;
- char *cp2;
- int len;
- int wl;
+ text *result;
+ char *cp1;
+ char *cp2;
+ int len;
+ int wl;
- len = VARSIZE(iptr) - VARHDRSZ;
- result = (text *)palloc(len * 2 + VARHDRSZ + 2);
+ len = VARSIZE(iptr) - VARHDRSZ;
+ result = (text *) palloc(len * 2 + VARHDRSZ + 2);
cp1 = VARDATA(iptr);
cp2 = VARDATA(result);
*cp2++ = '"';
- while(len > 0)
+ while (len > 0)
{
if ((wl = pg_mblen(cp1)) != 1)
{
len -= wl;
- while(wl-- > 0)
+ while (wl-- > 0)
*cp2++ = *cp1++;
continue;
}
- if (*cp1 == '"')
+ if (*cp1 == '"')
*cp2++ = '"';
- if (*cp1 == '\\')
+ if (*cp1 == '\\')
*cp2++ = '\\';
- *cp2++ = *cp1++;
+ *cp2++ = *cp1++;
len--;
}
*cp2++ = '"';
- VARATT_SIZEP(result) = cp2 - ((char *)result);
+ VARATT_SIZEP(result) = cp2 - ((char *) result);
return result;
}
@@ -247,45 +251,43 @@ do_quote_ident(text *iptr)
static text *
do_quote_literal(text *lptr)
{
- text *result;
- char *cp1;
- char *cp2;
- int len;
- int wl;
+ text *result;
+ char *cp1;
+ char *cp2;
+ int len;
+ int wl;
- len = VARSIZE(lptr) - VARHDRSZ;
- result = (text *)palloc(len * 2 + VARHDRSZ + 2);
+ len = VARSIZE(lptr) - VARHDRSZ;
+ result = (text *) palloc(len * 2 + VARHDRSZ + 2);
cp1 = VARDATA(lptr);
cp2 = VARDATA(result);
*cp2++ = '\'';
- while(len > 0)
+ while (len > 0)
{
if ((wl = pg_mblen(cp1)) != 1)
{
len -= wl;
- while(wl-- > 0)
+ while (wl-- > 0)
*cp2++ = *cp1++;
continue;
}
- if (*cp1 == '\'')
+ if (*cp1 == '\'')
*cp2++ = '\'';
- if (*cp1 == '\\')
+ if (*cp1 == '\\')
*cp2++ = '\\';
- *cp2++ = *cp1++;
+ *cp2++ = *cp1++;
len--;
}
*cp2++ = '\'';
- VARATT_SIZEP(result) = cp2 - ((char *)result);
+ VARATT_SIZEP(result) = cp2 - ((char *) result);
return result;
}
#endif
-
-
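The quote.c hunks above only reflow whitespace, but the allocation they touch is worth spelling out: doubling every embedded quote or backslash can at most double the payload, plus two bytes for the surrounding quotes, hence palloc(len * 2 + VARHDRSZ + 2). A minimal plain-C sketch of the same literal-quoting loop, with a hypothetical function name and NUL-terminated strings instead of text:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-alone version of the literal-quoting loop:
 * wrap in single quotes, doubling embedded ' and \ characters.
 * Worst case output is 2*len payload + 2 quotes + NUL. */
static char *
quote_literal_cstr(const char *in)
{
    size_t  len = strlen(in);
    char   *out = malloc(len * 2 + 3);
    char   *p = out;

    if (out == NULL)
        return NULL;
    *p++ = '\'';
    while (*in)
    {
        if (*in == '\'')
            *p++ = '\'';
        if (*in == '\\')
            *p++ = '\\';
        *p++ = *in++;
    }
    *p++ = '\'';
    *p = '\0';
    return out;
}

int
main(void)
{
    char   *q = quote_literal_cstr("O'Reilly");

    puts(q);                    /* 'O''Reilly' */
    free(q);
    return 0;
}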
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index b55cb3b42e7..146ae742636 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/regexp.c,v 1.35 2001/03/19 22:27:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/regexp.c,v 1.36 2001/03/22 03:59:53 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -121,6 +121,7 @@ RE_compile_and_execute(text *text_re, char *text, int cflags)
regcomp_result = pg95_regcomp(&rev[oldest].cre_re, re, cflags);
if (regcomp_result == 0)
{
+
/*
* use malloc/free for the cre_s field because the storage has to
* persist across transactions
@@ -197,10 +198,10 @@ nameregexne(PG_FUNCTION_ARGS)
Name n = PG_GETARG_NAME(0);
text *p = PG_GETARG_TEXT_P(1);
- PG_RETURN_BOOL(! fixedlen_regexeq(NameStr(*n),
- p,
- strlen(NameStr(*n)),
- REG_EXTENDED));
+ PG_RETURN_BOOL(!fixedlen_regexeq(NameStr(*n),
+ p,
+ strlen(NameStr(*n)),
+ REG_EXTENDED));
}
Datum
@@ -221,15 +222,15 @@ textregexne(PG_FUNCTION_ARGS)
text *s = PG_GETARG_TEXT_P(0);
text *p = PG_GETARG_TEXT_P(1);
- PG_RETURN_BOOL(! fixedlen_regexeq(VARDATA(s),
- p,
- VARSIZE(s) - VARHDRSZ,
- REG_EXTENDED));
+ PG_RETURN_BOOL(!fixedlen_regexeq(VARDATA(s),
+ p,
+ VARSIZE(s) - VARHDRSZ,
+ REG_EXTENDED));
}
/*
- * routines that use the regexp stuff, but ignore the case.
+ * routines that use the regexp stuff, but ignore the case.
* for this, we use the REG_ICASE flag to pg95_regcomp
*/
@@ -252,10 +253,10 @@ texticregexne(PG_FUNCTION_ARGS)
text *s = PG_GETARG_TEXT_P(0);
text *p = PG_GETARG_TEXT_P(1);
- PG_RETURN_BOOL(! fixedlen_regexeq(VARDATA(s),
- p,
- VARSIZE(s) - VARHDRSZ,
- REG_ICASE | REG_EXTENDED));
+ PG_RETURN_BOOL(!fixedlen_regexeq(VARDATA(s),
+ p,
+ VARSIZE(s) - VARHDRSZ,
+ REG_ICASE | REG_EXTENDED));
}
Datum
@@ -276,8 +277,8 @@ nameicregexne(PG_FUNCTION_ARGS)
Name n = PG_GETARG_NAME(0);
text *p = PG_GETARG_TEXT_P(1);
- PG_RETURN_BOOL(! fixedlen_regexeq(NameStr(*n),
- p,
- strlen(NameStr(*n)),
- REG_ICASE | REG_EXTENDED));
+ PG_RETURN_BOOL(!fixedlen_regexeq(NameStr(*n),
+ p,
+ strlen(NameStr(*n)),
+ REG_ICASE | REG_EXTENDED));
}
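The texticregexne()/nameicregexne() hunks above just retab the argument lists; the case-insensitive behaviour itself comes from passing REG_ICASE alongside REG_EXTENDED when the pattern is compiled. A minimal standalone POSIX <regex.h> sketch of that flag combination (not the backend's cached pg95_regcomp path):

#include <regex.h>
#include <stdio.h>

int
main(void)
{
    regex_t re;
    int     rc;

    /* Compile once with extended syntax and case folding. */
    rc = regcomp(&re, "^post(gres)?", REG_EXTENDED | REG_ICASE | REG_NOSUB);
    if (rc != 0)
    {
        char    errbuf[128];

        regerror(rc, &re, errbuf, sizeof(errbuf));
        fprintf(stderr, "regcomp: %s\n", errbuf);
        return 1;
    }

    printf("%d\n", regexec(&re, "PostgreSQL", 0, NULL, 0) == 0);    /* 1 */
    printf("%d\n", regexec(&re, "MySQL", 0, NULL, 0) == 0);         /* 0 */
    regfree(&re);
    return 0;
}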
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 6a772ee1b71..1f3abde5f11 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/regproc.c,v 1.60 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/regproc.c,v 1.61 2001/03/22 03:59:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@ regprocin(PG_FUNCTION_ARGS)
result = (RegProcedure)
GetSysCacheOid(PROCOID,
DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)),
+ CStringGetDatum(pro_name_or_oid)),
0, 0, 0);
if (!RegProcedureIsValid(result))
elog(ERROR, "No procedure with oid %s", pro_name_or_oid);
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 94d9b7313a1..db7f67ec601 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -9,7 +9,7 @@
* the backend does. This works because the hashtable structures
* themselves are allocated by dynahash.c in its permanent DynaHashCxt,
* and the parse/plan node trees they point to are copied into
- * TopMemoryContext using SPI_saveplan(). This is pretty ugly, since there
+ * TopMemoryContext using SPI_saveplan(). This is pretty ugly, since there
* is no way to free a no-longer-needed plan tree, but then again we don't
* yet have any bookkeeping that would allow us to detect that a plan isn't
* needed anymore. Improve it someday.
@@ -18,7 +18,7 @@
* Portions Copyright (c) 2000-2001, PostgreSQL Global Development Group
* Copyright 1999 Jan Wieck
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.21 2001/02/15 21:57:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.22 2001/03/22 03:59:53 momjian Exp $
*
* ----------
*/
@@ -535,7 +535,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
char del_nulls[RI_MAX_NUMKEYS + 1];
bool isnull;
int i;
- Oid save_uid;
+ Oid save_uid;
save_uid = GetUserId();
@@ -750,7 +750,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
char upd_nulls[RI_MAX_NUMKEYS + 1];
bool isnull;
int i;
- Oid save_uid;
+ Oid save_uid;
save_uid = GetUserId();
@@ -1611,7 +1611,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
char upd_nulls[RI_MAX_NUMKEYS + 1];
bool isnull;
int i;
- Oid save_uid;
+ Oid save_uid;
save_uid = GetUserId();
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 2dd460a442b..c6db1c5b30f 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3,7 +3,7 @@
* back to source text
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ruleutils.c,v 1.73 2001/02/21 18:53:47 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ruleutils.c,v 1.74 2001/03/22 03:59:53 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -82,7 +82,7 @@ typedef struct
List *rtable; /* List of RangeTblEntry nodes */
List *namespace; /* List of joinlist items (RangeTblRef and
* JoinExpr nodes) */
-} deparse_namespace;
+} deparse_namespace;
/* ----------
@@ -118,15 +118,15 @@ static void get_delete_query_def(Query *query, deparse_context *context);
static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context);
static void get_setop_query(Node *setOp, Query *query,
- deparse_context *context, bool toplevel);
+ deparse_context *context, bool toplevel);
static bool simple_distinct(List *distinctClause, List *targetList);
static void get_names_for_var(Var *var, deparse_context *context,
- char **refname, char **attname);
+ char **refname, char **attname);
static bool get_alias_for_case(CaseExpr *caseexpr, deparse_context *context,
- char **refname, char **attname);
+ char **refname, char **attname);
static bool find_alias_in_namespace(Node *nsnode, Node *expr,
- List *rangetable, int levelsup,
- char **refname, char **attname);
+ List *rangetable, int levelsup,
+ char **refname, char **attname);
static bool phony_equal(Node *expr1, Node *expr2, int levelsup);
static void get_rule_expr(Node *node, deparse_context *context);
static void get_func_expr(Expr *expr, deparse_context *context);
@@ -135,7 +135,7 @@ static void get_const_expr(Const *constval, deparse_context *context);
static void get_sublink_expr(Node *node, deparse_context *context);
static void get_from_clause(Query *query, deparse_context *context);
static void get_from_clause_item(Node *jtnode, Query *query,
- deparse_context *context);
+ deparse_context *context);
static bool tleIsArrayAssign(TargetEntry *tle);
static char *quote_identifier(char *ident);
static char *get_relation_name(Oid relid);
@@ -478,7 +478,7 @@ pg_get_indexdef(PG_FUNCTION_ARGS)
* ----------
*/
appendStringInfo(&keybuf, "%s",
- quote_identifier(get_relid_attribute_name(idxrec->indrelid,
+ quote_identifier(get_relid_attribute_name(idxrec->indrelid,
idxrec->indkey[keyno])));
/* ----------
@@ -767,8 +767,8 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc)
quote_identifier(get_relation_name(ev_class)));
if (ev_attr > 0)
appendStringInfo(buf, ".%s",
- quote_identifier(get_relid_attribute_name(ev_class,
- ev_attr)));
+ quote_identifier(get_relid_attribute_name(ev_class,
+ ev_attr)));
/* If the rule has an event qualification, add it */
if (ev_qual == NULL)
@@ -1043,9 +1043,7 @@ get_basic_select_query(Query *query, deparse_context *context)
if (query->distinctClause != NIL)
{
if (simple_distinct(query->distinctClause, query->targetList))
- {
appendStringInfo(buf, " DISTINCT");
- }
else
{
appendStringInfo(buf, " DISTINCT ON (");
@@ -1146,7 +1144,7 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
{
RangeTblRef *rtr = (RangeTblRef *) setOp;
RangeTblEntry *rte = rt_fetch(rtr->rtindex, query->rtable);
- Query *subquery = rte->subquery;
+ Query *subquery = rte->subquery;
Assert(subquery != NULL);
get_query_def(subquery, buf, context->namespaces);
@@ -1155,10 +1153,11 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
{
SetOperationStmt *op = (SetOperationStmt *) setOp;
- /* Must suppress parens at top level of a setop tree because
- * of grammar limitations...
+ /*
+ * Must suppress parens at top level of a setop tree because of
+ * grammar limitations...
*/
- if (! toplevel)
+ if (!toplevel)
appendStringInfo(buf, "(");
get_setop_query(op->larg, query, context, false);
switch (op->op)
@@ -1179,7 +1178,7 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
if (op->all)
appendStringInfo(buf, "ALL ");
get_setop_query(op->rarg, query, context, false);
- if (! toplevel)
+ if (!toplevel)
appendStringInfo(buf, ")");
}
else
@@ -1201,7 +1200,7 @@ simple_distinct(List *distinctClause, List *targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(targetList);
- if (! tle->resdom->resjunk)
+ if (!tle->resdom->resjunk)
{
if (distinctClause == NIL)
return false;
@@ -1288,9 +1287,7 @@ get_insert_query_def(Query *query, deparse_context *context)
appendStringInfoChar(buf, ')');
}
else
- {
get_query_def(select_rte->subquery, buf, NIL);
- }
}
@@ -1326,12 +1323,13 @@ get_update_query_def(Query *query, deparse_context *context)
appendStringInfo(buf, sep);
sep = ", ";
+
/*
- * If the update expression is an array assignment, we mustn't
- * put out "attname =" here; it will come out of the display
- * of the ArrayRef node instead.
+ * If the update expression is an array assignment, we mustn't put
+ * out "attname =" here; it will come out of the display of the
+ * ArrayRef node instead.
*/
- if (! tleIsArrayAssign(tle))
+ if (!tleIsArrayAssign(tle))
appendStringInfo(buf, "%s = ",
quote_identifier(tle->resdom->resname));
get_tle_expr(tle, context);
@@ -1389,6 +1387,7 @@ get_utility_query_def(Query *query, deparse_context *context)
if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
{
NotifyStmt *stmt = (NotifyStmt *) query->utilityStmt;
+
appendStringInfo(buf, "NOTIFY %s", quote_identifier(stmt->relname));
}
else
@@ -1428,8 +1427,8 @@ get_names_for_var(Var *var, deparse_context *context,
/*
* Otherwise, fall back on the rangetable entry. This should happen
- * only for uses of special RTEs like *NEW* and *OLD*, which won't
- * get placed in our namespace.
+ * only for uses of special RTEs like *NEW* and *OLD*, which won't get
+ * placed in our namespace.
*/
rte = rt_fetch(var->varno, dpns->rtable);
*refname = rte->eref->relname;
@@ -1448,9 +1447,9 @@ get_alias_for_case(CaseExpr *caseexpr, deparse_context *context,
int sup;
/*
- * This could be done more efficiently if we first groveled through the
- * CASE to find varlevelsup values, but it's probably not worth the
- * trouble. All this code will go away someday anyway ...
+ * This could be done more efficiently if we first groveled through
+ * the CASE to find varlevelsup values, but it's probably not worth
+ * the trouble. All this code will go away someday anyway ...
*/
sup = 0;
@@ -1525,6 +1524,7 @@ find_alias_in_namespace(Node *nsnode, Node *expr,
}
nlist = lnext(nlist);
}
+
/*
* Tables within an aliased join are invisible from outside
* the join, according to the scope rules of SQL92 (the join
@@ -1579,8 +1579,8 @@ phony_equal(Node *expr1, Node *expr2, int levelsup)
return false;
if (IsA(expr1, Var))
{
- Var *a = (Var *) expr1;
- Var *b = (Var *) expr2;
+ Var *a = (Var *) expr1;
+ Var *b = (Var *) expr2;
if (a->varno != b->varno)
return false;
@@ -1600,8 +1600,8 @@ phony_equal(Node *expr1, Node *expr2, int levelsup)
}
if (IsA(expr1, CaseExpr))
{
- CaseExpr *a = (CaseExpr *) expr1;
- CaseExpr *b = (CaseExpr *) expr2;
+ CaseExpr *a = (CaseExpr *) expr1;
+ CaseExpr *b = (CaseExpr *) expr2;
if (a->casetype != b->casetype)
return false;
@@ -1615,8 +1615,8 @@ phony_equal(Node *expr1, Node *expr2, int levelsup)
}
if (IsA(expr1, CaseWhen))
{
- CaseWhen *a = (CaseWhen *) expr1;
- CaseWhen *b = (CaseWhen *) expr2;
+ CaseWhen *a = (CaseWhen *) expr1;
+ CaseWhen *b = (CaseWhen *) expr2;
if (!phony_equal(a->expr, b->expr, levelsup))
return false;
@@ -1840,9 +1840,10 @@ get_rule_expr(Node *node, deparse_context *context)
/*
* If we are doing UPDATE array[n] = expr, we need to
- * suppress any prefix on the array name. Currently,
- * that is the only context in which we will see a non-null
- * refassgnexpr --- but someday a smarter test may be needed.
+ * suppress any prefix on the array name. Currently, that
+ * is the only context in which we will see a non-null
+ * refassgnexpr --- but someday a smarter test may be
+ * needed.
*/
if (aref->refassgnexpr)
context->varprefix = false;
@@ -1880,7 +1881,7 @@ get_rule_expr(Node *node, deparse_context *context)
/* we do NOT parenthesize the arg expression, for now */
get_rule_expr(fselect->arg, context);
typetup = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(exprType(fselect->arg)),
+ ObjectIdGetDatum(exprType(fselect->arg)),
0, 0, 0);
if (!HeapTupleIsValid(typetup))
elog(ERROR, "cache lookup of type %u failed",
@@ -2163,9 +2164,9 @@ get_const_expr(Const *constval, deparse_context *context)
}
extval = DatumGetCString(OidFunctionCall3(typeStruct->typoutput,
- constval->constvalue,
- ObjectIdGetDatum(typeStruct->typelem),
- Int32GetDatum(-1)));
+ constval->constvalue,
+ ObjectIdGetDatum(typeStruct->typelem),
+ Int32GetDatum(-1)));
switch (constval->consttype)
{
@@ -2317,16 +2318,16 @@ get_from_clause(Query *query, deparse_context *context)
/*
* We use the query's jointree as a guide to what to print. However,
- * we must ignore auto-added RTEs that are marked not inFromCl.
- * (These can only appear at the top level of the jointree, so it's
- * sufficient to check here.)
- * Also ignore the rule pseudo-RTEs for NEW and OLD.
+ * we must ignore auto-added RTEs that are marked not inFromCl. (These
+ * can only appear at the top level of the jointree, so it's
+ * sufficient to check here.) Also ignore the rule pseudo-RTEs for NEW
+ * and OLD.
*/
sep = " FROM ";
foreach(l, query->jointree->fromlist)
{
- Node *jtnode = (Node *) lfirst(l);
+ Node *jtnode = (Node *) lfirst(l);
if (IsA(jtnode, RangeTblRef))
{
@@ -2396,7 +2397,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
if (col != rte->alias->attrs)
appendStringInfo(buf, ", ");
appendStringInfo(buf, "%s",
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
@@ -2435,7 +2436,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
(int) j->jointype);
}
get_from_clause_item(j->rarg, query, context);
- if (! j->isNatural)
+ if (!j->isNatural)
{
if (j->using)
{
@@ -2447,7 +2448,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
if (col != j->using)
appendStringInfo(buf, ", ");
appendStringInfo(buf, "%s",
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
@@ -2475,7 +2476,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
if (col != j->alias->attrs)
appendStringInfo(buf, ", ");
appendStringInfo(buf, "%s",
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
@@ -2503,6 +2504,7 @@ tleIsArrayAssign(TargetEntry *tle)
aref = (ArrayRef *) tle->expr;
if (aref->refassgnexpr == NULL)
return false;
+
/*
* Currently, it should only be possible to see non-null refassgnexpr
* if we are indeed looking at an "UPDATE array[n] = expr" situation.
@@ -2563,8 +2565,8 @@ quote_identifier(char *ident)
* but the parser doesn't provide any easy way to test for whether
* an identifier is safe or not... so be safe not sorry.
*
- * Note: ScanKeywordLookup() does case-insensitive comparison,
- * but that's fine, since we already know we have all-lower-case.
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but
+ * that's fine, since we already know we have all-lower-case.
*/
if (ScanKeywordLookup(ident) != NULL)
safe = false;
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 72be0f04722..b7af8b9ca5e 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.85 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.86 2001/03/22 03:59:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,18 +61,18 @@
#define NOT_MOST_COMMON_RATIO 0.1
static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
- Datum lobound, Datum hibound, Oid boundstypid,
- double *scaledlobound, double *scaledhibound);
+ Datum lobound, Datum hibound, Oid boundstypid,
+ double *scaledlobound, double *scaledhibound);
static double convert_numeric_to_scalar(Datum value, Oid typid);
static void convert_string_to_scalar(unsigned char *value,
- double *scaledvalue,
- unsigned char *lobound,
- double *scaledlobound,
- unsigned char *hibound,
- double *scaledhibound);
+ double *scaledvalue,
+ unsigned char *lobound,
+ double *scaledlobound,
+ unsigned char *hibound,
+ double *scaledhibound);
static double convert_one_string_to_scalar(unsigned char *value,
- int rangelo, int rangehi);
-static unsigned char * convert_string_datum(Datum value, Oid typid);
+ int rangelo, int rangehi);
+static unsigned char *convert_string_datum(Datum value, Oid typid);
static double convert_timevalue_to_scalar(Datum value, Oid typid);
static void getattproperties(Oid relid, AttrNumber attnum,
Oid *typid,
@@ -87,9 +87,9 @@ static bool getattstatistics(Oid relid, AttrNumber attnum,
Datum *loval,
Datum *hival);
static Selectivity prefix_selectivity(char *prefix,
- Oid relid,
- AttrNumber attno,
- Oid datatype);
+ Oid relid,
+ AttrNumber attno,
+ Oid datatype);
static Selectivity pattern_selectivity(char *patt, Pattern_Type ptype);
static bool string_lessthan(const char *str1, const char *str2,
Oid datatype);
@@ -102,7 +102,7 @@ static Datum string_to_datum(const char *str, Oid datatype);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ, so the type
* of the given constant "value" may be different from the type of the
* attribute.
@@ -165,7 +165,7 @@ eqsel(PG_FUNCTION_ARGS)
else
mostcommon = DatumGetBool(OidFunctionCall2(eqproc,
value,
- commonval));
+ commonval));
if (mostcommon)
{
@@ -264,15 +264,15 @@ neqsel(PG_FUNCTION_ARGS)
float8 result;
/*
- * We want 1 - eqsel() where the equality operator is the one associated
- * with this != operator, that is, its negator.
+ * We want 1 - eqsel() where the equality operator is the one
+ * associated with this != operator, that is, its negator.
*/
eqopid = get_negator(opid);
if (eqopid)
{
result = DatumGetFloat8(DirectFunctionCall5(eqsel,
- ObjectIdGetDatum(eqopid),
- ObjectIdGetDatum(relid),
+ ObjectIdGetDatum(eqopid),
+ ObjectIdGetDatum(relid),
Int16GetDatum(attno),
value,
Int32GetDatum(flag)));
@@ -432,16 +432,16 @@ scalargtsel(PG_FUNCTION_ARGS)
/*
* Compute selectivity of "<", then invert --- but only if we were
- * able to produce a non-default estimate. Note that we get the
- * negator which strictly speaking means we are looking at "<="
- * for ">" or "<" for ">=". We assume this won't matter.
+ * able to produce a non-default estimate. Note that we get the
+ * negator which strictly speaking means we are looking at "<=" for
+ * ">" or "<" for ">=". We assume this won't matter.
*/
ltopid = get_negator(opid);
if (ltopid)
{
result = DatumGetFloat8(DirectFunctionCall5(scalarltsel,
- ObjectIdGetDatum(ltopid),
- ObjectIdGetDatum(relid),
+ ObjectIdGetDatum(ltopid),
+ ObjectIdGetDatum(relid),
Int16GetDatum(attno),
value,
Int32GetDatum(flag)));
@@ -506,23 +506,28 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
if (pstatus == Pattern_Prefix_Exact)
{
- /* Pattern specifies an exact match, so pretend operator is '=' */
- Oid eqopr = find_operator("=", ltype);
- Datum eqcon;
+
+ /*
+ * Pattern specifies an exact match, so pretend operator is
+ * '='
+ */
+ Oid eqopr = find_operator("=", ltype);
+ Datum eqcon;
if (eqopr == InvalidOid)
elog(ERROR, "patternsel: no = operator for type %u", ltype);
eqcon = string_to_datum(prefix, ltype);
result = DatumGetFloat8(DirectFunctionCall5(eqsel,
- ObjectIdGetDatum(eqopr),
- ObjectIdGetDatum(relid),
- Int16GetDatum(attno),
- eqcon,
- Int32GetDatum(SEL_CONSTANT|SEL_RIGHT)));
+ ObjectIdGetDatum(eqopr),
+ ObjectIdGetDatum(relid),
+ Int16GetDatum(attno),
+ eqcon,
+ Int32GetDatum(SEL_CONSTANT | SEL_RIGHT)));
pfree(DatumGetPointer(eqcon));
}
else
{
+
/*
* Not exact-match pattern. We estimate selectivity of the
* fixed prefix and remainder of pattern separately, then
@@ -648,6 +653,7 @@ eqjoinsel(PG_FUNCTION_ARGS)
{
#ifdef NOT_USED /* see neqjoinsel() before removing me! */
Oid opid = PG_GETARG_OID(0);
+
#endif
Oid relid1 = PG_GETARG_OID(1);
AttrNumber attno1 = PG_GETARG_INT16(2);
@@ -701,8 +707,8 @@ neqjoinsel(PG_FUNCTION_ARGS)
/*
* XXX we skip looking up the negator operator here because we know
- * eqjoinsel() won't look at it anyway. If eqjoinsel() ever does look,
- * this routine will need to look more like neqsel() does.
+ * eqjoinsel() won't look at it anyway. If eqjoinsel() ever does
+ * look, this routine will need to look more like neqsel() does.
*/
result = DatumGetFloat8(eqjoinsel(fcinfo));
result = 1.0 - result;
@@ -845,48 +851,48 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
switch (valuetypid)
{
- /*
- * Built-in numeric types
- */
- case BOOLOID:
- case INT2OID:
- case INT4OID:
- case INT8OID:
- case FLOAT4OID:
- case FLOAT8OID:
- case NUMERICOID:
- case OIDOID:
- case REGPROCOID:
+ /*
+ * Built-in numeric types
+ */
+ case BOOLOID:
+ case INT2OID:
+ case INT4OID:
+ case INT8OID:
+ case FLOAT4OID:
+ case FLOAT8OID:
+ case NUMERICOID:
+ case OIDOID:
+ case REGPROCOID:
*scaledvalue = convert_numeric_to_scalar(value, valuetypid);
*scaledlobound = convert_numeric_to_scalar(lobound, boundstypid);
*scaledhibound = convert_numeric_to_scalar(hibound, boundstypid);
return true;
- /*
- * Built-in string types
- */
+ /*
+ * Built-in string types
+ */
case CHAROID:
case BPCHAROID:
case VARCHAROID:
case TEXTOID:
case NAMEOID:
- {
- unsigned char *valstr = convert_string_datum(value, valuetypid);
- unsigned char *lostr = convert_string_datum(lobound, boundstypid);
- unsigned char *histr = convert_string_datum(hibound, boundstypid);
-
- convert_string_to_scalar(valstr, scaledvalue,
- lostr, scaledlobound,
- histr, scaledhibound);
- pfree(valstr);
- pfree(lostr);
- pfree(histr);
- return true;
- }
+ {
+ unsigned char *valstr = convert_string_datum(value, valuetypid);
+ unsigned char *lostr = convert_string_datum(lobound, boundstypid);
+ unsigned char *histr = convert_string_datum(hibound, boundstypid);
+
+ convert_string_to_scalar(valstr, scaledvalue,
+ lostr, scaledlobound,
+ histr, scaledhibound);
+ pfree(valstr);
+ pfree(lostr);
+ pfree(histr);
+ return true;
+ }
- /*
- * Built-in time types
- */
+ /*
+ * Built-in time types
+ */
case TIMESTAMPOID:
case ABSTIMEOID:
case DATEOID:
@@ -911,7 +917,7 @@ convert_numeric_to_scalar(Datum value, Oid typid)
{
switch (typid)
{
- case BOOLOID:
+ case BOOLOID:
return (double) DatumGetBool(value);
case INT2OID:
return (double) DatumGetInt16(value);
@@ -931,7 +937,9 @@ convert_numeric_to_scalar(Datum value, Oid typid)
/* we can treat OIDs as integers... */
return (double) DatumGetObjectId(value);
}
- /* Can't get here unless someone tries to use scalarltsel/scalargtsel
+
+ /*
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel
* on an operator with one numeric and one non-numeric operand.
*/
elog(ERROR, "convert_numeric_to_scalar: unsupported type %u", typid);
@@ -1007,7 +1015,9 @@ convert_string_to_scalar(unsigned char *value,
if (rangehi < '9')
rangehi = '9';
}
- /* If range includes less than 10 chars, assume we have not got enough
+
+ /*
+ * If range includes less than 10 chars, assume we have not got enough
* data, and make it include regular ASCII set.
*/
if (rangehi - rangelo < 9)
@@ -1045,7 +1055,10 @@ convert_one_string_to_scalar(unsigned char *value, int rangelo, int rangehi)
if (slen <= 0)
return 0.0; /* empty string has scalar value 0 */
- /* Since base is at least 10, need not consider more than about 20 chars */
+ /*
+ * Since base is at least 10, need not consider more than about 20
+ * chars
+ */
if (slen > 20)
slen = 20;
@@ -1055,12 +1068,12 @@ convert_one_string_to_scalar(unsigned char *value, int rangelo, int rangehi)
denom = base;
while (slen-- > 0)
{
- int ch = *value++;
+ int ch = *value++;
if (ch < rangelo)
- ch = rangelo-1;
+ ch = rangelo - 1;
else if (ch > rangehi)
- ch = rangehi+1;
+ ch = rangehi + 1;
num += ((double) (ch - rangelo)) / denom;
denom *= base;
}
@@ -1078,10 +1091,12 @@ static unsigned char *
convert_string_datum(Datum value, Oid typid)
{
char *val;
+
#ifdef USE_LOCALE
char *xfrmstr;
size_t xfrmsize;
size_t xfrmlen;
+
#endif
switch (typid)
@@ -1094,25 +1109,27 @@ convert_string_datum(Datum value, Oid typid)
case BPCHAROID:
case VARCHAROID:
case TEXTOID:
- {
- char *str = (char *) VARDATA(DatumGetPointer(value));
- int strlength = VARSIZE(DatumGetPointer(value)) - VARHDRSZ;
+ {
+ char *str = (char *) VARDATA(DatumGetPointer(value));
+ int strlength = VARSIZE(DatumGetPointer(value)) - VARHDRSZ;
- val = (char *) palloc(strlength+1);
- memcpy(val, str, strlength);
- val[strlength] = '\0';
- break;
- }
+ val = (char *) palloc(strlength + 1);
+ memcpy(val, str, strlength);
+ val[strlength] = '\0';
+ break;
+ }
case NAMEOID:
- {
- NameData *nm = (NameData *) DatumGetPointer(value);
+ {
+ NameData *nm = (NameData *) DatumGetPointer(value);
- val = pstrdup(NameStr(*nm));
- break;
- }
+ val = pstrdup(NameStr(*nm));
+ break;
+ }
default:
- /* Can't get here unless someone tries to use scalarltsel
- * on an operator with one string and one non-string operand.
+
+ /*
+ * Can't get here unless someone tries to use scalarltsel on
+ * an operator with one string and one non-string operand.
*/
elog(ERROR, "convert_string_datum: unsupported type %u", typid);
return NULL;
@@ -1120,7 +1137,7 @@ convert_string_datum(Datum value, Oid typid)
#ifdef USE_LOCALE
/* Guess that transformed string is not much bigger than original */
- xfrmsize = strlen(val) + 32; /* arbitrary pad value here... */
+ xfrmsize = strlen(val) + 32;/* arbitrary pad value here... */
xfrmstr = (char *) palloc(xfrmsize);
xfrmlen = strxfrm(xfrmstr, val, xfrmsize);
if (xfrmlen >= xfrmsize)
@@ -1145,7 +1162,7 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
{
switch (typid)
{
- case TIMESTAMPOID:
+ case TIMESTAMPOID:
return DatumGetTimestamp(value);
case ABSTIMEOID:
return DatumGetTimestamp(DirectFunctionCall1(abstime_timestamp,
@@ -1154,31 +1171,33 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
return DatumGetTimestamp(DirectFunctionCall1(date_timestamp,
value));
case INTERVALOID:
- {
- Interval *interval = DatumGetIntervalP(value);
+ {
+ Interval *interval = DatumGetIntervalP(value);
- /*
- * Convert the month part of Interval to days using
- * assumed average month length of 365.25/12.0 days. Not
- * too accurate, but plenty good enough for our purposes.
- */
- return interval->time +
- interval->month * (365.25 / 12.0 * 24.0 * 60.0 * 60.0);
- }
+ /*
+ * Convert the month part of Interval to days using
+ * assumed average month length of 365.25/12.0 days. Not
+ * too accurate, but plenty good enough for our purposes.
+ */
+ return interval->time +
+ interval->month * (365.25 / 12.0 * 24.0 * 60.0 * 60.0);
+ }
case RELTIMEOID:
return DatumGetRelativeTime(value);
case TINTERVALOID:
- {
- TimeInterval interval = DatumGetTimeInterval(value);
+ {
+ TimeInterval interval = DatumGetTimeInterval(value);
- if (interval->status != 0)
- return interval->data[1] - interval->data[0];
- return 0; /* for lack of a better idea */
- }
+ if (interval->status != 0)
+ return interval->data[1] - interval->data[0];
+ return 0; /* for lack of a better idea */
+ }
case TIMEOID:
return DatumGetTimeADT(value);
}
- /* Can't get here unless someone tries to use scalarltsel/scalargtsel
+
+ /*
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel
* on an operator with one timevalue and one non-timevalue operand.
*/
elog(ERROR, "convert_timevalue_to_scalar: unsupported type %u", typid);
@@ -1305,7 +1324,7 @@ getattstatistics(Oid relid,
else
{
char *strval = DatumGetCString(DirectFunctionCall1(textout,
- val));
+ val));
*commonval = FunctionCall3(&inputproc,
CStringGetDatum(strval),
@@ -1329,7 +1348,7 @@ getattstatistics(Oid relid,
else
{
char *strval = DatumGetCString(DirectFunctionCall1(textout,
- val));
+ val));
*loval = FunctionCall3(&inputproc,
CStringGetDatum(strval),
@@ -1353,7 +1372,7 @@ getattstatistics(Oid relid,
else
{
char *strval = DatumGetCString(DirectFunctionCall1(textout,
- val));
+ val));
*hival = FunctionCall3(&inputproc,
CStringGetDatum(strval),
@@ -1417,12 +1436,14 @@ like_fixed_prefix(char *patt, bool case_insensitive,
if (patt[pos] == '\0')
break;
}
+
/*
* XXX I suspect isalpha() is not an adequately locale-sensitive
* test for characters that can vary under case folding?
*/
if (case_insensitive && isalpha((unsigned char) patt[pos]))
break;
+
/*
* NOTE: this code used to think that %% meant a literal %, but
* textlike() itself does not think that, and the SQL92 spec
@@ -1436,7 +1457,8 @@ like_fixed_prefix(char *patt, bool case_insensitive,
/* in LIKE, an empty pattern is an exact match! */
if (patt[pos] == '\0')
- return Pattern_Prefix_Exact; /* reached end of pattern, so exact */
+ return Pattern_Prefix_Exact; /* reached end of pattern, so
+ * exact */
if (match_pos > 0)
return Pattern_Prefix_Partial;
@@ -1463,7 +1485,8 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
return Pattern_Prefix_None;
}
- /* If unquoted | is present at paren level 0 in pattern, then there
+ /*
+ * If unquoted | is present at paren level 0 in pattern, then there
* are multiple alternatives for the start of the string.
*/
paren_depth = 0;
@@ -1495,10 +1518,12 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
/* note start at pos 1 to skip leading ^ */
for (pos = 1; patt[pos]; pos++)
{
+
/*
- * Check for characters that indicate multiple possible matches here.
- * XXX I suspect isalpha() is not an adequately locale-sensitive
- * test for characters that can vary under case folding?
+ * Check for characters that indicate multiple possible matches
+ * here. XXX I suspect isalpha() is not an adequately
+ * locale-sensitive test for characters that can vary under case
+ * folding?
*/
if (patt[pos] == '.' ||
patt[pos] == '(' ||
@@ -1506,9 +1531,11 @@ regex_fixed_prefix(char *patt, bool case_insensitive,
patt[pos] == '$' ||
(case_insensitive && isalpha((unsigned char) patt[pos])))
break;
+
/*
* Check for quantifiers. Except for +, this means the preceding
- * character is optional, so we must remove it from the prefix too!
+ * character is optional, so we must remove it from the prefix
+ * too!
*/
if (patt[pos] == '*' ||
patt[pos] == '?' ||
@@ -1573,7 +1600,7 @@ pattern_fixed_prefix(char *patt, Pattern_Type ptype,
break;
default:
elog(ERROR, "pattern_fixed_prefix: bogus ptype");
- result = Pattern_Prefix_None; /* keep compiler quiet */
+ result = Pattern_Prefix_None; /* keep compiler quiet */
break;
}
return result;
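
The hunks above touch like_fixed_prefix()/regex_fixed_prefix(), which extract the literal prefix of a pattern (if any) so the planner can turn it into an index range condition. A simplified LIKE-only sketch under stated assumptions: no multibyte handling, backslash escapes the next character, and case-insensitive matching simply stops at the first letter, per the XXX caveat above:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

typedef enum { PREFIX_NONE, PREFIX_PARTIAL, PREFIX_EXACT } PrefixType;

/* Copy the fixed prefix of a LIKE pattern into buf (size buflen).
 * Stops at the first % or _ wildcard; backslash escapes the next char. */
static PrefixType
like_prefix(const char *patt, int case_insensitive,
            char *buf, size_t buflen)
{
    size_t out = 0;
    size_t pos;

    for (pos = 0; patt[pos] != '\0'; pos++)
    {
        if (patt[pos] == '%' || patt[pos] == '_')
            break;
        if (patt[pos] == '\\')
        {
            pos++;
            if (patt[pos] == '\0')
                break;
        }
        /* Letters vary under case folding, so they end the usable
         * prefix in case-insensitive mode (same caveat as above). */
        if (case_insensitive && isalpha((unsigned char) patt[pos]))
            break;
        if (out + 1 < buflen)
            buf[out++] = patt[pos];
    }
    buf[out] = '\0';

    if (patt[pos] == '\0')
        return PREFIX_EXACT;    /* whole pattern was literal */
    return out > 0 ? PREFIX_PARTIAL : PREFIX_NONE;
}

int
main(void)
{
    char       prefix[64];
    PrefixType t = like_prefix("foo%bar", 0, prefix, sizeof(prefix));

    printf("prefix=\"%s\" type=%d\n", prefix, (int) t);  /* "foo", PARTIAL */
    return 0;
}
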
@@ -1596,7 +1623,7 @@ prefix_selectivity(char *prefix,
AttrNumber attno,
Oid datatype)
{
- Selectivity prefixsel;
+ Selectivity prefixsel;
Oid cmpopr;
Datum prefixcon;
char *greaterstr;
@@ -1608,21 +1635,21 @@ prefix_selectivity(char *prefix,
prefixcon = string_to_datum(prefix, datatype);
/* Assume scalargtsel is appropriate for all supported types */
prefixsel = DatumGetFloat8(DirectFunctionCall5(scalargtsel,
- ObjectIdGetDatum(cmpopr),
- ObjectIdGetDatum(relid),
- Int16GetDatum(attno),
- prefixcon,
- Int32GetDatum(SEL_CONSTANT|SEL_RIGHT)));
+ ObjectIdGetDatum(cmpopr),
+ ObjectIdGetDatum(relid),
+ Int16GetDatum(attno),
+ prefixcon,
+ Int32GetDatum(SEL_CONSTANT | SEL_RIGHT)));
pfree(DatumGetPointer(prefixcon));
/*
- * If we can create a string larger than the prefix,
- * say "x < greaterstr".
+ * If we can create a string larger than the prefix, say "x <
+ * greaterstr".
*/
greaterstr = make_greater_string(prefix, datatype);
if (greaterstr)
{
- Selectivity topsel;
+ Selectivity topsel;
cmpopr = find_operator("<", datatype);
if (cmpopr == InvalidOid)
@@ -1631,28 +1658,28 @@ prefix_selectivity(char *prefix,
prefixcon = string_to_datum(greaterstr, datatype);
/* Assume scalarltsel is appropriate for all supported types */
topsel = DatumGetFloat8(DirectFunctionCall5(scalarltsel,
- ObjectIdGetDatum(cmpopr),
- ObjectIdGetDatum(relid),
- Int16GetDatum(attno),
- prefixcon,
- Int32GetDatum(SEL_CONSTANT|SEL_RIGHT)));
+ ObjectIdGetDatum(cmpopr),
+ ObjectIdGetDatum(relid),
+ Int16GetDatum(attno),
+ prefixcon,
+ Int32GetDatum(SEL_CONSTANT | SEL_RIGHT)));
pfree(DatumGetPointer(prefixcon));
pfree(greaterstr);
/*
- * Merge the two selectivities in the same way as for
- * a range query (see clauselist_selectivity()).
+ * Merge the two selectivities in the same way as for a range
+ * query (see clauselist_selectivity()).
*/
prefixsel = topsel + prefixsel - 1.0;
/*
- * A zero or slightly negative prefixsel should be converted into a
- * small positive value; we probably are dealing with a very
+ * A zero or slightly negative prefixsel should be converted into
+ * a small positive value; we probably are dealing with a very
* tight range and got a bogus result due to roundoff errors.
* However, if prefixsel is very negative, then we probably have
* default selectivity estimates on one or both sides of the
- * range. In that case, insert a not-so-wildly-optimistic
- * default estimate.
+ * range. In that case, insert a not-so-wildly-optimistic default
+ * estimate.
*/
if (prefixsel <= 0.0)
{
@@ -1660,8 +1687,8 @@ prefix_selectivity(char *prefix,
{
/*
- * No data available --- use a default estimate that
- * is small, but not real small.
+ * No data available --- use a default estimate that is
+ * small, but not real small.
*/
prefixsel = 0.01;
}
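
prefix_selectivity() above merges "x >= prefix" and "x < greaterstr" the same way a range query is merged: topsel + prefixsel - 1.0, then non-positive results are replaced by a small default. A tiny numeric sketch of that merge (the clamp here is simplified to a single default; DEFAULT_RANGE_SEL and the sample numbers are illustrative):

#include <stdio.h>

#define DEFAULT_RANGE_SEL 0.01  /* stand-in for the backend's default */

/* Merge the selectivities of "x >= lo" (sel_ge) and "x < hi" (sel_lt)
 * as a range estimate, clamping bogus non-positive results. */
static double
range_selectivity(double sel_ge, double sel_lt)
{
    double sel = sel_lt + sel_ge - 1.0;

    if (sel <= 0.0)
    {
        /* Roundoff or default estimates on both sides: fall back to a
         * small but not tiny default, as in the code above. */
        sel = DEFAULT_RANGE_SEL;
    }
    return sel;
}

int
main(void)
{
    /* e.g. 60% of rows are >= 'foo' and 45% are < 'fop' => ~5% match */
    printf("%.2f\n", range_selectivity(0.60, 0.45));
    printf("%.2f\n", range_selectivity(0.50, 0.50));   /* clamps to 0.01 */
    return 0;
}
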
@@ -1691,15 +1718,16 @@ prefix_selectivity(char *prefix,
#define FIXED_CHAR_SEL 0.04 /* about 1/25 */
#define CHAR_RANGE_SEL 0.25
-#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match end-of-string */
+#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match
+ * end-of-string */
#define FULL_WILDCARD_SEL 5.0
#define PARTIAL_WILDCARD_SEL 2.0
static Selectivity
like_selectivity(char *patt, bool case_insensitive)
{
- Selectivity sel = 1.0;
- int pos;
+ Selectivity sel = 1.0;
+ int pos;
/* Skip any leading %; it's already factored into initial sel */
pos = (*patt == '%') ? 1 : 0;
@@ -1730,17 +1758,17 @@ like_selectivity(char *patt, bool case_insensitive)
static Selectivity
regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
{
- Selectivity sel = 1.0;
- int paren_depth = 0;
- int paren_pos = 0; /* dummy init to keep compiler quiet */
- int pos;
+ Selectivity sel = 1.0;
+ int paren_depth = 0;
+ int paren_pos = 0; /* dummy init to keep compiler quiet */
+ int pos;
for (pos = 0; pos < pattlen; pos++)
{
if (patt[pos] == '(')
{
if (paren_depth == 0)
- paren_pos = pos; /* remember start of parenthesized item */
+ paren_pos = pos;/* remember start of parenthesized item */
paren_depth++;
}
else if (patt[pos] == ')' && paren_depth > 0)
@@ -1753,9 +1781,10 @@ regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
}
else if (patt[pos] == '|' && paren_depth == 0)
{
+
/*
- * If unquoted | is present at paren level 0 in pattern,
- * we have multiple alternatives; sum their probabilities.
+ * If unquoted | is present at paren level 0 in pattern, we
+ * have multiple alternatives; sum their probabilities.
*/
sel += regex_selectivity_sub(patt + (pos + 1),
pattlen - (pos + 1),
@@ -1764,19 +1793,20 @@ regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
}
else if (patt[pos] == '[')
{
- bool negclass = false;
+ bool negclass = false;
if (patt[++pos] == '^')
{
negclass = true;
pos++;
}
- if (patt[pos] == ']') /* ']' at start of class is not special */
+ if (patt[pos] == ']') /* ']' at start of class is not
+ * special */
pos++;
while (pos < pattlen && patt[pos] != ']')
pos++;
if (paren_depth == 0)
- sel *= (negclass ? (1.0-CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
+ sel *= (negclass ? (1.0 - CHAR_RANGE_SEL) : CHAR_RANGE_SEL);
}
else if (patt[pos] == '.')
{
@@ -1822,15 +1852,15 @@ regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
static Selectivity
regex_selectivity(char *patt, bool case_insensitive)
{
- Selectivity sel;
- int pattlen = strlen(patt);
+ Selectivity sel;
+ int pattlen = strlen(patt);
/* If patt doesn't end with $, consider it to have a trailing wildcard */
- if (pattlen > 0 && patt[pattlen-1] == '$' &&
- (pattlen == 1 || patt[pattlen-2] != '\\'))
+ if (pattlen > 0 && patt[pattlen - 1] == '$' &&
+ (pattlen == 1 || patt[pattlen - 2] != '\\'))
{
/* has trailing $ */
- sel = regex_selectivity_sub(patt, pattlen-1, case_insensitive);
+ sel = regex_selectivity_sub(patt, pattlen - 1, case_insensitive);
}
else
{
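
like_selectivity() and regex_selectivity() above estimate pattern selectivity by multiplying a per-character factor (FIXED_CHAR_SEL for literals, ANY_CHAR_SEL for single-character wildcards, CHAR_RANGE_SEL for classes) and scaling by a wildcard factor. A compact LIKE-only sketch reusing those constants (leading-% handling, escape characters and the regex cases are omitted here):

#include <stdio.h>

#define FIXED_CHAR_SEL      0.04    /* about 1/25 */
#define ANY_CHAR_SEL        0.9     /* '_' won't match end-of-string */
#define FULL_WILDCARD_SEL   5.0     /* each '%' relaxes the estimate */

/* Rough selectivity of a LIKE pattern: multiply one factor per pattern
 * character, then clamp to [0, 1]. */
static double
like_selectivity(const char *patt)
{
    double sel = 1.0;
    int    pos;

    for (pos = 0; patt[pos] != '\0'; pos++)
    {
        if (patt[pos] == '%')
            sel *= FULL_WILDCARD_SEL;
        else if (patt[pos] == '_')
            sel *= ANY_CHAR_SEL;
        else
            sel *= FIXED_CHAR_SEL;
    }
    if (sel > 1.0)
        sel = 1.0;
    return sel;
}

int
main(void)
{
    printf("%g\n", like_selectivity("foo%"));   /* 0.04^3 * 5 = 0.00032 */
    printf("%g\n", like_selectivity("%"));      /* clamped to 1 */
    return 0;
}
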
@@ -1893,6 +1923,7 @@ locale_is_like_safe(void)
localeptr = setlocale(LC_COLLATE, NULL);
if (!localeptr)
elog(STOP, "Invalid LC_COLLATE setting");
+
/*
* Currently we accept only "C" and "POSIX" (do any systems still
* return "POSIX"?). Which other locales allow safe optimization?
@@ -1904,9 +1935,9 @@ locale_is_like_safe(void)
else
result = false;
return (bool) result;
-#else /* not USE_LOCALE */
- return true; /* We must be in C locale, which is OK */
-#endif /* USE_LOCALE */
+#else /* not USE_LOCALE */
+ return true; /* We must be in C locale, which is OK */
+#endif /* USE_LOCALE */
}
/*
@@ -2039,6 +2070,7 @@ find_operator(const char *opname, Oid datatype)
static Datum
string_to_datum(const char *str, Oid datatype)
{
+
/*
* We cheat a little by assuming that textin() will do for bpchar and
* varchar constants too...
diff --git a/src/backend/utils/adt/sets.c b/src/backend/utils/adt/sets.c
index 11c5579fc20..c48526a7ba1 100644
--- a/src/backend/utils/adt/sets.c
+++ b/src/backend/utils/adt/sets.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/sets.c,v 1.36 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/sets.c,v 1.37 2001/03/22 03:59:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,8 +62,8 @@ SetDefine(char *querystr, char *typename)
false, /* canCache (assume unsafe) */
false, /* isStrict (irrelevant, no args) */
100, /* byte_pct */
- 0, /* perbyte_cpu */
- 0, /* percall_cpu */
+ 0, /* perbyte_cpu */
+ 0, /* percall_cpu */
100, /* outin_ratio */
NIL, /* argList */
whereToSendOutput);
@@ -165,8 +165,8 @@ seteval(PG_FUNCTION_ARGS)
}
/*
- * Evaluate the function. NOTE: we need no econtext because there
- * are no arguments to evaluate.
+ * Evaluate the function. NOTE: we need no econtext because there are
+ * no arguments to evaluate.
*/
/* ExecMakeFunctionResult assumes these are initialized at call: */
@@ -175,14 +175,14 @@ seteval(PG_FUNCTION_ARGS)
result = ExecMakeFunctionResult(fcache,
NIL,
- NULL, /* no econtext, see above */
+ NULL, /* no econtext, see above */
&isNull,
&isDone);
/*
- * If we're done with the results of this set function, get rid of
- * its func cache so that we will start from the top next time.
- * (Can you say "memory leak"? This feature is a crock anyway...)
+ * If we're done with the results of this set function, get rid of its
+ * func cache so that we will start from the top next time. (Can you
+ * say "memory leak"? This feature is a crock anyway...)
*/
if (isDone != ExprMultipleResult)
{
@@ -197,7 +197,7 @@ seteval(PG_FUNCTION_ARGS)
if (isDone != ExprSingleResult)
{
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = isDone;
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 18bad89c48b..7e3b4bfc257 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/tid.c,v 1.23 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/tid.c,v 1.24 2001/03/22 03:59:54 momjian Exp $
*
* NOTES
* input routine largely stolen from boxin().
@@ -21,8 +21,8 @@
#include "access/heapam.h"
#include "utils/builtins.h"
-#define DatumGetItemPointer(X) ((ItemPointer) DatumGetPointer(X))
-#define ItemPointerGetDatum(X) PointerGetDatum(X)
+#define DatumGetItemPointer(X) ((ItemPointer) DatumGetPointer(X))
+#define ItemPointerGetDatum(X) PointerGetDatum(X)
#define PG_GETARG_ITEMPOINTER(n) DatumGetItemPointer(PG_GETARG_DATUM(n))
#define PG_RETURN_ITEMPOINTER(x) return ItemPointerGetDatum(x)
@@ -70,7 +70,7 @@ tidin(PG_FUNCTION_ARGS)
Datum
tidout(PG_FUNCTION_ARGS)
{
- ItemPointer itemPtr = PG_GETARG_ITEMPOINTER(0);
+ ItemPointer itemPtr = PG_GETARG_ITEMPOINTER(0);
BlockId blockId;
BlockNumber blockNumber;
OffsetNumber offsetNumber;
@@ -97,8 +97,8 @@ tidout(PG_FUNCTION_ARGS)
Datum
tideq(PG_FUNCTION_ARGS)
{
- ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
- ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
+ ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
+ ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
PG_RETURN_BOOL(BlockIdGetBlockNumber(&(arg1->ip_blkid)) ==
BlockIdGetBlockNumber(&(arg2->ip_blkid)) &&
@@ -109,13 +109,14 @@ tideq(PG_FUNCTION_ARGS)
Datum
tidne(PG_FUNCTION_ARGS)
{
- ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
- ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
+ ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
+ ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
PG_RETURN_BOOL(BlockIdGetBlockNumber(&(arg1->ip_blkid)) !=
BlockIdGetBlockNumber(&(arg2->ip_blkid)) ||
arg1->ip_posid != arg2->ip_posid);
}
+
#endif
/*
@@ -126,11 +127,11 @@ tidne(PG_FUNCTION_ARGS)
Datum
currtid_byreloid(PG_FUNCTION_ARGS)
{
- Oid reloid = PG_GETARG_OID(0);
- ItemPointer tid = PG_GETARG_ITEMPOINTER(1);
- ItemPointer result,
- ret;
- Relation rel;
+ Oid reloid = PG_GETARG_OID(0);
+ ItemPointer tid = PG_GETARG_ITEMPOINTER(1);
+ ItemPointer result,
+ ret;
+ Relation rel;
result = (ItemPointer) palloc(sizeof(ItemPointerData));
ItemPointerSetInvalid(result);
@@ -150,12 +151,12 @@ currtid_byreloid(PG_FUNCTION_ARGS)
Datum
currtid_byrelname(PG_FUNCTION_ARGS)
{
- text *relname = PG_GETARG_TEXT_P(0);
- ItemPointer tid = PG_GETARG_ITEMPOINTER(1);
- ItemPointer result,
- ret;
- char *str;
- Relation rel;
+ text *relname = PG_GETARG_TEXT_P(0);
+ ItemPointer tid = PG_GETARG_ITEMPOINTER(1);
+ ItemPointer result,
+ ret;
+ char *str;
+ Relation rel;
str = DatumGetCString(DirectFunctionCall1(textout,
PointerGetDatum(relname)));
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index a01a790124f..7a2e6ea8bb0 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/timestamp.c,v 1.45 2001/02/13 14:32:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/timestamp.c,v 1.46 2001/03/22 03:59:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,7 +92,7 @@ timestamp_in(PG_FUNCTION_ARGS)
default:
elog(ERROR, "Internal coding error, can't input timestamp '%s'", str);
- TIMESTAMP_INVALID(result); /* keep compiler quiet */
+ TIMESTAMP_INVALID(result); /* keep compiler quiet */
}
PG_RETURN_TIMESTAMP(result);
@@ -315,14 +315,14 @@ timestamp2tm(Timestamp dt, int *tzp, struct tm * tm, double *fsec, char **tzn)
#if defined(HAVE_TM_ZONE) || defined(HAVE_INT_TIMEZONE)
tx = localtime(&utime);
-# ifdef NO_MKTIME_BEFORE_1970
+#ifdef NO_MKTIME_BEFORE_1970
if (tx->tm_year < 70 && tx->tm_isdst == 1)
{
utime -= 3600;
tx = localtime(&utime);
tx->tm_isdst = 0;
}
-# endif
+#endif
tm->tm_year = tx->tm_year + 1900;
tm->tm_mon = tx->tm_mon + 1;
tm->tm_mday = tx->tm_mday;
@@ -341,20 +341,20 @@ timestamp2tm(Timestamp dt, int *tzp, struct tm * tm, double *fsec, char **tzn)
#endif
tm->tm_isdst = tx->tm_isdst;
-# if defined(HAVE_TM_ZONE)
+#if defined(HAVE_TM_ZONE)
tm->tm_gmtoff = tx->tm_gmtoff;
tm->tm_zone = tx->tm_zone;
*tzp = -(tm->tm_gmtoff); /* tm_gmtoff is Sun/DEC-ism */
if (tzn != NULL)
*tzn = (char *) tm->tm_zone;
-# elif defined(HAVE_INT_TIMEZONE)
+#elif defined(HAVE_INT_TIMEZONE)
*tzp = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
if (tzn != NULL)
*tzn = tzname[(tm->tm_isdst > 0)];
-# endif
+#endif
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
*tzp = CTimeZone; /* V7 conventions; don't know timezone? */
if (tzn != NULL)
*tzn = CTZName;
@@ -482,7 +482,7 @@ timestamp_finite(PG_FUNCTION_ARGS)
{
Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
- PG_RETURN_BOOL(! TIMESTAMP_NOT_FINITE(timestamp));
+ PG_RETURN_BOOL(!TIMESTAMP_NOT_FINITE(timestamp));
}
Datum
@@ -490,7 +490,7 @@ interval_finite(PG_FUNCTION_ARGS)
{
Interval *interval = PG_GETARG_INTERVAL_P(0);
- PG_RETURN_BOOL(! INTERVAL_NOT_FINITE(*interval));
+ PG_RETURN_BOOL(!INTERVAL_NOT_FINITE(*interval));
}
@@ -656,13 +656,9 @@ timestamp_cmp(PG_FUNCTION_ARGS)
Timestamp dt2 = PG_GETARG_TIMESTAMP(1);
if (TIMESTAMP_IS_INVALID(dt1))
- {
PG_RETURN_INT32(TIMESTAMP_IS_INVALID(dt2) ? 0 : 1);
- }
else if (TIMESTAMP_IS_INVALID(dt2))
- {
PG_RETURN_INT32(-1);
- }
else
{
if (TIMESTAMP_IS_RELATIVE(dt1))
@@ -839,7 +835,9 @@ interval_hash(PG_FUNCTION_ARGS)
Datum
overlaps_timestamp(PG_FUNCTION_ARGS)
{
- /* The arguments are Timestamps, but we leave them as generic Datums
+
+ /*
+ * The arguments are Timestamps, but we leave them as generic Datums
* to avoid unnecessary conversions between value and reference forms
* --- not to mention possible dereferences of null pointers.
*/
@@ -858,9 +856,9 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
DatumGetBool(DirectFunctionCall2(timestamp_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null (unknown).
- * If just one endpoint is null, take ts1 as the non-null one.
- * Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null
+ * (unknown). If just one endpoint is null, take ts1 as the non-null
+ * one. Otherwise, take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -874,7 +872,7 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
{
if (TIMESTAMP_GT(ts1, te1))
{
- Datum tt = ts1;
+ Datum tt = ts1;
ts1 = te1;
te1 = tt;
@@ -894,7 +892,7 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
{
if (TIMESTAMP_GT(ts2, te2))
{
- Datum tt = ts2;
+ Datum tt = ts2;
ts2 = te2;
te2 = tt;
@@ -907,7 +905,9 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
*/
if (TIMESTAMP_GT(ts1, ts2))
{
- /* This case is ts1 < te2 OR te1 < te2, which may look redundant
+
+ /*
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant
* but in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
@@ -916,7 +916,9 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
if (te1IsNull)
PG_RETURN_NULL();
- /* If te1 is not null then we had ts1 <= te1 above, and we just
+
+ /*
+ * If te1 is not null then we had ts1 <= te1 above, and we just
* found ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
@@ -930,15 +932,20 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
if (te2IsNull)
PG_RETURN_NULL();
- /* If te2 is not null then we had ts2 <= te2 above, and we just
+
+ /*
+ * If te2 is not null then we had ts2 <= te2 above, and we just
* found ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
else
{
- /* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else null".
+
+ /*
+ * For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
+ * rather silly way of saying "true if both are nonnull, else
+ * null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
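
overlaps_timestamp() above first normalizes each interval so its start is the lesser endpoint, then compares the greater start against the other interval's end, with extra branches to propagate nulls. A null-free sketch of just that comparison (plain doubles stand in for Timestamps; the null-propagation cases are omitted):

#include <stdbool.h>
#include <stdio.h>

/* Simplified SQL OVERLAPS for non-null endpoints: normalize each
 * interval so start <= end, then compare the later-starting interval's
 * start against the other interval's end.  Equal starts overlap. */
static bool
overlaps(double ts1, double te1, double ts2, double te2)
{
    double tmp;

    if (ts1 > te1) { tmp = ts1; ts1 = te1; te1 = tmp; }
    if (ts2 > te2) { tmp = ts2; ts2 = te2; te2 = tmp; }

    if (ts1 > ts2)
        return ts1 < te2;
    else if (ts2 > ts1)
        return ts2 < te1;
    else
        return true;    /* equal starts count as overlapping */
}

int
main(void)
{
    printf("%d\n", overlaps(1, 5, 4, 9));   /* 1: they overlap */
    printf("%d\n", overlaps(1, 2, 2, 3));   /* 0: they only touch at 2 */
    return 0;
}
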
@@ -1086,13 +1093,14 @@ timestamp_pl_span(PG_FUNCTION_ARGS)
tm->tm_year += 1900;
tm->tm_mon += 1;
-# if defined(HAVE_TM_ZONE)
- tz = -(tm->tm_gmtoff); /* tm_gmtoff is Sun/DEC-ism */
-# elif defined(HAVE_INT_TIMEZONE)
+#if defined(HAVE_TM_ZONE)
+ tz = -(tm->tm_gmtoff); /* tm_gmtoff is
+ * Sun/DEC-ism */
+#elif defined(HAVE_INT_TIMEZONE)
tz = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
-# endif
+#endif
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
tz = CTimeZone;
#endif
}
@@ -1129,8 +1137,8 @@ timestamp_mi_span(PG_FUNCTION_ARGS)
Interval *span = PG_GETARG_INTERVAL_P(1);
Interval tspan;
- tspan.month = - span->month;
- tspan.time = - span->time;
+ tspan.month = -span->month;
+ tspan.time = -span->time;
return DirectFunctionCall2(timestamp_pl_span,
TimestampGetDatum(timestamp),
@@ -1351,18 +1359,19 @@ interval_accum(PG_FUNCTION_ARGS)
&transdatums, &ndatums);
if (ndatums != 2)
elog(ERROR, "interval_accum: expected 2-element interval array");
+
/*
* XXX memcpy, instead of just extracting a pointer, to work around
* buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment.
- * That should be fixed, but in the meantime...
+ * objects on machines where double requires 8-byte alignment. That
+ * should be fixed, but in the meantime...
*/
memcpy(&sumX, DatumGetIntervalP(transdatums[0]), sizeof(Interval));
memcpy(&N, DatumGetIntervalP(transdatums[1]), sizeof(Interval));
newsum = DatumGetIntervalP(DirectFunctionCall2(interval_pl,
- IntervalPGetDatum(&sumX),
- IntervalPGetDatum(newval)));
+ IntervalPGetDatum(&sumX),
+ IntervalPGetDatum(newval)));
N.time += 1;
transdatums[0] = IntervalPGetDatum(newsum);
@@ -1389,11 +1398,12 @@ interval_avg(PG_FUNCTION_ARGS)
&transdatums, &ndatums);
if (ndatums != 2)
elog(ERROR, "interval_avg: expected 2-element interval array");
+
/*
* XXX memcpy, instead of just extracting a pointer, to work around
* buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment.
- * That should be fixed, but in the meantime...
+ * objects on machines where double requires 8-byte alignment. That
+ * should be fixed, but in the meantime...
*/
memcpy(&sumX, DatumGetIntervalP(transdatums[0]), sizeof(Interval));
memcpy(&N, DatumGetIntervalP(transdatums[1]), sizeof(Interval));
@@ -1439,9 +1449,7 @@ timestamp_age(PG_FUNCTION_ARGS)
if (TIMESTAMP_IS_INVALID(dt1)
|| TIMESTAMP_IS_INVALID(dt2))
- {
TIMESTAMP_INVALID(result->time);
- }
else if ((timestamp2tm(dt1, NULL, tm1, &fsec1, NULL) == 0)
&& (timestamp2tm(dt2, NULL, tm2, &fsec2, NULL) == 0))
{
@@ -1597,7 +1605,7 @@ interval_text(PG_FUNCTION_ARGS)
int len;
str = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(interval)));
+ IntervalPGetDatum(interval)));
len = (strlen(str) + VARHDRSZ);
@@ -1662,7 +1670,7 @@ timestamp_trunc(PG_FUNCTION_ARGS)
if (VARSIZE(units) - VARHDRSZ > MAXDATELEN)
elog(ERROR, "Interval units '%s' not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
up = VARDATA(units);
lp = lowunits;
for (i = 0; i < (VARSIZE(units) - VARHDRSZ); i++)
@@ -1672,9 +1680,7 @@ timestamp_trunc(PG_FUNCTION_ARGS)
type = DecodeUnits(0, lowunits, &val);
if (TIMESTAMP_NOT_FINITE(timestamp))
- {
PG_RETURN_NULL();
- }
else
{
dt = (TIMESTAMP_IS_RELATIVE(timestamp) ? SetTimestamp(timestamp) : timestamp);
@@ -1729,13 +1735,13 @@ timestamp_trunc(PG_FUNCTION_ARGS)
tm->tm_year += 1900;
tm->tm_mon += 1;
-# if defined(HAVE_TM_ZONE)
+#if defined(HAVE_TM_ZONE)
tz = -(tm->tm_gmtoff); /* tm_gmtoff is Sun/DEC-ism */
-# elif defined(HAVE_INT_TIMEZONE)
+#elif defined(HAVE_INT_TIMEZONE)
tz = ((tm->tm_isdst > 0) ? (TIMEZONE_GLOBAL - 3600) : TIMEZONE_GLOBAL);
-# endif
+#endif
-#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
+#else /* not (HAVE_TM_ZONE || HAVE_INT_TIMEZONE) */
tz = CTimeZone;
#endif
}
@@ -1789,7 +1795,7 @@ interval_trunc(PG_FUNCTION_ARGS)
if (VARSIZE(units) - VARHDRSZ > MAXDATELEN)
elog(ERROR, "Interval units '%s' not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
up = VARDATA(units);
lp = lowunits;
for (i = 0; i < (VARSIZE(units) - VARHDRSZ); i++)
@@ -1872,7 +1878,7 @@ interval_trunc(PG_FUNCTION_ARGS)
{
elog(ERROR, "Interval units '%s' not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
PG_RETURN_NULL();
}
@@ -1885,75 +1891,80 @@ interval_trunc(PG_FUNCTION_ARGS)
* karel 2000/08/07
*/
void
-isoweek2date( int woy, int *year, int *mon, int *mday)
+isoweek2date(int woy, int *year, int *mon, int *mday)
{
- int day0, day4, dayn;
-
+ int day0,
+ day4,
+ dayn;
+
if (!*year)
elog(ERROR, "isoweek2date(): can't convert without year information");
/* fourth day of current year */
day4 = date2j(*year, 1, 4);
-
+
/* day0 == offset to first day of week (Monday) */
day0 = (j2day(day4 - 1) % 7);
dayn = ((woy - 1) * 7) + (day4 - day0);
-
+
j2date(dayn, year, mon, mday);
}
/* date2isoweek()
- *
+ *
* Returns ISO week number of year.
*/
int
-date2isoweek(int year, int mon, int mday)
+date2isoweek(int year, int mon, int mday)
{
- float8 result;
- int day0, day4, dayn;
-
- /* current day */
+ float8 result;
+ int day0,
+ day4,
+ dayn;
+
+ /* current day */
dayn = date2j(year, mon, mday);
-
+
/* fourth day of current year */
day4 = date2j(year, 1, 4);
-
+
/* day0 == offset to first day of week (Monday) */
day0 = (j2day(day4 - 1) % 7);
-
- /* We need the first week containing a Thursday,
- * otherwise this day falls into the previous year
- * for purposes of counting weeks
+
+ /*
+ * We need the first week containing a Thursday, otherwise this day
+ * falls into the previous year for purposes of counting weeks
*/
if (dayn < (day4 - day0))
{
day4 = date2j((year - 1), 1, 4);
-
+
/* day0 == offset to first day of week (Monday) */
day0 = (j2day(day4 - 1) % 7);
}
-
+
result = (((dayn - (day4 - day0)) / 7) + 1);
-
- /* Sometimes the last few days in a year will fall into
- * the first week of the next year, so check for this.
+
+ /*
+ * Sometimes the last few days in a year will fall into the first week
+ * of the next year, so check for this.
*/
if (result >= 53)
{
day4 = date2j((year + 1), 1, 4);
-
+
/* day0 == offset to first day of week (Monday) */
day0 = (j2day(day4 - 1) % 7);
-
+
if (dayn >= (day4 - day0))
result = (((dayn - (day4 - day0)) / 7) + 1);
}
return (int) result;
-}
-
-
+}
+
+
/* timestamp_part()
* Extract specified field from timestamp.
*/
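
date2isoweek() above anchors ISO week 1 on January 4 and steps back to that week's Monday before counting weeks, with fix-ups for dates that fall into the previous or next ISO year. A standalone version of the same algorithm (date2j()/j2day() here are local stand-ins for the backend helpers, using the usual Fliegel-Van Flandern Julian-day formula):

#include <stdio.h>

/* Gregorian date -> Julian day number (Fliegel & Van Flandern). */
static int
date2j(int y, int m, int d)
{
    return (1461 * (y + 4800 + (m - 14) / 12)) / 4
        + (367 * (m - 2 - 12 * ((m - 14) / 12))) / 12
        - (3 * ((y + 4900 + (m - 14) / 12) / 100)) / 4
        + d - 32075;
}

/* Julian day -> day of week, 0 = Sunday .. 6 = Saturday. */
static int
j2day(int jd)
{
    return (jd + 1) % 7;
}

/* ISO-8601 week number: week 1 is the week containing January 4
 * (equivalently, the first Thursday); weeks start on Monday. */
static int
date2isoweek(int year, int mon, int mday)
{
    int dayn = date2j(year, mon, mday);
    int day4 = date2j(year, 1, 4);
    int day0 = j2day(day4 - 1);     /* offset back to that week's Monday */
    int result;

    if (dayn < day4 - day0)
    {
        /* Date falls before week 1 of this year: count from last year. */
        day4 = date2j(year - 1, 1, 4);
        day0 = j2day(day4 - 1);
    }
    result = (dayn - (day4 - day0)) / 7 + 1;

    if (result >= 53)
    {
        /* The last days of December may already be week 1 of next year. */
        day4 = date2j(year + 1, 1, 4);
        day0 = j2day(day4 - 1);
        if (dayn >= day4 - day0)
            result = (dayn - (day4 - day0)) / 7 + 1;
    }
    return result;
}

int
main(void)
{
    printf("%d\n", date2isoweek(2001, 1, 1));    /* 1  */
    printf("%d\n", date2isoweek(2000, 12, 31));  /* 52 */
    return 0;
}
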
@@ -1980,7 +1991,7 @@ timestamp_part(PG_FUNCTION_ARGS)
if (VARSIZE(units) - VARHDRSZ > MAXDATELEN)
elog(ERROR, "Interval units '%s' not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
up = VARDATA(units);
lp = lowunits;
for (i = 0; i < (VARSIZE(units) - VARHDRSZ); i++)
@@ -1992,9 +2003,7 @@ timestamp_part(PG_FUNCTION_ARGS)
type = DecodeSpecial(0, lowunits, &val);
if (TIMESTAMP_NOT_FINITE(timestamp))
- {
PG_RETURN_NULL();
- }
else
{
dt = (TIMESTAMP_IS_RELATIVE(timestamp) ? SetTimestamp(timestamp) : timestamp);
@@ -2096,7 +2105,7 @@ timestamp_part(PG_FUNCTION_ARGS)
elog(ERROR, "Unable to encode timestamp");
result = (date2j(tm->tm_year, tm->tm_mon, tm->tm_mday)
- - date2j(tm->tm_year, 1, 1) + 1);
+ - date2j(tm->tm_year, 1, 1) + 1);
break;
default:
@@ -2138,7 +2147,7 @@ interval_part(PG_FUNCTION_ARGS)
if (VARSIZE(units) - VARHDRSZ > MAXDATELEN)
elog(ERROR, "Interval units '%s' not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
up = VARDATA(units);
lp = lowunits;
for (i = 0; i < (VARSIZE(units) - VARHDRSZ); i++)
@@ -2213,7 +2222,7 @@ interval_part(PG_FUNCTION_ARGS)
default:
elog(ERROR, "Interval units '%s' not yet supported",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
result = 0;
}
@@ -2237,7 +2246,7 @@ interval_part(PG_FUNCTION_ARGS)
{
elog(ERROR, "Interval units '%s' not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))));
+ PointerGetDatum(units))));
result = 0;
}
@@ -2283,9 +2292,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
type = DecodeSpecial(0, lowzone, &val);
if (TIMESTAMP_NOT_FINITE(timestamp))
- {
PG_RETURN_NULL();
- }
else if ((type == TZ) || (type == DTZ))
{
tm->tm_isdst = ((type == DTZ) ? 1 : 0);
@@ -2320,7 +2327,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
}
PG_RETURN_TEXT_P(result);
-} /* timestamp_zone() */
+} /* timestamp_zone() */
/* timestamp_izone()
* Encode timestamp type with specified time interval as time zone.
@@ -2364,4 +2371,4 @@ timestamp_izone(PG_FUNCTION_ARGS)
memmove(VARDATA(result), buf, (len - VARHDRSZ));
PG_RETURN_TEXT_P(result);
-} /* timestamp_izone() */
+} /* timestamp_izone() */
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index adbdf4f8ca7..775382568bb 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/varbit.c,v 1.15 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/varbit.c,v 1.16 2001/03/22 03:59:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,8 +51,10 @@ Datum
zpbit_in(PG_FUNCTION_ARGS)
{
char *input_string = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
+
#endif
int32 atttypmod = PG_GETARG_INT32(2);
VarBit *result; /* The resulting bit string */
@@ -79,9 +81,10 @@ zpbit_in(PG_FUNCTION_ARGS)
}
else
{
+
/*
- * Otherwise it's binary. This allows things like cast('1001'
- * as bit) to work transparently.
+ * Otherwise it's binary. This allows things like cast('1001' as
+ * bit) to work transparently.
*/
bit_not_hex = true;
sp = input_string;
@@ -214,8 +217,8 @@ zpbit_out(PG_FUNCTION_ARGS)
}
/*
- * Go back one step if we printed a hex number that was not part
- * of the bitstring anymore
+ * Go back one step if we printed a hex number that was not part of
+ * the bitstring anymore
*/
if (i > len)
r--;
@@ -263,12 +266,13 @@ _zpbit(PG_FUNCTION_ARGS)
{
ArrayType *v = (ArrayType *) PG_GETARG_VARLENA_P(0);
int32 len = PG_GETARG_INT32(1);
- FunctionCallInfoData locfcinfo;
+ FunctionCallInfoData locfcinfo;
+
/*
- * Since zpbit() is a built-in function, we should only need to
- * look it up once per run.
+ * Since zpbit() is a built-in function, we should only need to look
+ * it up once per run.
*/
- static FmgrInfo zpbit_finfo;
+ static FmgrInfo zpbit_finfo;
if (zpbit_finfo.fn_oid == InvalidOid)
fmgr_info(F_ZPBIT, &zpbit_finfo);
@@ -293,8 +297,10 @@ Datum
varbit_in(PG_FUNCTION_ARGS)
{
char *input_string = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
+
#endif
int32 atttypmod = PG_GETARG_INT32(2);
VarBit *result; /* The resulting bit string */
@@ -490,12 +496,13 @@ _varbit(PG_FUNCTION_ARGS)
{
ArrayType *v = (ArrayType *) PG_GETARG_VARLENA_P(0);
int32 len = PG_GETARG_INT32(1);
- FunctionCallInfoData locfcinfo;
+ FunctionCallInfoData locfcinfo;
+
/*
- * Since varbit() is a built-in function, we should only need to
- * look it up once per run.
+ * Since varbit() is a built-in function, we should only need to look
+ * it up once per run.
*/
- static FmgrInfo varbit_finfo;
+ static FmgrInfo varbit_finfo;
if (varbit_finfo.fn_oid == InvalidOid)
fmgr_info(F_VARBIT, &varbit_finfo);
@@ -765,7 +772,7 @@ bitsubstr(PG_FUNCTION_ARGS)
bitlen = VARBITLEN(arg);
/* If we do not have an upper bound, set bitlen */
- if (l==-1)
+ if (l == -1)
l = bitlen;
e = s + l;
s1 = Max(s, 1);
@@ -780,6 +787,7 @@ bitsubstr(PG_FUNCTION_ARGS)
}
else
{
+
/*
* OK, we've got a true substring starting at position s1-1 and
* ending at position e1-1
@@ -823,7 +831,7 @@ bitsubstr(PG_FUNCTION_ARGS)
PG_RETURN_VARBIT_P(result);
}
-/* bitlength, bitoctetlength
+/* bitlength, bitoctetlength
* Return the length of a bit string
*/
Datum
@@ -986,7 +994,7 @@ bitnot(PG_FUNCTION_ARGS)
p = VARBITS(arg);
r = VARBITS(result);
for (; p < VARBITEND(arg); p++)
- *r++ = ~ *p;
+ *r++ = ~*p;
/* Pad the result */
mask = BITMASK << VARBITPAD(result);
@@ -1076,8 +1084,8 @@ bitshiftright(PG_FUNCTION_ARGS)
/* Negative shift is a shift to the left */
if (shft < 0)
PG_RETURN_DATUM(DirectFunctionCall2(bitshiftleft,
- VarBitPGetDatum(arg),
- Int32GetDatum(-shft)));
+ VarBitPGetDatum(arg),
+ Int32GetDatum(-shft)));
result = (VarBit *) palloc(VARSIZE(arg));
VARATT_SIZEP(result) = VARSIZE(arg);
@@ -1121,7 +1129,7 @@ bitshiftright(PG_FUNCTION_ARGS)
}
/* This is not defined in any standard. We retain the natural ordering of
- * bits here, as it just seems more intuitive.
+ * bits here, as it just seems more intuitive.
*/
Datum
bitfromint4(PG_FUNCTION_ARGS)
@@ -1130,19 +1138,21 @@ bitfromint4(PG_FUNCTION_ARGS)
VarBit *result;
bits8 *r;
int len;
-
+
/* allocate enough space for the bits in an int4 */
- len = VARBITTOTALLEN(sizeof(int4)*BITS_PER_BYTE);
+ len = VARBITTOTALLEN(sizeof(int4) * BITS_PER_BYTE);
result = (VarBit *) palloc(len);
VARATT_SIZEP(result) = len;
- VARBITLEN(result) = sizeof(int4)*BITS_PER_BYTE;
- /* masks and shifts here are just too painful and we know that an int4 has
- * got 4 bytes
+ VARBITLEN(result) = sizeof(int4) * BITS_PER_BYTE;
+
+ /*
+ * masks and shifts here are just too painful and we know that an int4
+ * has got 4 bytes
*/
r = VARBITS(result);
- r[0] = (bits8) ((a >> (3*BITS_PER_BYTE)) & BITMASK);
- r[1] = (bits8) ((a >> (2*BITS_PER_BYTE)) & BITMASK);
- r[2] = (bits8) ((a >> (1*BITS_PER_BYTE)) & BITMASK);
+ r[0] = (bits8) ((a >> (3 * BITS_PER_BYTE)) & BITMASK);
+ r[1] = (bits8) ((a >> (2 * BITS_PER_BYTE)) & BITMASK);
+ r[2] = (bits8) ((a >> (1 * BITS_PER_BYTE)) & BITMASK);
r[3] = (bits8) (a & BITMASK);
PG_RETURN_VARBIT_P(result);
@@ -1156,7 +1166,7 @@ bittoint4(PG_FUNCTION_ARGS)
bits8 *r;
/* Check that the bit string is not too long */
- if (VARBITLEN(arg) > sizeof(int4)*BITS_PER_BYTE)
+ if (VARBITLEN(arg) > sizeof(int4) * BITS_PER_BYTE)
elog(ERROR, "Bit string is too large to fit in an int4");
result = 0;
for (r = VARBITS(arg); r < VARBITEND(arg); r++)
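
bitfromint4() above sidesteps sub-byte masks by emitting the four bytes of the int4 most-significant first, and bittoint4() reverses that by shifting and ORing. The same byte-level idea over plain arrays (no VarBit header handling; uint32_t stands in for int4):

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BYTE 8
#define BITMASK       0xFF

/* Split a 32-bit value into 4 bytes, most significant first, the same
 * way the function above fills the bit string's data bytes. */
static void
int4_to_bytes(uint32_t a, unsigned char out[4])
{
    out[0] = (unsigned char) ((a >> (3 * BITS_PER_BYTE)) & BITMASK);
    out[1] = (unsigned char) ((a >> (2 * BITS_PER_BYTE)) & BITMASK);
    out[2] = (unsigned char) ((a >> (1 * BITS_PER_BYTE)) & BITMASK);
    out[3] = (unsigned char) (a & BITMASK);
}

/* Inverse: shift the accumulated value left one byte at a time and OR
 * in the next byte, mirroring bittoint4()'s loop. */
static uint32_t
bytes_to_int4(const unsigned char in[4])
{
    uint32_t result = 0;
    int      i;

    for (i = 0; i < 4; i++)
        result = (result << BITS_PER_BYTE) | in[i];
    return result;
}

int
main(void)
{
    unsigned char b[4];

    int4_to_bytes(0xDEADBEEFu, b);
    printf("%02X %02X %02X %02X -> %08X\n",
           b[0], b[1], b[2], b[3], bytes_to_int4(b));
    return 0;
}
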
@@ -1179,18 +1189,18 @@ bittoint4(PG_FUNCTION_ARGS)
Datum
bitposition(PG_FUNCTION_ARGS)
{
- VarBit *substr = PG_GETARG_VARBIT_P(1);
- VarBit *arg = PG_GETARG_VARBIT_P(0);
- int substr_length,
+ VarBit *substr = PG_GETARG_VARBIT_P(1);
+ VarBit *arg = PG_GETARG_VARBIT_P(0);
+ int substr_length,
arg_length,
i,
is;
- bits8 *s, /* pointer into substring */
- *p; /* pointer into arg */
- bits8 cmp, /* shifted substring byte to compare */
- mask1, /* mask for substring byte shifted right */
- mask2, /* mask for substring byte shifted left */
- end_mask, /* pad mask for last substring byte */
+ bits8 *s, /* pointer into substring */
+ *p; /* pointer into arg */
+ bits8 cmp, /* shifted substring byte to compare */
+ mask1, /* mask for substring byte shifted right */
+ mask2, /* mask for substring byte shifted left */
+ end_mask, /* pad mask for last substring byte */
arg_mask; /* pad mask for last argument byte */
bool is_match;
@@ -1200,8 +1210,8 @@ bitposition(PG_FUNCTION_ARGS)
/* Argument has 0 length or substring longer than argument, return 0 */
if (arg_length == 0 || substr_length > arg_length)
- PG_RETURN_INT32(0);
-
+ PG_RETURN_INT32(0);
+
/* 0-length means return 1 */
if (substr_length == 0)
PG_RETURN_INT32(1);
@@ -1209,23 +1219,26 @@ bitposition(PG_FUNCTION_ARGS)
/* Initialise the padding masks */
end_mask = BITMASK << VARBITPAD(substr);
arg_mask = BITMASK << VARBITPAD(arg);
- for (i = 0; i < VARBITBYTES(arg) - VARBITBYTES(substr) + 1; i++)
+ for (i = 0; i < VARBITBYTES(arg) - VARBITBYTES(substr) + 1; i++)
{
- for (is = 0; is < BITS_PER_BYTE; is++) {
+ for (is = 0; is < BITS_PER_BYTE; is++)
+ {
is_match = true;
p = VARBITS(arg) + i;
mask1 = BITMASK >> is;
mask2 = ~mask1;
- for (s = VARBITS(substr);
- is_match && s < VARBITEND(substr); s++)
+ for (s = VARBITS(substr);
+ is_match && s < VARBITEND(substr); s++)
{
cmp = *s >> is;
- if (s == VARBITEND(substr) - 1)
+ if (s == VARBITEND(substr) - 1)
{
mask1 &= end_mask >> is;
- if (p == VARBITEND(arg) - 1) {
+ if (p == VARBITEND(arg) - 1)
+ {
/* Check that there is enough of arg left */
- if (mask1 & ~arg_mask) {
+ if (mask1 & ~arg_mask)
+ {
is_match = false;
break;
}
@@ -1237,21 +1250,24 @@ bitposition(PG_FUNCTION_ARGS)
break;
/* Move on to the next byte */
p++;
- if (p == VARBITEND(arg)) {
+ if (p == VARBITEND(arg))
+ {
mask2 = end_mask << (BITS_PER_BYTE - is);
is_match = mask2 == 0;
#if 0
- elog(NOTICE,"S. %d %d em=%2x sm=%2x r=%d",
- i,is,end_mask,mask2,is_match);
+ elog(NOTICE, "S. %d %d em=%2x sm=%2x r=%d",
+ i, is, end_mask, mask2, is_match);
#endif
break;
}
cmp = *s << (BITS_PER_BYTE - is);
- if (s == VARBITEND(substr) - 1)
+ if (s == VARBITEND(substr) - 1)
{
mask2 &= end_mask << (BITS_PER_BYTE - is);
- if (p == VARBITEND(arg) - 1) {
- if (mask2 & ~arg_mask) {
+ if (p == VARBITEND(arg) - 1)
+ {
+ if (mask2 & ~arg_mask)
+ {
is_match = false;
break;
}
@@ -1262,7 +1278,7 @@ bitposition(PG_FUNCTION_ARGS)
}
/* Have we found a match */
if (is_match)
- PG_RETURN_INT32(i*BITS_PER_BYTE + is + 1);
+ PG_RETURN_INT32(i * BITS_PER_BYTE + is + 1);
}
}
PG_RETURN_INT32(0);
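
bitposition() above slides the substring over the argument one bit offset at a time, comparing shifted bytes under end-of-string masks. Stripped of the bit-shifting machinery, the control flow is the familiar "try each offset, confirm a full match, return the 1-based position or 0" loop, sketched here over plain C strings:

#include <stdio.h>
#include <string.h>

/* Return the 1-based position of the first occurrence of needle in
 * haystack, or 0 if absent — the same contract as bitposition(), but
 * over bytes so no sub-byte shifting is needed.  An empty needle
 * matches at position 1. */
static int
position_of(const char *haystack, const char *needle)
{
    size_t hlen = strlen(haystack);
    size_t nlen = strlen(needle);
    size_t i;

    if (nlen == 0)
        return 1;
    if (nlen > hlen)
        return 0;

    for (i = 0; i + nlen <= hlen; i++)
    {
        if (memcmp(haystack + i, needle, nlen) == 0)
            return (int) i + 1;
    }
    return 0;
}

int
main(void)
{
    printf("%d\n", position_of("10110100", "1101"));   /* 3 */
    printf("%d\n", position_of("10110100", "111"));    /* 0 */
    return 0;
}
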
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index acec5f0cf24..37eeee4206c 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/varchar.c,v 1.74 2001/02/10 02:31:27 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/varchar.c,v 1.75 2001/03/22 03:59:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,10 @@ Datum
bpcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
+
#endif
int32 atttypmod = PG_GETARG_INT32(2);
BpChar *result;
@@ -82,9 +84,9 @@ bpcharin(PG_FUNCTION_ARGS)
else
#ifdef MULTIBYTE
{
+
/*
- * truncate multi-byte string preserving multi-byte
- * boundary
+ * truncate multi-byte string preserving multi-byte boundary
*/
len = pg_mbcliplen(s, atttypmod - VARHDRSZ, atttypmod - VARHDRSZ);
}
@@ -169,6 +171,7 @@ bpchar(PG_FUNCTION_ARGS)
r = VARDATA(result);
#ifdef MULTIBYTE
+
/*
* truncate multi-byte string in a way not to break multi-byte
* boundary
@@ -214,18 +217,19 @@ _bpchar(PG_FUNCTION_ARGS)
{
ArrayType *v = (ArrayType *) PG_GETARG_VARLENA_P(0);
int32 len = PG_GETARG_INT32(1);
- FunctionCallInfoData locfcinfo;
+ FunctionCallInfoData locfcinfo;
+
/*
- * Since bpchar() is a built-in function, we should only need to
- * look it up once per run.
+ * Since bpchar() is a built-in function, we should only need to look
+ * it up once per run.
*/
- static FmgrInfo bpchar_finfo;
+ static FmgrInfo bpchar_finfo;
if (bpchar_finfo.fn_oid == InvalidOid)
fmgr_info(F_BPCHAR, &bpchar_finfo);
MemSet(&locfcinfo, 0, sizeof(locfcinfo));
- locfcinfo.flinfo = &bpchar_finfo;
+ locfcinfo.flinfo = &bpchar_finfo;
locfcinfo.nargs = 2;
/* We assume we are "strict" and need not worry about null inputs */
locfcinfo.arg[0] = PointerGetDatum(v);
@@ -280,7 +284,7 @@ bpchar_name(PG_FUNCTION_ARGS)
/* Truncate to max length for a Name */
if (len >= NAMEDATALEN)
- len = NAMEDATALEN-1;
+ len = NAMEDATALEN - 1;
/* Remove trailing blanks */
while (len > 0)
@@ -335,17 +339,19 @@ Datum
varcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
+
#endif
int32 atttypmod = PG_GETARG_INT32(2);
- VarChar *result;
+ VarChar *result;
int len;
len = strlen(s) + VARHDRSZ;
if (atttypmod >= (int32) VARHDRSZ && len > atttypmod)
#ifdef MULTIBYTE
- len = pg_mbcliplen(s, len - VARHDRSZ, atttypmod - VARHDRSZ) + VARHDRSZ;
+ len = pg_mbcliplen(s, len - VARHDRSZ, atttypmod - VARHDRSZ) + VARHDRSZ;
#else
len = atttypmod; /* clip the string at max length */
#endif
@@ -364,7 +370,7 @@ varcharin(PG_FUNCTION_ARGS)
Datum
varcharout(PG_FUNCTION_ARGS)
{
- VarChar *s = PG_GETARG_VARCHAR_P(0);
+ VarChar *s = PG_GETARG_VARCHAR_P(0);
char *result;
int len;
@@ -388,9 +394,9 @@ varcharout(PG_FUNCTION_ARGS)
Datum
varchar(PG_FUNCTION_ARGS)
{
- VarChar *s = PG_GETARG_VARCHAR_P(0);
+ VarChar *s = PG_GETARG_VARCHAR_P(0);
int32 slen = PG_GETARG_INT32(1);
- VarChar *result;
+ VarChar *result;
int len;
len = VARSIZE(s);
@@ -402,8 +408,7 @@ varchar(PG_FUNCTION_ARGS)
#ifdef MULTIBYTE
/*
- * truncate multi-byte string preserving multi-byte
- * boundary
+ * truncate multi-byte string preserving multi-byte boundary
*/
len = pg_mbcliplen(VARDATA(s), slen - VARHDRSZ, slen - VARHDRSZ);
slen = len + VARHDRSZ;
@@ -427,18 +432,19 @@ _varchar(PG_FUNCTION_ARGS)
{
ArrayType *v = (ArrayType *) PG_GETARG_VARLENA_P(0);
int32 len = PG_GETARG_INT32(1);
- FunctionCallInfoData locfcinfo;
+ FunctionCallInfoData locfcinfo;
+
/*
- * Since varchar() is a built-in function, we should only need to
- * look it up once per run.
+ * Since varchar() is a built-in function, we should only need to look
+ * it up once per run.
*/
- static FmgrInfo varchar_finfo;
+ static FmgrInfo varchar_finfo;
if (varchar_finfo.fn_oid == InvalidOid)
fmgr_info(F_VARCHAR, &varchar_finfo);
MemSet(&locfcinfo, 0, sizeof(locfcinfo));
- locfcinfo.flinfo = &varchar_finfo;
+ locfcinfo.flinfo = &varchar_finfo;
locfcinfo.nargs = 2;
/* We assume we are "strict" and need not worry about null inputs */
locfcinfo.arg[0] = PointerGetDatum(v);
@@ -468,6 +474,7 @@ Datum
bpcharlen(PG_FUNCTION_ARGS)
{
BpChar *arg = PG_GETARG_BPCHAR_P(0);
+
#ifdef MULTIBYTE
unsigned char *s;
int len,
@@ -656,7 +663,7 @@ bpcharcmp(PG_FUNCTION_ARGS)
/*
* bpchar needs a specialized hash function because we want to ignore
- * trailing blanks in comparisons. (varchar can use plain hashvarlena.)
+ * trailing blanks in comparisons. (varchar can use plain hashvarlena.)
*/
Datum
hashbpchar(PG_FUNCTION_ARGS)
@@ -685,7 +692,8 @@ hashbpchar(PG_FUNCTION_ARGS)
Datum
varcharlen(PG_FUNCTION_ARGS)
{
- VarChar *arg = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg = PG_GETARG_VARCHAR_P(0);
+
#ifdef MULTIBYTE
unsigned char *s;
int len,
@@ -711,7 +719,7 @@ varcharlen(PG_FUNCTION_ARGS)
Datum
varcharoctetlen(PG_FUNCTION_ARGS)
{
- VarChar *arg = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg = PG_GETARG_VARCHAR_P(0);
PG_RETURN_INT32(VARSIZE(arg) - VARHDRSZ);
}
@@ -728,8 +736,8 @@ varcharoctetlen(PG_FUNCTION_ARGS)
Datum
varchareq(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
bool result;
@@ -751,8 +759,8 @@ varchareq(PG_FUNCTION_ARGS)
Datum
varcharne(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
bool result;
@@ -774,8 +782,8 @@ varcharne(PG_FUNCTION_ARGS)
Datum
varcharlt(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
int cmp;
@@ -794,8 +802,8 @@ varcharlt(PG_FUNCTION_ARGS)
Datum
varcharle(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
int cmp;
@@ -814,8 +822,8 @@ varcharle(PG_FUNCTION_ARGS)
Datum
varchargt(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
int cmp;
@@ -834,8 +842,8 @@ varchargt(PG_FUNCTION_ARGS)
Datum
varcharge(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
int cmp;
@@ -854,8 +862,8 @@ varcharge(PG_FUNCTION_ARGS)
Datum
varcharcmp(PG_FUNCTION_ARGS)
{
- VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
- VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
+ VarChar *arg1 = PG_GETARG_VARCHAR_P(0);
+ VarChar *arg2 = PG_GETARG_VARCHAR_P(1);
int len1,
len2;
int cmp;
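
Earlier in this file's diff, the comment on hashbpchar() notes that bpchar needs a hash that ignores trailing blanks so that values which compare equal also hash equal. A standalone sketch of that rule (the FNV-1a hash used here is only a stand-in, not the backend's hash function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hash a blank-padded string while ignoring trailing blanks, so that
 * "abc" and "abc   " hash (and therefore compare) equal. */
static uint32_t
hash_bpchar(const char *s)
{
    size_t   len = strlen(s);
    uint32_t h = 2166136261u;
    size_t   i;

    while (len > 0 && s[len - 1] == ' ')
        len--;                      /* strip trailing blanks first */

    for (i = 0; i < len; i++)
    {
        h ^= (unsigned char) s[i];
        h *= 16777619u;             /* FNV-1a step */
    }
    return h;
}

int
main(void)
{
    printf("%u %u\n", hash_bpchar("abc"), hash_bpchar("abc   "));
    return 0;
}
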
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index ba1ed5350ed..efa6ab65527 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/varlena.c,v 1.68 2001/02/10 02:31:27 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/varlena.c,v 1.69 2001/03/22 03:59:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -202,6 +202,7 @@ Datum
textlen(PG_FUNCTION_ARGS)
{
text *t = PG_GETARG_TEXT_P(0);
+
#ifdef MULTIBYTE
unsigned char *s;
int len,
@@ -236,10 +237,10 @@ textlen(PG_FUNCTION_ARGS)
Datum
textoctetlen(PG_FUNCTION_ARGS)
{
- struct varattrib *t = (struct varattrib *) PG_GETARG_RAW_VARLENA_P(0);
+ struct varattrib *t = (struct varattrib *) PG_GETARG_RAW_VARLENA_P(0);
if (!VARATT_IS_EXTERNAL(t))
- PG_RETURN_INT32(VARATT_SIZE(t) - VARHDRSZ);
+ PG_RETURN_INT32(VARATT_SIZE(t) - VARHDRSZ);
PG_RETURN_INT32(t->va_content.va_external.va_extsize);
}
@@ -320,9 +321,11 @@ text_substr(PG_FUNCTION_ARGS)
int32 n = PG_GETARG_INT32(2);
text *ret;
int len;
+
#ifdef MULTIBYTE
int i;
char *p;
+
#endif
len = VARSIZE(string) - VARHDRSZ;
@@ -392,9 +395,11 @@ textpos(PG_FUNCTION_ARGS)
len2;
pg_wchar *p1,
*p2;
+
#ifdef MULTIBYTE
pg_wchar *ps1,
*ps2;
+
#endif
if (VARSIZE(t2) <= VARHDRSZ)
@@ -843,7 +848,7 @@ text_name(PG_FUNCTION_ARGS)
/* Truncate oversize input */
if (len >= NAMEDATALEN)
- len = NAMEDATALEN-1;
+ len = NAMEDATALEN - 1;
#ifdef STRINGDEBUG
printf("text- convert string length %d (%d) ->%d\n",
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 1bedd565d03..f843f2bb166 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.76 2001/02/22 18:39:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.77 2001/03/22 03:59:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,9 +31,9 @@
static void CatCacheRemoveCTup(CatCache *cache, CatCTup *ct);
static Index CatalogCacheComputeHashIndex(CatCache *cache,
- ScanKey cur_skey);
+ ScanKey cur_skey);
static Index CatalogCacheComputeTupleHashIndex(CatCache *cache,
- HeapTuple tuple);
+ HeapTuple tuple);
static void CatalogCacheInitializeCache(CatCache *cache);
static Datum cc_hashname(PG_FUNCTION_ARGS);
@@ -88,8 +88,8 @@ GetCCHashFunc(Oid keytype)
{
switch (keytype)
{
- case BOOLOID:
- case CHAROID:
+ case BOOLOID:
+ case CHAROID:
return hashchar;
case NAMEOID:
return cc_hashname;
@@ -125,7 +125,7 @@ cc_hashname(PG_FUNCTION_ARGS)
*/
NameData my_n;
- namestrcpy(&my_n, NameStr(* PG_GETARG_NAME(0)));
+ namestrcpy(&my_n, NameStr(*PG_GETARG_NAME(0)));
return DirectFunctionCall1(hashname, NameGetDatum(&my_n));
}
@@ -141,15 +141,17 @@ cc_hashname(PG_FUNCTION_ARGS)
void
CreateCacheMemoryContext(void)
{
- /* Purely for paranoia, check that context doesn't exist;
- * caller probably did so already.
+
+ /*
+ * Purely for paranoia, check that context doesn't exist; caller
+ * probably did so already.
*/
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
@@ -157,7 +159,7 @@ CreateCacheMemoryContext(void)
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*
*/
@@ -201,8 +203,8 @@ CatalogCacheInitializeCache(CatCache *cache)
Assert(RelationIsValid(relation));
/*
- * switch to the cache context so our allocations
- * do not vanish at the end of a transaction
+ * switch to the cache context so our allocations do not vanish at the
+ * end of a transaction
*
*/
if (!CacheMemoryContext)
@@ -211,13 +213,13 @@ CatalogCacheInitializeCache(CatCache *cache)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * copy the relcache's tuple descriptor to permanent cache storage
+ * copy the relcache's tuple descriptor to permanent cache storage
*
*/
tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
/*
- * return to the caller's memory context and close the rel
+ * return to the caller's memory context and close the rel
*
*/
MemoryContextSwitchTo(oldcxt);
@@ -228,7 +230,7 @@ CatalogCacheInitializeCache(CatCache *cache)
cache->cc_relname, cache->cc_nkeys);
/*
- * initialize cache's key information
+ * initialize cache's key information
*
*/
for (i = 0; i < cache->cc_nkeys; ++i)
@@ -238,9 +240,7 @@ CatalogCacheInitializeCache(CatCache *cache)
CatalogCacheInitializeCache_DEBUG2;
if (cache->cc_key[i] > 0)
- {
keytype = tupdesc->attrs[cache->cc_key[i] - 1]->atttypid;
- }
else
{
if (cache->cc_key[i] != ObjectIdAttributeNumber)
@@ -249,6 +249,7 @@ CatalogCacheInitializeCache(CatCache *cache)
}
cache->cc_hashfunc[i] = GetCCHashFunc(keytype);
+
/*
* If GetCCHashFunc liked the type, safe to index into eqproc[]
*/
@@ -268,7 +269,7 @@ CatalogCacheInitializeCache(CatCache *cache)
}
/*
- * mark this cache fully initialized
+ * mark this cache fully initialized
*
*/
cache->cc_tupdesc = tupdesc;
@@ -293,22 +294,22 @@ CatalogCacheComputeHashIndex(CatCache *cache, ScanKey cur_skey)
case 4:
hashIndex ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
- cur_skey[3].sk_argument)) << 9;
+ cur_skey[3].sk_argument)) << 9;
/* FALLTHROUGH */
case 3:
hashIndex ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
- cur_skey[2].sk_argument)) << 6;
+ cur_skey[2].sk_argument)) << 6;
/* FALLTHROUGH */
case 2:
hashIndex ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
- cur_skey[1].sk_argument)) << 3;
+ cur_skey[1].sk_argument)) << 3;
/* FALLTHROUGH */
case 1:
hashIndex ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
- cur_skey[0].sk_argument));
+ cur_skey[0].sk_argument));
break;
default:
elog(FATAL, "CCComputeHashIndex: %d cc_nkeys", cache->cc_nkeys);
@@ -422,7 +423,7 @@ CatalogCacheIdInvalidate(int cacheId,
CatCache *ccp;
/*
- * sanity checks
+ * sanity checks
*
*/
Assert(hashIndex < NCCBUCK);
@@ -430,7 +431,7 @@ CatalogCacheIdInvalidate(int cacheId,
CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
/*
- * inspect caches to find the proper cache
+ * inspect caches to find the proper cache
*
*/
for (ccp = Caches; ccp; ccp = ccp->cc_next)
@@ -440,8 +441,9 @@ CatalogCacheIdInvalidate(int cacheId,
if (cacheId != ccp->id)
continue;
+
/*
- * inspect the hash bucket until we find a match or exhaust
+ * inspect the hash bucket until we find a match or exhaust
*
*/
for (elt = DLGetHead(&ccp->cc_cache[hashIndex]); elt; elt = nextelt)
@@ -491,7 +493,7 @@ CatalogCacheIdInvalidate(int cacheId,
void
AtEOXact_CatCache(bool isCommit)
{
- CatCache *cache;
+ CatCache *cache;
for (cache = Caches; cache; cache = cache->cc_next)
{
@@ -530,7 +532,7 @@ AtEOXact_CatCache(bool isCommit)
void
ResetSystemCache(void)
{
- CatCache *cache;
+ CatCache *cache;
CACHE1_elog(DEBUG, "ResetSystemCache called");
@@ -617,7 +619,7 @@ do { \
#define InitCatCache_DEBUG1
#endif
-CatCache *
+CatCache *
InitCatCache(int id,
char *relname,
char *indname,
@@ -629,8 +631,8 @@ InitCatCache(int id,
int i;
/*
- * first switch to the cache context so our allocations
- * do not vanish at the end of a transaction
+ * first switch to the cache context so our allocations do not vanish
+ * at the end of a transaction
*
*/
if (!CacheMemoryContext)
@@ -639,15 +641,15 @@ InitCatCache(int id,
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * allocate a new cache structure
+ * allocate a new cache structure
*
*/
cp = (CatCache *) palloc(sizeof(CatCache));
MemSet((char *) cp, 0, sizeof(CatCache));
/*
- * initialize the cache buckets (each bucket is a list header)
- * and the LRU tuple list
+ * initialize the cache buckets (each bucket is a list header) and the
+ * LRU tuple list
*
*/
DLInitList(&cp->cc_lrulist);
@@ -655,17 +657,17 @@ InitCatCache(int id,
DLInitList(&cp->cc_cache[i]);
/*
- * Caches is the pointer to the head of the list of all the
- * system caches. here we add the new cache to the top of the list.
+ * Caches is the pointer to the head of the list of all the system
+ * caches. here we add the new cache to the top of the list.
*
*/
cp->cc_next = Caches; /* list of caches (single link) */
Caches = cp;
/*
- * initialize the cache's relation information for the relation
- * corresponding to this cache, and initialize some of the new
- * cache's other internal fields. But don't open the relation yet.
+ * initialize the cache's relation information for the relation
+ * corresponding to this cache, and initialize some of the new cache's
+ * other internal fields. But don't open the relation yet.
*
*/
cp->cc_relname = relname;
@@ -679,14 +681,14 @@ InitCatCache(int id,
cp->cc_key[i] = key[i];
/*
- * all done. new cache is initialized. print some debugging
- * information, if appropriate.
+ * all done. new cache is initialized. print some debugging
+ * information, if appropriate.
*
*/
InitCatCache_DEBUG1;
/*
- * back to the old context before we return...
+ * back to the old context before we return...
*
*/
MemoryContextSwitchTo(oldcxt);
@@ -774,14 +776,14 @@ SearchCatCache(CatCache *cache,
MemoryContext oldcxt;
/*
- * one-time startup overhead
+ * one-time startup overhead
*
*/
if (cache->cc_tupdesc == NULL)
CatalogCacheInitializeCache(cache);
/*
- * initialize the search key information
+ * initialize the search key information
*
*/
memcpy(cur_skey, cache->cc_skey, sizeof(cur_skey));
@@ -791,13 +793,13 @@ SearchCatCache(CatCache *cache,
cur_skey[3].sk_argument = v4;
/*
- * find the hash bucket in which to look for the tuple
+ * find the hash bucket in which to look for the tuple
*
*/
hash = CatalogCacheComputeHashIndex(cache, cur_skey);
/*
- * scan the hash bucket until we find a match or exhaust our tuples
+ * scan the hash bucket until we find a match or exhaust our tuples
*
*/
for (elt = DLGetHead(&cache->cc_cache[hash]);
@@ -812,8 +814,8 @@ SearchCatCache(CatCache *cache,
continue; /* ignore dead entries */
/*
- * see if the cached tuple matches our key.
- * (should we be worried about time ranges? -cim 10/2/90)
+ * see if the cached tuple matches our key. (should we be worried
+ * about time ranges? -cim 10/2/90)
*
*/
HeapKeyTest(&ct->tuple,
@@ -821,16 +823,16 @@ SearchCatCache(CatCache *cache,
cache->cc_nkeys,
cur_skey,
res);
- if (! res)
+ if (!res)
continue;
/*
- * we found a tuple in the cache: bump its refcount, move it to
- * the front of the LRU list, and return it. We also move it
- * to the front of the list for its hashbucket, in order to speed
- * subsequent searches. (The most frequently accessed elements
- * in any hashbucket will tend to be near the front of the
- * hashbucket's list.)
+ * we found a tuple in the cache: bump its refcount, move it to
+ * the front of the LRU list, and return it. We also move it to
+ * the front of the list for its hashbucket, in order to speed
+ * subsequent searches. (The most frequently accessed elements in
+ * any hashbucket will tend to be near the front of the
+ * hashbucket's list.)
*
*/
ct->refcount++;
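
The comment above describes the cache-hit path: bump the entry's reference count and move it to the front of both the LRU list and its hash bucket, so frequently used entries stay near the front. A toy doubly-linked-list sketch of that move-to-front step (not the backend's list API):

#include <stdio.h>

typedef struct Node
{
    struct Node *prev, *next;
    int          value;
    int          refcount;
} Node;

typedef struct
{
    Node *head, *tail;
} List;

/* Unlink node from list (it must be a member). */
static void
list_remove(List *l, Node *n)
{
    if (n->prev) n->prev->next = n->next; else l->head = n->next;
    if (n->next) n->next->prev = n->prev; else l->tail = n->prev;
    n->prev = n->next = NULL;
}

/* Push node onto the front of list. */
static void
list_push_front(List *l, Node *n)
{
    n->prev = NULL;
    n->next = l->head;
    if (l->head) l->head->prev = n; else l->tail = n;
    l->head = n;
}

/* Cache hit: pin the entry and move it to the front of the LRU list,
 * mirroring the refcount bump and move-to-front described above. */
static void
cache_hit(List *lru, Node *entry)
{
    entry->refcount++;
    list_remove(lru, entry);
    list_push_front(lru, entry);
}

int
main(void)
{
    List lru = {NULL, NULL};
    Node a = {0}, b = {0};

    a.value = 1;
    b.value = 2;
    list_push_front(&lru, &a);
    list_push_front(&lru, &b);      /* order: b, a */
    cache_hit(&lru, &a);            /* order: a, b */
    printf("front=%d refcount=%d\n", lru.head->value, lru.head->refcount);
    return 0;
}
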
@@ -847,31 +849,30 @@ SearchCatCache(CatCache *cache,
}
/*
- * Tuple was not found in cache, so we have to try and
- * retrieve it directly from the relation. If it's found,
- * we add it to the cache.
+ * Tuple was not found in cache, so we have to try and retrieve it
+ * directly from the relation. If it's found, we add it to the cache.
*
- * NOTE: it is possible for recursive cache lookups to occur while
- * reading the relation --- for example, due to shared-cache-inval
- * messages being processed during heap_open(). This is OK. It's
- * even possible for one of those lookups to find and enter the
- * very same tuple we are trying to fetch here. If that happens,
- * we will enter a second copy of the tuple into the cache. The
- * first copy will never be referenced again, and will eventually
- * age out of the cache, so there's no functional problem. This case
- * is rare enough that it's not worth expending extra cycles to detect.
+ * NOTE: it is possible for recursive cache lookups to occur while
+ * reading the relation --- for example, due to shared-cache-inval
+ * messages being processed during heap_open(). This is OK. It's
+ * even possible for one of those lookups to find and enter the very
+ * same tuple we are trying to fetch here. If that happens, we will
+ * enter a second copy of the tuple into the cache. The first copy
+ * will never be referenced again, and will eventually age out of the
+ * cache, so there's no functional problem. This case is rare enough
+ * that it's not worth expending extra cycles to detect.
*
*/
/*
- * open the relation associated with the cache
+ * open the relation associated with the cache
*
*/
relation = heap_openr(cache->cc_relname, AccessShareLock);
/*
- * Scan the relation to find the tuple. If there's an index, and
- * if it's safe to do so, use the index. Else do a heap scan.
+ * Scan the relation to find the tuple. If there's an index, and if
+ * it's safe to do so, use the index. Else do a heap scan.
*
*/
ct = NULL;
@@ -891,13 +892,13 @@ SearchCatCache(CatCache *cache,
cache->cc_relname);
/*
- * For an index scan, sk_attno has to be set to the index attribute
- * number(s), not the heap attribute numbers. We assume that the
- * index corresponds exactly to the cache keys (or its first N
- * keys do, anyway).
+ * For an index scan, sk_attno has to be set to the index
+ * attribute number(s), not the heap attribute numbers. We assume
+ * that the index corresponds exactly to the cache keys (or its
+ * first N keys do, anyway).
*/
for (i = 0; i < cache->cc_nkeys; ++i)
- cur_skey[i].sk_attno = i+1;
+ cur_skey[i].sk_attno = i + 1;
idesc = index_openr(cache->cc_indname);
isd = index_beginscan(idesc, false, cache->cc_nkeys, cur_skey);
@@ -948,21 +949,21 @@ SearchCatCache(CatCache *cache,
}
/*
- * close the relation
+ * close the relation
*
*/
heap_close(relation, AccessShareLock);
/*
- * scan is complete. if tup was found, we can add it to the cache.
+ * scan is complete. if tup was found, we can add it to the cache.
*
*/
if (ct == NULL)
return NULL;
/*
- * Finish initializing the CatCTup header, and add it to the
- * linked lists.
+ * Finish initializing the CatCTup header, and add it to the linked
+ * lists.
*
*/
CACHE1_elog(DEBUG, "SearchCatCache: found tuple");
@@ -977,8 +978,8 @@ SearchCatCache(CatCache *cache,
DLAddHead(&cache->cc_cache[hash], &ct->cache_elem);
/*
- * If we've exceeded the desired size of this cache,
- * try to throw away the least recently used entry.
+ * If we've exceeded the desired size of this cache, try to throw away
+ * the least recently used entry.
*
*/
if (++cache->cc_ntup > cache->cc_maxtup)
@@ -1022,7 +1023,7 @@ SearchCatCache(CatCache *cache,
void
ReleaseCatCache(HeapTuple tuple)
{
- CatCTup *ct = (CatCTup *) (((char *) tuple) -
+ CatCTup *ct = (CatCTup *) (((char *) tuple) -
offsetof(CatCTup, tuple));
/* Safety checks to ensure we were handed a cache entry */
@@ -1038,9 +1039,9 @@ ReleaseCatCache(HeapTuple tuple)
)
{
/* We can find the associated cache using the dllist pointers */
- Dllist *lru = DLGetListHdr(&ct->lrulist_elem);
- CatCache *cache = (CatCache *) (((char *) lru) -
- offsetof(CatCache, cc_lrulist));
+ Dllist *lru = DLGetListHdr(&ct->lrulist_elem);
+ CatCache *cache = (CatCache *) (((char *) lru) -
+ offsetof(CatCache, cc_lrulist));
CatCacheRemoveCTup(cache, ct);
}
@@ -1061,7 +1062,7 @@ ReleaseCatCache(HeapTuple tuple)
* the specified relation, find all catcaches it could be in, compute the
* correct hashindex for each such catcache, and call the specified function
* to record the cache id, hashindex, and tuple ItemPointer in inval.c's
- * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
+ * lists. CatalogCacheIdInvalidate will be called later, if appropriate,
* using the recorded information.
*
* Note that it is irrelevant whether the given tuple is actually loaded
@@ -1082,7 +1083,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
CatCache *ccp;
/*
- * sanity checks
+ * sanity checks
*
*/
Assert(RelationIsValid(relation));
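
The ReleaseCatCache hunk above steps from a HeapTuple pointer back to its enclosing CatCTup (and from a Dllist header back to its CatCache) using offsetof() arithmetic. A minimal standalone sketch of that idiom, using invented struct names rather than the backend's real definitions:

#include <stddef.h>
#include <stdio.h>

typedef struct ExampleTuple
{
    int         dummy;
} ExampleTuple;

typedef struct ExampleEntry
{
    int          refcount;
    ExampleTuple tuple;          /* callers only ever see &entry->tuple */
} ExampleEntry;

static ExampleEntry *
entry_from_tuple(ExampleTuple *tup)
{
    /* step back from the embedded member to the start of its container */
    return (ExampleEntry *) (((char *) tup) - offsetof(ExampleEntry, tuple));
}

int
main(void)
{
    ExampleEntry entry = {1, {42}};
    ExampleEntry *recovered = entry_from_tuple(&entry.tuple);

    printf("refcount = %d\n", recovered->refcount);  /* prints "refcount = 1" */
    return 0;
}
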
diff --git a/src/backend/utils/cache/fcache.c b/src/backend/utils/cache/fcache.c
index 8c246a1a9fd..91bea5cfc71 100644
--- a/src/backend/utils/cache/fcache.c
+++ b/src/backend/utils/cache/fcache.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/Attic/fcache.c,v 1.38 2001/01/24 19:43:14 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/Attic/fcache.c,v 1.39 2001/03/22 03:59:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,10 @@ init_fcache(Oid foid, int nargs, MemoryContext fcacheCxt)
if (nargs > FUNC_MAX_ARGS)
elog(ERROR, "init_fcache: too many arguments");
- /* If function returns set, prepare a resultinfo node for communication */
+ /*
+ * If function returns set, prepare a resultinfo node for
+ * communication
+ */
if (retval->func.fn_retset)
{
retval->fcinfo.resultinfo = (Node *) &(retval->rsinfo);
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 8d15188aaf0..bb3c2822232 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -25,7 +25,7 @@
* so that they can flush obsolete entries from their caches.
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. Also, whenever
+ * on tuples in relations that have associated catcaches. Also, whenever
* we see an operation on a pg_class or pg_attribute tuple, we register
* a relcache flush operation for the relation described by that tuple.
*
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.41 2001/02/22 18:39:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/inval.c,v 1.42 2001/03/22 03:59:57 momjian Exp $
*
* Note - this code is real crufty... badly needs a rewrite to improve
* readability and portability. (Shouldn't assume Oid == Index, for example)
@@ -136,8 +136,8 @@ static LocalInvalid RollbackStack = EmptyLocalInvalid;
static InvalidationEntry InvalidationEntryAllocate(uint16 size);
static void LocalInvalidInvalidate(LocalInvalid invalid,
- void (*function) (InvalidationMessage),
- bool freemember);
+ void (*function) (InvalidationMessage),
+ bool freemember);
static LocalInvalid LocalInvalidRegister(LocalInvalid invalid,
InvalidationEntry entry);
static void DiscardInvalidStack(LocalInvalid *invalid);
@@ -269,14 +269,14 @@ CacheIdRegisterSpecifiedLocalInvalid(LocalInvalid invalid,
InvalidationMessage message;
/*
- * debugging stuff
+ * debugging stuff
*
*/
CacheIdRegisterSpecifiedLocalInvalid_DEBUG1;
/*
- * create a message describing the system catalog tuple
- * we wish to invalidate.
+ * create a message describing the system catalog tuple we wish to
+ * invalidate.
*
*/
message = (InvalidationMessage)
@@ -289,7 +289,7 @@ CacheIdRegisterSpecifiedLocalInvalid(LocalInvalid invalid,
ItemPointerCopy(pointer, &message->any.catalog.pointerData);
/*
- * Add message to linked list of unprocessed messages.
+ * Add message to linked list of unprocessed messages.
*
*/
invalid = LocalInvalidRegister(invalid, (InvalidationEntry) message);
@@ -305,20 +305,22 @@ CacheIdRegisterLocalInvalid(int cacheId,
Index hashIndex,
ItemPointer pointer)
{
+
/*
- * debugging stuff
+ * debugging stuff
*
*/
CacheIdRegisterLocalInvalid_DEBUG1;
/*
- * Add message to InvalidForall linked list.
+ * Add message to InvalidForall linked list.
*
*/
InvalidForall = CacheIdRegisterSpecifiedLocalInvalid(InvalidForall,
cacheId, hashIndex, pointer);
+
/*
- * Add message to InvalidLocal linked list.
+ * Add message to InvalidLocal linked list.
*
*/
InvalidLocal = CacheIdRegisterSpecifiedLocalInvalid(InvalidLocal,
@@ -336,13 +338,13 @@ CacheIdRegisterLocalRollback(int cacheId,
{
/*
- * debugging stuff
+ * debugging stuff
*
*/
CacheIdRegisterLocalRollback_DEBUG1;
/*
- * Add message to RollbackStack linked list.
+ * Add message to RollbackStack linked list.
*
*/
RollbackStack = CacheIdRegisterSpecifiedLocalInvalid(
@@ -360,7 +362,7 @@ RelationIdRegisterSpecifiedLocalInvalid(LocalInvalid invalid,
InvalidationMessage message;
/*
- * debugging stuff
+ * debugging stuff
*
*/
#ifdef INVALIDDEBUG
@@ -369,8 +371,8 @@ RelationIdRegisterSpecifiedLocalInvalid(LocalInvalid invalid,
#endif /* defined(INVALIDDEBUG) */
/*
- * create a message describing the relation descriptor
- * we wish to invalidate.
+ * create a message describing the relation descriptor we wish to
+ * invalidate.
*
*/
message = (InvalidationMessage)
@@ -381,7 +383,7 @@ RelationIdRegisterSpecifiedLocalInvalid(LocalInvalid invalid,
message->any.relation.objectId = objectId;
/*
- * Add message to linked list of unprocessed messages.
+ * Add message to linked list of unprocessed messages.
*
*/
invalid = LocalInvalidRegister(invalid, (InvalidationEntry) message);
@@ -395,8 +397,9 @@ RelationIdRegisterSpecifiedLocalInvalid(LocalInvalid invalid,
static void
RelationIdRegisterLocalInvalid(Oid relationId, Oid objectId)
{
+
/*
- * debugging stuff
+ * debugging stuff
*
*/
#ifdef INVALIDDEBUG
@@ -405,13 +408,14 @@ RelationIdRegisterLocalInvalid(Oid relationId, Oid objectId)
#endif /* defined(INVALIDDEBUG) */
/*
- * Add message to InvalidForall linked list.
+ * Add message to InvalidForall linked list.
*
*/
InvalidForall = RelationIdRegisterSpecifiedLocalInvalid(InvalidForall,
relationId, objectId);
+
/*
- * Add message to InvalidLocal linked list.
+ * Add message to InvalidLocal linked list.
*
*/
InvalidLocal = RelationIdRegisterSpecifiedLocalInvalid(InvalidLocal,
@@ -427,7 +431,7 @@ RelationIdRegisterLocalRollback(Oid relationId, Oid objectId)
{
/*
- * debugging stuff
+ * debugging stuff
*
*/
#ifdef INVALIDDEBUG
@@ -436,7 +440,7 @@ RelationIdRegisterLocalRollback(Oid relationId, Oid objectId)
#endif /* defined(INVALIDDEBUG) */
/*
- * Add message to RollbackStack linked list.
+ * Add message to RollbackStack linked list.
*
*/
RollbackStack = RelationIdRegisterSpecifiedLocalInvalid(
@@ -464,9 +468,10 @@ CacheIdInvalidate(Index cacheId,
Index hashIndex,
ItemPointer pointer)
{
+
/*
- * assume that if the item pointer is valid, then we are
- * invalidating an item in the specified system catalog cache.
+ * assume that if the item pointer is valid, then we are invalidating
+ * an item in the specified system catalog cache.
*
*/
if (ItemPointerIsValid(pointer))
@@ -478,8 +483,8 @@ CacheIdInvalidate(Index cacheId,
CacheIdInvalidate_DEBUG1;
/*
- * if the cacheId is the oid of any of the following system relations,
- * then assume we are invalidating a relation descriptor
+ * if the cacheId is the oid of any of the following system relations,
+ * then assume we are invalidating a relation descriptor
*
*/
if (cacheId == RelOid_pg_class)
@@ -495,7 +500,7 @@ CacheIdInvalidate(Index cacheId,
}
/*
- * Yow! the caller asked us to invalidate something else.
+ * Yow! the caller asked us to invalidate something else.
*
*/
elog(FATAL, "CacheIdInvalidate: cacheId=%d relation id?", cacheId);
@@ -630,13 +635,13 @@ PrepareToInvalidateRelationCache(Relation relation,
Oid objectId;
/*
- * get the relation object id
+ * get the relation object id
*
*/
relationId = RelationGetRelid(relation);
/*
- * is it one of the ones we need to send an SI message for?
+ * is it one of the ones we need to send an SI message for?
*
*/
if (relationId == RelOid_pg_class)
@@ -647,7 +652,7 @@ PrepareToInvalidateRelationCache(Relation relation,
return;
/*
- * register the relcache-invalidation action in the appropriate list
+ * register the relcache-invalidation action in the appropriate list
*
*/
Assert(PointerIsValid(function));
@@ -666,8 +671,9 @@ PrepareToInvalidateRelationCache(Relation relation,
void
DiscardInvalid(void)
{
+
/*
- * debugging stuff
+ * debugging stuff
*
*/
#ifdef INVALIDDEBUG
@@ -690,7 +696,7 @@ RegisterInvalid(bool send)
LocalInvalid invalid;
/*
- * debugging stuff
+ * debugging stuff
*
*/
#ifdef INVALIDDEBUG
@@ -698,7 +704,7 @@ RegisterInvalid(bool send)
#endif /* defined(INVALIDDEBUG) */
/*
- * Process and free the current list of inval messages.
+ * Process and free the current list of inval messages.
*
*/
@@ -734,7 +740,7 @@ ImmediateLocalInvalidation(bool send)
LocalInvalid invalid;
/*
- * debugging stuff
+ * debugging stuff
*
*/
#ifdef INVALIDDEBUG
@@ -742,7 +748,7 @@ ImmediateLocalInvalidation(bool send)
#endif /* defined(INVALIDDEBUG) */
/*
- * Process and free the local list of inval messages.
+ * Process and free the local list of inval messages.
*
*/
@@ -792,8 +798,9 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
void (*RelationIdRegisterFunc) (Oid, Oid),
const char *funcname)
{
+
/*
- * sanity checks
+ * sanity checks
*
*/
Assert(RelationIsValid(relation));
@@ -803,16 +810,16 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple,
return;
/*
- * We only need to worry about invalidation for tuples that are in
- * system relations; user-relation tuples are never in catcaches
- * and can't affect the relcache either.
+ * We only need to worry about invalidation for tuples that are in
+ * system relations; user-relation tuples are never in catcaches and
+ * can't affect the relcache either.
*
*/
if (!IsSystemRelationName(NameStr(RelationGetForm(relation)->relname)))
return;
/*
- * debugging stuff
+ * debugging stuff
*
*/
PrepareForTupleInvalidation_DEBUG1;
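
The comments in these inval.c hunks describe the same flow repeatedly: build an invalidation message, prepend it to a linked list (InvalidForall, InvalidLocal, or RollbackStack), and later process and free the whole list. A minimal sketch of that register-then-process pattern, with invented types in place of the backend's own:

#include <stdio.h>
#include <stdlib.h>

typedef struct InvalMsg
{
    int              cache_id;      /* which cache to flush */
    struct InvalMsg *next;
} InvalMsg;

static InvalMsg *
register_inval(InvalMsg *list, int cache_id)
{
    InvalMsg *msg = malloc(sizeof(InvalMsg));

    if (msg == NULL)
        abort();                    /* out of memory; just bail in a sketch */
    msg->cache_id = cache_id;
    msg->next = list;               /* prepend, like LocalInvalidRegister */
    return msg;
}

static void
process_and_free(InvalMsg *list, void (*action) (int))
{
    while (list != NULL)
    {
        InvalMsg *next = list->next;

        action(list->cache_id);
        free(list);
        list = next;
    }
}

static void
flush_cache(int cache_id)
{
    printf("invalidate cache %d\n", cache_id);
}

int
main(void)
{
    InvalMsg *pending = NULL;

    pending = register_inval(pending, 7);
    pending = register_inval(pending, 3);
    process_and_free(pending, flush_cache);
    return 0;
}
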
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 4e04cfea7c4..4882094bee1 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/lsyscache.c,v 1.50 2001/01/24 19:43:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/lsyscache.c,v 1.51 2001/03/22 03:59:57 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
@@ -62,7 +62,7 @@ get_attname(Oid relid, AttrNumber attnum)
if (HeapTupleIsValid(tp))
{
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
- char *result;
+ char *result;
result = pstrdup(NameStr(att_tup->attname));
ReleaseSysCache(tp);
@@ -118,7 +118,7 @@ get_atttype(Oid relid, AttrNumber attnum)
if (HeapTupleIsValid(tp))
{
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
- Oid result;
+ Oid result;
result = att_tup->atttypid;
ReleaseSysCache(tp);
@@ -144,7 +144,7 @@ get_attisset(Oid relid, char *attname)
if (HeapTupleIsValid(tp))
{
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
- bool result;
+ bool result;
result = att_tup->attisset;
ReleaseSysCache(tp);
@@ -172,7 +172,7 @@ get_atttypmod(Oid relid, AttrNumber attnum)
if (HeapTupleIsValid(tp))
{
Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
- int32 result;
+ int32 result;
result = att_tup->atttypmod;
ReleaseSysCache(tp);
@@ -306,7 +306,7 @@ get_opcode(Oid opno)
if (HeapTupleIsValid(tp))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
- RegProcedure result;
+ RegProcedure result;
result = optup->oprcode;
ReleaseSysCache(tp);
@@ -333,7 +333,7 @@ get_opname(Oid opno)
if (HeapTupleIsValid(tp))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
- char *result;
+ char *result;
result = pstrdup(NameStr(optup->oprname));
ReleaseSysCache(tp);
@@ -412,7 +412,7 @@ op_hashjoinable(Oid opno, Oid ltype, Oid rtype)
bool
op_iscachable(Oid opno)
{
- RegProcedure funcid = get_opcode(opno);
+ RegProcedure funcid = get_opcode(opno);
if (funcid == (RegProcedure) InvalidOid)
elog(ERROR, "Operator OID %u does not exist", opno);
@@ -436,7 +436,7 @@ get_commutator(Oid opno)
if (HeapTupleIsValid(tp))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
- Oid result;
+ Oid result;
result = optup->oprcom;
ReleaseSysCache(tp);
@@ -462,7 +462,7 @@ get_negator(Oid opno)
if (HeapTupleIsValid(tp))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
- Oid result;
+ Oid result;
result = optup->oprnegate;
ReleaseSysCache(tp);
@@ -488,7 +488,7 @@ get_oprrest(Oid opno)
if (HeapTupleIsValid(tp))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
- RegProcedure result;
+ RegProcedure result;
result = optup->oprrest;
ReleaseSysCache(tp);
@@ -514,7 +514,7 @@ get_oprjoin(Oid opno)
if (HeapTupleIsValid(tp))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tp);
- RegProcedure result;
+ RegProcedure result;
result = optup->oprjoin;
ReleaseSysCache(tp);
@@ -587,7 +587,7 @@ get_relnatts(Oid relid)
if (HeapTupleIsValid(tp))
{
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
- int result;
+ int result;
result = reltup->relnatts;
ReleaseSysCache(tp);
@@ -596,6 +596,7 @@ get_relnatts(Oid relid)
else
return InvalidAttrNumber;
}
+
#endif
/*
@@ -616,7 +617,7 @@ get_rel_name(Oid relid)
if (HeapTupleIsValid(tp))
{
Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
- char *result;
+ char *result;
result = pstrdup(NameStr(reltup->relname));
ReleaseSysCache(tp);
@@ -644,7 +645,7 @@ get_typlen(Oid typid)
if (HeapTupleIsValid(tp))
{
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
- int16 result;
+ int16 result;
result = typtup->typlen;
ReleaseSysCache(tp);
@@ -671,7 +672,7 @@ get_typbyval(Oid typid)
if (HeapTupleIsValid(tp))
{
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
- bool result;
+ bool result;
result = typtup->typbyval;
ReleaseSysCache(tp);
@@ -687,7 +688,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
@@ -720,7 +721,7 @@ get_typalign(Oid typid)
if (HeapTupleIsValid(tp))
{
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
- char result;
+ char result;
result = typtup->typalign;
ReleaseSysCache(tp);
@@ -743,7 +744,7 @@ get_typstorage(Oid typid)
if (HeapTupleIsValid(tp))
{
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
- char result;
+ char result;
result = typtup->typstorage;
ReleaseSysCache(tp);
@@ -864,7 +865,7 @@ get_typtype(Oid typid)
if (HeapTupleIsValid(tp))
{
Form_pg_type typtup = (Form_pg_type) GETSTRUCT(tp);
- char result;
+ char result;
result = typtup->typtype;
ReleaseSysCache(tp);
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index a2a539f5abf..fc97f46910a 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.128 2001/02/22 18:39:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/relcache.c,v 1.129 2001/03/22 03:59:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -135,7 +135,7 @@ typedef struct relidcacheent
typedef struct relnodecacheent
{
- RelFileNode relnode;
+ RelFileNode relnode;
Relation reldesc;
} RelNodeCacheEnt;
@@ -250,9 +250,11 @@ do { \
/* non-export function prototypes */
static void RelationClearRelation(Relation relation, bool rebuildIt);
+
#ifdef ENABLE_REINDEX_NAILED_RELATIONS
static void RelationReloadClassinfo(Relation relation);
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+
+#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
static void RelationFlushRelation(Relation relation);
static Relation RelationNameCacheGetRelation(const char *relationName);
static void RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp);
@@ -326,7 +328,7 @@ scan_pg_rel_seq(RelationBuildDescInfo buildinfo)
ScanKeyData key;
/*
- * form a scan key
+ * form a scan key
*
*/
switch (buildinfo.infotype)
@@ -351,7 +353,7 @@ scan_pg_rel_seq(RelationBuildDescInfo buildinfo)
}
/*
- * open pg_class and fetch a tuple
+ * open pg_class and fetch a tuple
*
*/
pg_class_desc = heap_openr(RelationRelationName, AccessShareLock);
@@ -359,18 +361,19 @@ scan_pg_rel_seq(RelationBuildDescInfo buildinfo)
pg_class_tuple = heap_getnext(pg_class_scan, 0);
/*
- * get set to return tuple
+ * get set to return tuple
*
*/
if (!HeapTupleIsValid(pg_class_tuple))
return_tuple = pg_class_tuple;
else
{
+
/*
- * a satanic bug used to live here: pg_class_tuple used to be
- * returned here without having the corresponding buffer pinned.
- * so when the buffer gets replaced, all hell breaks loose.
- * this bug is discovered and killed by wei on 9/27/91.
+ * a satanic bug used to live here: pg_class_tuple used to be
+ * returned here without having the corresponding buffer pinned.
+ * so when the buffer gets replaced, all hell breaks loose. this
+ * bug is discovered and killed by wei on 9/27/91.
*
*/
return_tuple = heap_copytuple(pg_class_tuple);
@@ -390,9 +393,10 @@ scan_pg_rel_ind(RelationBuildDescInfo buildinfo)
HeapTuple return_tuple;
pg_class_desc = heap_openr(RelationRelationName, AccessShareLock);
+
/*
- * If the indexes of pg_class are deactivated
- * we have to call scan_pg_rel_seq() instead.
+ * If the indexes of pg_class are deactivated we have to call
+ * scan_pg_rel_seq() instead.
*/
if (!pg_class_desc->rd_rel->relhasindex)
{
@@ -404,12 +408,12 @@ scan_pg_rel_ind(RelationBuildDescInfo buildinfo)
{
case INFO_RELID:
return_tuple = ClassOidIndexScan(pg_class_desc,
- ObjectIdGetDatum(buildinfo.i.info_id));
+ ObjectIdGetDatum(buildinfo.i.info_id));
break;
case INFO_RELNAME:
return_tuple = ClassNameIndexScan(pg_class_desc,
- PointerGetDatum(buildinfo.i.info_name));
+ PointerGetDatum(buildinfo.i.info_name));
break;
default:
@@ -445,14 +449,14 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * allocate space for new relation descriptor, if needed
+ * allocate space for new relation descriptor, if needed
*
*/
if (relation == NULL)
relation = (Relation) palloc(sizeof(RelationData));
/*
- * clear all fields of reldesc
+ * clear all fields of reldesc
*
*/
MemSet((char *) relation, 0, sizeof(RelationData));
@@ -461,14 +465,14 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
relation->rd_fd = -1;
/*
- * Copy the relation tuple form
+ * Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
- * relacl is NOT stored in the relcache --- there'd be little point
- * in it, since we don't copy the tuple's nullvalues bitmap and hence
- * wouldn't know if the value is valid ... bottom line is that relacl
- * *cannot* be retrieved from the relcache. Get it from the syscache
- * if you need it.
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
+ * relacl is NOT stored in the relcache --- there'd be little point in
+ * it, since we don't copy the tuple's nullvalues bitmap and hence
+ * wouldn't know if the value is valid ... bottom line is that relacl
+ * *cannot* be retrieved from the relcache. Get it from the syscache
+ * if you need it.
*
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -538,7 +542,7 @@ SetConstrOfRelation(Relation relation,
constr->num_check = relation->rd_rel->relchecks;
constr->check = (ConstrCheck *)
MemoryContextAlloc(CacheMemoryContext,
- constr->num_check * sizeof(ConstrCheck));
+ constr->num_check * sizeof(ConstrCheck));
MemSet(constr->check, 0, constr->num_check * sizeof(ConstrCheck));
RelCheckFetch(relation);
}
@@ -571,7 +575,7 @@ build_tupdesc_seq(RelationBuildDescInfo buildinfo,
constr->has_not_null = false;
/*
- * form a scan key
+ * form a scan key
*
*/
ScanKeyEntryInitialize(&key, 0,
@@ -580,14 +584,14 @@ build_tupdesc_seq(RelationBuildDescInfo buildinfo,
ObjectIdGetDatum(RelationGetRelid(relation)));
/*
- * open pg_attribute and begin a scan
+ * open pg_attribute and begin a scan
*
*/
pg_attribute_desc = heap_openr(AttributeRelationName, AccessShareLock);
pg_attribute_scan = heap_beginscan(pg_attribute_desc, 0, SnapshotNow, 1, &key);
/*
- * add attribute data to relation->rd_att
+ * add attribute data to relation->rd_att
*
*/
need = relation->rd_rel->relnatts;
@@ -601,7 +605,7 @@ build_tupdesc_seq(RelationBuildDescInfo buildinfo,
{
relation->rd_att->attrs[attp->attnum - 1] =
(Form_pg_attribute) MemoryContextAlloc(CacheMemoryContext,
- ATTRIBUTE_TUPLE_SIZE);
+ ATTRIBUTE_TUPLE_SIZE);
memcpy((char *) (relation->rd_att->attrs[attp->attnum - 1]),
(char *) attp,
@@ -621,7 +625,7 @@ build_tupdesc_seq(RelationBuildDescInfo buildinfo,
relation->rd_rel->relnatts *
sizeof(AttrDefault));
MemSet(attrdef, 0,
- relation->rd_rel->relnatts * sizeof(AttrDefault));
+ relation->rd_rel->relnatts * sizeof(AttrDefault));
}
attrdef[ndef].adnum = attp->attnum;
attrdef[ndef].adbin = NULL;
@@ -636,34 +640,32 @@ build_tupdesc_seq(RelationBuildDescInfo buildinfo,
need, (need == 1 ? "" : "s"), RelationGetRelid(relation));
/*
- * end the scan and close the attribute relation
+ * end the scan and close the attribute relation
*
*/
heap_endscan(pg_attribute_scan);
heap_close(pg_attribute_desc, AccessShareLock);
/*
- * The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
- * computed when and if needed during tuple access.
+ * The attcacheoff values we read from pg_attribute should all be -1
+ * ("unknown"). Verify this if assert checking is on. They will be
+ * computed when and if needed during tuple access.
*
*/
#ifdef USE_ASSERT_CHECKING
{
- int i;
+ int i;
for (i = 0; i < relation->rd_rel->relnatts; i++)
- {
Assert(relation->rd_att->attrs[i]->attcacheoff == -1);
- }
}
#endif
/*
- * However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special
- * cases for attnum=1 that used to exist in fastgetattr() and
- * index_getattr().
+ * However, we can easily set the attcacheoff value for the first
+ * attribute: it must be zero. This eliminates the need for special
+ * cases for attnum=1 that used to exist in fastgetattr() and
+ * index_getattr().
*
*/
relation->rd_att->attrs[0]->attcacheoff = 0;
@@ -693,18 +695,19 @@ build_tupdesc_ind(RelationBuildDescInfo buildinfo,
{
#ifdef _DROP_COLUMN_HACK__
bool columnDropped = false;
+
#endif /* _DROP_COLUMN_HACK__ */
atttup = AttributeRelidNumIndexScan(attrel,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
Int32GetDatum(i));
if (!HeapTupleIsValid(atttup))
{
#ifdef _DROP_COLUMN_HACK__
atttup = AttributeRelidNumIndexScan(attrel,
- ObjectIdGetDatum(RelationGetRelid(relation)),
- Int32GetDatum(DROPPED_COLUMN_INDEX(i)));
+ ObjectIdGetDatum(RelationGetRelid(relation)),
+ Int32GetDatum(DROPPED_COLUMN_INDEX(i)));
if (!HeapTupleIsValid(atttup))
#endif /* _DROP_COLUMN_HACK__ */
elog(ERROR, "cannot find attribute %d of relation %s", i,
@@ -754,23 +757,21 @@ build_tupdesc_ind(RelationBuildDescInfo buildinfo,
heap_close(attrel, AccessShareLock);
/*
- * The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
- * computed when and if needed during tuple access.
+ * The attcacheoff values we read from pg_attribute should all be -1
+ * ("unknown"). Verify this if assert checking is on. They will be
+ * computed when and if needed during tuple access.
*
*/
#ifdef USE_ASSERT_CHECKING
for (i = 0; i < relation->rd_rel->relnatts; i++)
- {
Assert(relation->rd_att->attrs[i]->attcacheoff == -1);
- }
#endif
/*
- * However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special
- * cases for attnum=1 that used to exist in fastgetattr() and
- * index_getattr().
+ * However, we can easily set the attcacheoff value for the first
+ * attribute: it must be zero. This eliminates the need for special
+ * cases for attnum=1 that used to exist in fastgetattr() and
+ * index_getattr().
*
*/
relation->rd_att->attrs[0]->attcacheoff = 0;
@@ -790,7 +791,7 @@ build_tupdesc_ind(RelationBuildDescInfo buildinfo,
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*
*/
@@ -815,14 +816,14 @@ RelationBuildRuleLock(Relation relation)
*/
rulescxt = AllocSetContextCreate(CacheMemoryContext,
RelationGetRelationName(relation),
- 0, /* minsize */
- 1024, /* initsize */
- 1024); /* maxsize */
+ 0, /* minsize */
+ 1024, /* initsize */
+ 1024); /* maxsize */
relation->rd_rulescxt = rulescxt;
/*
- * form an array to hold the rewrite rules (the array is extended if
- * necessary)
+ * form an array to hold the rewrite rules (the array is extended if
+ * necessary)
*
*/
maxlocks = 4;
@@ -831,7 +832,7 @@ RelationBuildRuleLock(Relation relation)
numlocks = 0;
/*
- * form a scan key
+ * form a scan key
*
*/
ScanKeyEntryInitialize(&key, 0,
@@ -840,7 +841,7 @@ RelationBuildRuleLock(Relation relation)
ObjectIdGetDatum(RelationGetRelid(relation)));
/*
- * open pg_rewrite and begin a scan
+ * open pg_rewrite and begin a scan
*
*/
pg_rewrite_desc = heap_openr(RewriteRelationName, AccessShareLock);
@@ -870,7 +871,7 @@ RelationBuildRuleLock(Relation relation)
pg_rewrite_tupdesc,
&isnull));
rule->isInstead = DatumGetBool(heap_getattr(pg_rewrite_tuple,
- Anum_pg_rewrite_is_instead,
+ Anum_pg_rewrite_is_instead,
pg_rewrite_tupdesc,
&isnull));
@@ -878,7 +879,7 @@ RelationBuildRuleLock(Relation relation)
Anum_pg_rewrite_ev_action,
pg_rewrite_tupdesc,
&isnull);
- Assert(! isnull);
+ Assert(!isnull);
ruleaction_str = DatumGetCString(DirectFunctionCall1(textout,
ruleaction));
oldcxt = MemoryContextSwitchTo(rulescxt);
@@ -890,9 +891,9 @@ RelationBuildRuleLock(Relation relation)
Anum_pg_rewrite_ev_qual,
pg_rewrite_tupdesc,
&isnull);
- Assert(! isnull);
+ Assert(!isnull);
rule_evqual_str = DatumGetCString(DirectFunctionCall1(textout,
- rule_evqual));
+ rule_evqual));
oldcxt = MemoryContextSwitchTo(rulescxt);
rule->qual = (Node *) stringToNode(rule_evqual_str);
MemoryContextSwitchTo(oldcxt);
@@ -908,14 +909,14 @@ RelationBuildRuleLock(Relation relation)
}
/*
- * end the scan and close the attribute relation
+ * end the scan and close the attribute relation
*
*/
heap_endscan(pg_rewrite_scan);
heap_close(pg_rewrite_desc, AccessShareLock);
/*
- * form a RuleLock and insert into relation
+ * form a RuleLock and insert into relation
*
*/
rulelock = (RuleLock *) MemoryContextAlloc(rulescxt, sizeof(RuleLock));
@@ -1022,58 +1023,58 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo,
MemoryContext oldcxt;
/*
- * find the tuple in pg_class corresponding to the given relation id
+ * find the tuple in pg_class corresponding to the given relation id
*
*/
pg_class_tuple = ScanPgRelation(buildinfo);
/*
- * if no such tuple exists, return NULL
+ * if no such tuple exists, return NULL
*
*/
if (!HeapTupleIsValid(pg_class_tuple))
return NULL;
/*
- * get information from the pg_class_tuple
+ * get information from the pg_class_tuple
*
*/
relid = pg_class_tuple->t_data->t_oid;
relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
/*
- * allocate storage for the relation descriptor,
- * and copy pg_class_tuple to relation->rd_rel.
+ * allocate storage for the relation descriptor, and copy
+ * pg_class_tuple to relation->rd_rel.
*
*/
relation = AllocateRelationDesc(oldrelation, relp);
/*
- * now we can free the memory allocated for pg_class_tuple
+ * now we can free the memory allocated for pg_class_tuple
*
*/
heap_freetuple(pg_class_tuple);
/*
- * initialize the relation's relation id (relation->rd_id)
+ * initialize the relation's relation id (relation->rd_id)
*
*/
RelationGetRelid(relation) = relid;
/*
- * initialize relation->rd_refcnt
+ * initialize relation->rd_refcnt
*
*/
RelationSetReferenceCount(relation, 1);
/*
- * normal relations are not nailed into the cache
+ * normal relations are not nailed into the cache
*
*/
relation->rd_isnailed = false;
/*
- * initialize the access method information (relation->rd_am)
+ * initialize the access method information (relation->rd_am)
*
*/
relam = relation->rd_rel->relam;
@@ -1082,13 +1083,13 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo,
CacheMemoryContext);
/*
- * initialize the tuple descriptor (relation->rd_att).
+ * initialize the tuple descriptor (relation->rd_att).
*
*/
RelationBuildTupleDesc(buildinfo, relation);
/*
- * Fetch rules and triggers that affect this relation
+ * Fetch rules and triggers that affect this relation
*
*/
if (relation->rd_rel->relhasrules)
@@ -1105,14 +1106,14 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo,
relation->trigdesc = NULL;
/*
- * initialize index strategy and support information for this relation
+ * initialize index strategy and support information for this relation
*
*/
if (OidIsValid(relam))
IndexedAccessMethodInitialize(relation);
/*
- * initialize the relation lock manager information
+ * initialize the relation lock manager information
*
*/
RelationInitLockInfo(relation); /* see lmgr.c */
@@ -1124,8 +1125,8 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo,
relation->rd_node.relNode = relation->rd_rel->relfilenode;
/*
- * open the relation and assign the file descriptor returned
- * by the storage manager code to rd_fd.
+ * open the relation and assign the file descriptor returned by the
+ * storage manager code to rd_fd.
*
*/
if (relation->rd_rel->relkind != RELKIND_VIEW)
@@ -1134,8 +1135,8 @@ RelationBuildDesc(RelationBuildDescInfo buildinfo,
relation->rd_fd = -1;
/*
- * insert newly created relation into proper relcaches,
- * restore memory context and return the new reldesc.
+ * insert newly created relation into proper relcaches, restore memory
+ * context and return the new reldesc.
*
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
@@ -1203,36 +1204,36 @@ formrdesc(char *relationName,
int i;
/*
- * allocate new relation desc
+ * allocate new relation desc
*
*/
relation = (Relation) palloc(sizeof(RelationData));
MemSet((char *) relation, 0, sizeof(RelationData));
/*
- * don't open the unix file yet..
+ * don't open the unix file yet..
*
*/
relation->rd_fd = -1;
/*
- * initialize reference count
+ * initialize reference count
*
*/
RelationSetReferenceCount(relation, 1);
/*
- * all entries built with this routine are nailed-in-cache
+ * all entries built with this routine are nailed-in-cache
*
*/
relation->rd_isnailed = true;
/*
- * initialize relation tuple form
+ * initialize relation tuple form
*
- * The data we insert here is pretty incomplete/bogus, but it'll
- * serve to get us launched. RelationCacheInitializePhase2() will
- * read the real data from pg_class and replace what we've done here.
+ * The data we insert here is pretty incomplete/bogus, but it'll serve to
+ * get us launched. RelationCacheInitializePhase2() will read the
+ * real data from pg_class and replace what we've done here.
*
*/
relation->rd_rel = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -1257,13 +1258,13 @@ formrdesc(char *relationName,
relation->rd_rel->relnatts = (int16) natts;
/*
- * initialize attribute tuple form
+ * initialize attribute tuple form
*
*/
relation->rd_att = CreateTemplateTupleDesc(natts);
/*
- * initialize tuple desc info
+ * initialize tuple desc info
*
*/
for (i = 0; i < natts; i++)
@@ -1275,13 +1276,13 @@ formrdesc(char *relationName,
}
/*
- * initialize relation id
+ * initialize relation id
*
*/
RelationGetRelid(relation) = relation->rd_att->attrs[0]->attrelid;
/*
- * initialize the relation's lock manager and RelFileNode information
+ * initialize the relation's lock manager and RelFileNode information
*
*/
RelationInitLockInfo(relation); /* see lmgr.c */
@@ -1290,11 +1291,11 @@ formrdesc(char *relationName,
relation->rd_node.tblNode = InvalidOid;
else
relation->rd_node.tblNode = MyDatabaseId;
- relation->rd_node.relNode =
+ relation->rd_node.relNode =
relation->rd_rel->relfilenode = RelationGetRelid(relation);
/*
- * initialize the rel-has-index flag, using hardwired knowledge
+ * initialize the rel-has-index flag, using hardwired knowledge
*
*/
relation->rd_rel->relhasindex = false;
@@ -1313,7 +1314,7 @@ formrdesc(char *relationName,
}
/*
- * add new reldesc to relcache
+ * add new reldesc to relcache
*
*/
RelationCacheInsert(relation);
@@ -1336,7 +1337,7 @@ fixrdesc(char *relationName)
Relation relation;
/*
- * find the tuple in pg_class corresponding to the given relation name
+ * find the tuple in pg_class corresponding to the given relation name
*
*/
buildinfo.infotype = INFO_RELNAME;
@@ -1350,7 +1351,7 @@ fixrdesc(char *relationName)
relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
/*
- * find the pre-made relcache entry (better be there!)
+ * find the pre-made relcache entry (better be there!)
*
*/
relation = RelationNameCacheGetRelation(relationName);
@@ -1359,8 +1360,8 @@ fixrdesc(char *relationName)
relationName);
/*
- * and copy pg_class_tuple to relation->rd_rel.
- * (See notes in AllocateRelationDesc())
+ * and copy pg_class_tuple to relation->rd_rel. (See notes in
+ * AllocateRelationDesc())
*
*/
Assert(relation->rd_rel != NULL);
@@ -1474,14 +1475,14 @@ RelationIdGetRelation(Oid relationId)
RelationBuildDescInfo buildinfo;
/*
- * increment access statistics
+ * increment access statistics
*
*/
IncrHeapAccessStat(local_RelationIdGetRelation);
IncrHeapAccessStat(global_RelationIdGetRelation);
/*
- * first try and get a reldesc from the cache
+ * first try and get a reldesc from the cache
*
*/
rd = RelationIdCacheGetRelation(relationId);
@@ -1489,8 +1490,8 @@ RelationIdGetRelation(Oid relationId)
return rd;
/*
- * no reldesc in the cache, so have RelationBuildDesc()
- * build one and add it.
+ * no reldesc in the cache, so have RelationBuildDesc() build one and
+ * add it.
*
*/
buildinfo.infotype = INFO_RELID;
@@ -1514,15 +1515,15 @@ RelationNameGetRelation(const char *relationName)
RelationBuildDescInfo buildinfo;
/*
- * increment access statistics
+ * increment access statistics
*
*/
IncrHeapAccessStat(local_RelationNameGetRelation);
IncrHeapAccessStat(global_RelationNameGetRelation);
/*
- * if caller is looking for a temp relation, substitute its real name;
- * we only index temp rels by their real names.
+ * if caller is looking for a temp relation, substitute its real name;
+ * we only index temp rels by their real names.
*
*/
temprelname = get_temp_rel_by_username(relationName);
@@ -1530,7 +1531,7 @@ RelationNameGetRelation(const char *relationName)
relationName = temprelname;
/*
- * first try and get a reldesc from the cache
+ * first try and get a reldesc from the cache
*
*/
rd = RelationNameCacheGetRelation(relationName);
@@ -1538,8 +1539,8 @@ RelationNameGetRelation(const char *relationName)
return rd;
/*
- * no reldesc in the cache, so have RelationBuildDesc()
- * build one and add it.
+ * no reldesc in the cache, so have RelationBuildDesc() build one and
+ * add it.
*
*/
buildinfo.infotype = INFO_RELNAME;
@@ -1592,7 +1593,7 @@ RelationReloadClassinfo(Relation relation)
{
RelationBuildDescInfo buildinfo;
HeapTuple pg_class_tuple;
- Form_pg_class relp;
+ Form_pg_class relp;
if (!relation->rd_rel)
return;
@@ -1613,7 +1614,8 @@ RelationReloadClassinfo(Relation relation)
return;
}
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+
+#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
/*
* RelationClearRelation
@@ -1633,7 +1635,7 @@ RelationClearRelation(Relation relation, bool rebuildIt)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted,
- * the next smgr access should reopen the files automatically. This
+ * the next smgr access should reopen the files automatically. This
* ensures that the low-level file access state is updated after, say,
* a vacuum truncation.
*/
@@ -1648,7 +1650,7 @@ RelationClearRelation(Relation relation, bool rebuildIt)
{
#ifdef ENABLE_REINDEX_NAILED_RELATIONS
RelationReloadClassinfo(relation);
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
return;
}
@@ -1740,9 +1742,7 @@ RelationClearRelation(Relation relation, bool rebuildIt)
relation->rd_att = old_att;
}
else
- {
FreeTupleDesc(old_att);
- }
if (equalRuleLocks(old_rules, relation->rd_rules))
{
if (relation->rd_rulescxt)
@@ -1761,9 +1761,7 @@ RelationClearRelation(Relation relation, bool rebuildIt)
relation->trigdesc = old_trigdesc;
}
else
- {
FreeTriggerDesc(old_trigdesc);
- }
relation->rd_nblocks = old_nblocks;
/*
@@ -1787,6 +1785,7 @@ RelationFlushRelation(Relation relation)
if (relation->rd_myxactonly)
{
+
/*
* Local rels should always be rebuilt, not flushed; the relcache
* entry must live until RelationPurgeLocalRelation().
@@ -1912,7 +1911,7 @@ RelationFlushIndexes(Relation *r,
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because HashTableWalk only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
@@ -1923,8 +1922,8 @@ RelationFlushIndexes(Relation *r,
void
RelationCacheInvalidate(void)
{
- List *rebuildList = NIL;
- List *l;
+ List *rebuildList = NIL;
+ List *l;
/* Phase 1 */
HashTableWalk(RelationNameCache,
@@ -1932,7 +1931,7 @@ RelationCacheInvalidate(void)
PointerGetDatum(&rebuildList));
/* Phase 2: rebuild the items found to need rebuild in phase 1 */
- foreach (l, rebuildList)
+ foreach(l, rebuildList)
{
Relation relation = (Relation) lfirst(l);
@@ -1945,7 +1944,7 @@ static void
RelationCacheInvalidateWalker(Relation *relationPtr, Datum listp)
{
Relation relation = *relationPtr;
- List **rebuildList = (List **) DatumGetPointer(listp);
+ List **rebuildList = (List **) DatumGetPointer(listp);
/* We can ignore xact-local relations, since they are never SI targets */
if (relation->rd_myxactonly)
@@ -2070,7 +2069,7 @@ RelationCacheInitialize(void)
HASHCTL ctl;
/*
- * switch to cache memory context
+ * switch to cache memory context
*
*/
if (!CacheMemoryContext)
@@ -2079,7 +2078,7 @@ RelationCacheInitialize(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * create global caches
+ * create global caches
*
*/
MemSet(&ctl, 0, (int) sizeof(ctl));
@@ -2095,14 +2094,14 @@ RelationCacheInitialize(void)
ctl.keysize = sizeof(RelFileNode);
ctl.hash = tag_hash;
RelationNodeCache = hash_create(INITRELCACHESIZE, &ctl,
- HASH_ELEM | HASH_FUNCTION);
+ HASH_ELEM | HASH_FUNCTION);
/*
- * initialize the cache with pre-made relation descriptors
- * for some of the more important system relations. These
- * relations should always be in the cache.
+ * initialize the cache with pre-made relation descriptors for some of
+ * the more important system relations. These relations should always
+ * be in the cache.
*
- * NB: see also the list in RelationCacheInitializePhase2().
+ * NB: see also the list in RelationCacheInitializePhase2().
*
*/
formrdesc(RelationRelationName, Natts_pg_class, Desc_pg_class);
@@ -2113,9 +2112,9 @@ RelationCacheInitialize(void)
formrdesc(LogRelationName, Natts_pg_log, Desc_pg_log);
/*
- * init_irels() used to be called here. It is changed to be called
- * in RelationCacheInitializePhase2() now so that transactional
- * control could guarantee the consistency.
+ * init_irels() used to be called here. It is changed to be called in
+ * RelationCacheInitializePhase2() now so that transactional control
+ * could guarantee the consistency.
*/
MemoryContextSwitchTo(oldcxt);
@@ -2131,21 +2130,25 @@ RelationCacheInitialize(void)
void
RelationCacheInitializePhase2(void)
{
+
/*
* Get the real pg_class tuple for each nailed-in-cache relcache entry
* that was made by RelationCacheInitialize(), and replace the phony
- * rd_rel entry made by formrdesc(). This is necessary so that we have,
- * for example, the correct toast-table info for tables that have such.
+ * rd_rel entry made by formrdesc(). This is necessary so that we
+ * have, for example, the correct toast-table info for tables that
+ * have such.
*/
if (!IsBootstrapProcessingMode())
{
+
/*
* Initialize critical system index relation descriptors, first.
* They are to make building relation descriptors fast.
- * init_irels() used to be called in RelationCacheInitialize().
- * It is changed to be called here to be transaction safe.
+ * init_irels() used to be called in RelationCacheInitialize(). It
+ * is changed to be called here to be transaction safe.
*/
MemoryContext oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+
init_irels();
MemoryContextSwitchTo(oldcxt);
@@ -2154,19 +2157,23 @@ RelationCacheInitializePhase2(void)
fixrdesc(AttributeRelationName);
fixrdesc(ProcedureRelationName);
fixrdesc(TypeRelationName);
- /* We don't bother to update the entries for pg_variable or pg_log. */
+
+ /*
+ * We don't bother to update the entries for pg_variable or
+ * pg_log.
+ */
}
}
/* used by XLogInitCache */
-void CreateDummyCaches(void);
-void DestroyDummyCaches(void);
+void CreateDummyCaches(void);
+void DestroyDummyCaches(void);
void
CreateDummyCaches(void)
{
- MemoryContext oldcxt;
- HASHCTL ctl;
+ MemoryContext oldcxt;
+ HASHCTL ctl;
if (!CacheMemoryContext)
CreateCacheMemoryContext();
@@ -2186,14 +2193,14 @@ CreateDummyCaches(void)
ctl.keysize = sizeof(RelFileNode);
ctl.hash = tag_hash;
RelationNodeCache = hash_create(INITRELCACHESIZE, &ctl,
- HASH_ELEM | HASH_FUNCTION);
+ HASH_ELEM | HASH_FUNCTION);
MemoryContextSwitchTo(oldcxt);
}
void
DestroyDummyCaches(void)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
if (!CacheMemoryContext)
return;
@@ -2293,8 +2300,8 @@ AttrDefaultFetch(Relation relation)
RelationGetRelationName(relation));
else
attrdef[i].adbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
break;
}
if (hasindex)
@@ -2399,8 +2406,8 @@ RelCheckFetch(Relation relation)
elog(ERROR, "RelCheckFetch: rcbin IS NULL for rel %s",
RelationGetRelationName(relation));
check[found].ccbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
found++;
if (hasindex)
ReleaseBuffer(buffer);
@@ -2438,7 +2445,7 @@ RelCheckFetch(Relation relation)
*
* Since shared cache inval causes the relcache's copy of the list to go away,
* we return a copy of the list palloc'd in the caller's context. The caller
- * may freeList() the returned list after scanning it. This is necessary
+ * may freeList() the returned list after scanning it. This is necessary
* since the caller will typically be doing syscache lookups on the relevant
* indexes, and syscache lookup could cause SI messages to be processed!
*/
@@ -2468,7 +2475,7 @@ RelationGetIndexList(Relation relation)
(bits16) 0x0,
(AttrNumber) 1,
(RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
+ ObjectIdGetDatum(RelationGetRelid(relation)));
sd = index_beginscan(irel, false, 1, &skey);
}
else
@@ -2477,18 +2484,18 @@ RelationGetIndexList(Relation relation)
(bits16) 0x0,
(AttrNumber) Anum_pg_index_indrelid,
(RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
+ ObjectIdGetDatum(RelationGetRelid(relation)));
hscan = heap_beginscan(indrel, false, SnapshotNow, 1, &skey);
}
/*
- * We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
- * list into the relcache entry. This avoids cache-context memory leakage
- * if we get some sort of error partway through.
+ * We build the list we intend to return (in the caller's context)
+ * while doing the scan. After successfully completing the scan, we
+ * copy that list into the relcache entry. This avoids cache-context
+ * memory leakage if we get some sort of error partway through.
*/
result = NIL;
-
+
for (;;)
{
HeapTupleData tuple;
@@ -2806,6 +2813,7 @@ write_irels(void)
fd = PathNameOpenFile(tempfilename, O_WRONLY | O_CREAT | O_TRUNC | PG_BINARY, 0600);
if (fd < 0)
{
+
/*
* We used to consider this a fatal error, but we might as well
* continue with backend startup ...
@@ -2943,7 +2951,5 @@ write_irels(void)
* previously-existing init file.
*/
if (rename(tempfilename, finalfilename) < 0)
- {
elog(NOTICE, "Cannot rename init file %s to %s: %m\n\tContinuing anyway, but there's something wrong.", tempfilename, finalfilename);
- }
}
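
The tail of write_irels() above relies on writing the new relcache init file under a temporary name and then rename()-ing it over the previously-existing file. A small sketch of that write-temp-then-rename pattern, assuming POSIX rename() semantics; the file names and contents are placeholders:

#include <stdio.h>

static int
replace_file_atomically(const char *finalname, const char *tempname,
                        const char *contents)
{
    FILE       *fp = fopen(tempname, "wb");

    if (fp == NULL)
        return -1;
    if (fputs(contents, fp) == EOF)
    {
        fclose(fp);
        return -1;
    }
    if (fclose(fp) != 0)
        return -1;

    /* on POSIX systems rename() replaces the target in a single step */
    return rename(tempname, finalname);
}

int
main(void)
{
    if (replace_file_atomically("relcache.init", "relcache.init.tmp",
                                "example contents\n") != 0)
        perror("replace_file_atomically");
    return 0;
}
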
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 4a6a8c00bca..75ef3179202 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/syscache.c,v 1.59 2001/02/22 18:39:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/syscache.c,v 1.60 2001/03/22 03:59:57 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
@@ -60,7 +60,7 @@
In include/catalog/indexing.h, add a define for the number of indexes
on the relation, add define(s) for the index name(s), add an extern
array to hold the index names, and use DECLARE_UNIQUE_INDEX to define
- the index. Cache lookups return only one row, so the index should be
+ the index. Cache lookups return only one row, so the index should be
unique in most cases.
In backend/catalog/indexing.c, initialize the relation array with
@@ -89,261 +89,262 @@ struct cachedesc
static struct cachedesc cacheinfo[] = {
{AggregateRelationName, /* AGGNAME */
- AggregateNameTypeIndex,
+ AggregateNameTypeIndex,
2,
{
Anum_pg_aggregate_aggname,
Anum_pg_aggregate_aggbasetype,
0,
0
- }},
+ }},
{AccessMethodRelationName, /* AMNAME */
- AmNameIndex,
+ AmNameIndex,
1,
{
Anum_pg_am_amname,
0,
0,
0
- }},
+ }},
{AccessMethodOperatorRelationName, /* AMOPOPID */
- AccessMethodOpidIndex,
+ AccessMethodOpidIndex,
3,
{
Anum_pg_amop_amopclaid,
Anum_pg_amop_amopopr,
Anum_pg_amop_amopid,
0
- }},
+ }},
{AccessMethodOperatorRelationName, /* AMOPSTRATEGY */
- AccessMethodStrategyIndex,
+ AccessMethodStrategyIndex,
3,
{
Anum_pg_amop_amopid,
Anum_pg_amop_amopclaid,
Anum_pg_amop_amopstrategy,
0
- }},
+ }},
{AttributeRelationName, /* ATTNAME */
- AttributeRelidNameIndex,
+ AttributeRelidNameIndex,
2,
{
Anum_pg_attribute_attrelid,
Anum_pg_attribute_attname,
0,
0
- }},
+ }},
{AttributeRelationName, /* ATTNUM */
- AttributeRelidNumIndex,
+ AttributeRelidNumIndex,
2,
{
Anum_pg_attribute_attrelid,
Anum_pg_attribute_attnum,
0,
0
- }},
+ }},
{OperatorClassRelationName, /* CLADEFTYPE */
- OpclassDeftypeIndex,
+ OpclassDeftypeIndex,
1,
{
Anum_pg_opclass_opcdeftype,
0,
0,
0
- }},
+ }},
{OperatorClassRelationName, /* CLANAME */
- OpclassNameIndex,
+ OpclassNameIndex,
1,
{
Anum_pg_opclass_opcname,
0,
0,
0
- }},
+ }},
{GroupRelationName, /* GRONAME */
- GroupNameIndex,
+ GroupNameIndex,
1,
{
Anum_pg_group_groname,
0,
0,
0
- }},
+ }},
{GroupRelationName, /* GROSYSID */
- GroupSysidIndex,
+ GroupSysidIndex,
1,
{
Anum_pg_group_grosysid,
0,
0,
0
- }},
+ }},
{IndexRelationName, /* INDEXRELID */
- IndexRelidIndex,
+ IndexRelidIndex,
1,
{
Anum_pg_index_indexrelid,
0,
0,
0
- }},
+ }},
{InheritsRelationName, /* INHRELID */
- InheritsRelidSeqnoIndex,
+ InheritsRelidSeqnoIndex,
2,
{
Anum_pg_inherits_inhrelid,
Anum_pg_inherits_inhseqno,
0,
0
- }},
+ }},
{LanguageRelationName, /* LANGNAME */
- LanguageNameIndex,
+ LanguageNameIndex,
1,
{
Anum_pg_language_lanname,
0,
0,
0
- }},
+ }},
{LanguageRelationName, /* LANGOID */
- LanguageOidIndex,
+ LanguageOidIndex,
1,
{
ObjectIdAttributeNumber,
0,
0,
0
- }},
+ }},
{ListenerRelationName, /* LISTENREL */
- ListenerPidRelnameIndex,
+ ListenerPidRelnameIndex,
2,
{
Anum_pg_listener_pid,
Anum_pg_listener_relname,
0,
0
- }},
+ }},
{OperatorRelationName, /* OPERNAME */
- OperatorNameIndex,
+ OperatorNameIndex,
4,
{
Anum_pg_operator_oprname,
Anum_pg_operator_oprleft,
Anum_pg_operator_oprright,
Anum_pg_operator_oprkind
- }},
+ }},
{OperatorRelationName, /* OPEROID */
- OperatorOidIndex,
+ OperatorOidIndex,
1,
{
ObjectIdAttributeNumber,
0,
0,
0
- }},
+ }},
{ProcedureRelationName, /* PROCNAME */
- ProcedureNameIndex,
+ ProcedureNameIndex,
3,
{
Anum_pg_proc_proname,
Anum_pg_proc_pronargs,
Anum_pg_proc_proargtypes,
0
- }},
+ }},
{ProcedureRelationName, /* PROCOID */
- ProcedureOidIndex,
+ ProcedureOidIndex,
1,
{
ObjectIdAttributeNumber,
0,
0,
0
- }},
+ }},
{RelationRelationName, /* RELNAME */
- ClassNameIndex,
+ ClassNameIndex,
1,
{
Anum_pg_class_relname,
0,
0,
0
- }},
+ }},
{RelationRelationName, /* RELOID */
- ClassOidIndex,
+ ClassOidIndex,
1,
{
ObjectIdAttributeNumber,
0,
0,
0
- }},
+ }},
{RewriteRelationName, /* REWRITENAME */
- RewriteRulenameIndex,
+ RewriteRulenameIndex,
1,
{
Anum_pg_rewrite_rulename,
0,
0,
0
- }},
+ }},
{RewriteRelationName, /* RULEOID */
- RewriteOidIndex,
+ RewriteOidIndex,
1,
{
ObjectIdAttributeNumber,
0,
0,
0
- }},
+ }},
{ShadowRelationName, /* SHADOWNAME */
- ShadowNameIndex,
+ ShadowNameIndex,
1,
{
Anum_pg_shadow_usename,
0,
0,
0
- }},
+ }},
{ShadowRelationName, /* SHADOWSYSID */
- ShadowSysidIndex,
+ ShadowSysidIndex,
1,
{
Anum_pg_shadow_usesysid,
0,
0,
0
- }},
+ }},
{StatisticRelationName, /* STATRELID */
- StatisticRelidAttnumIndex,
+ StatisticRelidAttnumIndex,
2,
{
Anum_pg_statistic_starelid,
Anum_pg_statistic_staattnum,
0,
0
- }},
+ }},
{TypeRelationName, /* TYPENAME */
- TypeNameIndex,
+ TypeNameIndex,
1,
{
Anum_pg_type_typname,
0,
0,
0
- }},
+ }},
{TypeRelationName, /* TYPEOID */
- TypeOidIndex,
+ TypeOidIndex,
1,
{
ObjectIdAttributeNumber,
0,
0,
0
- }}
+ }}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
-static int SysCacheSize = lengthof(cacheinfo);
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
+static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
@@ -358,7 +359,7 @@ IsCacheInitialized(void)
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens only upon first use
* of that cache.
*/
@@ -419,8 +420,8 @@ SearchSysCache(int cacheId,
/*
* If someone tries to look up a relname, translate temp relation
- * names to real names. Less obviously, apply the same translation
- * to type names, so that the type tuple of a temp table will be found
+ * names to real names. Less obviously, apply the same translation to
+ * type names, so that the type tuple of a temp table will be found
* when sought. This is a kluge ... temp table substitution should be
* happening at a higher level ...
*/
@@ -522,8 +523,8 @@ SysCacheGetAttr(int cacheId, HeapTuple tup,
/*
* We just need to get the TupleDesc out of the cache entry, and then
* we can apply heap_getattr(). We expect that the cache control data
- * is currently valid --- if the caller recently fetched the tuple, then
- * it should be.
+ * is currently valid --- if the caller recently fetched the tuple,
+ * then it should be.
*/
if (cacheId < 0 || cacheId >= SysCacheSize)
elog(ERROR, "SysCacheGetAttr: Bad cache id %d", cacheId);
diff --git a/src/backend/utils/cache/temprel.c b/src/backend/utils/cache/temprel.c
index b4ca06bbce6..88dc606cb0e 100644
--- a/src/backend/utils/cache/temprel.c
+++ b/src/backend/utils/cache/temprel.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/Attic/temprel.c,v 1.34 2001/01/24 19:43:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/Attic/temprel.c,v 1.35 2001/03/22 03:59:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
*
* When a temp table is created, normal entries are made for it in pg_class,
* pg_type, etc using a unique "physical" relation name. We also make an
- * entry in the temp table list maintained by this module. Subsequently,
+ * entry in the temp table list maintained by this module. Subsequently,
* relname lookups are filtered through the temp table list, and attempts
* to look up a temp table name are changed to look up the physical name.
* This allows temp table names to mask a regular table of the same name
@@ -50,12 +50,13 @@ typedef struct TempTable
NameData relname; /* underlying unique name */
Oid relid; /* needed properties of rel */
char relkind;
+
/*
- * If this entry was created during this xact, it should be deleted
- * at xact abort. Conversely, if this entry was deleted during this
- * xact, it should be removed at xact commit. We leave deleted entries
- * in the list until commit so that we can roll back if needed ---
- * but we ignore them for purposes of lookup!
+ * If this entry was created during this xact, it should be deleted at
+ * xact abort. Conversely, if this entry was deleted during this
+ * xact, it should be removed at xact commit. We leave deleted
+ * entries in the list until commit so that we can roll back if needed
+ * --- but we ignore them for purposes of lookup!
*/
bool created_in_cur_xact;
bool deleted_in_cur_xact;
@@ -110,7 +111,11 @@ remove_temp_rel_by_relid(Oid relid)
if (temp_rel->relid == relid)
temp_rel->deleted_in_cur_xact = true;
- /* Keep scanning 'cause there could be multiple matches; see RENAME */
+
+ /*
+ * Keep scanning 'cause there could be multiple matches; see
+ * RENAME
+ */
}
}
@@ -161,10 +166,10 @@ rename_temp_relation(const char *oldname,
* xact. One of these entries will be deleted at xact end.
*
* NOTE: the new mapping entry is inserted into the list just after
- * the old one. We could alternatively insert it before the old one,
- * but that'd take more code. It does need to be in one spot or the
- * other, to ensure that deletion of temp rels happens in the right
- * order during remove_all_temp_relations().
+ * the old one. We could alternatively insert it before the old
+ * one, but that'd take more code. It does need to be in one spot
+ * or the other, to ensure that deletion of temp rels happens in
+ * the right order during remove_all_temp_relations().
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
@@ -208,11 +213,11 @@ remove_all_temp_relations(void)
StartTransactionCommand();
/*
- * Scan the list and delete all entries not already deleted.
- * We need not worry about list entries getting deleted from under us,
- * because remove_temp_rel_by_relid() doesn't remove entries, only
- * mark them dead. Note that entries will be deleted in reverse order
- * of creation --- that's critical for cases involving inheritance.
+ * Scan the list and delete all entries not already deleted. We need
+ * not worry about list entries getting deleted from under us, because
+ * remove_temp_rel_by_relid() doesn't remove entries, only mark them
+ * dead. Note that entries will be deleted in reverse order of
+ * creation --- that's critical for cases involving inheritance.
*/
foreach(l, temp_rels)
{
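The comments above say that entries deleted in the current transaction stay on the list until commit but are ignored by lookups. A hedged sketch of such a lookup filter, assuming a user-visible name field (user_relname here) alongside the unique physical relname; the function name and return convention are illustrative, not copied from temprel.c:

	/* Sketch: find the temp-table entry for a user-visible relname,
	 * skipping entries already marked deleted in this transaction. */
	static TempTable *
	lookup_temp_rel(const char *relname)
	{
		List	   *l;

		foreach(l, temp_rels)
		{
			TempTable  *t = (TempTable *) lfirst(l);

			if (t->deleted_in_cur_xact)
				continue;		/* dead until commit; ignore for lookup */
			if (strcmp(NameStr(t->user_relname), relname) == 0)
				return t;
		}
		return NULL;
	}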
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index a8b6215930e..55c7e69c67b 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/error/elog.c,v 1.82 2001/03/10 04:21:51 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/error/elog.c,v 1.83 2001/03/22 03:59:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,24 +52,24 @@ extern CommandDest whereToSendOutput;
* 2 = syslog only
* ... in theory anyway
*/
-int Use_syslog = 0;
-char *Syslog_facility;
-char *Syslog_ident;
+int Use_syslog = 0;
+char *Syslog_facility;
+char *Syslog_ident;
static void write_syslog(int level, const char *line);
#else
-# define Use_syslog 0
+#define Use_syslog 0
#endif
-bool Log_timestamp;
-bool Log_pid;
+bool Log_timestamp;
+bool Log_pid;
#define TIMESTAMP_SIZE 20 /* format `YYYY-MM-DD HH:MM:SS ' */
#define PID_SIZE 9 /* format `[123456] ' */
-static const char * print_timestamp(void);
-static const char * print_pid(void);
+static const char *print_timestamp(void);
+static const char *print_pid(void);
static int Debugfile = -1;
static int ElogDebugIndentLevel = 0;
@@ -93,9 +93,10 @@ static int ElogDebugIndentLevel = 0;
*--------------------
*/
void
-elog(int lev, const char *fmt, ...)
+elog(int lev, const char *fmt,...)
{
va_list ap;
+
/*
* The expanded format and final output message are dynamically
* allocated if necessary, but not if they fit in the "reasonable
@@ -108,17 +109,19 @@ elog(int lev, const char *fmt, ...)
* Note that we use malloc() not palloc() because we want to retain
* control if we run out of memory. palloc() would recursively call
* elog(ERROR), which would be all right except if we are working on a
- * FATAL or REALLYFATAL error. We'd lose track of the fatal condition
+ * FATAL or REALLYFATAL error. We'd lose track of the fatal condition
* and report a mere ERROR to outer loop, which would be a Bad Thing.
- * So, we substitute an appropriate message in-place, without downgrading
- * the level if it's above ERROR.
+ * So, we substitute an appropriate message in-place, without
+ * downgrading the level if it's above ERROR.
*/
char fmt_fixedbuf[128];
char msg_fixedbuf[256];
char *fmt_buf = fmt_fixedbuf;
char *msg_buf = msg_fixedbuf;
+
/* this buffer is only used for strange values of lev: */
char prefix_buf[32];
+
/* this buffer is only used if errno has a bogus value: */
char errorstr_buf[32];
const char *errorstr;
@@ -128,6 +131,7 @@ elog(int lev, const char *fmt, ...)
int indent = 0;
int space_needed;
int len;
+
/* size of the prefix needed for timestamp and pid, if enabled */
size_t timestamp_size;
@@ -136,6 +140,7 @@ elog(int lev, const char *fmt, ...)
/* Save error str before calling any function that might change errno */
errorstr = strerror(errno);
+
/*
* Some strerror()s return an empty string for out-of-range errno.
* This is ANSI C spec compliant, but not exactly useful.
@@ -148,16 +153,18 @@ elog(int lev, const char *fmt, ...)
if (lev == ERROR || lev == FATAL)
{
+
/*
- * Convert initialization errors into fatal errors.
- * This is probably redundant, because Warn_restart_ready won't
- * be set anyway...
+ * Convert initialization errors into fatal errors. This is
+ * probably redundant, because Warn_restart_ready won't be set
+ * anyway...
*/
if (IsInitProcessingMode())
lev = FATAL;
+
/*
- * If we are inside a critical section, all errors become STOP errors.
- * See miscadmin.h.
+ * If we are inside a critical section, all errors become STOP
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
lev = STOP;
@@ -334,7 +341,7 @@ elog(int lev, const char *fmt, ...)
/* Write to syslog, if enabled */
if (Use_syslog >= 1)
{
- int syslog_level;
+ int syslog_level;
switch (lev)
{
@@ -360,7 +367,7 @@ elog(int lev, const char *fmt, ...)
write_syslog(syslog_level, msg_buf + timestamp_size);
}
-#endif /* ENABLE_SYSLOG */
+#endif /* ENABLE_SYSLOG */
/* syslog doesn't want a trailing newline, but other destinations do */
strcat(msg_buf, "\n");
@@ -379,10 +386,10 @@ elog(int lev, const char *fmt, ...)
char msgtype;
/*
- * Since backend libpq may call palloc(), switch to a context where
- * there's fairly likely to be some free space. After all the
- * pushups above, we don't want to drop the ball by running out of
- * space now...
+ * Since backend libpq may call palloc(), switch to a context
+ * where there's fairly likely to be some free space. After all
+ * the pushups above, we don't want to drop the ball by running
+ * out of space now...
*/
oldcxt = MemoryContextSwitchTo(ErrorContext);
@@ -452,17 +459,19 @@ elog(int lev, const char *fmt, ...)
* responsibility to see that this doesn't turn into infinite
* recursion!) But in the latter case, we exit with nonzero exit
* code to indicate that something's pretty wrong. We also want
- * to exit with nonzero exit code if not running under the postmaster
- * (for example, if we are being run from the initdb script, we'd
- * better return an error status).
+ * to exit with nonzero exit code if not running under the
+ * postmaster (for example, if we are being run from the initdb
+ * script, we'd better return an error status).
*/
if (lev == FATAL || !Warn_restart_ready || proc_exit_inprogress)
{
+
/*
* fflush here is just to improve the odds that we get to see
- * the error message, in case things are so hosed that proc_exit
- * crashes. Any other code you might be tempted to add here
- * should probably be in an on_proc_exit callback instead.
+ * the error message, in case things are so hosed that
+ * proc_exit crashes. Any other code you might be tempted to
+ * add here should probably be in an on_proc_exit callback
+ * instead.
*/
fflush(stdout);
fflush(stderr);
@@ -484,6 +493,7 @@ elog(int lev, const char *fmt, ...)
if (lev > FATAL)
{
+
/*
* Serious crash time. Postmaster will observe nonzero process
* exit status and kill the other backends too.
@@ -511,6 +521,7 @@ DebugFileOpen(void)
if (OutputFileName[0])
{
+
/*
* A debug-output file name was given.
*
@@ -530,6 +541,7 @@ DebugFileOpen(void)
elog(FATAL, "DebugFileOpen: %s reopen as stderr: %m",
OutputFileName);
Debugfile = fileno(stderr);
+
/*
* If the file is a tty and we're running under the postmaster,
* try to send stdout there as well (if it isn't a tty then stderr
@@ -565,12 +577,12 @@ DebugFileOpen(void)
/*
* Return a timestamp string like
*
- * "2000-06-04 13:12:03 "
+ * "2000-06-04 13:12:03 "
*/
static const char *
print_timestamp(void)
{
- time_t curtime;
+ time_t curtime;
static char buf[TIMESTAMP_SIZE + 1];
curtime = time(NULL);
@@ -587,7 +599,7 @@ print_timestamp(void)
/*
* Return a string like
*
- * "[123456] "
+ * "[123456] "
*
* with the current pid.
*/
@@ -596,7 +608,7 @@ print_pid(void)
{
static char buf[PID_SIZE + 1];
- snprintf(buf, PID_SIZE + 1, "[%d] ", (int)MyProcPid);
+ snprintf(buf, PID_SIZE + 1, "[%d] ", (int) MyProcPid);
return buf;
}
@@ -605,7 +617,7 @@ print_pid(void)
#ifdef ENABLE_SYSLOG
#ifndef PG_SYSLOG_LIMIT
-# define PG_SYSLOG_LIMIT 128
+#define PG_SYSLOG_LIMIT 128
#endif
/*
@@ -619,32 +631,32 @@ print_pid(void)
static void
write_syslog(int level, const char *line)
{
- static bool openlog_done = false;
+ static bool openlog_done = false;
static unsigned long seq = 0;
static int syslog_fac = LOG_LOCAL0;
- int len = strlen(line);
+ int len = strlen(line);
if (Use_syslog == 0)
return;
if (!openlog_done)
{
- if (strcasecmp(Syslog_facility,"LOCAL0") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL0") == 0)
syslog_fac = LOG_LOCAL0;
- if (strcasecmp(Syslog_facility,"LOCAL1") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL1") == 0)
syslog_fac = LOG_LOCAL1;
- if (strcasecmp(Syslog_facility,"LOCAL2") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL2") == 0)
syslog_fac = LOG_LOCAL2;
- if (strcasecmp(Syslog_facility,"LOCAL3") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL3") == 0)
syslog_fac = LOG_LOCAL3;
- if (strcasecmp(Syslog_facility,"LOCAL4") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL4") == 0)
syslog_fac = LOG_LOCAL4;
- if (strcasecmp(Syslog_facility,"LOCAL5") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL5") == 0)
syslog_fac = LOG_LOCAL5;
- if (strcasecmp(Syslog_facility,"LOCAL6") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL6") == 0)
syslog_fac = LOG_LOCAL6;
- if (strcasecmp(Syslog_facility,"LOCAL7") == 0)
+ if (strcasecmp(Syslog_facility, "LOCAL7") == 0)
syslog_fac = LOG_LOCAL7;
openlog(Syslog_ident, LOG_PID | LOG_NDELAY, syslog_fac);
openlog_done = true;
@@ -658,16 +670,16 @@ write_syslog(int level, const char *line)
/* divide into multiple syslog() calls if message is too long */
/* or if the message contains embedded NewLine(s) '\n' */
- if (len > PG_SYSLOG_LIMIT || strchr(line,'\n') != NULL )
+ if (len > PG_SYSLOG_LIMIT || strchr(line, '\n') != NULL)
{
- int chunk_nr = 0;
+ int chunk_nr = 0;
while (len > 0)
{
- char buf[PG_SYSLOG_LIMIT+1];
- int buflen;
- int l;
- int i;
+ char buf[PG_SYSLOG_LIMIT + 1];
+ int buflen;
+ int l;
+ int i;
/* if we start at a newline, move ahead one char */
if (line[0] == '\n')
@@ -679,15 +691,15 @@ write_syslog(int level, const char *line)
strncpy(buf, line, PG_SYSLOG_LIMIT);
buf[PG_SYSLOG_LIMIT] = '\0';
- if (strchr(buf,'\n') != NULL)
- *strchr(buf,'\n') = '\0';
+ if (strchr(buf, '\n') != NULL)
+ *strchr(buf, '\n') = '\0';
l = strlen(buf);
#ifdef MULTIBYTE
- /* trim to multibyte letter boundary */
+ /* trim to multibyte letter boundary */
buflen = pg_mbcliplen(buf, l, l);
if (buflen <= 0)
- return;
+ return;
buf[buflen] = '\0';
l = strlen(buf);
#endif
@@ -701,7 +713,7 @@ write_syslog(int level, const char *line)
while (i > 0 && !isspace((unsigned char) buf[i]))
i--;
- if (i <= 0) /* couldn't divide word boundary */
+ if (i <= 0) /* couldn't divide word boundary */
buflen = l;
else
{
@@ -724,4 +736,4 @@ write_syslog(int level, const char *line)
}
}
-#endif /* ENABLE_SYSLOG */
+#endif /* ENABLE_SYSLOG */
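write_syslog() above caps each syslog() record at PG_SYSLOG_LIMIT bytes, splits on embedded newlines, and prefers to break at a word boundary. A standalone sketch of that chunking idea using plain POSIX syslog(); the chunk limit, the missing sequence-number prefix, and the resume-after-space handling are simplifications of the real function:

	#include <ctype.h>
	#include <string.h>
	#include <syslog.h>

	#define CHUNK_LIMIT 128			/* cf. PG_SYSLOG_LIMIT */

	static void
	syslog_chunked(int level, const char *line)
	{
		int			len = strlen(line);

		while (len > 0)
		{
			char		buf[CHUNK_LIMIT + 1];
			int			l;

			if (line[0] == '\n')	/* starting at a newline: skip it */
			{
				line++;
				len--;
				continue;
			}
			strncpy(buf, line, CHUNK_LIMIT);
			buf[CHUNK_LIMIT] = '\0';
			if (strchr(buf, '\n') != NULL)
				*strchr(buf, '\n') = '\0';
			l = strlen(buf);
			if (len > l)			/* not the last chunk: back up to a space */
			{
				int			i = l;

				while (i > 0 && !isspace((unsigned char) buf[i]))
					i--;
				if (i > 0)			/* found a word boundary */
				{
					buf[i] = '\0';
					l = i;
				}
			}
			syslog(level, "%s", buf);
			line += l;
			len -= l;
		}
	}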
diff --git a/src/backend/utils/error/exc.c b/src/backend/utils/error/exc.c
index f25e01254f5..ee64ecb5f0a 100644
--- a/src/backend/utils/error/exc.c
+++ b/src/backend/utils/error/exc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/error/Attic/exc.c,v 1.36 2001/01/24 19:43:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/error/Attic/exc.c,v 1.37 2001/03/22 03:59:58 momjian Exp $
*
* NOTE
* XXX this code needs improvement--check for state violations and
@@ -112,6 +112,7 @@ ExcPrint(Exception *excP,
/* Save error str before calling any function that might change errno */
errorstr = strerror(errno);
+
/*
* Some strerror()s return an empty string for out-of-range errno.
* This is ANSI C spec compliant, but not exactly useful.
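Both elog() and ExcPrint() guard against strerror() returning an empty string for an out-of-range errno, as the comment above notes. A tiny sketch of that guard in isolation; the helper name and static buffer are illustrative:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static char errorstr_buf[32];

	/* Sketch: never hand back an empty system-error string. */
	static const char *
	safe_strerror(int errnum)
	{
		const char *s = strerror(errnum);

		if (s == NULL || *s == '\0')
		{
			/* ANSI-compliant but useless result; substitute the number */
			snprintf(errorstr_buf, sizeof(errorstr_buf), "error %d", errnum);
			s = errorstr_buf;
		}
		return s;
	}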
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index c2d25325c7d..49be6b37903 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.47 2001/01/24 19:43:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.48 2001/03/22 03:59:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,7 +32,9 @@ typedef struct df_files
ino_t inode; /* Inode number of file */
void *handle; /* a handle for pg_dl* functions */
char filename[1]; /* Full pathname of file */
- /* we allocate the block big enough for actual length of pathname.
+
+ /*
+ * we allocate the block big enough for actual length of pathname.
* filename[] must be last item in struct!
*/
} DynamicFileList;
@@ -48,7 +50,7 @@ static DynamicFileList *file_tail = (DynamicFileList *) NULL;
* named funcname in it. If the function is not found, we raise an error
* if signalNotFound is true, else return (PGFunction) NULL. Note that
* errors in loading the library will provoke elog regardless of
- * signalNotFound.
+ * signalNotFound.
*/
PGFunction
load_external_function(char *filename, char *funcname,
@@ -64,11 +66,12 @@ load_external_function(char *filename, char *funcname,
*/
for (file_scanner = file_list;
file_scanner != (DynamicFileList *) NULL &&
- strcmp(filename, file_scanner->filename) != 0;
+ strcmp(filename, file_scanner->filename) != 0;
file_scanner = file_scanner->next)
;
if (file_scanner == (DynamicFileList *) NULL)
{
+
/*
* Check for same files - different paths (ie, symlink or link)
*/
@@ -77,13 +80,14 @@ load_external_function(char *filename, char *funcname,
for (file_scanner = file_list;
file_scanner != (DynamicFileList *) NULL &&
- !SAME_INODE(stat_buf, *file_scanner);
+ !SAME_INODE(stat_buf, *file_scanner);
file_scanner = file_scanner->next)
;
}
if (file_scanner == (DynamicFileList *) NULL)
{
+
/*
* File not loaded yet.
*/
@@ -130,7 +134,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it. If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*/
void
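load_external_function() above first matches a requested library by pathname and then, failing that, by inode, so a symlinked or hard-linked path does not load the same shared object twice. A sketch of that dedup check with plain POSIX calls; the LoadedFile struct, the explicit device field, and find_loaded() are assumptions standing in for DynamicFileList and SAME_INODE:

	#include <string.h>
	#include <sys/stat.h>
	#include <sys/types.h>

	typedef struct LoadedFile
	{
		struct LoadedFile *next;
		dev_t		device;			/* device of file */
		ino_t		inode;			/* inode number of file */
		void	   *handle;			/* handle from a dlopen-style loader */
		char		filename[1];	/* full pathname; must be last field */
	} LoadedFile;

	/* Sketch: return an already-loaded entry for this path, matching first
	 * by name and then by (device, inode) to catch symlinks and links. */
	static LoadedFile *
	find_loaded(LoadedFile *list, const char *filename)
	{
		LoadedFile *f;
		struct stat st;

		for (f = list; f != NULL; f = f->next)
			if (strcmp(filename, f->filename) == 0)
				return f;

		if (stat(filename, &st) == 0)
			for (f = list; f != NULL; f = f->next)
				if (st.st_dev == f->device && st.st_ino == f->inode)
					return f;

		return NULL;
	}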
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index a0a412cbb6a..08c35327d02 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/fmgr/fmgr.c,v 1.50 2001/02/10 02:31:27 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/fmgr/fmgr.c,v 1.51 2001/03/22 03:59:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,8 +40,10 @@
*/
#if defined(__mc68000__) && defined(__ELF__)
typedef int32 ((*func_ptr) ());
+
#else
typedef char *((*func_ptr) ());
+
#endif
/*
@@ -50,8 +52,8 @@ typedef char *((*func_ptr) ());
typedef struct
{
func_ptr func; /* Address of the oldstyle function */
- bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a toastable
- * datatype? */
+ bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a
+ * toastable datatype? */
} Oldstyle_fnextra;
@@ -62,23 +64,24 @@ static Datum fmgr_untrusted(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table. We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
static const FmgrBuiltin *
fmgr_isbuiltin(Oid id)
{
- int low = 0;
- int high = fmgr_nbuiltins - 1;
+ int low = 0;
+ int high = fmgr_nbuiltins - 1;
- /* Loop invariant: low is the first index that could contain target
+ /*
+ * Loop invariant: low is the first index that could contain target
* entry, and high is the last index that could contain it.
*/
while (low <= high)
{
- int i = (high + low) / 2;
- const FmgrBuiltin *ptr = &fmgr_builtins[i];
+ int i = (high + low) / 2;
+ const FmgrBuiltin *ptr = &fmgr_builtins[i];
if (id == ptr->foid)
return ptr;
@@ -96,15 +99,15 @@ fmgr_isbuiltin(Oid id)
* routine.
*/
static const FmgrBuiltin *
-fmgr_lookupByName(const char *name)
+fmgr_lookupByName(const char *name)
{
- int i;
+ int i;
for (i = 0; i < fmgr_nbuiltins; i++)
{
if (strcmp(name, fmgr_builtins[i].funcName) == 0)
return fmgr_builtins + i;
- }
+ }
return (const FmgrBuiltin *) NULL;
}
@@ -126,8 +129,10 @@ fmgr_info(Oid functionId, FmgrInfo *finfo)
if ((fbp = fmgr_isbuiltin(functionId)) != NULL)
{
+
/*
- * Fast path for builtin functions: don't bother consulting pg_proc
+ * Fast path for builtin functions: don't bother consulting
+ * pg_proc
*/
finfo->fn_nargs = fbp->nargs;
finfo->fn_strict = fbp->strict;
@@ -160,18 +165,18 @@ fmgr_info(Oid functionId, FmgrInfo *finfo)
switch (procedureStruct->prolang)
{
case INTERNALlanguageId:
+
/*
- * For an ordinary builtin function, we should never get
- * here because the isbuiltin() search above will have
- * succeeded. However, if the user has done a CREATE
- * FUNCTION to create an alias for a builtin function, we
- * can end up here. In that case we have to look up the
- * function by name. The name of the internal function is
- * stored in prosrc (it doesn't have to be the same as the
- * name of the alias!)
+ * For an ordinary builtin function, we should never get here
+ * because the isbuiltin() search above will have succeeded.
+ * However, if the user has done a CREATE FUNCTION to create
+ * an alias for a builtin function, we can end up here. In
+ * that case we have to look up the function by name. The
+ * name of the internal function is stored in prosrc (it
+ * doesn't have to be the same as the name of the alias!)
*/
prosrc = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&procedureStruct->prosrc)));
+ PointerGetDatum(&procedureStruct->prosrc)));
fbp = fmgr_lookupByName(prosrc);
if (fbp == NULL)
elog(ERROR, "fmgr_info: function %s not in internal table",
@@ -240,7 +245,11 @@ fmgr_info_C_lang(FmgrInfo *finfo, HeapTuple procedureTuple)
case 0:
/* Old style: need to use a handler */
finfo->fn_addr = fmgr_oldstyle;
- /* OK to use palloc here because fn_mcxt is CurrentMemoryContext */
+
+ /*
+ * OK to use palloc here because fn_mcxt is
+ * CurrentMemoryContext
+ */
fnextra = (Oldstyle_fnextra *) palloc(sizeof(Oldstyle_fnextra));
finfo->fn_extra = (void *) fnextra;
MemSet(fnextra, 0, sizeof(Oldstyle_fnextra));
@@ -290,10 +299,11 @@ fmgr_info_other_lang(FmgrInfo *finfo, HeapTuple procedureTuple)
fmgr_info(languageStruct->lanplcallfoid, &plfinfo);
finfo->fn_addr = plfinfo.fn_addr;
+
/*
- * If lookup of the PL handler function produced nonnull
- * fn_extra, complain --- it must be an oldstyle function!
- * We no longer support oldstyle PL handlers.
+ * If lookup of the PL handler function produced nonnull fn_extra,
+ * complain --- it must be an oldstyle function! We no longer
+ * support oldstyle PL handlers.
*/
if (plfinfo.fn_extra != NULL)
elog(ERROR, "fmgr_info: language %u has old-style handler",
@@ -325,7 +335,7 @@ fetch_finfo_record(char *filename, char *funcname)
char *infofuncname;
PGFInfoFunction infofunc;
Pg_finfo_record *inforec;
- static Pg_finfo_record default_inforec = { 0 };
+ static Pg_finfo_record default_inforec = {0};
/* Compute name of info func */
infofuncname = (char *) palloc(strlen(funcname) + 10);
@@ -343,7 +353,7 @@ fetch_finfo_record(char *filename, char *funcname)
}
/* Found, so call it */
- inforec = (*infofunc)();
+ inforec = (*infofunc) ();
/* Validate result as best we can */
if (inforec == NULL)
@@ -399,10 +409,11 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
fnextra = (Oldstyle_fnextra *) fcinfo->flinfo->fn_extra;
/*
- * Result is NULL if any argument is NULL, but we still call the function
- * (peculiar, but that's the way it worked before, and after all this is
- * a backwards-compatibility wrapper). Note, however, that we'll never
- * get here with NULL arguments if the function is marked strict.
+ * Result is NULL if any argument is NULL, but we still call the
+ * function (peculiar, but that's the way it worked before, and after
+ * all this is a backwards-compatibility wrapper). Note, however,
+ * that we'll never get here with NULL arguments if the function is
+ * marked strict.
*
* We also need to detoast any TOAST-ed inputs, since it's unlikely that
* an old-style function knows about TOASTing.
@@ -425,12 +436,13 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
returnValue = (*user_fn) ();
break;
case 1:
+
/*
* nullvalue() used to use isNull to check if arg is NULL;
- * perhaps there are other functions still out there that
- * also rely on this undocumented hack?
+ * perhaps there are other functions still out there that also
+ * rely on this undocumented hack?
*/
- returnValue = (*user_fn) (fcinfo->arg[0], & fcinfo->isnull);
+ returnValue = (*user_fn) (fcinfo->arg[0], &fcinfo->isnull);
break;
case 2:
returnValue = (*user_fn) (fcinfo->arg[0], fcinfo->arg[1]);
@@ -534,16 +546,17 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
fcinfo->arg[14], fcinfo->arg[15]);
break;
default:
+
/*
- * Increasing FUNC_MAX_ARGS doesn't automatically add cases
- * to the above code, so mention the actual value in this error
+ * Increasing FUNC_MAX_ARGS doesn't automatically add cases to
+ * the above code, so mention the actual value in this error
* not FUNC_MAX_ARGS. You could add cases to the above if you
* needed to support old-style functions with many arguments,
* but making 'em be new-style is probably a better idea.
*/
elog(ERROR, "fmgr_oldstyle: function %u: too many arguments (%d > %d)",
fcinfo->flinfo->fn_oid, n_arguments, 16);
- returnValue = NULL; /* keep compiler quiet */
+ returnValue = NULL; /* keep compiler quiet */
break;
}
@@ -557,6 +570,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
static Datum
fmgr_untrusted(PG_FUNCTION_ARGS)
{
+
/*
* Currently these are unsupported. Someday we might do something
* like forking a subprocess to execute 'em.
@@ -573,20 +587,20 @@ fmgr_untrusted(PG_FUNCTION_ARGS)
/* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
DirectFunctionCall1(PGFunction func, Datum arg1)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 1;
fcinfo.arg[0] = arg1;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -599,15 +613,15 @@ DirectFunctionCall1(PGFunction func, Datum arg1)
Datum
DirectFunctionCall2(PGFunction func, Datum arg1, Datum arg2)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 2;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -621,8 +635,8 @@ Datum
DirectFunctionCall3(PGFunction func, Datum arg1, Datum arg2,
Datum arg3)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 3;
@@ -630,7 +644,7 @@ DirectFunctionCall3(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[1] = arg2;
fcinfo.arg[2] = arg3;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -644,8 +658,8 @@ Datum
DirectFunctionCall4(PGFunction func, Datum arg1, Datum arg2,
Datum arg3, Datum arg4)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 4;
@@ -654,7 +668,7 @@ DirectFunctionCall4(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[2] = arg3;
fcinfo.arg[3] = arg4;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -668,8 +682,8 @@ Datum
DirectFunctionCall5(PGFunction func, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 5;
@@ -679,7 +693,7 @@ DirectFunctionCall5(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[3] = arg4;
fcinfo.arg[4] = arg5;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -694,8 +708,8 @@ DirectFunctionCall6(PGFunction func, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 6;
@@ -706,7 +720,7 @@ DirectFunctionCall6(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[4] = arg5;
fcinfo.arg[5] = arg6;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -721,8 +735,8 @@ DirectFunctionCall7(PGFunction func, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 7;
@@ -734,7 +748,7 @@ DirectFunctionCall7(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[5] = arg6;
fcinfo.arg[6] = arg7;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -749,8 +763,8 @@ DirectFunctionCall8(PGFunction func, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 8;
@@ -763,7 +777,7 @@ DirectFunctionCall8(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[6] = arg7;
fcinfo.arg[7] = arg8;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -779,8 +793,8 @@ DirectFunctionCall9(PGFunction func, Datum arg1, Datum arg2,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.nargs = 9;
@@ -794,7 +808,7 @@ DirectFunctionCall9(PGFunction func, Datum arg1, Datum arg2,
fcinfo.arg[7] = arg8;
fcinfo.arg[8] = arg9;
- result = (* func) (&fcinfo);
+ result = (*func) (&fcinfo);
/* Check for null result, since caller is clearly not expecting one */
if (fcinfo.isnull)
@@ -812,11 +826,11 @@ DirectFunctionCall9(PGFunction func, Datum arg1, Datum arg2,
Datum
FunctionCall1(FmgrInfo *flinfo, Datum arg1)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 1;
fcinfo.arg[0] = arg1;
@@ -833,11 +847,11 @@ FunctionCall1(FmgrInfo *flinfo, Datum arg1)
Datum
FunctionCall2(FmgrInfo *flinfo, Datum arg1, Datum arg2)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 2;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -856,11 +870,11 @@ Datum
FunctionCall3(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg3)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 3;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -880,11 +894,11 @@ Datum
FunctionCall4(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg3, Datum arg4)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 4;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -905,11 +919,11 @@ Datum
FunctionCall5(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 5;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -932,11 +946,11 @@ FunctionCall6(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 6;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -960,11 +974,11 @@ FunctionCall7(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 7;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -989,11 +1003,11 @@ FunctionCall8(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 8;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1020,11 +1034,11 @@ FunctionCall9(FmgrInfo *flinfo, Datum arg1, Datum arg2,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
{
- FunctionCallInfoData fcinfo;
- Datum result;
+ FunctionCallInfoData fcinfo;
+ Datum result;
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = flinfo;
+ fcinfo.flinfo = flinfo;
fcinfo.nargs = 9;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1049,21 +1063,21 @@ FunctionCall9(FmgrInfo *flinfo, Datum arg1, Datum arg2,
/* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially fmgr_info() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
OidFunctionCall1(Oid functionId, Datum arg1)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 1;
fcinfo.arg[0] = arg1;
@@ -1080,14 +1094,14 @@ OidFunctionCall1(Oid functionId, Datum arg1)
Datum
OidFunctionCall2(Oid functionId, Datum arg1, Datum arg2)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 2;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1106,14 +1120,14 @@ Datum
OidFunctionCall3(Oid functionId, Datum arg1, Datum arg2,
Datum arg3)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 3;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1133,14 +1147,14 @@ Datum
OidFunctionCall4(Oid functionId, Datum arg1, Datum arg2,
Datum arg3, Datum arg4)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 4;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1161,14 +1175,14 @@ Datum
OidFunctionCall5(Oid functionId, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 5;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1191,14 +1205,14 @@ OidFunctionCall6(Oid functionId, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 6;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1222,14 +1236,14 @@ OidFunctionCall7(Oid functionId, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 7;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1254,14 +1268,14 @@ OidFunctionCall8(Oid functionId, Datum arg1, Datum arg2,
Datum arg3, Datum arg4, Datum arg5,
Datum arg6, Datum arg7, Datum arg8)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 8;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1288,14 +1302,14 @@ OidFunctionCall9(Oid functionId, Datum arg1, Datum arg2,
Datum arg6, Datum arg7, Datum arg8,
Datum arg9)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ Datum result;
fmgr_info(functionId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = 9;
fcinfo.arg[0] = arg1;
fcinfo.arg[1] = arg2;
@@ -1332,15 +1346,15 @@ OidFunctionCall9(Oid functionId, Datum arg1, Datum arg2,
char *
fmgr(Oid procedureId,...)
{
- FmgrInfo flinfo;
- FunctionCallInfoData fcinfo;
- int n_arguments;
- Datum result;
+ FmgrInfo flinfo;
+ FunctionCallInfoData fcinfo;
+ int n_arguments;
+ Datum result;
fmgr_info(procedureId, &flinfo);
MemSet(&fcinfo, 0, sizeof(fcinfo));
- fcinfo.flinfo = &flinfo;
+ fcinfo.flinfo = &flinfo;
fcinfo.nargs = flinfo.fn_nargs;
n_arguments = fcinfo.nargs;
@@ -1430,7 +1444,7 @@ pg_detoast_datum_copy(struct varlena * datum)
else
{
/* Make a modifiable copy of the varlena object */
- Size len = VARSIZE(datum);
+ Size len = VARSIZE(datum);
struct varlena *result = (struct varlena *) palloc(len);
memcpy(result, datum, len);
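Every DirectFunctionCallN, FunctionCallN, and OidFunctionCallN body above follows the same recipe: zero a FunctionCallInfoData on the stack, fill in flinfo (if any), nargs, and the arg[] slots, invoke the function pointer, and treat a null result as an error. A minimal caller-side sketch of that convention; picking int4eq as the callee is my choice for illustration, not something this diff does:

	/* Sketch: compare two int4 Datums through the version-1 fmgr interface. */
	Datum		result;
	bool		equal;

	result = DirectFunctionCall2(int4eq,
								 Int32GetDatum(42),
								 Int32GetDatum(42));
	equal = DatumGetBool(result);

If the same function will be called many times, the comment above the OidFunctionCallN block applies: do the fmgr_info() lookup once and reuse FunctionCallN() with the cached FmgrInfo.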
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index b13bd014490..b3a991fdc52 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.34 2001/01/24 19:43:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.35 2001/03/22 03:59:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -719,7 +719,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
curElem = GET_BUCKET(hashp, status->curIndex);
status->curIndex = curElem->next;
- if (status->curIndex == INVALID_INDEX) /* end of this bucket */
+ if (status->curIndex == INVALID_INDEX) /* end of this bucket */
++status->curBucket;
return &(curElem->key);
}
diff --git a/src/backend/utils/hash/pg_crc.c b/src/backend/utils/hash/pg_crc.c
index 96413f3b8b4..22469bf5f5e 100644
--- a/src/backend/utils/hash/pg_crc.c
+++ b/src/backend/utils/hash/pg_crc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/pg_crc.c,v 1.1 2001/03/13 01:17:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/pg_crc.c,v 1.2 2001/03/22 03:59:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -281,7 +281,7 @@ const uint32 crc_table1[256] = {
0xD80C07CD, 0x9AFCE626
};
-#else /* int64 works */
+#else /* int64 works */
const uint64 crc_table[256] = {
0x0000000000000000, 0x42F0E1EBA9EA3693,
@@ -414,4 +414,4 @@ const uint64 crc_table[256] = {
0xD80C07CD676F8394, 0x9AFCE626CE85B507
};
-#endif /* INT64_IS_BUSTED */
+#endif /* INT64_IS_BUSTED */
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 43331badc7a..2a1a0aaaae4 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/globals.c,v 1.54 2001/03/13 01:17:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/globals.c,v 1.55 2001/03/22 03:59:59 momjian Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
@@ -79,7 +79,7 @@ char DateFormat[20] = "%d-%m-%Y"; /* mjl: sizes! or better
* malloc? XXX */
char FloatFormat[20] = "%f";
-bool enableFsync = true;
+bool enableFsync = true;
bool allowSystemTableMods = false;
int SortMem = 512;
int NBuffers = DEF_NBUFFERS;
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index f5824327c96..a6dffbee103 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/miscinit.c,v 1.63 2001/03/18 18:22:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/miscinit.c,v 1.64 2001/03/22 04:00:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,7 +109,7 @@ SetDatabaseName(const char *name)
void
SetDataDir(const char *dir)
{
- char *new;
+ char *new;
AssertArg(dir);
if (DataDir)
@@ -117,8 +117,8 @@ SetDataDir(const char *dir)
if (dir[0] != '/')
{
- char *buf;
- size_t buflen;
+ char *buf;
+ size_t buflen;
buflen = MAXPGPATH;
for (;;)
@@ -147,13 +147,11 @@ SetDataDir(const char *dir)
free(buf);
}
else
- {
new = strdup(dir);
- }
if (!new)
elog(FATAL, "out of memory");
- DataDir = new;
+ DataDir = new;
}
@@ -344,7 +342,7 @@ convertstr(unsigned char *buff, int len, int dest)
/* ----------------------------------------------------------------
- * User ID things
+ * User ID things
*
* The session user is determined at connection start and never
* changes. The current user may change when "setuid" functions
@@ -415,7 +413,7 @@ SetSessionUserIdFromUserName(const char *username)
if (!HeapTupleIsValid(userTup))
elog(FATAL, "user \"%s\" does not exist", username);
- SetSessionUserId( ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid );
+ SetSessionUserId(((Form_pg_shadow) GETSTRUCT(userTup))->usesysid);
ReleaseSysCache(userTup);
}
@@ -436,7 +434,7 @@ GetUserName(Oid userid)
if (!HeapTupleIsValid(tuple))
elog(ERROR, "invalid user id %u", (unsigned) userid);
- result = pstrdup( NameStr(((Form_pg_shadow) GETSTRUCT(tuple))->usename) );
+ result = pstrdup(NameStr(((Form_pg_shadow) GETSTRUCT(tuple))->usename));
ReleaseSysCache(tuple);
return result;
@@ -502,12 +500,14 @@ CreateLockFile(const char *filename, bool amPostmaster,
*/
for (;;)
{
+
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
break; /* Success; exit the retry loop */
+
/*
* Couldn't create the pid file. Probably it already exists.
*/
@@ -551,7 +551,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
#ifdef __BEOS__
&& errno != EINVAL
#endif
- ))
+ ))
{
/* lockfile belongs to a live process */
fprintf(stderr, "Lock file \"%s\" already exists.\n",
@@ -571,11 +571,11 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be that
- * the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind. Check for this by
- * looking to see if there is an associated shmem segment that is
- * still in use.
+ * No, the creating process did not exist. However, it could be
+ * that the postmaster crashed (or more likely was kill -9'd by a
+ * clueless admin) but has left orphan backends behind. Check for
+ * this by looking to see if there is an associated shmem segment
+ * that is still in use.
*/
if (isDDLock)
{
@@ -585,7 +585,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
ptr = strchr(buffer, '\n');
if (ptr != NULL &&
- (ptr = strchr(ptr+1, '\n')) != NULL)
+ (ptr = strchr(ptr + 1, '\n')) != NULL)
{
ptr++;
if (sscanf(ptr, "%lu %lu", &shmKey, &shmId) == 2)
@@ -607,8 +607,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to
- * create it. Need a loop because of possible race condition against
- * other would-be creators.
+ * create it. Need a loop because of possible race condition
+ * against other would-be creators.
*/
if (unlink(filename) < 0)
elog(FATAL, "Can't remove old lock file %s: %m"
@@ -621,11 +621,11 @@ CreateLockFile(const char *filename, bool amPostmaster,
* Successfully created the file, now fill it.
*/
snprintf(buffer, sizeof(buffer), "%d\n%s\n",
- amPostmaster ? (int) my_pid : - ((int) my_pid),
+ amPostmaster ? (int) my_pid : -((int) my_pid),
DataDir);
if (write(fd, buffer, strlen(buffer)) != strlen(buffer))
{
- int save_errno = errno;
+ int save_errno = errno;
close(fd);
unlink(filename);
@@ -645,10 +645,10 @@ CreateLockFile(const char *filename, bool amPostmaster,
bool
CreateDataDirLockFile(const char *datadir, bool amPostmaster)
{
- char lockfile[MAXPGPATH];
+ char lockfile[MAXPGPATH];
snprintf(lockfile, sizeof(lockfile), "%s/postmaster.pid", datadir);
- if (! CreateLockFile(lockfile, amPostmaster, true, datadir))
+ if (!CreateLockFile(lockfile, amPostmaster, true, datadir))
return false;
/* Save name of lockfile for RecordSharedMemoryInLockFile */
strcpy(directoryLockFile, lockfile);
@@ -658,10 +658,10 @@ CreateDataDirLockFile(const char *datadir, bool amPostmaster)
bool
CreateSocketLockFile(const char *socketfile, bool amPostmaster)
{
- char lockfile[MAXPGPATH];
+ char lockfile[MAXPGPATH];
snprintf(lockfile, sizeof(lockfile), "%s.lock", socketfile);
- if (! CreateLockFile(lockfile, amPostmaster, false, socketfile))
+ if (!CreateLockFile(lockfile, amPostmaster, false, socketfile))
return false;
/* Save name of lockfile for TouchSocketLockFile */
strcpy(socketLockFile, lockfile);
@@ -698,7 +698,7 @@ TouchSocketLockFile(void)
* lock file (if we have created one).
*
* This may be called multiple times in the life of a postmaster, if we
- * delete and recreate shmem due to backend crash. Therefore, be prepared
+ * delete and recreate shmem due to backend crash. Therefore, be prepared
* to overwrite existing information. (As of 7.1, a postmaster only creates
* one shm seg anyway; but for the purposes here, if we did have more than
* one then any one of them would do anyway.)
@@ -712,8 +712,8 @@ RecordSharedMemoryInLockFile(IpcMemoryKey shmKey, IpcMemoryId shmId)
char buffer[BLCKSZ];
/*
- * Do nothing if we did not create a lockfile (probably because we
- * are running standalone).
+ * Do nothing if we did not create a lockfile (probably because we are
+ * running standalone).
*/
if (directoryLockFile[0] == '\0')
return;
@@ -732,27 +732,30 @@ RecordSharedMemoryInLockFile(IpcMemoryKey shmKey, IpcMemoryId shmId)
return;
}
buffer[len] = '\0';
+
/*
* Skip over first two lines (PID and path).
*/
ptr = strchr(buffer, '\n');
if (ptr == NULL ||
- (ptr = strchr(ptr+1, '\n')) == NULL)
+ (ptr = strchr(ptr + 1, '\n')) == NULL)
{
elog(DEBUG, "Bogus data in %s", directoryLockFile);
close(fd);
return;
}
ptr++;
+
/*
* Append shm key and ID. Format to try to keep it the same length
* always (trailing junk won't hurt, but might confuse humans).
*/
sprintf(ptr, "%9lu %9lu\n",
(unsigned long) shmKey, (unsigned long) shmId);
+
/*
- * And rewrite the data. Since we write in a single kernel call,
- * this update should appear atomic to onlookers.
+ * And rewrite the data. Since we write in a single kernel call, this
+ * update should appear atomic to onlookers.
*/
len = strlen(buffer);
if (lseek(fd, (off_t) 0, SEEK_SET) != 0 ||
@@ -781,16 +784,18 @@ void
ValidatePgVersion(const char *path)
{
char full_path[MAXPGPATH];
- FILE *file;
+ FILE *file;
int ret;
- long file_major, file_minor;
- long my_major = 0, my_minor = 0;
- char *endptr;
+ long file_major,
+ file_minor;
+ long my_major = 0,
+ my_minor = 0;
+ char *endptr;
const char *version_string = PG_VERSION;
my_major = strtol(version_string, &endptr, 10);
if (*endptr == '.')
- my_minor = strtol(endptr+1, NULL, 10);
+ my_minor = strtol(endptr + 1, NULL, 10);
snprintf(full_path, MAXPGPATH, "%s/PG_VERSION", path);
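CreateLockFile() above leans on open(..., O_CREAT | O_EXCL) for atomic creation and loops because another would-be creator can slip in after a stale file is unlinked. A stripped-down sketch of that pattern; the stale-PID and shared-memory checks of the real function are deliberately omitted and only noted in a comment:

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>
	#include <unistd.h>

	/* Sketch: atomically create a pid file, retrying after removing a
	 * (presumed) stale one.  Returns 0 on success, -1 on failure. */
	static int
	create_pid_file(const char *filename, pid_t my_pid)
	{
		char		buffer[64];
		int			fd;

		for (;;)
		{
			/* O_EXCL makes creation atomic: exactly one creator wins */
			fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
			if (fd >= 0)
				break;			/* success; exit the retry loop */

			/*
			 * The real code reads the existing file here, checks whether
			 * its PID is still alive and whether its shmem segment is in
			 * use, and gives up if so.  This sketch just assumes stale.
			 */
			if (unlink(filename) < 0)
				return -1;
			/* loop: another creator may have raced us between calls */
		}

		snprintf(buffer, sizeof(buffer), "%d\n", (int) my_pid);
		if (write(fd, buffer, strlen(buffer)) != (ssize_t) strlen(buffer))
		{
			close(fd);
			unlink(filename);
			return -1;
		}
		close(fd);
		return 0;
	}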
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 0f9639de02b..ef5f09374af 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/postinit.c,v 1.81 2001/02/16 18:50:40 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/postinit.c,v 1.82 2001/03/22 04:00:00 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -118,7 +118,7 @@ ReverifyMyDatabase(const char *name)
* Also check that the database is currently allowing connections.
*/
dbform = (Form_pg_database) GETSTRUCT(tup);
- if (! dbform->datallowconn)
+ if (!dbform->datallowconn)
elog(FATAL, "Database \"%s\" is not currently accepting connections",
name);
@@ -175,6 +175,7 @@ InitCommunication(void)
void
BaseInit(void)
{
+
/*
* Attach to shared memory and semaphores, and initialize our
* input/output/debugging file descriptors.
@@ -230,8 +231,8 @@ InitPostgres(const char *dbname, const char *username)
ValidatePgVersion(DataDir);
/*
- * Find oid and path of the database we're about to open.
- * Since we're not yet up and running we have to use the hackish
+ * Find oid and path of the database we're about to open. Since
+ * we're not yet up and running we have to use the hackish
* GetRawDatabaseInfo.
*/
GetRawDatabaseInfo(dbname, &MyDatabaseId, datpath);
@@ -347,8 +348,8 @@ InitPostgres(const char *dbname, const char *username)
/*
* Unless we are bootstrapping, double-check that InitMyDatabaseInfo()
- * got a correct result. We can't do this until all the database-access
- * infrastructure is up.
+ * got a correct result. We can't do this until all the
+ * database-access infrastructure is up.
*/
if (!bootstrap)
ReverifyMyDatabase(dbname);
@@ -359,14 +360,18 @@ InitPostgres(const char *dbname, const char *username)
#endif
/*
- * Set up process-exit callbacks to remove temp relations and then
- * do pre-shutdown cleanup. This should be last because we want
+ * Set up process-exit callbacks to remove temp relations and then do
+ * pre-shutdown cleanup. This should be last because we want
* shmem_exit to call these routines before the exit callbacks that
- * are registered by buffer manager, lock manager, etc. We need
- * to run this code before we close down database access!
+ * are registered by buffer manager, lock manager, etc. We need to
+ * run this code before we close down database access!
*/
on_shmem_exit(ShutdownPostgres, 0);
- /* because callbacks are called in reverse order, this gets done first: */
+
+ /*
+ * because callbacks are called in reverse order, this gets done
+ * first:
+ */
on_shmem_exit(remove_all_temp_relations, 0);
/* close the transaction we started above */
@@ -389,21 +394,24 @@ InitPostgres(const char *dbname, const char *username)
static void
ShutdownPostgres(void)
{
+
/*
- * These operations are really just a minimal subset of AbortTransaction().
- * We don't want to do any inessential cleanup, since that just raises
- * the odds of failure --- but there's some stuff we need to do.
+ * These operations are really just a minimal subset of
+ * AbortTransaction(). We don't want to do any inessential cleanup,
+ * since that just raises the odds of failure --- but there's some
+ * stuff we need to do.
*
* Release any spinlocks or buffer context locks we might be holding.
- * This is a kluge to improve the odds that we won't get into a self-made
- * stuck-spinlock scenario while trying to shut down.
+ * This is a kluge to improve the odds that we won't get into a
+ * self-made stuck-spinlock scenario while trying to shut down.
*/
ProcReleaseSpins(NULL);
UnlockBuffers();
+
/*
- * In case a transaction is open, delete any files it created. This
+ * In case a transaction is open, delete any files it created. This
* has to happen before bufmgr shutdown, so having smgr register a
* callback for it wouldn't work.
*/
- smgrDoPendingDeletes(false); /* delete as though aborting xact */
+ smgrDoPendingDeletes(false);/* delete as though aborting xact */
}
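InitPostgres() above registers ShutdownPostgres first and remove_all_temp_relations second precisely because shmem-exit callbacks run in reverse order of registration. A toy sketch of that LIFO convention; the array, its size, and the function names are illustrative, not the real ipc.c machinery (which also passes an argument to each callback):

	/* Sketch: last-registered callback runs first at shutdown. */
	#define MAX_EXIT_CB 32

	typedef void (*exit_cb) (void);

	static exit_cb on_exit_list[MAX_EXIT_CB];
	static int	on_exit_count = 0;

	static void
	register_exit_cb(exit_cb func)
	{
		if (on_exit_count < MAX_EXIT_CB)
			on_exit_list[on_exit_count++] = func;
	}

	static void
	run_exit_callbacks(void)
	{
		while (on_exit_count > 0)
			on_exit_list[--on_exit_count] ();	/* reverse registration order */
	}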
diff --git a/src/backend/utils/mb/conv.c b/src/backend/utils/mb/conv.c
index eaef3976b5e..30b1a0d92a6 100644
--- a/src/backend/utils/mb/conv.c
+++ b/src/backend/utils/mb/conv.c
@@ -6,7 +6,7 @@
* WIN1250 client encoding support contributed by Pavel Behal
* SJIS UDC (NEC selection IBM kanji) support contributed by Eiji Tokuya
*
- * $Id: conv.c,v 1.23 2001/02/11 01:56:58 ishii Exp $
+ * $Id: conv.c,v 1.24 2001/03/22 04:00:01 momjian Exp $
*
*
*/
@@ -39,7 +39,7 @@
#include "Unicode/utf8_to_big5.map"
#include "Unicode/big5_to_utf8.map"
-#endif /* UNICODE_CONVERSION */
+#endif /* UNICODE_CONVERSION */
/*
* SJIS alternative code.
@@ -86,6 +86,7 @@ sjis2mic(unsigned char *sjis, unsigned char *p, int len)
i,
k,
k2;
+
/* Eiji Tokuya patched end */
while (len > 0 && (c1 = *sjis++))
{
@@ -117,7 +118,7 @@ sjis2mic(unsigned char *sjis, unsigned char *p, int len)
{
k = ibmkanji[i].sjis;
c1 = (k >> 8) & 0xff;
- c2 = k & 0xff;
+ c2 = k & 0xff;
}
}
}
@@ -132,7 +133,7 @@ sjis2mic(unsigned char *sjis, unsigned char *p, int len)
*p++ = c2 + ((c2 > 0x9e) ? 2 : 0x60) + (c2 < 0x80);
}
/* Eiji Tokuya patched begin */
- else if ((k >= 0xeb40 && k < 0xf040)||(k >= 0xfc4c && k <= 0xfcfc))
+ else if ((k >= 0xeb40 && k < 0xf040) || (k >= 0xfc4c && k <= 0xfcfc))
{
/* NEC selection IBM kanji - Other undecided justice */
/* Eiji Tokuya patched end */
@@ -1144,7 +1145,7 @@ mic2win1250(unsigned char *mic, unsigned char *p, int len)
static void
ascii2utf(unsigned char *ascii, unsigned char *utf, int len)
{
- ascii2mic(ascii, utf, len);
+ ascii2mic(ascii, utf, len);
}
/*
@@ -1153,7 +1154,7 @@ ascii2utf(unsigned char *ascii, unsigned char *utf, int len)
static void
utf2ascii(unsigned char *utf, unsigned char *ascii, int len)
{
- mic2ascii(utf, ascii, len);
+ mic2ascii(utf, ascii, len);
}
/*
@@ -1162,14 +1163,12 @@ utf2ascii(unsigned char *utf, unsigned char *ascii, int len)
static void
latin1_to_utf(unsigned char *iso, unsigned char *utf, int len)
{
- unsigned short c;
+ unsigned short c;
while (len-- > 0 && (c = *iso++))
{
if (c < 0x80)
- {
*utf++ = c;
- }
else
{
*utf++ = (c >> 6) | 0xc0;
@@ -1185,7 +1184,9 @@ latin1_to_utf(unsigned char *iso, unsigned char *utf, int len)
static void
utf_to_latin1(unsigned char *utf, unsigned char *iso, int len)
{
- unsigned short c, c1, c2;
+ unsigned short c,
+ c1,
+ c2;
while (len > 0 && (c = *utf++))
{
@@ -1198,9 +1199,7 @@ utf_to_latin1(unsigned char *utf, unsigned char *iso, int len)
len -= 2;
}
else if ((c & 0xe0) == 0xe0)
- {
elog(ERROR, "Could not convert UTF-8 to ISO8859-1");
- }
else
{
*iso++ = c;
@@ -1214,46 +1213,50 @@ utf_to_latin1(unsigned char *utf, unsigned char *iso, int len)
* comparison routine for bsearch()
* this routine is intended for UTF-8 -> local code
*/
-static int compare1(const void *p1, const void *p2)
+static int
+compare1(const void *p1, const void *p2)
{
- unsigned int v1, v2;
+ unsigned int v1,
+ v2;
- v1 = *(unsigned int *)p1;
- v2 = ((pg_utf_to_local *)p2)->utf;
- return(v1 - v2);
+ v1 = *(unsigned int *) p1;
+ v2 = ((pg_utf_to_local *) p2)->utf;
+ return (v1 - v2);
}
/*
* comparison routine for bsearch()
* this routine is intended for local code -> UTF-8
*/
-static int compare2(const void *p1, const void *p2)
+static int
+compare2(const void *p1, const void *p2)
{
- unsigned int v1, v2;
+ unsigned int v1,
+ v2;
- v1 = *(unsigned int *)p1;
- v2 = ((pg_local_to_utf *)p2)->code;
- return(v1 - v2);
+ v1 = *(unsigned int *) p1;
+ v2 = ((pg_local_to_utf *) p2)->code;
+ return (v1 - v2);
}
/*
* UTF-8 ---> local code
*
* utf: input UTF-8 string. Its length is limited by "len" parameter
- * or a null terminater.
+ * or a null terminater.
* iso: pointer to the output.
* map: the conversion map.
* size: the size of the conversion map.
*/
static void
utf_to_local(unsigned char *utf, unsigned char *iso,
- pg_utf_to_local *map, int size, int len)
+ pg_utf_to_local * map, int size, int len)
{
unsigned int iutf;
- int l;
+ int l;
pg_utf_to_local *p;
- for (;len > 0 && *utf; len -= l)
+ for (; len > 0 && *utf; len -= l)
{
l = pg_utf_mblen(utf);
if (l == 1)
@@ -1262,7 +1265,7 @@ utf_to_local(unsigned char *utf, unsigned char *iso,
continue;
}
else if (l == 2)
- {
+ {
iutf = *utf++ << 8;
iutf |= *utf++;
}
@@ -1297,7 +1300,7 @@ utf_to_local(unsigned char *utf, unsigned char *iso,
static void
utf_to_latin2(unsigned char *utf, unsigned char *iso, int len)
{
- utf_to_local(utf, iso, ULmapISO8859_2, sizeof(ULmapISO8859_2)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, iso, ULmapISO8859_2, sizeof(ULmapISO8859_2) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1306,7 +1309,7 @@ utf_to_latin2(unsigned char *utf, unsigned char *iso, int len)
static void
utf_to_latin3(unsigned char *utf, unsigned char *iso, int len)
{
- utf_to_local(utf, iso, ULmapISO8859_3, sizeof(ULmapISO8859_3)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, iso, ULmapISO8859_3, sizeof(ULmapISO8859_3) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1315,7 +1318,7 @@ utf_to_latin3(unsigned char *utf, unsigned char *iso, int len)
static void
utf_to_latin4(unsigned char *utf, unsigned char *iso, int len)
{
- utf_to_local(utf, iso, ULmapISO8859_4, sizeof(ULmapISO8859_4)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, iso, ULmapISO8859_4, sizeof(ULmapISO8859_4) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1325,7 +1328,7 @@ static void
utf_to_latin5(unsigned char *utf, unsigned char *iso, int len)
{
- utf_to_local(utf, iso, ULmapISO8859_5, sizeof(ULmapISO8859_5)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, iso, ULmapISO8859_5, sizeof(ULmapISO8859_5) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1333,20 +1336,18 @@ utf_to_latin5(unsigned char *utf, unsigned char *iso, int len)
*/
static void
local_to_utf(unsigned char *iso, unsigned char *utf,
- pg_local_to_utf *map, int size, int encoding, int len)
+ pg_local_to_utf * map, int size, int encoding, int len)
{
unsigned int iiso;
- int l;
+ int l;
pg_local_to_utf *p;
pg_encoding_conv_tbl *e;
e = pg_get_enc_ent(encoding);
if (e == 0)
- {
elog(ERROR, "Invalid encoding number %d", encoding);
- }
- for (;len > 0 && *iso; len -= l)
+ for (; len > 0 && *iso; len -= l)
{
if (*iso < 0x80)
{
@@ -1358,11 +1359,9 @@ local_to_utf(unsigned char *iso, unsigned char *utf,
l = pg_encoding_mblen(encoding, iso);
if (l == 1)
- {
iiso = *iso++;
- }
else if (l == 2)
- {
+ {
iiso = *iso++ << 8;
iiso |= *iso++;
}
@@ -1383,8 +1382,8 @@ local_to_utf(unsigned char *iso, unsigned char *utf,
sizeof(pg_local_to_utf), compare2);
if (p == NULL)
{
- elog(NOTICE, "local_to_utf: could not convert (0x%04x) %s to UTF-8. Ignored",
- iiso, e->name);
+ elog(NOTICE, "local_to_utf: could not convert (0x%04x) %s to UTF-8. Ignored",
+ iiso, e->name);
continue;
}
if (p->utf & 0xff000000)
@@ -1405,7 +1404,7 @@ local_to_utf(unsigned char *iso, unsigned char *utf,
static void
latin2_to_utf(unsigned char *iso, unsigned char *utf, int len)
{
- local_to_utf(iso, utf, LUmapISO8859_2, sizeof(LUmapISO8859_2)/sizeof(pg_local_to_utf), LATIN2, len);
+ local_to_utf(iso, utf, LUmapISO8859_2, sizeof(LUmapISO8859_2) / sizeof(pg_local_to_utf), LATIN2, len);
}
/*
@@ -1414,7 +1413,7 @@ latin2_to_utf(unsigned char *iso, unsigned char *utf, int len)
static void
latin3_to_utf(unsigned char *iso, unsigned char *utf, int len)
{
- local_to_utf(iso, utf, LUmapISO8859_3, sizeof(LUmapISO8859_3)/sizeof(pg_local_to_utf), LATIN3, len);
+ local_to_utf(iso, utf, LUmapISO8859_3, sizeof(LUmapISO8859_3) / sizeof(pg_local_to_utf), LATIN3, len);
}
/*
@@ -1423,7 +1422,7 @@ latin3_to_utf(unsigned char *iso, unsigned char *utf, int len)
static void
latin4_to_utf(unsigned char *iso, unsigned char *utf, int len)
{
- local_to_utf(iso, utf, LUmapISO8859_4, sizeof(LUmapISO8859_4)/sizeof(pg_local_to_utf), LATIN4, len);
+ local_to_utf(iso, utf, LUmapISO8859_4, sizeof(LUmapISO8859_4) / sizeof(pg_local_to_utf), LATIN4, len);
}
/*
@@ -1432,7 +1431,7 @@ latin4_to_utf(unsigned char *iso, unsigned char *utf, int len)
static void
latin5_to_utf(unsigned char *iso, unsigned char *utf, int len)
{
- local_to_utf(iso, utf, LUmapISO8859_5, sizeof(LUmapISO8859_5)/sizeof(pg_local_to_utf), LATIN5, len);
+ local_to_utf(iso, utf, LUmapISO8859_5, sizeof(LUmapISO8859_5) / sizeof(pg_local_to_utf), LATIN5, len);
}
/*
@@ -1442,8 +1441,8 @@ static void
utf_to_euc_jp(unsigned char *utf, unsigned char *euc, int len)
{
- utf_to_local(utf, euc, ULmapEUC_JP,
- sizeof(ULmapEUC_JP)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, euc, ULmapEUC_JP,
+ sizeof(ULmapEUC_JP) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1453,7 +1452,7 @@ static void
euc_jp_to_utf(unsigned char *euc, unsigned char *utf, int len)
{
local_to_utf(euc, utf, LUmapEUC_JP,
- sizeof(LUmapEUC_JP)/sizeof(pg_local_to_utf), EUC_JP, len);
+ sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), EUC_JP, len);
}
/*
@@ -1463,8 +1462,8 @@ static void
utf_to_euc_cn(unsigned char *utf, unsigned char *euc, int len)
{
- utf_to_local(utf, euc, ULmapEUC_CN,
- sizeof(ULmapEUC_CN)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, euc, ULmapEUC_CN,
+ sizeof(ULmapEUC_CN) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1474,7 +1473,7 @@ static void
euc_cn_to_utf(unsigned char *euc, unsigned char *utf, int len)
{
local_to_utf(euc, utf, LUmapEUC_CN,
- sizeof(LUmapEUC_CN)/sizeof(pg_local_to_utf), EUC_CN, len);
+ sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), EUC_CN, len);
}
/*
@@ -1484,8 +1483,8 @@ static void
utf_to_euc_kr(unsigned char *utf, unsigned char *euc, int len)
{
- utf_to_local(utf, euc, ULmapEUC_KR,
- sizeof(ULmapEUC_KR)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, euc, ULmapEUC_KR,
+ sizeof(ULmapEUC_KR) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1495,7 +1494,7 @@ static void
euc_kr_to_utf(unsigned char *euc, unsigned char *utf, int len)
{
local_to_utf(euc, utf, LUmapEUC_KR,
- sizeof(LUmapEUC_KR)/sizeof(pg_local_to_utf), EUC_KR, len);
+ sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), EUC_KR, len);
}
/*
@@ -1505,8 +1504,8 @@ static void
utf_to_euc_tw(unsigned char *utf, unsigned char *euc, int len)
{
- utf_to_local(utf, euc, ULmapEUC_TW,
- sizeof(ULmapEUC_TW)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, euc, ULmapEUC_TW,
+ sizeof(ULmapEUC_TW) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1516,7 +1515,7 @@ static void
euc_tw_to_utf(unsigned char *euc, unsigned char *utf, int len)
{
local_to_utf(euc, utf, LUmapEUC_TW,
- sizeof(LUmapEUC_TW)/sizeof(pg_local_to_utf), EUC_TW, len);
+ sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), EUC_TW, len);
}
/*
@@ -1526,8 +1525,8 @@ static void
utf_to_sjis(unsigned char *utf, unsigned char *euc, int len)
{
- utf_to_local(utf, euc, ULmapSJIS,
- sizeof(ULmapSJIS)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, euc, ULmapSJIS,
+ sizeof(ULmapSJIS) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1537,7 +1536,7 @@ static void
sjis_to_utf(unsigned char *euc, unsigned char *utf, int len)
{
local_to_utf(euc, utf, LUmapSJIS,
- sizeof(LUmapSJIS)/sizeof(pg_local_to_utf), SJIS, len);
+ sizeof(LUmapSJIS) / sizeof(pg_local_to_utf), SJIS, len);
}
/*
@@ -1547,8 +1546,8 @@ static void
utf_to_big5(unsigned char *utf, unsigned char *euc, int len)
{
- utf_to_local(utf, euc, ULmapBIG5,
- sizeof(ULmapBIG5)/sizeof(pg_utf_to_local), len);
+ utf_to_local(utf, euc, ULmapBIG5,
+ sizeof(ULmapBIG5) / sizeof(pg_utf_to_local), len);
}
/*
@@ -1558,85 +1557,85 @@ static void
big5_to_utf(unsigned char *euc, unsigned char *utf, int len)
{
local_to_utf(euc, utf, LUmapBIG5,
- sizeof(LUmapBIG5)/sizeof(pg_local_to_utf), BIG5, len);
+ sizeof(LUmapBIG5) / sizeof(pg_local_to_utf), BIG5, len);
}
/*-----------------------------------------------------------------*/
pg_encoding_conv_tbl pg_conv_tbl[] = {
{SQL_ASCII, "SQL_ASCII", 0, ascii2mic, mic2ascii,
- ascii2utf, utf2ascii}, /* SQL/ASCII */
+ ascii2utf, utf2ascii}, /* SQL/ASCII */
{EUC_JP, "EUC_JP", 0, euc_jp2mic, mic2euc_jp,
- euc_jp_to_utf, utf_to_euc_jp}, /* EUC_JP */
+ euc_jp_to_utf, utf_to_euc_jp}, /* EUC_JP */
{EUC_CN, "EUC_CN", 0, euc_cn2mic, mic2euc_cn,
- euc_cn_to_utf, utf_to_euc_cn}, /* EUC_CN */
+ euc_cn_to_utf, utf_to_euc_cn}, /* EUC_CN */
{EUC_KR, "EUC_KR", 0, euc_kr2mic, mic2euc_kr,
euc_kr_to_utf, utf_to_euc_kr}, /* EUC_KR */
{EUC_TW, "EUC_TW", 0, euc_tw2mic, mic2euc_tw,
- euc_tw_to_utf, utf_to_euc_tw}, /* EUC_TW */
+ euc_tw_to_utf, utf_to_euc_tw}, /* EUC_TW */
{UNICODE, "UNICODE", 0, 0, 0}, /* UNICODE */
{MULE_INTERNAL, "MULE_INTERNAL", 0, 0, 0}, /* MULE_INTERNAL */
{LATIN1, "LATIN1", 0, latin12mic, mic2latin1,
- latin1_to_utf, utf_to_latin1}, /* ISO 8859 Latin 1 */
+ latin1_to_utf, utf_to_latin1}, /* ISO 8859 Latin 1 */
{LATIN2, "LATIN2", 0, latin22mic, mic2latin2,
- latin2_to_utf, utf_to_latin2}, /* ISO 8859 Latin 2 */
+ latin2_to_utf, utf_to_latin2}, /* ISO 8859 Latin 2 */
{LATIN3, "LATIN3", 0, latin32mic, mic2latin3,
- latin3_to_utf, utf_to_latin3}, /* ISO 8859 Latin 3 */
+ latin3_to_utf, utf_to_latin3}, /* ISO 8859 Latin 3 */
{LATIN4, "LATIN4", 0, latin42mic, mic2latin4,
- latin4_to_utf, utf_to_latin4}, /* ISO 8859 Latin 4 */
+ latin4_to_utf, utf_to_latin4}, /* ISO 8859 Latin 4 */
{LATIN5, "LATIN5", 0, iso2mic, mic2iso,
- latin5_to_utf, utf_to_latin5}, /* ISO 8859 Latin 5 */
+ latin5_to_utf, utf_to_latin5}, /* ISO 8859 Latin 5 */
{KOI8, "KOI8", 0, koi2mic, mic2koi,
- 0, 0}, /* KOI8-R */
+ 0, 0}, /* KOI8-R */
{WIN, "WIN", 0, win2mic, mic2win,
- 0,0}, /* CP1251 */
+ 0, 0}, /* CP1251 */
{ALT, "ALT", 0, alt2mic, mic2alt,
- 0,0}, /* CP866 */
+ 0, 0}, /* CP866 */
{SJIS, "SJIS", 1, sjis2mic, mic2sjis,
- sjis_to_utf, utf_to_sjis}, /* SJIS */
+ sjis_to_utf, utf_to_sjis}, /* SJIS */
{BIG5, "BIG5", 1, big52mic, mic2big5,
- big5_to_utf, utf_to_big5}, /* Big5 */
+ big5_to_utf, utf_to_big5}, /* Big5 */
{WIN1250, "WIN1250", 1, win12502mic, mic2win1250,
- 0,0}, /* WIN 1250 */
- {-1, "", 0, 0, 0, 0} /* end mark */
+ 0, 0}, /* WIN 1250 */
+ {-1, "", 0, 0, 0, 0} /* end mark */
};
#else
pg_encoding_conv_tbl pg_conv_tbl[] = {
{SQL_ASCII, "SQL_ASCII", 0, ascii2mic, mic2ascii,
- 0, 0}, /* SQL/ASCII */
+ 0, 0}, /* SQL/ASCII */
{EUC_JP, "EUC_JP", 0, euc_jp2mic, mic2euc_jp,
- 0, 0}, /* EUC_JP */
+ 0, 0}, /* EUC_JP */
{EUC_CN, "EUC_CN", 0, euc_cn2mic, mic2euc_cn,
- 0, 0}, /* EUC_CN */
+ 0, 0}, /* EUC_CN */
{EUC_KR, "EUC_KR", 0, euc_kr2mic, mic2euc_kr}, /* EUC_KR */
{EUC_TW, "EUC_TW", 0, euc_tw2mic, mic2euc_tw}, /* EUC_TW */
{UNICODE, "UNICODE", 0, 0, 0}, /* UNICODE */
{MULE_INTERNAL, "MULE_INTERNAL", 0, 0, 0}, /* MULE_INTERNAL */
{LATIN1, "LATIN1", 0, latin12mic, mic2latin1,
- 0, 0}, /* ISO 8859 Latin 1 */
+ 0, 0}, /* ISO 8859 Latin 1 */
{LATIN2, "LATIN2", 0, latin22mic, mic2latin2,
- 0, 0}, /* ISO 8859 Latin 2 */
+ 0, 0}, /* ISO 8859 Latin 2 */
{LATIN3, "LATIN3", 0, latin32mic, mic2latin3,
- 0, 0}, /* ISO 8859 Latin 3 */
+ 0, 0}, /* ISO 8859 Latin 3 */
{LATIN4, "LATIN4", 0, latin42mic, mic2latin4,
- 0, 0}, /* ISO 8859 Latin 4 */
+ 0, 0}, /* ISO 8859 Latin 4 */
{LATIN5, "LATIN5", 0, iso2mic, mic2iso,
- 0, 0}, /* ISO 8859 Latin 5 */
+ 0, 0}, /* ISO 8859 Latin 5 */
{KOI8, "KOI8", 0, koi2mic, mic2koi,
- 0, 0}, /* KOI8-R */
+ 0, 0}, /* KOI8-R */
{WIN, "WIN", 0, win2mic, mic2win,
- 0,0}, /* CP1251 */
+ 0, 0}, /* CP1251 */
{ALT, "ALT", 0, alt2mic, mic2alt,
- 0,0}, /* CP866 */
+ 0, 0}, /* CP866 */
{SJIS, "SJIS", 1, sjis2mic, mic2sjis,
- 0, 0}, /* SJIS */
+ 0, 0}, /* SJIS */
{BIG5, "BIG5", 1, big52mic, mic2big5,
- 0,0}, /* Big5 */
+ 0, 0}, /* Big5 */
{WIN1250, "WIN1250", 1, win12502mic, mic2win1250,
- 0,0}, /* WIN 1250 */
- {-1, "", 0, 0, 0, 0} /* end mark */
+ 0, 0}, /* WIN 1250 */
+ {-1, "", 0, 0, 0, 0} /* end mark */
};
-#endif /* UNICODE_CONVERSION */
+#endif /* UNICODE_CONVERSION */
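
The conv.c hunks above only reindent, but the routines they touch share one mechanism: utf_to_local() and local_to_utf() pack each multibyte sequence into an unsigned int and bsearch() it in a map array sorted on that key, using compare1()/compare2() as comparators; a miss is reported with elog(NOTICE) and skipped. The following standalone sketch shows the same lookup against a hypothetical three-entry map (the real code uses the generated ULmap*/LUmap* tables, and compare1() simply returns v1 - v2):

/*
 * Standalone sketch of the bsearch()-based lookup used by utf_to_local().
 * The three-entry map is hypothetical; the real code uses the generated
 * ULmapISO8859_x / ULmapEUC_xx tables.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	unsigned int utf;			/* UTF-8 bytes packed into an int */
	unsigned int code;			/* corresponding local code point */
} demo_utf_to_local;

/* must stay sorted on the "utf" member for bsearch() */
static demo_utf_to_local demo_map[] = {
	{0xc2a1, 0xa1},				/* U+00A1 INVERTED EXCLAMATION MARK */
	{0xc2bf, 0xbf},				/* U+00BF INVERTED QUESTION MARK */
	{0xc3a9, 0xe9}				/* U+00E9 LATIN SMALL LETTER E WITH ACUTE */
};

/* same shape as compare1(), but avoids unsigned subtraction wraparound */
static int
demo_compare(const void *p1, const void *p2)
{
	unsigned int v1 = *(const unsigned int *) p1;
	unsigned int v2 = ((const demo_utf_to_local *) p2)->utf;

	return (v1 > v2) - (v1 < v2);
}

int
main(void)
{
	unsigned int key = 0xc3a9;	/* two UTF-8 bytes packed, as utf_to_local does */
	demo_utf_to_local *p;

	p = bsearch(&key, demo_map,
				sizeof(demo_map) / sizeof(demo_utf_to_local),
				sizeof(demo_utf_to_local), demo_compare);
	if (p)
		printf("0x%04x -> 0x%02x\n", key, p->code);
	else
		printf("0x%04x not in map\n", key);	/* the elog(NOTICE) "Ignored" case */
	return 0;
}

Compiled and run, this prints 0xc3a9 -> 0xe9, which corresponds to the successful-bsearch() path in the real converters.
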
diff --git a/src/backend/utils/mb/liketest.c b/src/backend/utils/mb/liketest.c
index 3c2e9de7507..5cb6860812c 100644
--- a/src/backend/utils/mb/liketest.c
+++ b/src/backend/utils/mb/liketest.c
@@ -12,37 +12,42 @@
#define UCHARMAX 0xff
/*----------------------------------------------------------------*/
-static int wchareq(unsigned char *p1, unsigned char *p2)
+static int
+wchareq(unsigned char *p1, unsigned char *p2)
{
- int l;
+ int l;
l = pg_mblen(p1);
- if (pg_mblen(p2) != l) {
- return(0);
- }
- while (l--) {
+ if (pg_mblen(p2) != l)
+ return (0);
+ while (l--)
+ {
if (*p1++ != *p2++)
- return(0);
+ return (0);
}
- return(1);
+ return (1);
}
-static int iwchareq(unsigned char *p1, unsigned char *p2)
+static int
+iwchareq(unsigned char *p1, unsigned char *p2)
{
- int c1, c2;
- int l;
+ int c1,
+ c2;
+ int l;
- /* short cut. if *p1 and *p2 is lower than UCHARMAX, then
- we assume they are ASCII */
+ /*
+ * short cut. if *p1 and *p2 is lower than UCHARMAX, then we assume
+ * they are ASCII
+ */
if (*p1 < UCHARMAX && *p2 < UCHARMAX)
- return(tolower(*p1) == tolower(*p2));
+ return (tolower(*p1) == tolower(*p2));
if (*p1 < UCHARMAX)
c1 = tolower(*p1);
else
{
l = pg_mblen(p1);
- (void)pg_mb2wchar_with_len(p1, (pg_wchar *)&c1, l);
+ (void) pg_mb2wchar_with_len(p1, (pg_wchar *) & c1, l);
c1 = tolower(c1);
}
if (*p2 < UCHARMAX)
@@ -50,10 +55,10 @@ static int iwchareq(unsigned char *p1, unsigned char *p2)
else
{
l = pg_mblen(p2);
- (void)pg_mb2wchar_with_len(p2, (pg_wchar *)&c2, l);
+ (void) pg_mb2wchar_with_len(p2, (pg_wchar *) & c2, l);
c2 = tolower(c2);
}
- return(c1 == c2);
+ return (c1 == c2);
}
#ifdef MULTIBYTE
@@ -69,23 +74,28 @@ static int iwchareq(unsigned char *p1, unsigned char *p2)
static int
MatchText(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
{
- /* Fast path for match-everything pattern
- * Include weird case of escape character as a percent sign or underscore,
- * when presumably that wildcard character becomes a literal.
+
+ /*
+ * Fast path for match-everything pattern Include weird case of escape
+ * character as a percent sign or underscore, when presumably that
+ * wildcard character becomes a literal.
*/
if ((plen == 1) && (*p == '%')
- && ! ((e != NULL) && (*e == '%')))
+ && !((e != NULL) && (*e == '%')))
return LIKE_TRUE;
while ((tlen > 0) && (plen > 0))
{
- /* If an escape character was specified and we find it here in the pattern,
- * then we'd better have an exact match for the next character.
+
+ /*
+ * If an escape character was specified and we find it here in the
+ * pattern, then we'd better have an exact match for the next
+ * character.
*/
- if ((e != NULL) && CHAREQ(p,e))
+ if ((e != NULL) && CHAREQ(p, e))
{
NextChar(p, plen);
- if ((plen <= 0) || !CHAREQ(t,p))
+ if ((plen <= 0) || !CHAREQ(t, p))
return LIKE_FALSE;
}
else if (*p == '%')
@@ -99,23 +109,23 @@ MatchText(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can
- * match the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match
+ * the rest of the pattern.
*/
while (tlen > 0)
{
+
/*
- * Optimization to prevent most recursion: don't
- * recurse unless first pattern char might match this
- * text char.
+ * Optimization to prevent most recursion: don't recurse
+ * unless first pattern char might match this text char.
*/
- if (CHAREQ(t,p) || (*p == '_')
- || ((e != NULL) && CHAREQ(p,e)))
+ if (CHAREQ(t, p) || (*p == '_')
+ || ((e != NULL) && CHAREQ(p, e)))
{
- int matched = MatchText(t, tlen, p, plen, e);
+ int matched = MatchText(t, tlen, p, plen, e);
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
@@ -127,9 +137,11 @@ MatchText(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
*/
return LIKE_ABORT;
}
- else if ((*p != '_') && !CHAREQ(t,p))
+ else if ((*p != '_') && !CHAREQ(t, p))
{
- /* Not the single-character wildcard and no explicit match?
+
+ /*
+ * Not the single-character wildcard and no explicit match?
* Then time to quit...
*/
return LIKE_FALSE;
@@ -143,7 +155,8 @@ MatchText(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
return LIKE_FALSE; /* end of pattern, but not of text */
/* End of input string. Do we have matching pattern remaining? */
- while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of pattern */
+ while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of
+ * pattern */
NextChar(p, plen);
if (plen <= 0)
return LIKE_TRUE;
@@ -153,28 +166,33 @@ MatchText(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
* start matching this pattern.
*/
return LIKE_ABORT;
-} /* MatchText() */
+} /* MatchText() */
static int
MatchTextLower(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
{
- /* Fast path for match-everything pattern
- * Include weird case of escape character as a percent sign or underscore,
- * when presumably that wildcard character becomes a literal.
+
+ /*
+ * Fast path for match-everything pattern Include weird case of escape
+ * character as a percent sign or underscore, when presumably that
+ * wildcard character becomes a literal.
*/
if ((plen == 1) && (*p == '%')
- && ! ((e != NULL) && (*e == '%')))
+ && !((e != NULL) && (*e == '%')))
return LIKE_TRUE;
while ((tlen > 0) && (plen > 0))
{
- /* If an escape character was specified and we find it here in the pattern,
- * then we'd better have an exact match for the next character.
+
+ /*
+ * If an escape character was specified and we find it here in the
+ * pattern, then we'd better have an exact match for the next
+ * character.
*/
- if ((e != NULL) && ICHAREQ(p,e))
+ if ((e != NULL) && ICHAREQ(p, e))
{
NextChar(p, plen);
- if ((plen <= 0) || !ICHAREQ(t,p))
+ if ((plen <= 0) || !ICHAREQ(t, p))
return LIKE_FALSE;
}
else if (*p == '%')
@@ -188,23 +206,23 @@ MatchTextLower(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can
- * match the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match
+ * the rest of the pattern.
*/
while (tlen > 0)
{
+
/*
- * Optimization to prevent most recursion: don't
- * recurse unless first pattern char might match this
- * text char.
+ * Optimization to prevent most recursion: don't recurse
+ * unless first pattern char might match this text char.
*/
- if (ICHAREQ(t,p) || (*p == '_')
- || ((e != NULL) && ICHAREQ(p,e)))
+ if (ICHAREQ(t, p) || (*p == '_')
+ || ((e != NULL) && ICHAREQ(p, e)))
{
- int matched = MatchText(t, tlen, p, plen, e);
+ int matched = MatchText(t, tlen, p, plen, e);
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
@@ -216,10 +234,8 @@ MatchTextLower(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
*/
return LIKE_ABORT;
}
- else if ((*p != '_') && !ICHAREQ(t,p))
- {
+ else if ((*p != '_') && !ICHAREQ(t, p))
return LIKE_FALSE;
- }
NextChar(t, tlen);
NextChar(p, plen);
@@ -229,7 +245,8 @@ MatchTextLower(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
return LIKE_FALSE; /* end of pattern, but not of text */
/* End of input string. Do we have matching pattern remaining? */
- while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of pattern */
+ while ((plen > 0) && (*p == '%')) /* allow multiple %'s at end of
+ * pattern */
NextChar(p, plen);
if (plen <= 0)
return LIKE_TRUE;
@@ -239,14 +256,16 @@ MatchTextLower(PG_CHAR * t, int tlen, PG_CHAR * p, int plen, char *e)
* start matching this pattern.
*/
return LIKE_ABORT;
-} /* MatchTextLower() */
+} /* MatchTextLower() */
main()
{
unsigned char *t = "��Z01��";
unsigned char *p = "_Z%";
- int tlen, plen;
+ int tlen,
+ plen;
+
tlen = strlen(t);
plen = strlen(p);
- printf("%d\n",MatchTextLower(t,tlen,p,plen,"\\"));
+ printf("%d\n", MatchTextLower(t, tlen, p, plen, "\\"));
}
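
liketest.c is a small standalone harness for the multibyte LIKE matcher: MatchText()/MatchTextLower() walk text and pattern in parallel, recurse on '%', treat '_' as match-one-character, give the escape character exact-match-next semantics, and return LIKE_TRUE/LIKE_FALSE/LIKE_ABORT. A single-byte sketch of that recursion, with hypothetical names and without the escape or CHAREQ/NextChar machinery, looks like this:

/*
 * Single-byte sketch of the MatchText() recursion: '%' matches any run of
 * characters, '_' matches exactly one.  Escape handling and the multibyte
 * CHAREQ/NextChar machinery of the real code are omitted.
 */
#include <stdio.h>

static int
like_match(const char *t, const char *p)
{
	while (*t && *p)
	{
		if (*p == '%')
		{
			p++;
			if (*p == '\0')
				return 1;		/* trailing %: matches the rest */
			/* try to match the remaining pattern at every text position */
			for (; *t; t++)
				if (like_match(t, p))
					return 1;
			return 0;
		}
		else if (*p != '_' && *p != *t)
			return 0;			/* neither wildcard nor literal match */
		t++;
		p++;
	}
	/* text exhausted: any remaining pattern chars must all be '%' */
	while (*p == '%')
		p++;
	return *t == '\0' && *p == '\0';
}

int
main(void)
{
	printf("%d\n", like_match("abcZ01", "_bc%1"));	/* 1 */
	printf("%d\n", like_match("abcZ01", "a__%9"));	/* 0 */
	return 0;
}

The three-valued result in the real code is an optimization this sketch omits: LIKE_ABORT lets an inner '%' scan stop as soon as the text is exhausted with pattern remaining, since no later starting position can succeed either.
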
diff --git a/src/backend/utils/mb/palloc.c b/src/backend/utils/mb/palloc.c
index e547e7a79e5..3e1b70aae9a 100644
--- a/src/backend/utils/mb/palloc.c
+++ b/src/backend/utils/mb/palloc.c
@@ -4,7 +4,7 @@
void
elog(int lev, const char *fmt,...)
{
- printf(fmt);
+ printf(fmt);
}
MemoryContext CurrentMemoryContext;
diff --git a/src/backend/utils/mb/utftest.c b/src/backend/utils/mb/utftest.c
index 5f0f7f9fb7e..4588e2699c9 100644
--- a/src/backend/utils/mb/utftest.c
+++ b/src/backend/utils/mb/utftest.c
@@ -1,5 +1,5 @@
/*
- * $Id: utftest.c,v 1.4 2000/10/12 06:06:50 ishii Exp $
+ * $Id: utftest.c,v 1.5 2001/03/22 04:00:05 momjian Exp $
*/
#include "conv.c"
#include "wchar.c"
@@ -49,5 +49,5 @@ main()
}
printf("\n");
- return(0);
+ return (0);
}
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 6d10cad020a..f3d91e963f3 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1,7 +1,7 @@
/*
* conversion functions between pg_wchar and multi-byte streams.
* Tatsuo Ishii
- * $Id: wchar.c,v 1.16 2001/03/08 00:24:34 tgl Exp $
+ * $Id: wchar.c,v 1.17 2001/03/22 04:00:05 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
@@ -22,10 +22,10 @@
/*
* SQL/ASCII
*/
-static int pg_ascii2wchar_with_len
+static int pg_ascii2wchar_with_len
(const unsigned char *from, pg_wchar * to, int len)
{
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -34,7 +34,7 @@ static int pg_ascii2wchar_with_len
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
static int
@@ -47,10 +47,10 @@ pg_ascii_mblen(const unsigned char *s)
* EUC
*/
-static int pg_euc2wchar_with_len
+static int pg_euc2wchar_with_len
(const unsigned char *from, pg_wchar * to, int len)
{
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -82,7 +82,7 @@ static int pg_euc2wchar_with_len
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
static int
@@ -104,10 +104,10 @@ pg_euc_mblen(const unsigned char *s)
/*
* EUC_JP
*/
-static int pg_eucjp2wchar_with_len
+static int pg_eucjp2wchar_with_len
(const unsigned char *from, pg_wchar * to, int len)
{
- return(pg_euc2wchar_with_len(from, to, len));
+ return (pg_euc2wchar_with_len(from, to, len));
}
static int
@@ -119,10 +119,10 @@ pg_eucjp_mblen(const unsigned char *s)
/*
* EUC_KR
*/
-static int pg_euckr2wchar_with_len
+static int pg_euckr2wchar_with_len
(const unsigned char *from, pg_wchar * to, int len)
{
- return(pg_euc2wchar_with_len(from, to, len));
+ return (pg_euc2wchar_with_len(from, to, len));
}
static int
@@ -134,10 +134,10 @@ pg_euckr_mblen(const unsigned char *s)
/*
* EUC_CN
*/
-static int pg_euccn2wchar_with_len
+static int pg_euccn2wchar_with_len
(const unsigned char *from, pg_wchar * to, int len)
{
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -170,7 +170,7 @@ static int pg_euccn2wchar_with_len
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
static int
@@ -188,10 +188,10 @@ pg_euccn_mblen(const unsigned char *s)
/*
* EUC_TW
*/
-static int pg_euctw2wchar_with_len
+static int pg_euctw2wchar_with_len
(const unsigned char *from, pg_wchar * to, int len)
{
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -225,7 +225,7 @@ static int pg_euctw2wchar_with_len
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
static int
@@ -256,7 +256,7 @@ pg_utf2wchar_with_len(const unsigned char *from, pg_wchar * to, int len)
unsigned char c1,
c2,
c3;
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -292,7 +292,7 @@ pg_utf2wchar_with_len(const unsigned char *from, pg_wchar * to, int len)
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
/*
@@ -321,7 +321,7 @@ pg_utf_mblen(const unsigned char *s)
static int
pg_mule2wchar_with_len(const unsigned char *from, pg_wchar * to, int len)
{
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -362,7 +362,7 @@ pg_mule2wchar_with_len(const unsigned char *from, pg_wchar * to, int len)
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
int
@@ -391,7 +391,7 @@ pg_mule_mblen(const unsigned char *s)
static int
pg_latin12wchar_with_len(const unsigned char *from, pg_wchar * to, int len)
{
- int cnt = 0;
+ int cnt = 0;
while (len > 0 && *from)
{
@@ -400,7 +400,7 @@ pg_latin12wchar_with_len(const unsigned char *from, pg_wchar * to, int len)
cnt++;
}
*to = 0;
- return(cnt);
+ return (cnt);
}
static int
@@ -496,7 +496,7 @@ pg_mic_mblen(const unsigned char *mbstr)
return (pg_mule_mblen(mbstr));
}
-/*
+/*
* Returns the byte length of a multi-byte word.
*/
int
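
The wchar.c changes reindent the per-encoding ...2wchar_with_len converters and the pg_*_mblen helpers. For UTF-8 the sequence length is read off the lead byte and each continuation byte contributes six bits (this version only handles sequences up to three bytes). A hypothetical standalone decoder following the same shape:

/*
 * Sketch of the UTF-8 handling in pg_utf_mblen()/pg_utf2wchar_with_len():
 * the lead byte determines the sequence length (1-3 bytes, as in this
 * 2001-era code) and the continuation bytes contribute 6 bits each.
 */
#include <stdio.h>

static int
utf8_mblen(const unsigned char *s)
{
	if ((*s & 0x80) == 0)
		return 1;				/* 0xxxxxxx: plain ASCII */
	if ((*s & 0xe0) == 0xc0)
		return 2;				/* 110xxxxx 10xxxxxx */
	return 3;					/* 1110xxxx 10xxxxxx 10xxxxxx */
}

static unsigned int
utf8_decode(const unsigned char *s, int l)
{
	if (l == 1)
		return s[0];
	if (l == 2)
		return ((s[0] & 0x1f) << 6) | (s[1] & 0x3f);
	return ((s[0] & 0x0f) << 12) | ((s[1] & 0x3f) << 6) | (s[2] & 0x3f);
}

int
main(void)
{
	const unsigned char text[] = "A\xc3\xa9\xe2\x82\xac";	/* A, e-acute, euro sign */
	const unsigned char *p = text;

	while (*p)
	{
		int			l = utf8_mblen(p);

		printf("len %d  code point U+%04X\n", l, utf8_decode(p, l));
		p += l;
	}
	return 0;
}

pg_utf_mblen() above makes the same lead-byte test; utf8_mblen() here simplifies it by treating anything that is neither ASCII nor a two-byte lead as a three-byte lead.
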
diff --git a/src/backend/utils/misc/database.c b/src/backend/utils/misc/database.c
index 85f16fec347..5e14ef778bd 100644
--- a/src/backend/utils/misc/database.c
+++ b/src/backend/utils/misc/database.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/misc/Attic/database.c,v 1.43 2001/01/24 19:43:16 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/misc/Attic/database.c,v 1.44 2001/03/22 04:00:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -149,7 +149,8 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
sprintf(dbfname, "%s/global/%s", DataDir, DatabaseRelationName);
#else
{
- RelFileNode rnode;
+ RelFileNode rnode;
+
rnode.tblNode = 0;
rnode.relNode = RelOid_pg_database;
dbfname = relpath(rnode);
@@ -182,8 +183,8 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
while ((nbytes = read(dbfd, pg, BLCKSZ)) == BLCKSZ)
{
- OffsetNumber max = PageGetMaxOffsetNumber(pg);
- OffsetNumber lineoff;
+ OffsetNumber max = PageGetMaxOffsetNumber(pg);
+ OffsetNumber lineoff;
/* look at each tuple on the page */
for (lineoff = FirstOffsetNumber; lineoff <= max; lineoff++)
@@ -202,7 +203,7 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
* Check to see if tuple is valid (committed).
*
* XXX warning, will robinson: violation of transaction semantics
- * happens right here. We cannot really determine if the tuple
+ * happens right here. We cannot really determine if the tuple
* is valid without checking transaction commit status, and the
* only way to do that at init time is to paw over pg_log by hand,
* too. Instead of checking, we assume that the inserting
@@ -222,7 +223,7 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
* handle the password relation?
*--------------------
*/
- if (! PhonyHeapTupleSatisfiesNow(tup.t_data))
+ if (!PhonyHeapTupleSatisfiesNow(tup.t_data))
continue;
/*
@@ -236,7 +237,7 @@ GetRawDatabaseInfo(const char *name, Oid *db_id, char *path)
*db_id = tup.t_data->t_oid;
pathlen = VARSIZE(&(tup_db->datpath)) - VARHDRSZ;
if (pathlen >= MAXPGPATH)
- pathlen = MAXPGPATH-1; /* pure paranoia */
+ pathlen = MAXPGPATH - 1; /* pure paranoia */
strncpy(path, VARDATA(&(tup_db->datpath)), pathlen);
path[pathlen] = '\0';
goto done;
@@ -257,7 +258,7 @@ done:
* PhonyHeapTupleSatisfiesNow --- cut-down tuple time qual test
*
* This is a simplified version of HeapTupleSatisfiesNow() that does not
- * depend on having transaction commit info available. Any transaction
+ * depend on having transaction commit info available. Any transaction
* that touched the tuple is assumed committed unless later marked invalid.
* (While we could think about more complex rules, this seems appropriate
* for examining pg_database, since both CREATE DATABASE and DROP DATABASE
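
One detail worth noting in the GetRawDatabaseInfo() hunk is the datpath copy: the length is clamped to MAXPGPATH - 1 and the buffer is NUL-terminated explicitly, because strncpy() alone does not guarantee termination. A small self-contained sketch of that pattern (MAX_PATH_DEMO is a stand-in for MAXPGPATH):

/*
 * The datpath copy above clamps the length and terminates explicitly,
 * since strncpy() does not guarantee a trailing NUL when the source is
 * longer than the count.
 */
#include <stdio.h>
#include <string.h>

#define MAX_PATH_DEMO 16

int
main(void)
{
	const char *src = "a/deliberately/overlong/database/path";
	char		path[MAX_PATH_DEMO];
	size_t		pathlen = strlen(src);

	if (pathlen >= MAX_PATH_DEMO)
		pathlen = MAX_PATH_DEMO - 1;	/* "pure paranoia", as the comment says */
	strncpy(path, src, pathlen);
	path[pathlen] = '\0';
	printf("%s\n", path);
	return 0;
}
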
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index d05bb75a294..feceb5d9500 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -4,7 +4,7 @@
* Support for grand unified configuration scheme, including SET
* command, configuration file, and command line options.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/misc/guc.c,v 1.33 2001/03/16 05:44:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/misc/guc.c,v 1.34 2001/03/22 04:00:06 momjian Exp $
*
* Copyright 2000 by PostgreSQL Global Development Group
* Written by Peter Eisentraut <peter_e@gmx.net>.
@@ -36,39 +36,42 @@
/* XXX these should be in other modules' header files */
extern bool Log_connections;
-extern int CheckPointTimeout;
-extern int CommitDelay;
-extern int CommitSiblings;
+extern int CheckPointTimeout;
+extern int CommitDelay;
+extern int CommitSiblings;
extern bool FixBTree;
#ifdef ENABLE_SYSLOG
extern char *Syslog_facility;
extern char *Syslog_ident;
static bool check_facility(const char *facility);
+
#endif
/*
* Debugging options
*/
#ifdef USE_ASSERT_CHECKING
-bool assert_enabled = true;
+bool assert_enabled = true;
+
#endif
-bool Debug_print_query = false;
-bool Debug_print_plan = false;
-bool Debug_print_parse = false;
-bool Debug_print_rewritten = false;
-bool Debug_pretty_print = false;
+bool Debug_print_query = false;
+bool Debug_print_plan = false;
+bool Debug_print_parse = false;
+bool Debug_print_rewritten = false;
+bool Debug_pretty_print = false;
-bool Show_parser_stats = false;
-bool Show_planner_stats = false;
-bool Show_executor_stats = false;
-bool Show_query_stats = false; /* this is sort of all three above together */
-bool Show_btree_build_stats = false;
+bool Show_parser_stats = false;
+bool Show_planner_stats = false;
+bool Show_executor_stats = false;
+bool Show_query_stats = false; /* this is sort of all three above
+ * together */
+bool Show_btree_build_stats = false;
-bool SQL_inheritance = true;
+bool SQL_inheritance = true;
#ifndef PG_KRB_SRVTAB
-# define PG_KRB_SRVTAB ""
+#define PG_KRB_SRVTAB ""
#endif
@@ -86,39 +89,39 @@ enum config_type
struct config_generic
{
const char *name;
- GucContext context;
- void *variable;
+ GucContext context;
+ void *variable;
};
struct config_bool
{
const char *name;
- GucContext context;
- bool *variable;
- bool default_val;
+ GucContext context;
+ bool *variable;
+ bool default_val;
};
struct config_int
{
const char *name;
- GucContext context;
- int *variable;
- int default_val;
- int min;
- int max;
+ GucContext context;
+ int *variable;
+ int default_val;
+ int min;
+ int max;
};
struct config_real
{
const char *name;
- GucContext context;
- double *variable;
- double default_val;
- double min;
- double max;
+ GucContext context;
+ double *variable;
+ double default_val;
+ double min;
+ double max;
};
/*
@@ -130,11 +133,11 @@ struct config_real
struct config_string
{
const char *name;
- GucContext context;
- char **variable;
+ GucContext context;
+ char **variable;
const char *default_val;
- bool (*parse_hook)(const char *proposed);
- void (*assign_hook)(const char *newval);
+ bool (*parse_hook) (const char *proposed);
+ void (*assign_hook) (const char *newval);
};
@@ -164,189 +167,189 @@ struct config_string
/******** option names follow ********/
static struct config_bool
-ConfigureNamesBool[] =
+ ConfigureNamesBool[] =
{
- {"enable_seqscan", PGC_USERSET, &enable_seqscan, true},
- {"enable_indexscan", PGC_USERSET, &enable_indexscan, true},
- {"enable_tidscan", PGC_USERSET, &enable_tidscan, true},
- {"enable_sort", PGC_USERSET, &enable_sort, true},
- {"enable_nestloop", PGC_USERSET, &enable_nestloop, true},
- {"enable_mergejoin", PGC_USERSET, &enable_mergejoin, true},
- {"enable_hashjoin", PGC_USERSET, &enable_hashjoin, true},
-
- {"ksqo", PGC_USERSET, &_use_keyset_query_optimizer, false},
- {"geqo", PGC_USERSET, &enable_geqo, true},
-
- {"tcpip_socket", PGC_POSTMASTER, &NetServer, false},
- {"ssl", PGC_POSTMASTER, &EnableSSL, false},
- {"fsync", PGC_SIGHUP, &enableFsync, true},
- {"silent_mode", PGC_POSTMASTER, &SilentMode, false},
-
- {"log_connections", PGC_SIGHUP, &Log_connections, false},
- {"log_timestamp", PGC_SIGHUP, &Log_timestamp, false},
- {"log_pid", PGC_SIGHUP, &Log_pid, false},
+ {"enable_seqscan", PGC_USERSET, &enable_seqscan, true},
+ {"enable_indexscan", PGC_USERSET, &enable_indexscan, true},
+ {"enable_tidscan", PGC_USERSET, &enable_tidscan, true},
+ {"enable_sort", PGC_USERSET, &enable_sort, true},
+ {"enable_nestloop", PGC_USERSET, &enable_nestloop, true},
+ {"enable_mergejoin", PGC_USERSET, &enable_mergejoin, true},
+ {"enable_hashjoin", PGC_USERSET, &enable_hashjoin, true},
+
+ {"ksqo", PGC_USERSET, &_use_keyset_query_optimizer, false},
+ {"geqo", PGC_USERSET, &enable_geqo, true},
+
+ {"tcpip_socket", PGC_POSTMASTER, &NetServer, false},
+ {"ssl", PGC_POSTMASTER, &EnableSSL, false},
+ {"fsync", PGC_SIGHUP, &enableFsync, true},
+ {"silent_mode", PGC_POSTMASTER, &SilentMode, false},
+
+ {"log_connections", PGC_SIGHUP, &Log_connections, false},
+ {"log_timestamp", PGC_SIGHUP, &Log_timestamp, false},
+ {"log_pid", PGC_SIGHUP, &Log_pid, false},
#ifdef USE_ASSERT_CHECKING
- {"debug_assertions", PGC_USERSET, &assert_enabled, true},
+ {"debug_assertions", PGC_USERSET, &assert_enabled, true},
#endif
- {"debug_print_query", PGC_USERSET, &Debug_print_query, false},
- {"debug_print_parse", PGC_USERSET, &Debug_print_parse, false},
- {"debug_print_rewritten", PGC_USERSET, &Debug_print_rewritten, false},
- {"debug_print_plan", PGC_USERSET, &Debug_print_plan, false},
- {"debug_pretty_print", PGC_USERSET, &Debug_pretty_print, false},
+ {"debug_print_query", PGC_USERSET, &Debug_print_query, false},
+ {"debug_print_parse", PGC_USERSET, &Debug_print_parse, false},
+ {"debug_print_rewritten", PGC_USERSET, &Debug_print_rewritten, false},
+ {"debug_print_plan", PGC_USERSET, &Debug_print_plan, false},
+ {"debug_pretty_print", PGC_USERSET, &Debug_pretty_print, false},
- {"show_parser_stats", PGC_USERSET, &Show_parser_stats, false},
- {"show_planner_stats", PGC_USERSET, &Show_planner_stats, false},
- {"show_executor_stats", PGC_USERSET, &Show_executor_stats, false},
- {"show_query_stats", PGC_USERSET, &Show_query_stats, false},
+ {"show_parser_stats", PGC_USERSET, &Show_parser_stats, false},
+ {"show_planner_stats", PGC_USERSET, &Show_planner_stats, false},
+ {"show_executor_stats", PGC_USERSET, &Show_executor_stats, false},
+ {"show_query_stats", PGC_USERSET, &Show_query_stats, false},
#ifdef BTREE_BUILD_STATS
- {"show_btree_build_stats", PGC_SUSET, &Show_btree_build_stats, false},
+ {"show_btree_build_stats", PGC_SUSET, &Show_btree_build_stats, false},
#endif
- {"trace_notify", PGC_USERSET, &Trace_notify, false},
+ {"trace_notify", PGC_USERSET, &Trace_notify, false},
#ifdef LOCK_DEBUG
- {"trace_locks", PGC_SUSET, &Trace_locks, false},
- {"trace_userlocks", PGC_SUSET, &Trace_userlocks, false},
- {"trace_spinlocks", PGC_SUSET, &Trace_spinlocks, false},
- {"debug_deadlocks", PGC_SUSET, &Debug_deadlocks, false},
+ {"trace_locks", PGC_SUSET, &Trace_locks, false},
+ {"trace_userlocks", PGC_SUSET, &Trace_userlocks, false},
+ {"trace_spinlocks", PGC_SUSET, &Trace_spinlocks, false},
+ {"debug_deadlocks", PGC_SUSET, &Debug_deadlocks, false},
#endif
- {"hostname_lookup", PGC_SIGHUP, &HostnameLookup, false},
- {"show_source_port", PGC_SIGHUP, &ShowPortNumber, false},
+ {"hostname_lookup", PGC_SIGHUP, &HostnameLookup, false},
+ {"show_source_port", PGC_SIGHUP, &ShowPortNumber, false},
- {"sql_inheritance", PGC_USERSET, &SQL_inheritance, true},
+ {"sql_inheritance", PGC_USERSET, &SQL_inheritance, true},
- {"fixbtree", PGC_POSTMASTER, &FixBTree, true},
+ {"fixbtree", PGC_POSTMASTER, &FixBTree, true},
{NULL, 0, NULL, false}
};
static struct config_int
-ConfigureNamesInt[] =
+ ConfigureNamesInt[] =
{
- {"geqo_threshold", PGC_USERSET, &geqo_rels,
- DEFAULT_GEQO_RELS, 2, INT_MAX},
- {"geqo_pool_size", PGC_USERSET, &Geqo_pool_size,
- DEFAULT_GEQO_POOL_SIZE, 0, MAX_GEQO_POOL_SIZE},
- {"geqo_effort", PGC_USERSET, &Geqo_effort,
- 1, 1, INT_MAX},
- {"geqo_generations", PGC_USERSET, &Geqo_generations,
- 0, 0, INT_MAX},
- {"geqo_random_seed", PGC_USERSET, &Geqo_random_seed,
- -1, INT_MIN, INT_MAX},
-
- {"deadlock_timeout", PGC_POSTMASTER, &DeadlockTimeout,
- 1000, 0, INT_MAX},
+ {"geqo_threshold", PGC_USERSET, &geqo_rels,
+ DEFAULT_GEQO_RELS, 2, INT_MAX},
+ {"geqo_pool_size", PGC_USERSET, &Geqo_pool_size,
+ DEFAULT_GEQO_POOL_SIZE, 0, MAX_GEQO_POOL_SIZE},
+ {"geqo_effort", PGC_USERSET, &Geqo_effort,
+ 1, 1, INT_MAX},
+ {"geqo_generations", PGC_USERSET, &Geqo_generations,
+ 0, 0, INT_MAX},
+ {"geqo_random_seed", PGC_USERSET, &Geqo_random_seed,
+ -1, INT_MIN, INT_MAX},
+
+ {"deadlock_timeout", PGC_POSTMASTER, &DeadlockTimeout,
+ 1000, 0, INT_MAX},
#ifdef ENABLE_SYSLOG
- {"syslog", PGC_SIGHUP, &Use_syslog,
- 0, 0, 2},
+ {"syslog", PGC_SIGHUP, &Use_syslog,
+ 0, 0, 2},
#endif
/*
- * Note: There is some postprocessing done in PostmasterMain() to
- * make sure the buffers are at least twice the number of
- * backends, so the constraints here are partially unused.
+ * Note: There is some postprocessing done in PostmasterMain() to make
+ * sure the buffers are at least twice the number of backends, so the
+ * constraints here are partially unused.
*/
- {"max_connections", PGC_POSTMASTER, &MaxBackends,
- DEF_MAXBACKENDS, 1, MAXBACKENDS},
- {"shared_buffers", PGC_POSTMASTER, &NBuffers,
- DEF_NBUFFERS, 16, INT_MAX},
- {"port", PGC_POSTMASTER, &PostPortNumber,
- DEF_PGPORT, 1, 65535},
+ {"max_connections", PGC_POSTMASTER, &MaxBackends,
+ DEF_MAXBACKENDS, 1, MAXBACKENDS},
+ {"shared_buffers", PGC_POSTMASTER, &NBuffers,
+ DEF_NBUFFERS, 16, INT_MAX},
+ {"port", PGC_POSTMASTER, &PostPortNumber,
+ DEF_PGPORT, 1, 65535},
- {"sort_mem", PGC_USERSET, &SortMem,
- 512, 1, INT_MAX},
+ {"sort_mem", PGC_USERSET, &SortMem,
+ 512, 1, INT_MAX},
- {"debug_level", PGC_USERSET, &DebugLvl,
- 0, 0, 16},
+ {"debug_level", PGC_USERSET, &DebugLvl,
+ 0, 0, 16},
#ifdef LOCK_DEBUG
- {"trace_lock_oidmin", PGC_SUSET, &Trace_lock_oidmin,
- BootstrapObjectIdData, 1, INT_MAX},
- {"trace_lock_table", PGC_SUSET, &Trace_lock_table,
- 0, 0, INT_MAX},
+ {"trace_lock_oidmin", PGC_SUSET, &Trace_lock_oidmin,
+ BootstrapObjectIdData, 1, INT_MAX},
+ {"trace_lock_table", PGC_SUSET, &Trace_lock_table,
+ 0, 0, INT_MAX},
#endif
- {"max_expr_depth", PGC_USERSET, &max_expr_depth,
- DEFAULT_MAX_EXPR_DEPTH, 10, INT_MAX},
+ {"max_expr_depth", PGC_USERSET, &max_expr_depth,
+ DEFAULT_MAX_EXPR_DEPTH, 10, INT_MAX},
- {"unix_socket_permissions", PGC_POSTMASTER, &Unix_socket_permissions,
- 0777, 0000, 0777},
+ {"unix_socket_permissions", PGC_POSTMASTER, &Unix_socket_permissions,
+ 0777, 0000, 0777},
- {"checkpoint_segments", PGC_SIGHUP, &CheckPointSegments,
- 3, 1, INT_MAX},
+ {"checkpoint_segments", PGC_SIGHUP, &CheckPointSegments,
+ 3, 1, INT_MAX},
- {"checkpoint_timeout", PGC_SIGHUP, &CheckPointTimeout,
- 300, 30, 3600},
+ {"checkpoint_timeout", PGC_SIGHUP, &CheckPointTimeout,
+ 300, 30, 3600},
- {"wal_buffers", PGC_POSTMASTER, &XLOGbuffers,
- 8, 4, INT_MAX},
+ {"wal_buffers", PGC_POSTMASTER, &XLOGbuffers,
+ 8, 4, INT_MAX},
- {"wal_files", PGC_SIGHUP, &XLOGfiles,
- 0, 0, 64},
+ {"wal_files", PGC_SIGHUP, &XLOGfiles,
+ 0, 0, 64},
- {"wal_debug", PGC_SUSET, &XLOG_DEBUG,
- 0, 0, 16},
+ {"wal_debug", PGC_SUSET, &XLOG_DEBUG,
+ 0, 0, 16},
- {"commit_delay", PGC_USERSET, &CommitDelay,
- 0, 0, 100000},
+ {"commit_delay", PGC_USERSET, &CommitDelay,
+ 0, 0, 100000},
- {"commit_siblings", PGC_USERSET, &CommitSiblings,
- 5, 1, 1000},
+ {"commit_siblings", PGC_USERSET, &CommitSiblings,
+ 5, 1, 1000},
{NULL, 0, NULL, 0, 0, 0}
};
static struct config_real
-ConfigureNamesReal[] =
+ ConfigureNamesReal[] =
{
- {"effective_cache_size", PGC_USERSET, &effective_cache_size,
- DEFAULT_EFFECTIVE_CACHE_SIZE, 0, DBL_MAX},
- {"random_page_cost", PGC_USERSET, &random_page_cost,
- DEFAULT_RANDOM_PAGE_COST, 0, DBL_MAX},
- {"cpu_tuple_cost", PGC_USERSET, &cpu_tuple_cost,
- DEFAULT_CPU_TUPLE_COST, 0, DBL_MAX},
- {"cpu_index_tuple_cost", PGC_USERSET, &cpu_index_tuple_cost,
- DEFAULT_CPU_INDEX_TUPLE_COST, 0, DBL_MAX},
- {"cpu_operator_cost", PGC_USERSET, &cpu_operator_cost,
- DEFAULT_CPU_OPERATOR_COST, 0, DBL_MAX},
-
- {"geqo_selection_bias", PGC_USERSET, &Geqo_selection_bias,
- DEFAULT_GEQO_SELECTION_BIAS, MIN_GEQO_SELECTION_BIAS, MAX_GEQO_SELECTION_BIAS},
+ {"effective_cache_size", PGC_USERSET, &effective_cache_size,
+ DEFAULT_EFFECTIVE_CACHE_SIZE, 0, DBL_MAX},
+ {"random_page_cost", PGC_USERSET, &random_page_cost,
+ DEFAULT_RANDOM_PAGE_COST, 0, DBL_MAX},
+ {"cpu_tuple_cost", PGC_USERSET, &cpu_tuple_cost,
+ DEFAULT_CPU_TUPLE_COST, 0, DBL_MAX},
+ {"cpu_index_tuple_cost", PGC_USERSET, &cpu_index_tuple_cost,
+ DEFAULT_CPU_INDEX_TUPLE_COST, 0, DBL_MAX},
+ {"cpu_operator_cost", PGC_USERSET, &cpu_operator_cost,
+ DEFAULT_CPU_OPERATOR_COST, 0, DBL_MAX},
+
+ {"geqo_selection_bias", PGC_USERSET, &Geqo_selection_bias,
+ DEFAULT_GEQO_SELECTION_BIAS, MIN_GEQO_SELECTION_BIAS, MAX_GEQO_SELECTION_BIAS},
{NULL, 0, NULL, 0.0, 0.0, 0.0}
};
static struct config_string
-ConfigureNamesString[] =
+ ConfigureNamesString[] =
{
- {"krb_server_keyfile", PGC_POSTMASTER, &pg_krb_server_keyfile,
- PG_KRB_SRVTAB, NULL, NULL},
+ {"krb_server_keyfile", PGC_POSTMASTER, &pg_krb_server_keyfile,
+ PG_KRB_SRVTAB, NULL, NULL},
#ifdef ENABLE_SYSLOG
- {"syslog_facility", PGC_POSTMASTER, &Syslog_facility,
+ {"syslog_facility", PGC_POSTMASTER, &Syslog_facility,
"LOCAL0", check_facility, NULL},
- {"syslog_ident", PGC_POSTMASTER, &Syslog_ident,
+ {"syslog_ident", PGC_POSTMASTER, &Syslog_ident,
"postgres", NULL, NULL},
#endif
- {"unix_socket_group", PGC_POSTMASTER, &Unix_socket_group,
- "", NULL, NULL},
+ {"unix_socket_group", PGC_POSTMASTER, &Unix_socket_group,
+ "", NULL, NULL},
- {"unix_socket_directory", PGC_POSTMASTER, &UnixSocketDir,
- "", NULL, NULL},
+ {"unix_socket_directory", PGC_POSTMASTER, &UnixSocketDir,
+ "", NULL, NULL},
- {"virtual_host", PGC_POSTMASTER, &VirtualHost,
- "", NULL, NULL},
+ {"virtual_host", PGC_POSTMASTER, &VirtualHost,
+ "", NULL, NULL},
- {"wal_sync_method", PGC_SIGHUP, &XLOG_sync_method,
- XLOG_sync_method_default,
- check_xlog_sync_method, assign_xlog_sync_method},
+ {"wal_sync_method", PGC_SIGHUP, &XLOG_sync_method,
+ XLOG_sync_method_default,
+ check_xlog_sync_method, assign_xlog_sync_method},
{NULL, 0, NULL, NULL, NULL, NULL}
};
@@ -361,41 +364,41 @@ ConfigureNamesString[] =
* the option there.
*/
static enum config_type
-find_option(const char * name, struct config_generic ** record)
+find_option(const char *name, struct config_generic ** record)
{
- int i;
+ int i;
Assert(name);
for (i = 0; ConfigureNamesBool[i].name; i++)
- if (strcasecmp(ConfigureNamesBool[i].name, name)==0)
+ if (strcasecmp(ConfigureNamesBool[i].name, name) == 0)
{
if (record)
- *record = (struct config_generic *)&ConfigureNamesBool[i];
+ *record = (struct config_generic *) & ConfigureNamesBool[i];
return PGC_BOOL;
}
for (i = 0; ConfigureNamesInt[i].name; i++)
- if (strcasecmp(ConfigureNamesInt[i].name, name)==0)
+ if (strcasecmp(ConfigureNamesInt[i].name, name) == 0)
{
if (record)
- *record = (struct config_generic *)&ConfigureNamesInt[i];
+ *record = (struct config_generic *) & ConfigureNamesInt[i];
return PGC_INT;
}
for (i = 0; ConfigureNamesReal[i].name; i++)
- if (strcasecmp(ConfigureNamesReal[i].name, name)==0)
+ if (strcasecmp(ConfigureNamesReal[i].name, name) == 0)
{
if (record)
- *record = (struct config_generic *)&ConfigureNamesReal[i];
+ *record = (struct config_generic *) & ConfigureNamesReal[i];
return PGC_REAL;
}
for (i = 0; ConfigureNamesString[i].name; i++)
- if (strcasecmp(ConfigureNamesString[i].name, name)==0)
+ if (strcasecmp(ConfigureNamesString[i].name, name) == 0)
{
if (record)
- *record = (struct config_generic *)&ConfigureNamesString[i];
+ *record = (struct config_generic *) & ConfigureNamesString[i];
return PGC_STRING;
}
@@ -411,7 +414,7 @@ find_option(const char * name, struct config_generic ** record)
void
ResetAllOptions(void)
{
- int i;
+ int i;
for (i = 0; ConfigureNamesBool[i].name; i++)
*(ConfigureNamesBool[i].variable) = ConfigureNamesBool[i].default_val;
@@ -424,7 +427,7 @@ ResetAllOptions(void)
for (i = 0; ConfigureNamesString[i].name; i++)
{
- char * str = NULL;
+ char *str = NULL;
if (ConfigureNamesString[i].default_val)
{
@@ -448,49 +451,49 @@ ResetAllOptions(void)
* there.
*/
static bool
-parse_bool(const char * value, bool * result)
+parse_bool(const char *value, bool *result)
{
- size_t len = strlen(value);
+ size_t len = strlen(value);
- if (strncasecmp(value, "true", len)==0)
+ if (strncasecmp(value, "true", len) == 0)
{
if (result)
*result = true;
}
- else if (strncasecmp(value, "false", len)==0)
+ else if (strncasecmp(value, "false", len) == 0)
{
if (result)
*result = false;
}
- else if (strncasecmp(value, "yes", len)==0)
+ else if (strncasecmp(value, "yes", len) == 0)
{
if (result)
*result = true;
}
- else if (strncasecmp(value, "no", len)==0)
+ else if (strncasecmp(value, "no", len) == 0)
{
if (result)
*result = false;
}
- else if (strcasecmp(value, "on")==0)
+ else if (strcasecmp(value, "on") == 0)
{
if (result)
*result = true;
}
- else if (strcasecmp(value, "off")==0)
+ else if (strcasecmp(value, "off") == 0)
{
if (result)
*result = false;
}
- else if (strcasecmp(value, "1")==0)
+ else if (strcasecmp(value, "1") == 0)
{
if (result)
*result = true;
}
- else if (strcasecmp(value, "0")==0)
+ else if (strcasecmp(value, "0") == 0)
{
if (result)
*result = false;
@@ -510,17 +513,17 @@ parse_bool(const char * value, bool * result)
* value there.
*/
static bool
-parse_int(const char * value, int * result)
+parse_int(const char *value, int *result)
{
- long val;
- char * endptr;
+ long val;
+ char *endptr;
errno = 0;
val = strtol(value, &endptr, 0);
if (endptr == value || *endptr != '\0' || errno == ERANGE)
return false;
if (result)
- *result = (int)val;
+ *result = (int) val;
return true;
}
@@ -528,14 +531,14 @@ parse_int(const char * value, int * result)
/*
* Try to parse value as a floating point constant in the usual
- * format. If the value parsed okay return true, else false. If
+ * format. If the value parsed okay return true, else false. If
* result is not NULL, return the semantic value there.
*/
static bool
-parse_real(const char * value, double * result)
+parse_real(const char *value, double *result)
{
- double val;
- char * endptr;
+ double val;
+ char *endptr;
errno = 0;
val = strtod(value, &endptr);
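
parse_int() and parse_real() above follow the standard strtol()/strtod() validation recipe: clear errno first, then reject the value if nothing was consumed, if characters are left over, or if a range error was reported. Wrapped in a main() for experimentation, the integer case looks like this (demo_parse_int is just a renamed copy of parse_int):

/*
 * Standalone copy of the parse_int() validation pattern: reset errno,
 * convert with strtol(), then insist that something was consumed, that
 * nothing is left over, and that no range error occurred.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
demo_parse_int(const char *value, int *result)
{
	long		val;
	char	   *endptr;

	errno = 0;
	val = strtol(value, &endptr, 0);
	if (endptr == value || *endptr != '\0' || errno == ERANGE)
		return false;
	if (result)
		*result = (int) val;
	return true;
}

int
main(void)
{
	int			v;

	printf("%d\n", demo_parse_int("0x1f", &v) ? v : -1);	/* 31: base 0 accepts hex */
	printf("%d\n", demo_parse_int("12abc", &v) ? v : -1);	/* -1: trailing junk */
	return 0;
}

Passing base 0 to strtol() is also what lets a setting such as unix_socket_permissions be written in octal (0777), as its table entry above shows.
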
@@ -569,12 +572,12 @@ parse_real(const char * value, double * result)
* See also SetConfigOption for an external interface.
*/
bool
-set_config_option(const char * name, const char * value, GucContext
+set_config_option(const char *name, const char *value, GucContext
context, bool DoIt)
{
- struct config_generic * record;
+ struct config_generic *record;
enum config_type type;
- int elevel;
+ int elevel;
elevel = (context == PGC_SIGHUP) ? DEBUG : ERROR;
@@ -587,8 +590,8 @@ set_config_option(const char * name, const char * value, GucContext
/*
* Check if the option can be set at this time. See guc.h for the
- * precise rules. Note that we don't want to throw errors if we're
- * in the SIGHUP context. In that case we just ignore the attempt.
+ * precise rules. Note that we don't want to throw errors if we're in
+ * the SIGHUP context. In that case we just ignore the attempt.
*/
if (record->context == PGC_POSTMASTER && context != PGC_POSTMASTER)
{
@@ -601,10 +604,13 @@ set_config_option(const char * name, const char * value, GucContext
context != PGC_POSTMASTER)
{
elog(ERROR, "'%s' cannot be changed now", name);
- /* Hmm, the idea of the SIGHUP context is "ought to be global,
- * but can be changed after postmaster start". But there's
- * nothing that prevents a crafty administrator from sending
- * SIGHUP signals to individual backends only. */
+
+ /*
+ * Hmm, the idea of the SIGHUP context is "ought to be global, but
+ * can be changed after postmaster start". But there's nothing
+ * that prevents a crafty administrator from sending SIGHUP
+ * signals to individual backends only.
+ */
}
else if (record->context == PGC_BACKEND && context != PGC_BACKEND
&& context != PGC_POSTMASTER)
@@ -616,140 +622,139 @@ set_config_option(const char * name, const char * value, GucContext
}
else if (record->context == PGC_SUSET && (context == PGC_USERSET
|| context == PGC_BACKEND))
- {
elog(ERROR, "permission denied");
- }
/*
* Evaluate value and set variable
*/
- switch(type)
+ switch (type)
{
case PGC_BOOL:
- {
- struct config_bool * conf = (struct config_bool *)record;
-
- if (value)
{
- bool boolval;
- if (!parse_bool(value, &boolval))
+ struct config_bool *conf = (struct config_bool *) record;
+
+ if (value)
{
- elog(elevel, "option '%s' requires a boolean value", name);
- return false;
+ bool boolval;
+
+ if (!parse_bool(value, &boolval))
+ {
+ elog(elevel, "option '%s' requires a boolean value", name);
+ return false;
+ }
+ if (DoIt)
+ *conf->variable = boolval;
}
- if (DoIt)
- *conf->variable = boolval;
+ else if (DoIt)
+ *conf->variable = conf->default_val;
+ break;
}
- else if (DoIt)
- *conf->variable = conf->default_val;
- break;
- }
case PGC_INT:
- {
- struct config_int * conf = (struct config_int *)record;
-
- if (value)
{
- int intval;
+ struct config_int *conf = (struct config_int *) record;
- if (!parse_int(value, &intval))
- {
- elog(elevel, "option '%s' expects an integer value", name);
- return false;
- }
- if (intval < conf->min || intval > conf->max)
+ if (value)
{
- elog(elevel, "option '%s' value %d is outside"
- " of permissible range [%d .. %d]",
- name, intval, conf->min, conf->max);
- return false;
+ int intval;
+
+ if (!parse_int(value, &intval))
+ {
+ elog(elevel, "option '%s' expects an integer value", name);
+ return false;
+ }
+ if (intval < conf->min || intval > conf->max)
+ {
+ elog(elevel, "option '%s' value %d is outside"
+ " of permissible range [%d .. %d]",
+ name, intval, conf->min, conf->max);
+ return false;
+ }
+ if (DoIt)
+ *conf->variable = intval;
}
- if (DoIt)
- *conf->variable = intval;
+ else if (DoIt)
+ *conf->variable = conf->default_val;
+ break;
}
- else if (DoIt)
- *conf->variable = conf->default_val;
- break;
- }
case PGC_REAL:
- {
- struct config_real * conf = (struct config_real *)record;
-
- if (value)
{
- double dval;
+ struct config_real *conf = (struct config_real *) record;
- if (!parse_real(value, &dval))
+ if (value)
{
- elog(elevel, "option '%s' expects a real number", name);
- return false;
- }
- if (dval < conf->min || dval > conf->max)
- {
- elog(elevel, "option '%s' value %g is outside"
- " of permissible range [%g .. %g]",
- name, dval, conf->min, conf->max);
- return false;
+ double dval;
+
+ if (!parse_real(value, &dval))
+ {
+ elog(elevel, "option '%s' expects a real number", name);
+ return false;
+ }
+ if (dval < conf->min || dval > conf->max)
+ {
+ elog(elevel, "option '%s' value %g is outside"
+ " of permissible range [%g .. %g]",
+ name, dval, conf->min, conf->max);
+ return false;
+ }
+ if (DoIt)
+ *conf->variable = dval;
}
- if (DoIt)
- *conf->variable = dval;
+ else if (DoIt)
+ *conf->variable = conf->default_val;
+ break;
}
- else if (DoIt)
- *conf->variable = conf->default_val;
- break;
- }
case PGC_STRING:
- {
- struct config_string * conf = (struct config_string *)record;
-
- if (value)
{
- if (conf->parse_hook && !(conf->parse_hook)(value))
+ struct config_string *conf = (struct config_string *) record;
+
+ if (value)
{
- elog(elevel, "invalid value for option '%s': '%s'", name, value);
- return false;
+ if (conf->parse_hook && !(conf->parse_hook) (value))
+ {
+ elog(elevel, "invalid value for option '%s': '%s'", name, value);
+ return false;
+ }
+ if (DoIt)
+ {
+ char *str;
+
+ str = strdup(value);
+ if (str == NULL)
+ {
+ elog(elevel, "out of memory");
+ return false;
+ }
+ if (conf->assign_hook)
+ (conf->assign_hook) (str);
+ if (*conf->variable)
+ free(*conf->variable);
+ *conf->variable = str;
+ }
}
- if (DoIt)
+ else if (DoIt)
{
- char * str;
+ char *str;
- str = strdup(value);
+ str = strdup(conf->default_val);
if (str == NULL)
{
elog(elevel, "out of memory");
return false;
}
if (conf->assign_hook)
- (conf->assign_hook)(str);
+ (conf->assign_hook) (str);
if (*conf->variable)
free(*conf->variable);
*conf->variable = str;
}
+ break;
}
- else if (DoIt)
- {
- char * str;
- str = strdup(conf->default_val);
- if (str == NULL)
- {
- elog(elevel, "out of memory");
- return false;
- }
- if (conf->assign_hook)
- (conf->assign_hook)(str);
- if (*conf->variable)
- free(*conf->variable);
- *conf->variable = str;
- }
- break;
- }
-
- default: ;
+ default:;
}
return true;
}
@@ -761,10 +766,10 @@ set_config_option(const char * name, const char * value, GucContext
* this is just the wrapper to be called from the outside.
*/
void
-SetConfigOption(const char * name, const char * value, GucContext
+SetConfigOption(const char *name, const char *value, GucContext
context)
{
- (void)set_config_option(name, value, context, true);
+ (void) set_config_option(name, value, context, true);
}
@@ -780,9 +785,9 @@ SetConfigOption(const char * name, const char * value, GucContext
* valid until the next call to configuration related functions.
*/
const char *
-GetConfigOption(const char * name)
+GetConfigOption(const char *name)
{
- struct config_generic * record;
+ struct config_generic *record;
static char buffer[256];
enum config_type opttype;
@@ -790,21 +795,21 @@ GetConfigOption(const char * name)
if (opttype == PGC_NONE)
elog(ERROR, "Option '%s' is not recognized", name);
- switch(opttype)
+ switch (opttype)
{
case PGC_BOOL:
- return *((struct config_bool *)record)->variable ? "on" : "off";
+ return *((struct config_bool *) record)->variable ? "on" : "off";
case PGC_INT:
- snprintf(buffer, 256, "%d", *((struct config_int *)record)->variable);
+ snprintf(buffer, 256, "%d", *((struct config_int *) record)->variable);
return buffer;
case PGC_REAL:
- snprintf(buffer, 256, "%g", *((struct config_real *)record)->variable);
+ snprintf(buffer, 256, "%g", *((struct config_real *) record)->variable);
return buffer;
case PGC_STRING:
- return *((struct config_string *)record)->variable;
+ return *((struct config_string *) record)->variable;
default:
;
@@ -822,10 +827,10 @@ GetConfigOption(const char * name)
* there is no '=' in the input string then value will be NULL.
*/
void
-ParseLongOption(const char * string, char ** name, char ** value)
+ParseLongOption(const char *string, char **name, char **value)
{
- size_t equal_pos;
- char *cp;
+ size_t equal_pos;
+ char *cp;
AssertArg(string);
AssertArg(name);
@@ -845,7 +850,8 @@ ParseLongOption(const char * string, char ** name, char ** value)
if (!*value)
elog(FATAL, "out of memory");
}
- else /* no equal sign in string */
+ else
+/* no equal sign in string */
{
*name = strdup(string);
if (!*name)
@@ -853,7 +859,7 @@ ParseLongOption(const char * string, char ** name, char ** value)
*value = NULL;
}
- for(cp = *name; *cp; cp++)
+ for (cp = *name; *cp; cp++)
if (*cp == '-')
*cp = '_';
}
@@ -864,14 +870,23 @@ ParseLongOption(const char * string, char ** name, char ** value)
static bool
check_facility(const char *facility)
{
- if (strcasecmp(facility,"LOCAL0") == 0) return true;
- if (strcasecmp(facility,"LOCAL1") == 0) return true;
- if (strcasecmp(facility,"LOCAL2") == 0) return true;
- if (strcasecmp(facility,"LOCAL3") == 0) return true;
- if (strcasecmp(facility,"LOCAL4") == 0) return true;
- if (strcasecmp(facility,"LOCAL5") == 0) return true;
- if (strcasecmp(facility,"LOCAL6") == 0) return true;
- if (strcasecmp(facility,"LOCAL7") == 0) return true;
+ if (strcasecmp(facility, "LOCAL0") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL1") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL2") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL3") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL4") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL5") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL6") == 0)
+ return true;
+ if (strcasecmp(facility, "LOCAL7") == 0)
+ return true;
return false;
}
+
#endif
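
Most of the guc.c diff is the reindented option tables plus set_config_option(), which locates the entry by name via find_option(), checks the GucContext rules, parses the string according to the entry's type, and range-checks numeric values before assigning through the stored pointer. A much-reduced sketch of that table-driven dispatch, with two invented options standing in for the real ConfigureNames* arrays and none of the context or hook handling:

/*
 * Miniature sketch of the guc.c machinery: options live in static tables
 * keyed by name, a lookup finds the entry, and the setter parses the
 * string according to the entry's type.  The two options and the
 * variables behind them are made up for the demo.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static bool demo_enable_seqscan = true;
static int	demo_sort_mem = 512;

struct demo_bool_opt
{
	const char *name;
	bool	   *variable;
};

struct demo_int_opt
{
	const char *name;
	int		   *variable;
	int			min;
	int			max;
};

static struct demo_bool_opt bool_opts[] = {
	{"enable_seqscan", &demo_enable_seqscan},
	{NULL, NULL}
};

static struct demo_int_opt int_opts[] = {
	{"sort_mem", &demo_sort_mem, 1, 1 << 30},
	{NULL, NULL, 0, 0}
};

static bool
demo_set_option(const char *name, const char *value)
{
	int			i;

	for (i = 0; bool_opts[i].name; i++)
		if (strcasecmp(bool_opts[i].name, name) == 0)
		{
			*bool_opts[i].variable = (strcasecmp(value, "on") == 0 ||
									  strcasecmp(value, "true") == 0 ||
									  strcmp(value, "1") == 0);
			return true;
		}
	for (i = 0; int_opts[i].name; i++)
		if (strcasecmp(int_opts[i].name, name) == 0)
		{
			long		v = strtol(value, NULL, 0);

			if (v < int_opts[i].min || v > int_opts[i].max)
				return false;	/* outside permissible range */
			*int_opts[i].variable = (int) v;
			return true;
		}
	return false;				/* option not recognized */
}

int
main(void)
{
	demo_set_option("sort_mem", "1024");
	demo_set_option("enable_seqscan", "off");
	printf("sort_mem=%d enable_seqscan=%s\n",
		   demo_sort_mem, demo_enable_seqscan ? "on" : "off");
	return 0;
}

The real set_config_option() additionally takes a DoIt flag, so the same routine can validate a value (for example while reading the configuration file) without actually assigning it.
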
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index a977ee11ed4..4eb49fbeb64 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Differs wildly across
* platforms.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/misc/ps_status.c,v 1.3 2001/03/20 22:31:54 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/misc/ps_status.c,v 1.4 2001/03/22 04:00:06 momjian Exp $
*
* Copyright 2000 by PostgreSQL Global Development Group
* various details abducted from various places
@@ -18,11 +18,11 @@
#ifdef HAVE_SYS_PSTAT_H
-# include <sys/pstat.h> /* for HP-UX */
+#include <sys/pstat.h> /* for HP-UX */
#endif
#ifdef HAVE_PS_STRINGS
-# include <machine/vmparam.h> /* for old BSD */
-# include <sys/exec.h>
+#include <machine/vmparam.h> /* for old BSD */
+#include <sys/exec.h>
#endif
#include "miscadmin.h"
@@ -36,44 +36,44 @@ extern char **environ;
* Alternative ways of updating ps display:
*
* PS_USE_SETPROCTITLE
- * use the function setproctitle(const char *, ...)
- * (newer BSD systems)
+ * use the function setproctitle(const char *, ...)
+ * (newer BSD systems)
* PS_USE_PSTAT
- * use the pstat(PSTAT_SETCMD, )
- * (HPUX)
+ * use the pstat(PSTAT_SETCMD, )
+ * (HPUX)
* PS_USE_PS_STRINGS
- * assign PS_STRINGS->ps_argvstr = "string"
- * (some BSD systems)
+ * assign PS_STRINGS->ps_argvstr = "string"
+ * (some BSD systems)
* PS_USE_CHANGE_ARGV
- * assign argv[0] = "string"
- * (some other BSD systems)
+ * assign argv[0] = "string"
+ * (some other BSD systems)
* PS_USE_CLOBBER_ARGV
- * write over the argv and environment area
- * (most SysV-like systems)
+ * write over the argv and environment area
+ * (most SysV-like systems)
* PS_USE_NONE
- * don't update ps display
- * (This is the default, as it is safest.)
+ * don't update ps display
+ * (This is the default, as it is safest.)
*/
#if defined(HAVE_SETPROCTITLE)
-# define PS_USE_SETPROCTITLE
+#define PS_USE_SETPROCTITLE
#elif defined(HAVE_PSTAT) && defined(PSTAT_SETCMD)
-# define PS_USE_PSTAT
+#define PS_USE_PSTAT
#elif defined(HAVE_PS_STRINGS)
-# define PS_USE_PS_STRINGS
+#define PS_USE_PS_STRINGS
#elif defined(BSD) || defined(__bsdi__) || defined(__hurd__)
-# define PS_USE_CHANGE_ARGV
+#define PS_USE_CHANGE_ARGV
#elif defined(__linux__) || defined(_AIX4) || defined(_AIX3) || defined(__sgi) || (defined(sun) && !defined(BSD)) || defined(ultrix) || defined(__ksr__) || defined(__osf__) || defined(__QNX__) || defined(__svr4__) || defined(__svr5__)
-# define PS_USE_CLOBBER_ARGV
+#define PS_USE_CLOBBER_ARGV
#else
-# define PS_USE_NONE
+#define PS_USE_NONE
#endif
/* Different systems want the buffer padded differently */
#if defined(_AIX3) || defined(__linux__) || defined(__QNX__) || defined(__svr4__)
-# define PS_PADDING '\0'
+#define PS_PADDING '\0'
#else
-# define PS_PADDING ' '
+#define PS_PADDING ' '
#endif
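
The hunk above selects one of several ps-display strategies per platform; the hunks below reindent init_ps_display() and set_ps_display(), which share a simple scheme: write a fixed user/db/host prefix once, remember its length, and on each activity change rewrite only the suffix, padding the rest of the buffer with PS_PADDING in the argv-clobbering case. A sketch of that buffer handling which just prints the result instead of calling setproctitle() or overwriting argv:

/*
 * Sketch of the fixed-prefix / variable-suffix scheme in
 * init_ps_display()/set_ps_display().  Instead of clobbering argv or
 * calling setproctitle(), this demo prints the buffer so the padding
 * is visible.
 */
#include <stdio.h>
#include <string.h>

#define PS_BUFFER_SIZE 64
#define PS_PADDING ' '

static char ps_buffer[PS_BUFFER_SIZE];
static size_t ps_buffer_fixed_size;

static void
demo_init_ps_display(const char *username, const char *dbname,
					 const char *host_info)
{
	snprintf(ps_buffer, sizeof(ps_buffer), "postgres: %s %s %s ",
			 username, dbname, host_info);
	ps_buffer_fixed_size = strlen(ps_buffer);
}

static void
demo_set_ps_display(const char *activity)
{
	size_t		i;

	snprintf(ps_buffer + ps_buffer_fixed_size,
			 sizeof(ps_buffer) - ps_buffer_fixed_size, "%s", activity);
	/* pad the unused tail, as the PS_USE_CLOBBER_ARGV case does */
	for (i = strlen(ps_buffer); i < sizeof(ps_buffer) - 1; i++)
		ps_buffer[i] = PS_PADDING;
	ps_buffer[sizeof(ps_buffer) - 1] = '\0';
	printf("[%s]\n", ps_buffer);
}

int
main(void)
{
	demo_init_ps_display("alice", "testdb", "localhost");
	demo_set_ps_display("idle");
	demo_set_ps_display("SELECT");
	return 0;
}
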
@@ -83,12 +83,13 @@ extern char **environ;
static char ps_buffer[PS_BUFFER_SIZE];
static const size_t ps_buffer_size = PS_BUFFER_SIZE;
-#else /* PS_USE_CLOBBER_ARGV */
-static char * ps_buffer; /* will point to argv area */
-static size_t ps_buffer_size; /* space determined at run time */
-#endif /* PS_USE_CLOBBER_ARGV */
+#else /* PS_USE_CLOBBER_ARGV */
+static char *ps_buffer; /* will point to argv area */
+static size_t ps_buffer_size; /* space determined at run time */
-static size_t ps_buffer_fixed_size; /* size of the constant prefix */
+#endif /* PS_USE_CLOBBER_ARGV */
+
+static size_t ps_buffer_fixed_size; /* size of the constant prefix */
@@ -97,30 +98,31 @@ static size_t ps_buffer_fixed_size; /* size of the constant prefix */
*/
void
init_ps_display(int argc, char *argv[],
- const char * username, const char * dbname,
- const char * host_info)
+ const char *username, const char *dbname,
+ const char *host_info)
{
#ifndef PS_USE_NONE
- Assert(username);
- Assert(dbname);
+ Assert(username);
+ Assert(dbname);
/* no ps display for stand-alone backend */
- if (!IsUnderPostmaster)
- return;
+ if (!IsUnderPostmaster)
+ return;
-# ifdef PS_USE_CHANGE_ARGV
+#ifdef PS_USE_CHANGE_ARGV
argv[0] = ps_buffer;
argv[1] = NULL;
-# endif /* PS_USE_CHANGE_ARGV */
+#endif /* PS_USE_CHANGE_ARGV */
-# ifdef PS_USE_CLOBBER_ARGV
- /*
- * If we're going to overwrite the argv area, count the space.
- */
+#ifdef PS_USE_CLOBBER_ARGV
+
+ /*
+ * If we're going to overwrite the argv area, count the space.
+ */
{
- char * end_of_area = NULL;
- char **new_environ;
- int i;
+ char *end_of_area = NULL;
+ char **new_environ;
+ int i;
/*
* check for contiguous argv strings
@@ -153,31 +155,34 @@ init_ps_display(int argc, char *argv[],
*/
for (i = 0; environ[i] != NULL; i++)
;
- new_environ = malloc(sizeof (char *) * (i + 1));
+ new_environ = malloc(sizeof(char *) * (i + 1));
for (i = 0; environ[i] != NULL; i++)
- new_environ[i] = strdup(environ[i]);
+ new_environ[i] = strdup(environ[i]);
new_environ[i] = NULL;
environ = new_environ;
}
-# endif /* PS_USE_CLOBBER_ARGV */
+#endif /* PS_USE_CLOBBER_ARGV */
/*
* Make fixed prefix
*/
-# ifdef PS_USE_SETPROCTITLE
- /* apparently setproctitle() already adds a `progname:' prefix to
- * the ps line */
+#ifdef PS_USE_SETPROCTITLE
+
+ /*
+ * apparently setproctitle() already adds a `progname:' prefix to the
+ * ps line
+ */
snprintf(ps_buffer, ps_buffer_size,
"%s %s %s ",
username, dbname, host_info);
-# else
+#else
snprintf(ps_buffer, ps_buffer_size,
"postgres: %s %s %s ",
username, dbname, host_info);
-# endif
+#endif
- ps_buffer_fixed_size = strlen(ps_buffer);
-#endif /* not PS_USE_NONE */
+ ps_buffer_fixed_size = strlen(ps_buffer);
+#endif /* not PS_USE_NONE */
}
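
A self-contained sketch of the environ-cloning step shown in the hunk above, with error checks added for the example (the hunk itself does not check malloc failure): before the argv/environ area is overwritten, deep-copy environ so that later getenv()/putenv() calls keep working.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	extern char **environ;

	static int
	clone_environ(void)
	{
		char	  **new_environ;
		int			i;

		/* count existing entries */
		for (i = 0; environ[i] != NULL; i++)
			;

		/* one extra slot for the terminating NULL pointer */
		new_environ = malloc(sizeof(char *) * (i + 1));
		if (new_environ == NULL)
			return -1;

		for (i = 0; environ[i] != NULL; i++)
		{
			new_environ[i] = strdup(environ[i]);
			if (new_environ[i] == NULL)
				return -1;		/* toy version: no cleanup on failure */
		}
		new_environ[i] = NULL;

		environ = new_environ;	/* the old area can now be clobbered safely */
		return 0;
	}

	int
	main(void)
	{
		if (clone_environ() == 0)
			printf("PATH still readable: %s\n", getenv("PATH") ? "yes" : "no");
		return 0;
	}
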
@@ -187,18 +192,18 @@ init_ps_display(int argc, char *argv[],
* indication of what you're currently doing passed in the argument.
*/
void
-set_ps_display(const char * value)
+set_ps_display(const char *value)
{
#ifndef PS_USE_NONE
/* no ps display for stand-alone backend */
if (!IsUnderPostmaster)
return;
-# ifdef PS_USE_CLOBBER_ARGV
+#ifdef PS_USE_CLOBBER_ARGV
/* If ps_buffer is a pointer, it might still be null */
if (!ps_buffer)
return;
-# endif
+#endif
/* Update ps_buffer to contain both fixed part and value */
StrNCpy(ps_buffer + ps_buffer_fixed_size, value,
@@ -206,42 +211,43 @@ set_ps_display(const char * value)
/* Transmit new setting to kernel, if necessary */
-# ifdef PS_USE_SETPROCTITLE
+#ifdef PS_USE_SETPROCTITLE
setproctitle("%s", ps_buffer);
-# endif
+#endif
-# ifdef PS_USE_PSTAT
- {
- union pstun pst;
+#ifdef PS_USE_PSTAT
+ {
+ union pstun pst;
- pst.pst_command = ps_buffer;
- pstat(PSTAT_SETCMD, pst, strlen(ps_buffer), 0, 0);
- }
-# endif /* PS_USE_PSTAT */
+ pst.pst_command = ps_buffer;
+ pstat(PSTAT_SETCMD, pst, strlen(ps_buffer), 0, 0);
+ }
+#endif /* PS_USE_PSTAT */
-# ifdef PS_USE_PS_STRINGS
- PS_STRINGS->ps_nargvstr = 1;
- PS_STRINGS->ps_argvstr = ps_buffer;
-# endif /* PS_USE_PS_STRINGS */
+#ifdef PS_USE_PS_STRINGS
+ PS_STRINGS->ps_nargvstr = 1;
+ PS_STRINGS->ps_argvstr = ps_buffer;
+#endif /* PS_USE_PS_STRINGS */
-# ifdef PS_USE_CLOBBER_ARGV
- {
- char * cp;
- /* pad unused memory */
- for(cp = ps_buffer + strlen(ps_buffer);
- cp < ps_buffer + ps_buffer_size;
- cp++)
- *cp = PS_PADDING;
- }
-# endif /* PS_USE_CLOBBER_ARGV */
+#ifdef PS_USE_CLOBBER_ARGV
+ {
+ char *cp;
+
+ /* pad unused memory */
+ for (cp = ps_buffer + strlen(ps_buffer);
+ cp < ps_buffer + ps_buffer_size;
+ cp++)
+ *cp = PS_PADDING;
+ }
+#endif /* PS_USE_CLOBBER_ARGV */
-#endif /* not PS_USE_NONE */
+#endif /* not PS_USE_NONE */
}
/*
* Returns what's currently in the ps display, in case someone needs
- * it. Note that only the variable part is returned.
+ * it. Note that only the variable part is returned.
*/
const char *
get_ps_display(void)
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 666d8fd83f2..f66e021ad51 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/aset.c,v 1.40 2001/03/19 22:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/aset.c,v 1.41 2001/03/22 04:00:07 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
* the number of freelists to change the small/large boundary.
*
*
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect. To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
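
A rough standalone illustration of the MEMORY_CONTEXT_CHECKING sentinel described above; the chunk layout and names below are invented for the example and are not aset.c's real structures. When a request is rounded up to a larger chunk, a 0x7E byte is planted just past the requested size and verified at free time.

	#include <stdio.h>
	#include <stdlib.h>

	typedef struct ToyChunk
	{
		size_t		chunk_size;		/* rounded-up usable size */
		size_t		requested_size; /* what the caller asked for */
	} ToyChunk;

	static void *
	toy_alloc(size_t request)
	{
		size_t		chunk_size = 8;
		ToyChunk   *chunk;
		char	   *data;

		while (chunk_size < request)	/* round up to a power of 2 */
			chunk_size <<= 1;

		chunk = malloc(sizeof(ToyChunk) + chunk_size);
		chunk->chunk_size = chunk_size;
		chunk->requested_size = request;
		data = (char *) (chunk + 1);
		if (request < chunk_size)
			data[request] = 0x7E;		/* sentinel in the wasted space */
		return data;
	}

	static void
	toy_free(void *pointer)
	{
		ToyChunk   *chunk = ((ToyChunk *) pointer) - 1;
		char	   *data = (char *) pointer;

		if (chunk->requested_size < chunk->chunk_size &&
			data[chunk->requested_size] != 0x7E)
			fprintf(stderr, "detected write past chunk end\n");
		free(chunk);
	}

	int
	main(void)
	{
		char	   *p = toy_alloc(5);

		p[5] = 'X';					/* deliberate overrun into the padding */
		toy_free(p);				/* reports the clobbered sentinel */
		return 0;
	}
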
@@ -112,7 +112,7 @@
#define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ MAXALIGN(sizeof(AllocChunkData))
-typedef struct AllocBlockData *AllocBlock; /* forward reference */
+typedef struct AllocBlockData *AllocBlock; /* forward reference */
typedef struct AllocChunkData *AllocChunk;
/*
@@ -126,15 +126,15 @@ typedef void *AllocPointer;
*/
typedef struct AllocSetContext
{
- MemoryContextData header; /* Standard memory-context fields */
+ MemoryContextData header; /* Standard memory-context fields */
/* Info about storage allocated in this context: */
- AllocBlock blocks; /* head of list of blocks in this set */
- AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
+ AllocBlock blocks; /* head of list of blocks in this set */
+ AllocChunk freelist[ALLOCSET_NUM_FREELISTS]; /* free chunk lists */
/* Allocation parameters for this context: */
- Size initBlockSize; /* initial block size */
- Size maxBlockSize; /* maximum block size */
- AllocBlock keeper; /* if not NULL, keep this block
- * over resets */
+ Size initBlockSize; /* initial block size */
+ Size maxBlockSize; /* maximum block size */
+ AllocBlock keeper; /* if not NULL, keep this block over
+ * resets */
} AllocSetContext;
typedef AllocSetContext *AllocSet;
@@ -204,8 +204,10 @@ static void *AllocSetRealloc(MemoryContext context, void *pointer, Size size);
static void AllocSetInit(MemoryContext context);
static void AllocSetReset(MemoryContext context);
static void AllocSetDelete(MemoryContext context);
+
#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
+
#endif
static void AllocSetStats(MemoryContext context);
@@ -240,7 +242,7 @@ static MemoryContextMethods AllocSetMethods = {
#else
#define AllocFreeInfo(_cxt, _chunk)
#define AllocAllocInfo(_cxt, _chunk)
-#endif
+#endif
/* ----------
* AllocSetFreeIndex -
@@ -300,6 +302,7 @@ AllocSetContextCreate(MemoryContext parent,
&AllocSetMethods,
parent,
name);
+
/*
* Make sure alloc parameters are reasonable, and save them.
*
@@ -356,9 +359,10 @@ AllocSetContextCreate(MemoryContext parent,
static void
AllocSetInit(MemoryContext context)
{
+
/*
- * Since MemoryContextCreate already zeroed the context node,
- * we don't have to do anything here: it's already OK.
+ * Since MemoryContextCreate already zeroed the context node, we don't
+ * have to do anything here: it's already OK.
*/
}
@@ -397,7 +401,7 @@ AllocSetReset(MemoryContext context)
if (block == set->keeper)
{
/* Reset the block, but don't return it to malloc */
- char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
+ char *datastart = ((char *) block) + ALLOC_BLOCKHDRSZ;
#ifdef CLOBBER_FREED_MEMORY
/* Wipe freed memory for debugging purposes */
@@ -505,8 +509,8 @@ AllocSetAlloc(MemoryContext context, Size size)
#endif
/*
- * Stick the new block underneath the active allocation block,
- * so that we don't lose the use of the space remaining therein.
+ * Stick the new block underneath the active allocation block, so
+ * that we don't lose the use of the space remaining therein.
*/
if (set->blocks != NULL)
{
@@ -518,7 +522,7 @@ AllocSetAlloc(MemoryContext context, Size size)
block->next = NULL;
set->blocks = block;
}
-
+
AllocAllocInfo(set, chunk);
return AllocChunkGetPointer(chunk);
}
@@ -568,8 +572,8 @@ AllocSetAlloc(MemoryContext context, Size size)
Assert(chunk_size >= size);
/*
- * If there is enough room in the active allocation block,
- * we will put the chunk into that block. Else must start a new one.
+ * If there is enough room in the active allocation block, we will put
+ * the chunk into that block. Else must start a new one.
*/
if ((block = set->blocks) != NULL)
{
@@ -577,6 +581,7 @@ AllocSetAlloc(MemoryContext context, Size size)
if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
{
+
/*
* The existing active (top) block does not have enough room
* for the requested allocation, but it might still have a
@@ -591,17 +596,18 @@ AllocSetAlloc(MemoryContext context, Size size)
*/
while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
{
- Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
- int a_fidx = AllocSetFreeIndex(availchunk);
+ Size availchunk = availspace - ALLOC_CHUNKHDRSZ;
+ int a_fidx = AllocSetFreeIndex(availchunk);
/*
- * In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
- * exception is when availchunk is exactly a power of 2.
+ * In most cases, we'll get back the index of the next
+ * larger freelist than the one we need to put this chunk
+ * on. The exception is when availchunk is exactly a
+ * power of 2.
*/
if (availchunk != (1 << (a_fidx + ALLOC_MINBITS)))
{
- a_fidx--;
+ a_fidx--;
Assert(a_fidx >= 0);
availchunk = (1 << (a_fidx + ALLOC_MINBITS));
}
@@ -613,7 +619,7 @@ AllocSetAlloc(MemoryContext context, Size size)
chunk->size = availchunk;
#ifdef MEMORY_CONTEXT_CHECKING
- chunk->requested_size = 0; /* mark it free */
+ chunk->requested_size = 0; /* mark it free */
#endif
chunk->aset = (void *) set->freelist[a_fidx];
set->freelist[a_fidx] = chunk;
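
The adjustment in the hunk above can be seen with a simplified stand-in for AllocSetFreeIndex() (MIN_BITS and free_index() below are assumptions for the example, not the real aset.c code): the index normally names the next power-of-2 freelist that covers a size, so a leftover space that is not exactly a power of 2 must drop one list to yield a chunk that actually fits.

	#include <stdio.h>
	#include <stddef.h>

	#define MIN_BITS 3				/* smallest chunk is 1 << 3 = 8 bytes */

	static int
	free_index(size_t size)
	{
		int			idx = 0;

		if (size > (1 << MIN_BITS))
		{
			size = (size - 1) >> MIN_BITS;
			while (size != 0)
			{
				idx++;
				size >>= 1;
			}
		}
		return idx;
	}

	int
	main(void)
	{
		size_t		leftovers[] = {64, 72};
		int			i;

		for (i = 0; i < 2; i++)
		{
			size_t		availchunk = leftovers[i];
			int			a_fidx = free_index(availchunk);

			/* exactly-a-power-of-2 leftovers already match their list */
			if (availchunk != ((size_t) 1 << (a_fidx + MIN_BITS)))
			{
				a_fidx--;
				availchunk = (size_t) 1 << (a_fidx + MIN_BITS);
			}
			printf("leftover %lu -> carve a %lu-byte chunk for freelist %d\n",
				   (unsigned long) leftovers[i], (unsigned long) availchunk,
				   a_fidx);
		}
		return 0;
	}
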
@@ -629,7 +635,7 @@ AllocSetAlloc(MemoryContext context, Size size)
*/
if (block == NULL)
{
- Size required_size;
+ Size required_size;
if (set->blocks == NULL)
{
@@ -687,7 +693,7 @@ AllocSetAlloc(MemoryContext context, Size size)
elog(ERROR, "Memory exhausted in AllocSetAlloc(%lu)",
(unsigned long) size);
}
-
+
block->aset = set;
block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
block->endptr = ((char *) block) + blksize;
@@ -711,7 +717,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/* set mark to catch clobber of "unused" space */
if (size < chunk->size)
((char *) AllocChunkGetPointer(chunk))[size] = 0x7E;
-#endif
+#endif
AllocAllocInfo(set, chunk);
return AllocChunkGetPointer(chunk);
@@ -735,10 +741,11 @@ AllocSetFree(MemoryContext context, void *pointer)
if (((char *) pointer)[chunk->requested_size] != 0x7E)
elog(NOTICE, "AllocSetFree: detected write past chunk end in %s %p",
set->header.name, chunk);
-#endif
+#endif
if (chunk->size > ALLOC_CHUNK_LIMIT)
{
+
/*
* Big chunks are certain to have been allocated as single-chunk
* blocks. Find the containing block and return it to malloc().
@@ -786,7 +793,7 @@ AllocSetFree(MemoryContext context, void *pointer)
#ifdef MEMORY_CONTEXT_CHECKING
/* Reset requested_size to 0 in chunks that are on freelist */
chunk->requested_size = 0;
-#endif
+#endif
set->freelist[fidx] = chunk;
}
}
@@ -804,7 +811,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
AllocChunk chunk = AllocPointerGetChunk(pointer);
Size oldsize = chunk->size;
-#ifdef MEMORY_CONTEXT_CHECKING
+#ifdef MEMORY_CONTEXT_CHECKING
/* Test for someone scribbling on unused space in chunk */
if (chunk->requested_size < oldsize)
if (((char *) pointer)[chunk->requested_size] != 0x7E)
@@ -819,7 +826,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
*/
if (oldsize >= size)
{
-#ifdef MEMORY_CONTEXT_CHECKING
+#ifdef MEMORY_CONTEXT_CHECKING
chunk->requested_size = size;
/* set mark to catch clobber of "unused" space */
if (size < oldsize)
@@ -830,10 +837,11 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > ALLOC_CHUNK_LIMIT)
{
+
/*
- * The chunk must been allocated as a single-chunk block. Find the
- * containing block and use realloc() to make it bigger with minimum
- * space wastage.
+ * The chunk must have been allocated as a single-chunk block. Find
+ * the containing block and use realloc() to make it bigger with
+ * minimum space wastage.
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -873,7 +881,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
else
prevblock->next = block;
chunk->size = chksize;
-
+
#ifdef MEMORY_CONTEXT_CHECKING
chunk->requested_size = size;
/* set mark to catch clobber of "unused" space */
@@ -885,14 +893,15 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
}
else
{
+
/*
* Small-chunk case. If the chunk is the last one in its block,
* there might be enough free space after it that we can just
- * enlarge the chunk in-place. It's relatively painful to find
+ * enlarge the chunk in-place. It's relatively painful to find
* the containing block in the general case, but we can detect
- * last-ness quite cheaply for the typical case where the chunk
- * is in the active (topmost) allocation block. (At least with
- * the regression tests and code as of 1/2001, realloc'ing the last
+ * last-ness quite cheaply for the typical case where the chunk is
+ * in the active (topmost) allocation block. (At least with the
+ * regression tests and code as of 1/2001, realloc'ing the last
* chunk of a non-topmost block hardly ever happens, so it's not
* worth scanning the block list to catch that case.)
*
@@ -908,12 +917,12 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
chunk_end = (char *) chunk + (oldsize + ALLOC_CHUNKHDRSZ);
if (chunk_end == block->freeptr)
- {
+ {
/* OK, it's last in block ... is there room? */
- Size freespace = block->endptr - block->freeptr;
- int fidx;
- Size newsize;
- Size delta;
+ Size freespace = block->endptr - block->freeptr;
+ int fidx;
+ Size newsize;
+ Size delta;
fidx = AllocSetFreeIndex(size);
newsize = 1 << (fidx + ALLOC_MINBITS);
@@ -924,7 +933,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
/* Yes, so just enlarge the chunk. */
block->freeptr += delta;
chunk->size += delta;
-#ifdef MEMORY_CONTEXT_CHECKING
+#ifdef MEMORY_CONTEXT_CHECKING
chunk->requested_size = size;
/* set mark to catch clobber of "unused" space */
if (size < chunk->size)
@@ -982,7 +991,7 @@ AllocSetStats(MemoryContext context)
}
}
fprintf(stderr,
- "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
+ "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
set->header.name, totalspace, nblocks, freespace, nchunks,
totalspace - freespace);
}
@@ -990,7 +999,7 @@ AllocSetStats(MemoryContext context)
#ifdef MEMORY_CONTEXT_CHECKING
-/*
+/*
* AllocSetCheck
* Walk through chunks and check consistency of memory.
*
@@ -998,19 +1007,19 @@ AllocSetStats(MemoryContext context)
* find yourself in an infinite loop when trouble occurs, because this
* routine will be entered again when elog cleanup tries to release memory!
*/
-static void
+static void
AllocSetCheck(MemoryContext context)
{
- AllocSet set = (AllocSet) context;
- char *name = set->header.name;
+ AllocSet set = (AllocSet) context;
+ char *name = set->header.name;
AllocBlock block;
for (block = set->blocks; block != NULL; block = block->next)
- {
- char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
- long blk_used = block->freeptr - bpoz;
- long blk_data = 0;
- long nchunks = 0;
+ {
+ char *bpoz = ((char *) block) + ALLOC_BLOCKHDRSZ;
+ long blk_used = block->freeptr - bpoz;
+ long blk_data = 0;
+ long nchunks = 0;
/*
* Empty block - empty can be keeper-block only
@@ -1018,44 +1027,44 @@ AllocSetCheck(MemoryContext context)
if (!blk_used)
{
if (set->keeper != block)
- elog(NOTICE, "AllocSetCheck: %s: empty block %p",
+ elog(NOTICE, "AllocSetCheck: %s: empty block %p",
name, block);
- }
-
+ }
+
/*
* Chunk walker
- */
+ */
while (bpoz < block->freeptr)
{
AllocChunk chunk = (AllocChunk) bpoz;
Size chsize,
dsize;
char *chdata_end;
-
- chsize = chunk->size; /* aligned chunk size */
- dsize = chunk->requested_size; /* real data */
+
+ chsize = chunk->size; /* aligned chunk size */
+ dsize = chunk->requested_size; /* real data */
chdata_end = ((char *) chunk) + (ALLOC_CHUNKHDRSZ + dsize);
-
+
/*
* Check chunk size
*/
if (dsize > chsize)
elog(NOTICE, "AllocSetCheck: %s: req size > alloc size for chunk %p in block %p",
- name, chunk, block);
+ name, chunk, block);
if (chsize < (1 << ALLOC_MINBITS))
elog(NOTICE, "AllocSetCheck: %s: bad size %lu for chunk %p in block %p",
name, (unsigned long) chsize, chunk, block);
-
+
/* single-chunk block? */
if (chsize > ALLOC_CHUNK_LIMIT &&
- chsize + ALLOC_CHUNKHDRSZ != blk_used)
+ chsize + ALLOC_CHUNKHDRSZ != blk_used)
elog(NOTICE, "AllocSetCheck: %s: bad single-chunk %p in block %p",
name, chunk, block);
/*
- * If chunk is allocated, check for correct aset pointer.
- * (If it's free, the aset is the freelist pointer, which we
- * can't check as easily...)
+ * If chunk is allocated, check for correct aset pointer. (If
+ * it's free, the aset is the freelist pointer, which we can't
+ * check as easily...)
*/
if (dsize > 0 && chunk->aset != (void *) set)
elog(NOTICE, "AllocSetCheck: %s: bogus aset link in block %p, chunk %p",
@@ -1063,14 +1072,14 @@ AllocSetCheck(MemoryContext context)
/*
* Check for overwrite of "unallocated" space in chunk
- */
+ */
if (dsize > 0 && dsize < chsize && *chdata_end != 0x7E)
elog(NOTICE, "AllocSetCheck: %s: detected write past chunk end in block %p, chunk %p",
name, block, chunk);
-
+
blk_data += chsize;
nchunks++;
-
+
bpoz += ALLOC_CHUNKHDRSZ + chsize;
}
@@ -1080,4 +1089,4 @@ AllocSetCheck(MemoryContext context)
}
}
-#endif /* MEMORY_CONTEXT_CHECKING */
+#endif /* MEMORY_CONTEXT_CHECKING */
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index ab8434a0581..402e4010f76 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/mcxt.c,v 1.27 2001/02/06 01:53:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/mcxt.c,v 1.28 2001/03/22 04:00:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,9 +72,10 @@ void
MemoryContextInit(void)
{
AssertState(TopMemoryContext == NULL);
+
/*
- * Initialize TopMemoryContext as an AllocSetContext with slow
- * growth rate --- we don't really expect much to be allocated in it.
+ * Initialize TopMemoryContext as an AllocSetContext with slow growth
+ * rate --- we don't really expect much to be allocated in it.
*
* (There is special-case code in MemoryContextCreate() for this call.)
*/
@@ -83,18 +84,20 @@ MemoryContextInit(void)
8 * 1024,
8 * 1024,
8 * 1024);
+
/*
- * Not having any other place to point CurrentMemoryContext,
- * make it point to TopMemoryContext. Caller should change this soon!
+ * Not having any other place to point CurrentMemoryContext, make it
+ * point to TopMemoryContext. Caller should change this soon!
*/
CurrentMemoryContext = TopMemoryContext;
+
/*
- * Initialize ErrorContext as an AllocSetContext with slow
- * growth rate --- we don't really expect much to be allocated in it.
- * More to the point, require it to contain at least 8K at all times.
- * This is the only case where retained memory in a context is
- * *essential* --- we want to be sure ErrorContext still has some
- * memory even if we've run out elsewhere!
+ * Initialize ErrorContext as an AllocSetContext with slow growth rate
+ * --- we don't really expect much to be allocated in it. More to the
+ * point, require it to contain at least 8K at all times. This is the
+ * only case where retained memory in a context is *essential* --- we
+ * want to be sure ErrorContext still has some memory even if we've
+ * run out elsewhere!
*/
ErrorContext = AllocSetContextCreate(TopMemoryContext,
"ErrorContext",
@@ -129,14 +132,12 @@ MemoryContextReset(MemoryContext context)
void
MemoryContextResetChildren(MemoryContext context)
{
- MemoryContext child;
+ MemoryContext child;
AssertArg(MemoryContextIsValid(context));
for (child = context->firstchild; child != NULL; child = child->nextchild)
- {
MemoryContextReset(child);
- }
}
/*
@@ -146,7 +147,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children. We must also delink the
* node from its parent, if it has one.
*/
void
@@ -159,23 +160,21 @@ MemoryContextDelete(MemoryContext context)
Assert(context != CurrentMemoryContext);
MemoryContextDeleteChildren(context);
+
/*
- * We delink the context from its parent before deleting it,
- * so that if there's an error we won't have deleted/busted
- * contexts still attached to the context tree. Better a leak
- * than a crash.
+ * We delink the context from its parent before deleting it, so that
+ * if there's an error we won't have deleted/busted contexts still
+ * attached to the context tree. Better a leak than a crash.
*/
if (context->parent)
{
- MemoryContext parent = context->parent;
+ MemoryContext parent = context->parent;
if (context == parent->firstchild)
- {
parent->firstchild = context->nextchild;
- }
else
{
- MemoryContext child;
+ MemoryContext child;
for (child = parent->firstchild; child; child = child->nextchild)
{
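
A small standalone sketch of the delink-before-delete pattern in the hunk above, with invented structure and function names: the child is removed from its parent's singly linked sibling list first, so a failure during the actual teardown leaves the tree without dangling references (a leak rather than a crash, as the comment puts it).

	#include <stdio.h>

	typedef struct Node
	{
		struct Node *parent;
		struct Node *firstchild;
		struct Node *nextchild;
		const char *name;
	} Node;

	static void
	delink_from_parent(Node *node)
	{
		Node	   *parent = node->parent;

		if (parent == NULL)
			return;
		if (parent->firstchild == node)
			parent->firstchild = node->nextchild;
		else
		{
			Node	   *child;

			for (child = parent->firstchild; child; child = child->nextchild)
			{
				if (child->nextchild == node)
				{
					child->nextchild = node->nextchild;
					break;
				}
			}
		}
		node->parent = NULL;
		node->nextchild = NULL;
	}

	int
	main(void)
	{
		Node		top = {NULL, NULL, NULL, "top"};
		Node		a = {&top, NULL, NULL, "a"};
		Node		b = {&top, NULL, NULL, "b"};
		Node	   *child;

		top.firstchild = &a;
		a.nextchild = &b;

		delink_from_parent(&a);		/* now only "b" hangs off "top" */
		for (child = top.firstchild; child; child = child->nextchild)
			printf("remaining child: %s\n", child->name);
		return 0;
	}
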
@@ -200,14 +199,13 @@ void
MemoryContextDeleteChildren(MemoryContext context)
{
AssertArg(MemoryContextIsValid(context));
+
/*
- * MemoryContextDelete will delink the child from me,
- * so just iterate as long as there is a child.
+ * MemoryContextDelete will delink the child from me, so just iterate
+ * as long as there is a child.
*/
while (context->firstchild != NULL)
- {
MemoryContextDelete(context->firstchild);
- }
}
/*
@@ -237,15 +235,13 @@ MemoryContextResetAndDeleteChildren(MemoryContext context)
void
MemoryContextStats(MemoryContext context)
{
- MemoryContext child;
+ MemoryContext child;
AssertArg(MemoryContextIsValid(context));
(*context->methods->stats) (context);
for (child = context->firstchild; child != NULL; child = child->nextchild)
- {
MemoryContextStats(child);
- }
}
@@ -253,22 +249,21 @@ MemoryContextStats(MemoryContext context)
* MemoryContextCheck
* Check all chunks in the named context.
*
- * This is just a debugging utility, so it's not fancy.
+ * This is just a debugging utility, so it's not fancy.
*/
#ifdef MEMORY_CONTEXT_CHECKING
void
MemoryContextCheck(MemoryContext context)
{
- MemoryContext child;
+ MemoryContext child;
AssertArg(MemoryContextIsValid(context));
(*context->methods->check) (context);
for (child = context->firstchild; child != NULL; child = child->nextchild)
- {
MemoryContextCheck(child);
- }
}
+
#endif
/*
@@ -285,24 +280,26 @@ MemoryContextCheck(MemoryContext context)
bool
MemoryContextContains(MemoryContext context, void *pointer)
{
- StandardChunkHeader *header;
+ StandardChunkHeader *header;
/*
* Try to detect bogus pointers handed to us, poorly though we can.
- * Presumably, a pointer that isn't MAXALIGNED isn't pointing at
- * an allocated chunk.
+ * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+ * allocated chunk.
*/
if (pointer == NULL || pointer != (void *) MAXALIGN(pointer))
return false;
+
/*
* OK, it's probably safe to look at the chunk header.
*/
header = (StandardChunkHeader *)
((char *) pointer - STANDARDCHUNKHEADERSIZE);
+
/*
* If the context link doesn't match then we certainly have a
- * non-member chunk. Also check for a reasonable-looking size
- * as extra guard against being fooled by bogus pointers.
+ * non-member chunk. Also check for a reasonable-looking size as
+ * extra guard against being fooled by bogus pointers.
*/
if (header->context == context && AllocSizeIsValid(header->size))
return true;
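
The alignment heuristic used above can be reduced to a standalone check; MAX_ALIGNMENT below is an assumption standing in for MAXALIGN(), whose real value is platform-dependent. Pointers handed out by the allocator are always rounded up to the maximum alignment, so anything that is not a multiple of it cannot be the start of an allocated chunk.

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	#define MAX_ALIGNMENT 8			/* stand-in for the MAXALIGN boundary */

	static int
	could_be_chunk_pointer(const void *pointer)
	{
		if (pointer == NULL)
			return 0;
		/* not MAXALIGN'ed => certainly not a chunk start */
		return ((uintptr_t) pointer % MAX_ALIGNMENT) == 0;
	}

	int
	main(void)
	{
		char	   *p = malloc(16); /* malloc results are maximally aligned */

		printf("p     : %s\n", could_be_chunk_pointer(p) ? "maybe a chunk" : "not a chunk");
		printf("p + 1 : %s\n", could_be_chunk_pointer(p + 1) ? "maybe a chunk" : "not a chunk");
		free(p);
		return 0;
	}
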
@@ -347,7 +344,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!). However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
@@ -363,8 +360,8 @@ MemoryContextCreate(NodeTag tag, Size size,
MemoryContext parent,
const char *name)
{
- MemoryContext node;
- Size needed = size + strlen(name) + 1;
+ MemoryContext node;
+ Size needed = size + strlen(name) + 1;
/* Get space for node and name */
if (TopMemoryContext != NULL)
@@ -431,15 +428,16 @@ MemoryContextAlloc(MemoryContext context, Size size)
void
pfree(void *pointer)
{
- StandardChunkHeader *header;
+ StandardChunkHeader *header;
/*
* Try to detect bogus pointers handed to us, poorly though we can.
- * Presumably, a pointer that isn't MAXALIGNED isn't pointing at
- * an allocated chunk.
+ * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+ * allocated chunk.
*/
Assert(pointer != NULL);
Assert(pointer == (void *) MAXALIGN(pointer));
+
/*
* OK, it's probably safe to look at the chunk header.
*/
@@ -448,7 +446,7 @@ pfree(void *pointer)
AssertArg(MemoryContextIsValid(header->context));
- (*header->context->methods->free_p) (header->context, pointer);
+ (*header->context->methods->free_p) (header->context, pointer);
}
/*
@@ -458,15 +456,16 @@ pfree(void *pointer)
void *
repalloc(void *pointer, Size size)
{
- StandardChunkHeader *header;
+ StandardChunkHeader *header;
/*
* Try to detect bogus pointers handed to us, poorly though we can.
- * Presumably, a pointer that isn't MAXALIGNED isn't pointing at
- * an allocated chunk.
+ * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+ * allocated chunk.
*/
Assert(pointer != NULL);
Assert(pointer == (void *) MAXALIGN(pointer));
+
/*
* OK, it's probably safe to look at the chunk header.
*/
@@ -479,7 +478,7 @@ repalloc(void *pointer, Size size)
elog(ERROR, "repalloc: invalid request size %lu",
(unsigned long) size);
- return (*header->context->methods->realloc) (header->context,
+ return (*header->context->methods->realloc) (header->context,
pointer, size);
}
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 63d3ed363cb..a5534dc1cde 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.40 2001/02/27 22:07:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/mmgr/portalmem.c,v 1.41 2001/03/22 04:00:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -126,8 +126,8 @@ EnablePortalManager(void)
ctl.datasize = sizeof(Portal);
/*
- * use PORTALS_PER_USER, defined in utils/portal.h as a guess of
- * how many hash table entries to create, initially
+ * use PORTALS_PER_USER, defined in utils/portal.h as a guess of how
+ * many hash table entries to create, initially
*/
PortalHashTable = hash_create(PORTALS_PER_USER * 3, &ctl, HASH_ELEM);
}
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 6dbcc701290..38f4c2fc91e 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/sort/tuplesort.c,v 1.13 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/sort/tuplesort.c,v 1.14 2001/03/22 04:00:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1851,7 +1851,7 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
else
{
compare = DatumGetInt32(FunctionCall2(&entry->sk_func,
- attrDatum1, attrDatum2));
+ attrDatum1, attrDatum2));
}
if (compare != 0)
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 48a8fbb0716..7621e572978 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -11,7 +11,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller. We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we reach the
* end of the input without exceeding maxKBytes, we just return tuples during
* the read phase by scanning the tuple array sequentially. If we do exceed
@@ -26,7 +26,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/sort/tuplestore.c,v 1.2 2001/01/24 19:43:18 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/sort/tuplestore.c,v 1.3 2001/03/22 04:00:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
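
A standalone sketch of the maxKBytes behaviour summarised in the header comment above; all names and the line-per-item format below are invented for the example. Items are kept in an in-memory array and charged against a byte budget; once the budget is exhausted, everything accumulated so far is dumped to a temporary file and later items are written straight to it.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static long avail_mem = 20;			/* tiny budget, to force a spill */
	static FILE *spill_file = NULL;
	static char *mem_items[1024];
	static int	mem_count = 0;

	static void
	put_item(const char *item)
	{
		int			i;

		if (spill_file != NULL)
		{
			fprintf(spill_file, "%s\n", item);
			return;
		}

		/* stash the item in the in-memory array and charge the budget */
		mem_items[mem_count++] = strdup(item);
		avail_mem -= (long) (strlen(item) + 1);
		if (avail_mem >= 0)
			return;

		/* budget exceeded: dump everything to a temp file, switch state */
		spill_file = tmpfile();
		if (spill_file == NULL)
		{
			perror("tmpfile");
			exit(1);
		}
		for (i = 0; i < mem_count; i++)
		{
			fprintf(spill_file, "%s\n", mem_items[i]);
			free(mem_items[i]);
		}
		mem_count = 0;
	}

	int
	main(void)
	{
		put_item("first tuple");
		put_item("second tuple");
		put_item("third tuple");
		printf("spilled to file: %s\n", spill_file ? "yes" : "no");
		return 0;
	}
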
@@ -38,7 +38,7 @@
#include "utils/tuplestore.h"
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object. These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -66,12 +66,12 @@ struct Tuplestorestate
* know it. They are set up by the tuplestore_begin_xxx routines.
*
* (Although tuplestore.c currently only supports heap tuples, I've
- * copied this part of tuplesort.c so that extension to other kinds
- * of objects will be easy if it's ever needed.)
+ * copied this part of tuplesort.c so that extension to other kinds of
+ * objects will be easy if it's ever needed.)
*
- * Function to copy a supplied input tuple into palloc'd space. (NB:
- * we assume that a single pfree() is enough to release the tuple
- * later, so the representation must be "flat" in one palloc chunk.)
+ * Function to copy a supplied input tuple into palloc'd space. (NB: we
+ * assume that a single pfree() is enough to release the tuple later,
+ * so the representation must be "flat" in one palloc chunk.)
* state->availMem must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplestorestate *state, void *tup);
@@ -95,7 +95,8 @@ struct Tuplestorestate
/*
* This array holds pointers to tuples in memory if we are in state
- * INITIAL or READMEM. In states WRITEFILE and READFILE it's not used.
+ * INITIAL or READMEM. In states WRITEFILE and READFILE it's not
+ * used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
@@ -116,7 +117,7 @@ struct Tuplestorestate
};
#define COPYTUP(state,tup) ((*(state)->copytup) (state, tup))
-#define WRITETUP(state,tup) ((*(state)->writetup) (state, tup))
+#define WRITETUP(state,tup) ((*(state)->writetup) (state, tup))
#define READTUP(state,len) ((*(state)->readtup) (state, len))
#define LACKMEM(state) ((state)->availMem < 0)
#define USEMEM(state,amt) ((state)->availMem -= (amt))
@@ -145,7 +146,7 @@ struct Tuplestorestate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplestorestate record, if needed. They are also expected
+ * stored in the Tuplestorestate record, if needed. They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should elog() on failure.
@@ -183,7 +184,7 @@ struct Tuplestorestate
static Tuplestorestate *tuplestore_begin_common(bool randomAccess,
- int maxKBytes);
+ int maxKBytes);
static void dumptuples(Tuplestorestate *state);
static unsigned int getlen(Tuplestorestate *state, bool eofOK);
static void markrunend(Tuplestorestate *state);
@@ -222,7 +223,7 @@ tuplestore_begin_common(bool randomAccess, int maxKBytes)
state->memtupcount = 0;
if (maxKBytes > 0)
- state->memtupsize = 1024; /* initial guess */
+ state->memtupsize = 1024; /* initial guess */
else
state->memtupsize = 1; /* won't really need any space */
state->memtuples = (void **) palloc(state->memtupsize * sizeof(void *));
@@ -270,14 +271,16 @@ tuplestore_end(Tuplestorestate *state)
void
tuplestore_puttuple(Tuplestorestate *state, void *tuple)
{
+
/*
- * Copy the tuple. (Must do this even in WRITEFILE case.)
+ * Copy the tuple. (Must do this even in WRITEFILE case.)
*/
tuple = COPYTUP(state, tuple);
switch (state->status)
{
case TSS_INITIAL:
+
/*
* Stash the tuple in the in-memory array.
*/
@@ -321,7 +324,8 @@ tuplestore_donestoring(Tuplestorestate *state)
{
switch (state->status)
{
- case TSS_INITIAL:
+ case TSS_INITIAL:
+
/*
* We were able to accumulate all the tuples within the
* allowed amount of memory. Just set up to scan them.
@@ -333,10 +337,12 @@ tuplestore_donestoring(Tuplestorestate *state)
state->status = TSS_READMEM;
break;
case TSS_WRITEFILE:
+
/*
* Write the EOF marker.
*/
markrunend(state);
+
/*
* Set up for reading from tape.
*/
@@ -361,7 +367,7 @@ tuplestore_donestoring(Tuplestorestate *state)
*/
void *
tuplestore_gettuple(Tuplestorestate *state, bool forward,
- bool *should_free)
+ bool *should_free)
{
unsigned int tuplen;
void *tup;
@@ -434,7 +440,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* empty file.
*/
if (BufFileSeek(state->myfile, 0,
- - (long) (2 * sizeof(unsigned int)),
+ -(long) (2 * sizeof(unsigned int)),
SEEK_CUR) != 0)
return NULL;
state->eof_reached = false;
@@ -448,7 +454,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* file.
*/
if (BufFileSeek(state->myfile, 0,
- - (long) sizeof(unsigned int),
+ -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
return NULL;
tuplen = getlen(state, false);
@@ -457,7 +463,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* Back up to get ending length word of tuple before it.
*/
if (BufFileSeek(state->myfile, 0,
- - (long) (tuplen + 2 * sizeof(unsigned int)),
+ -(long) (tuplen + 2 * sizeof(unsigned int)),
SEEK_CUR) != 0)
{
@@ -468,7 +474,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* but that is what in-memory case does).
*/
if (BufFileSeek(state->myfile, 0,
- - (long) (tuplen + sizeof(unsigned int)),
+ -(long) (tuplen + sizeof(unsigned int)),
SEEK_CUR) != 0)
elog(ERROR, "tuplestore_gettuple: bogus tuple len in backward scan");
return NULL;
@@ -483,7 +489,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* initial length word of the tuple, so back up to that point.
*/
if (BufFileSeek(state->myfile, 0,
- - (long) tuplen,
+ -(long) tuplen,
SEEK_CUR) != 0)
elog(ERROR, "tuplestore_gettuple: bogus tuple len in backward scan");
tup = READTUP(state, tuplen);
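
The seek arithmetic above relies on each tuple carrying a length word both before and after its data when the store is built for random access (the "back length word" mentioned in the struct comments). A self-contained sketch of that layout, using invented record contents: the trailing length word is what lets a backward scan find where the previous record's data begins.

	#include <stdio.h>
	#include <string.h>

	static void
	write_record(FILE *fp, const char *data)
	{
		unsigned int len = (unsigned int) strlen(data);

		fwrite(&len, sizeof(len), 1, fp);	/* leading length word */
		fwrite(data, 1, len, fp);
		fwrite(&len, sizeof(len), 1, fp);	/* trailing length word */
	}

	int
	main(void)
	{
		FILE	   *fp = tmpfile();
		unsigned int len;
		char		buf[64];

		if (fp == NULL)
			return 1;

		write_record(fp, "tuple one");
		write_record(fp, "tuple two");

		/* back up over the last record's trailing length word and read it */
		fseek(fp, -(long) sizeof(unsigned int), SEEK_END);
		fread(&len, sizeof(len), 1, fp);

		/* back up over the data plus the trailing length word to reach it */
		fseek(fp, -(long) (len + sizeof(unsigned int)), SEEK_END);
		fread(buf, 1, len, fp);
		buf[len] = '\0';
		printf("last record read backwards: %s\n", buf);
		return 0;
	}
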
@@ -504,9 +510,7 @@ dumptuples(Tuplestorestate *state)
int i;
for (i = 0; i < state->memtupcount; i++)
- {
WRITETUP(state, state->memtuples[i]);
- }
state->memtupcount = 0;
}