author      Bruce Momjian   2003-08-04 00:43:34 +0000
committer   Bruce Momjian   2003-08-04 00:43:34 +0000
commit      089003fb462fcce46c02bf47322b429f73c33c50 (patch)
tree        77d78bc3a149df06f5603f60200a6ab363336624 /src/backend
parent      63354a0228a1dbc4a0d5ddc8ecdd8326349d2100 (diff)
pgindent run.
Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/common/heaptuple.c | 4
-rw-r--r--  src/backend/access/common/indextuple.c | 8
-rw-r--r--  src/backend/access/common/printtup.c | 58
-rw-r--r--  src/backend/access/gist/gistscan.c | 9
-rw-r--r--  src/backend/access/hash/hashfunc.c | 20
-rw-r--r--  src/backend/access/hash/hashovfl.c | 6
-rw-r--r--  src/backend/access/heap/heapam.c | 16
-rw-r--r--  src/backend/access/index/indexam.c | 51
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 55
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 149
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 90
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 91
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 6
-rw-r--r--  src/backend/access/nbtree/nbtxlog.c | 224
-rw-r--r--  src/backend/access/rtree/rtscan.c | 10
-rw-r--r--  src/backend/access/transam/clog.c | 4
-rw-r--r--  src/backend/access/transam/rmgr.c | 6
-rw-r--r--  src/backend/access/transam/slru.c | 94
-rw-r--r--  src/backend/access/transam/xact.c | 58
-rw-r--r--  src/backend/access/transam/xlog.c | 251
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 44
-rw-r--r--  src/backend/catalog/aclchk.c | 61
-rw-r--r--  src/backend/catalog/dependency.c | 86
-rw-r--r--  src/backend/catalog/heap.c | 41
-rw-r--r--  src/backend/catalog/index.c | 65
-rw-r--r--  src/backend/catalog/namespace.c | 32
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 42
-rw-r--r--  src/backend/catalog/pg_constraint.c | 19
-rw-r--r--  src/backend/catalog/pg_conversion.c | 10
-rw-r--r--  src/backend/catalog/pg_operator.c | 8
-rw-r--r--  src/backend/catalog/pg_proc.c | 44
-rw-r--r--  src/backend/catalog/pg_type.c | 27
-rw-r--r--  src/backend/commands/aggregatecmds.c | 10
-rw-r--r--  src/backend/commands/alter.c | 82
-rw-r--r--  src/backend/commands/analyze.c | 8
-rw-r--r--  src/backend/commands/async.c | 13
-rw-r--r--  src/backend/commands/cluster.c | 133
-rw-r--r--  src/backend/commands/comment.c | 35
-rw-r--r--  src/backend/commands/conversioncmds.c | 8
-rw-r--r--  src/backend/commands/copy.c | 209
-rw-r--r--  src/backend/commands/dbcommands.c | 60
-rw-r--r--  src/backend/commands/define.c | 5
-rw-r--r--  src/backend/commands/explain.c | 40
-rw-r--r--  src/backend/commands/functioncmds.c | 51
-rw-r--r--  src/backend/commands/indexcmds.c | 85
-rw-r--r--  src/backend/commands/opclasscmds.c | 18
-rw-r--r--  src/backend/commands/operatorcmds.c | 6
-rw-r--r--  src/backend/commands/portalcmds.c | 64
-rw-r--r--  src/backend/commands/prepare.c | 58
-rw-r--r--  src/backend/commands/proclang.c | 12
-rw-r--r--  src/backend/commands/schemacmds.c | 14
-rw-r--r--  src/backend/commands/sequence.c | 54
-rw-r--r--  src/backend/commands/tablecmds.c | 285
-rw-r--r--  src/backend/commands/trigger.c | 228
-rw-r--r--  src/backend/commands/typecmds.c | 326
-rw-r--r--  src/backend/commands/user.c | 56
-rw-r--r--  src/backend/commands/vacuum.c | 61
-rw-r--r--  src/backend/commands/vacuumlazy.c | 30
-rw-r--r--  src/backend/commands/variable.c | 85
-rw-r--r--  src/backend/commands/view.c | 6
-rw-r--r--  src/backend/executor/execAmi.c | 28
-rw-r--r--  src/backend/executor/execGrouping.c | 12
-rw-r--r--  src/backend/executor/execMain.c | 123
-rw-r--r--  src/backend/executor/execProcnode.c | 18
-rw-r--r--  src/backend/executor/execQual.c | 363
-rw-r--r--  src/backend/executor/execScan.c | 23
-rw-r--r--  src/backend/executor/execTuples.c | 8
-rw-r--r--  src/backend/executor/execUtils.c | 62
-rw-r--r--  src/backend/executor/functions.c | 28
-rw-r--r--  src/backend/executor/nodeAgg.c | 168
-rw-r--r--  src/backend/executor/nodeAppend.c | 4
-rw-r--r--  src/backend/executor/nodeHash.c | 4
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 44
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 7
-rw-r--r--  src/backend/executor/nodeLimit.c | 41
-rw-r--r--  src/backend/executor/nodeMaterial.c | 21
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 28
-rw-r--r--  src/backend/executor/nodeNestloop.c | 10
-rw-r--r--  src/backend/executor/nodeResult.c | 4
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 20
-rw-r--r--  src/backend/executor/nodeSubplan.c | 275
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 17
-rw-r--r--  src/backend/executor/nodeUnique.c | 8
-rw-r--r--  src/backend/executor/spi.c | 29
-rw-r--r--  src/backend/executor/tstoreReceiver.c | 10
-rw-r--r--  src/backend/lib/stringinfo.c | 12
-rw-r--r--  src/backend/libpq/auth.c | 42
-rw-r--r--  src/backend/libpq/be-fsstubs.c | 6
-rw-r--r--  src/backend/libpq/be-secure.c | 23
-rw-r--r--  src/backend/libpq/crypt.c | 7
-rw-r--r--  src/backend/libpq/hba.c | 73
-rw-r--r--  src/backend/libpq/ip.c | 153
-rw-r--r--  src/backend/libpq/md5.c | 6
-rw-r--r--  src/backend/libpq/pqcomm.c | 88
-rw-r--r--  src/backend/libpq/pqformat.c | 81
-rw-r--r--  src/backend/main/main.c | 21
-rw-r--r--  src/backend/nodes/bitmapset.c | 174
-rw-r--r--  src/backend/nodes/copyfuncs.c | 64
-rw-r--r--  src/backend/nodes/equalfuncs.c | 109
-rw-r--r--  src/backend/nodes/list.c | 19
-rw-r--r--  src/backend/nodes/nodes.c | 4
-rw-r--r--  src/backend/nodes/outfuncs.c | 48
-rw-r--r--  src/backend/nodes/readfuncs.c | 59
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 25
-rw-r--r--  src/backend/optimizer/geqo/geqo_main.c | 6
-rw-r--r--  src/backend/optimizer/geqo/geqo_misc.c | 4
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 31
-rw-r--r--  src/backend/optimizer/path/costsize.c | 286
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 191
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 41
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 42
-rw-r--r--  src/backend/optimizer/path/orindxpath.c | 4
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 57
-rw-r--r--  src/backend/optimizer/path/tidpath.c | 6
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 183
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 165
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 15
-rw-r--r--  src/backend/optimizer/plan/planner.c | 222
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 74
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 173
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 107
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 18
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 24
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 20
-rw-r--r--  src/backend/optimizer/util/clauses.c | 236
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 14
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 29
-rw-r--r--  src/backend/optimizer/util/plancat.c | 28
-rw-r--r--  src/backend/optimizer/util/relnode.c | 9
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c | 17
-rw-r--r--  src/backend/optimizer/util/tlist.c | 6
-rw-r--r--  src/backend/optimizer/util/var.c | 43
-rw-r--r--  src/backend/parser/analyze.c | 249
-rw-r--r--  src/backend/parser/parse_agg.c | 99
-rw-r--r--  src/backend/parser/parse_clause.c | 54
-rw-r--r--  src/backend/parser/parse_coerce.c | 212
-rw-r--r--  src/backend/parser/parse_expr.c | 164
-rw-r--r--  src/backend/parser/parse_func.c | 82
-rw-r--r--  src/backend/parser/parse_node.c | 10
-rw-r--r--  src/backend/parser/parse_oper.c | 65
-rw-r--r--  src/backend/parser/parse_relation.c | 43
-rw-r--r--  src/backend/parser/parse_target.c | 22
-rw-r--r--  src/backend/parser/parse_type.c | 8
-rw-r--r--  src/backend/port/beos/sem.c | 4
-rw-r--r--  src/backend/port/dynloader/darwin.c | 4
-rw-r--r--  src/backend/port/dynloader/linux.c | 4
-rw-r--r--  src/backend/port/dynloader/linux.h | 3
-rw-r--r--  src/backend/port/dynloader/win32.c | 8
-rw-r--r--  src/backend/port/ipc_test.c | 14
-rw-r--r--  src/backend/port/posix_sema.c | 3
-rw-r--r--  src/backend/port/sysv_sema.c | 10
-rw-r--r--  src/backend/port/sysv_shmem.c | 34
-rw-r--r--  src/backend/port/win32/sema.c | 13
-rw-r--r--  src/backend/postmaster/pgstat.c | 67
-rw-r--r--  src/backend/postmaster/postmaster.c | 231
-rw-r--r--  src/backend/regex/regc_color.c | 412
-rw-r--r--  src/backend/regex/regc_cvec.c | 197
-rw-r--r--  src/backend/regex/regc_lex.c | 1492
-rw-r--r--  src/backend/regex/regc_locale.c | 982
-rw-r--r--  src/backend/regex/regc_nfa.c | 858
-rw-r--r--  src/backend/regex/regcomp.c | 1836
-rw-r--r--  src/backend/regex/rege_dfa.c | 416
-rw-r--r--  src/backend/regex/regerror.c | 118
-rw-r--r--  src/backend/regex/regexec.c | 776
-rw-r--r--  src/backend/regex/regfree.c | 16
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 26
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 113
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 17
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 22
-rw-r--r--  src/backend/storage/file/fd.c | 14
-rw-r--r--  src/backend/storage/freespace/freespace.c | 215
-rw-r--r--  src/backend/storage/ipc/ipc.c | 6
-rw-r--r--  src/backend/storage/ipc/ipci.c | 3
-rw-r--r--  src/backend/storage/ipc/sinval.c | 9
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 35
-rw-r--r--  src/backend/storage/lmgr/lock.c | 65
-rw-r--r--  src/backend/storage/lmgr/proc.c | 25
-rw-r--r--  src/backend/storage/page/bufpage.c | 15
-rw-r--r--  src/backend/storage/smgr/md.c | 15
-rw-r--r--  src/backend/tcop/dest.c | 9
-rw-r--r--  src/backend/tcop/fastpath.c | 56
-rw-r--r--  src/backend/tcop/postgres.c | 406
-rw-r--r--  src/backend/tcop/pquery.c | 229
-rw-r--r--  src/backend/tcop/utility.c | 145
-rw-r--r--  src/backend/utils/adt/acl.c | 89
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 91
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 230
-rw-r--r--  src/backend/utils/adt/ascii.c | 22
-rw-r--r--  src/backend/utils/adt/char.c | 4
-rw-r--r--  src/backend/utils/adt/date.c | 70
-rw-r--r--  src/backend/utils/adt/datetime.c | 86
-rw-r--r--  src/backend/utils/adt/float.c | 10
-rw-r--r--  src/backend/utils/adt/formatting.c | 81
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 16
-rw-r--r--  src/backend/utils/adt/inet_net_ntop.c | 180
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c | 116
-rw-r--r--  src/backend/utils/adt/int.c | 6
-rw-r--r--  src/backend/utils/adt/int8.c | 6
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/like_match.c | 4
-rw-r--r--  src/backend/utils/adt/mac.c | 8
-rw-r--r--  src/backend/utils/adt/nabstime.c | 22
-rw-r--r--  src/backend/utils/adt/name.c | 4
-rw-r--r--  src/backend/utils/adt/network.c | 258
-rw-r--r--  src/backend/utils/adt/not_in.c | 4
-rw-r--r--  src/backend/utils/adt/numeric.c | 285
-rw-r--r--  src/backend/utils/adt/numutils.c | 4
-rw-r--r--  src/backend/utils/adt/oid.c | 6
-rw-r--r--  src/backend/utils/adt/oracle_compat.c | 17
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 8
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 14
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 4
-rw-r--r--  src/backend/utils/adt/regexp.c | 32
-rw-r--r--  src/backend/utils/adt/regproc.c | 14
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 193
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 582
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 160
-rw-r--r--  src/backend/utils/adt/sets.c | 7
-rw-r--r--  src/backend/utils/adt/timestamp.c | 170
-rw-r--r--  src/backend/utils/adt/varbit.c | 14
-rw-r--r--  src/backend/utils/adt/varchar.c | 20
-rw-r--r--  src/backend/utils/adt/varlena.c | 69
-rw-r--r--  src/backend/utils/adt/xid.c | 4
-rw-r--r--  src/backend/utils/cache/catcache.c | 6
-rw-r--r--  src/backend/utils/cache/inval.c | 4
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 10
-rw-r--r--  src/backend/utils/cache/relcache.c | 62
-rw-r--r--  src/backend/utils/error/elog.c | 173
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 8
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 32
-rw-r--r--  src/backend/utils/init/findbe.c | 9
-rw-r--r--  src/backend/utils/init/miscinit.c | 94
-rw-r--r--  src/backend/utils/init/postinit.c | 28
-rw-r--r--  src/backend/utils/mb/conv.c | 10
-rw-r--r--  src/backend/utils/mb/mbutils.c | 26
-rw-r--r--  src/backend/utils/mb/wchar.c | 10
-rw-r--r--  src/backend/utils/misc/guc.c | 135
-rw-r--r--  src/backend/utils/misc/help_config.c | 24
-rw-r--r--  src/backend/utils/mmgr/aset.c | 22
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 7
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 53
-rw-r--r--  src/backend/utils/sort/logtape.c | 6
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 14
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 37
-rw-r--r--  src/backend/utils/time/tqual.c | 34
245 files changed, 11482 insertions, 10318 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index ae1df582b0e..4f2fd0efd1e 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.84 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.85 2003/08/04 00:43:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -617,7 +617,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
td->t_natts = numberOfAttributes;
td->t_hoff = hoff;
- if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
+ if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
DataFill((char *) td + hoff,
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index abf25915ab5..d0ee3798086 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.65 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.66 2003/08/04 00:43:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor,
if ((size & INDEX_SIZE_MASK) != size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index tuple requires %lu bytes, maximum size is %lu",
- (unsigned long) size,
- (unsigned long) INDEX_SIZE_MASK)));
+ errmsg("index tuple requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));
infomask |= size;
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 61ecdcd7e50..ecee11718d0 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.75 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.76 2003/08/04 00:43:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,13 +24,13 @@
static void printtup_startup(DestReceiver *self, int operation,
- TupleDesc typeinfo);
+ TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_shutdown(DestReceiver *self);
static void printtup_destroy(DestReceiver *self);
@@ -81,8 +81,8 @@ printtup_create_DR(CommandDest dest, Portal portal)
else
{
/*
- * In protocol 2.0 the Bind message does not exist, so there is
- * no way for the columns to have different print formats; it's
+ * In protocol 2.0 the Bind message does not exist, so there is no
+ * way for the columns to have different print formats; it's
* sufficient to look at the first one.
*/
if (portal->formats && portal->formats[0] != 0)
@@ -111,12 +111,13 @@ static void
printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
DR_printtup *myState = (DR_printtup *) self;
- Portal portal = myState->portal;
+ Portal portal = myState->portal;
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
/*
- * Send portal name to frontend (obsolete cruft, gone in proto 3.0)
+ * Send portal name to frontend (obsolete cruft, gone in proto
+ * 3.0)
*
* If portal name not specified, use "blank" portal.
*/
@@ -129,8 +130,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
}
/*
- * If this is a retrieve, and we are supposed to emit row descriptions,
- * then we send back the tuple descriptor of the tuples.
+ * If this is a retrieve, and we are supposed to emit row
+ * descriptions, then we send back the tuple descriptor of the tuples.
*/
if (operation == CMD_SELECT && myState->sendDescrip)
{
@@ -163,7 +164,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
@@ -176,14 +177,14 @@ SendRowDescriptionMessage(TupleDesc typeinfo, List *targetlist, int16 *formats)
int i;
StringInfoData buf;
- pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
- pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
+ pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
+ pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
for (i = 0; i < natts; ++i)
{
- Oid atttypid = attrs[i]->atttypid;
- int32 atttypmod = attrs[i]->atttypmod;
- Oid basetype;
+ Oid atttypid = attrs[i]->atttypid;
+ int32 atttypmod = attrs[i]->atttypmod;
+ Oid basetype;
pq_sendstring(&buf, NameStr(attrs[i]->attname));
/* column ID info appears in protocol 3.0 and up */
@@ -320,8 +321,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
}
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -347,7 +348,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -424,8 +425,8 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
Assert(thisState->format == 0);
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -536,9 +537,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena);
+
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -547,7 +549,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
value = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typelem),
+ ObjectIdGetDatum(typelem),
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -627,8 +629,8 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
Assert(thisState->format == 1);
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -637,7 +639,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index d3208e248e1..505fd762565 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.46 2003/08/04 00:43:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,11 +104,12 @@ gistrescan(PG_FUNCTION_ARGS)
memmove(s->keyData,
key,
s->numberOfKeys * sizeof(ScanKeyData));
+
/*
* Play games here with the scan key to use the Consistent
- * function for all comparisons: 1) the sk_procedure field
- * will now be used to hold the strategy number 2) the
- * sk_func field will point to the Consistent function
+ * function for all comparisons: 1) the sk_procedure field will
+ * now be used to hold the strategy number 2) the sk_func field
+ * will point to the Consistent function
*/
for (i = 0; i < s->numberOfKeys; i++)
{
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index a82b8b32d55..4dd9d9df3ee 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.36 2003/06/22 22:04:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.37 2003/08/04 00:43:12 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -60,9 +60,9 @@ hashfloat4(PG_FUNCTION_ARGS)
float4 key = PG_GETARG_FLOAT4(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
*/
if (key == (float4) 0)
PG_RETURN_UINT32(0);
@@ -76,9 +76,9 @@ hashfloat8(PG_FUNCTION_ARGS)
float8 key = PG_GETARG_FLOAT8(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
*/
if (key == (float8) 0)
PG_RETURN_UINT32(0);
@@ -121,9 +121,9 @@ hashtext(PG_FUNCTION_ARGS)
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena,
- * but it seems likely that we may need to do something different
- * in non-C locales. (See also hashbpchar, if so.)
+ * Note: this is currently identical in behavior to hashvarlena, but
+ * it seems likely that we may need to do something different in non-C
+ * locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),
VARSIZE(key) - VARHDRSZ);
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index ed9459feb90..fd7fc158220 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.35 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.36 2003/08/04 00:43:12 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -205,8 +205,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
if (++splitnum >= NCACHED)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("out of overflow pages in hash index \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
metap->hashm_ovflpoint = splitnum;
metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
metap->hashm_spares[splitnum - 1]--;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a0d191f8a9d..8b4b5590ca9 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.152 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.153 2003/08/04 00:43:14 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1132,6 +1132,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
xlhdr.t_natts = tup->t_data->t_natts;
xlhdr.t_infomask = tup->t_data->t_infomask;
xlhdr.t_hoff = tup->t_data->t_hoff;
+
/*
* note we mark rdata[1] as belonging to buffer; if XLogInsert
* decides to write the whole page to the xlog, we don't need to
@@ -1149,9 +1150,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
rdata[2].next = NULL;
/*
- * If this is the single and first tuple on page, we can reinit the
- * page instead of restoring the whole thing. Set flag, and hide
- * buffer references from XLogInsert.
+ * If this is the single and first tuple on page, we can reinit
+ * the page instead of restoring the whole thing. Set flag, and
+ * hide buffer references from XLogInsert.
*/
if (ItemPointerGetOffsetNumber(&(tup->t_self)) == FirstOffsetNumber &&
PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
@@ -1912,7 +1913,7 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt)
/*
* The unused-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * that it is. When XLogInsert stores the whole buffer, the offsets
* array need not be stored too.
*/
rdata[1].buffer = buffer;
@@ -1991,9 +1992,10 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
2 * sizeof(TransactionId));
hsize += 2 * sizeof(TransactionId);
}
+
/*
- * As with insert records, we need not store the rdata[2] segment
- * if we decide to store the whole buffer instead.
+ * As with insert records, we need not store the rdata[2] segment if
+ * we decide to store the whole buffer instead.
*/
rdata[2].buffer = newbuf;
rdata[2].data = (char *) &xlhdr;
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 731c34b3ab6..ee93e8a7222 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.67 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.68 2003/08/04 00:43:15 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -300,7 +300,7 @@ index_beginscan(Relation heapRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
- * cannot change). To restart the scan without changing keys, pass NULL
+ * cannot change). To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@@ -394,8 +394,8 @@ index_restrpos(IndexScanDesc scan)
/*
* We do not reset got_tuple; so if the scan is actually being
- * short-circuited by index_getnext, the effective position restoration
- * is done by restoring unique_tuple_pos.
+ * short-circuited by index_getnext, the effective position
+ * restoration is done by restoring unique_tuple_pos.
*/
scan->unique_tuple_pos = scan->unique_tuple_mark;
@@ -427,24 +427,24 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
/*
- * If we already got a tuple and it must be unique, there's no need
- * to make the index AM look through any additional tuples. (This can
+ * If we already got a tuple and it must be unique, there's no need to
+ * make the index AM look through any additional tuples. (This can
* save a useful amount of work in scenarios where there are many dead
* tuples due to heavy update activity.)
*
* To do this we must keep track of the logical scan position
* (before/on/after tuple). Also, we have to be sure to release scan
- * resources before returning NULL; if we fail to do so then a multi-index
- * scan can easily run the system out of free buffers. We can release
- * index-level resources fairly cheaply by calling index_rescan. This
- * means there are two persistent states as far as the index AM is
- * concerned: on-tuple and rescanned. If we are actually asked to
- * re-fetch the single tuple, we have to go through a fresh indexscan
- * startup, which penalizes that (infrequent) case.
+ * resources before returning NULL; if we fail to do so then a
+ * multi-index scan can easily run the system out of free buffers. We
+ * can release index-level resources fairly cheaply by calling
+ * index_rescan. This means there are two persistent states as far as
+ * the index AM is concerned: on-tuple and rescanned. If we are
+ * actually asked to re-fetch the single tuple, we have to go through
+ * a fresh indexscan startup, which penalizes that (infrequent) case.
*/
if (scan->keys_are_unique && scan->got_tuple)
{
- int new_tuple_pos = scan->unique_tuple_pos;
+ int new_tuple_pos = scan->unique_tuple_pos;
if (ScanDirectionIsForward(direction))
{
@@ -459,22 +459,23 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
if (new_tuple_pos == 0)
{
/*
- * We are moving onto the unique tuple from having been off it.
- * We just fall through and let the index AM do the work. Note
- * we should get the right answer regardless of scan direction.
+ * We are moving onto the unique tuple from having been off
+ * it. We just fall through and let the index AM do the work.
+ * Note we should get the right answer regardless of scan
+ * direction.
*/
- scan->unique_tuple_pos = 0; /* need to update position */
+ scan->unique_tuple_pos = 0; /* need to update position */
}
else
{
/*
- * Moving off the tuple; must do amrescan to release index-level
- * pins before we return NULL. Since index_rescan will reset
- * my state, must save and restore...
+ * Moving off the tuple; must do amrescan to release
+ * index-level pins before we return NULL. Since index_rescan
+ * will reset my state, must save and restore...
*/
- int unique_tuple_mark = scan->unique_tuple_mark;
+ int unique_tuple_mark = scan->unique_tuple_mark;
- index_rescan(scan, NULL /* no change to key */);
+ index_rescan(scan, NULL /* no change to key */ );
scan->keys_are_unique = true;
scan->got_tuple = true;
@@ -631,7 +632,7 @@ index_bulk_delete(Relation indexRelation,
*/
IndexBulkDeleteResult *
index_vacuum_cleanup(Relation indexRelation,
- IndexVacuumCleanupInfo *info,
+ IndexVacuumCleanupInfo * info,
IndexBulkDeleteResult *stats)
{
RegProcedure procedure;
@@ -649,7 +650,7 @@ index_vacuum_cleanup(Relation indexRelation,
DatumGetPointer(OidFunctionCall3(procedure,
PointerGetDatum(indexRelation),
PointerGetDatum((Pointer) info),
- PointerGetDatum((Pointer) stats)));
+ PointerGetDatum((Pointer) stats)));
return result;
}
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index dd8eda99b93..962d7a1822e 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.102 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.103 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -432,9 +432,9 @@ _bt_insertonpg(Relation rel,
*
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan
- * could fail to see our insertion. write locks on intermediate
- * dead pages won't do because we don't know when they will get
- * de-linked from the tree.
+ * could fail to see our insertion. write locks on
+ * intermediate dead pages won't do because we don't know when
+ * they will get de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -523,9 +523,10 @@ _bt_insertonpg(Relation rel,
/*
* If we are doing this insert because we split a page that was
* the only one on its tree level, but was not the root, it may
- * have been the "fast root". We need to ensure that the fast root
- * link points at or above the current page. We can safely acquire
- * a lock on the metapage here --- see comments for _bt_newroot().
+ * have been the "fast root". We need to ensure that the fast
+ * root link points at or above the current page. We can safely
+ * acquire a lock on the metapage here --- see comments for
+ * _bt_newroot().
*/
if (split_only_page)
{
@@ -1135,7 +1136,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1155,19 +1156,19 @@ _bt_insert_parent(Relation rel,
bool is_only)
{
/*
- * Here we have to do something Lehman and Yao don't talk about:
- * deal with a root split and construction of a new root. If our
- * stack is empty then we have just split a node on what had been
- * the root level when we descended the tree. If it was still the
- * root then we perform a new-root construction. If it *wasn't*
- * the root anymore, search to find the next higher level that
- * someone constructed meanwhile, and find the right place to insert
- * as for the normal case.
+ * Here we have to do something Lehman and Yao don't talk about: deal
+ * with a root split and construction of a new root. If our stack is
+ * empty then we have just split a node on what had been the root
+ * level when we descended the tree. If it was still the root then we
+ * perform a new-root construction. If it *wasn't* the root anymore,
+ * search to find the next higher level that someone constructed
+ * meanwhile, and find the right place to insert as for the normal
+ * case.
*
- * If we have to search for the parent level, we do so by
- * re-descending from the root. This is not super-efficient,
- * but it's rare enough not to matter. (This path is also taken
- * when called from WAL recovery --- we have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough
+ * not to matter. (This path is also taken when called from WAL
+ * recovery --- we have no stack in that case.)
*/
if (is_root)
{
@@ -1222,9 +1223,9 @@ _bt_insert_parent(Relation rel,
/*
* Find the parent buffer and get the parent page.
*
- * Oops - if we were moved right then we need to change stack
- * item! We want to find parent pointing to where we are,
- * right ? - vadim 05/27/97
+ * Oops - if we were moved right then we need to change stack item!
+ * We want to find parent pointing to where we are, right ? -
+ * vadim 05/27/97
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -1296,16 +1297,16 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* start = InvalidOffsetNumber means "search the whole page".
- * We need this test anyway due to possibility that
- * page has a high key now when it didn't before.
+ * We need this test anyway due to possibility that page has a
+ * high key now when it didn't before.
*/
if (start < minoff)
start = minoff;
/*
* These loops will check every item on the page --- but in an
- * order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * order that's attuned to the probability of where it
+ * actually is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 33f85cd59a6..ace06f0a250 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.66 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.67 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -181,8 +181,8 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
- * page and start all over again. (Is that really true?
- * But it's hardly worth trying to optimize this case.)
+ * page and start all over again. (Is that really true? But
+ * it's hardly worth trying to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
return _bt_getroot(rel, access);
@@ -190,8 +190,8 @@ _bt_getroot(Relation rel, int access)
/*
* Get, initialize, write, and leave a lock of the appropriate
- * type on the new root page. Since this is the first page in
- * the tree, it's a leaf as well as the root.
+ * type on the new root page. Since this is the first page in the
+ * tree, it's a leaf as well as the root.
*/
rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
rootblkno = BufferGetBlockNumber(rootbuf);
@@ -240,7 +240,7 @@ _bt_getroot(Relation rel, int access)
_bt_wrtnorelbuf(rel, rootbuf);
/*
- * swap root write lock for read lock. There is no danger of
+ * swap root write lock for read lock. There is no danger of
* anyone else accessing the new root page while it's unlocked,
* since no one else knows where it is yet.
*/
@@ -284,8 +284,8 @@ _bt_getroot(Relation rel, int access)
}
/*
- * By here, we have a pin and read lock on the root page, and no
- * lock set on the metadata page. Return the root page's buffer.
+ * By here, we have a pin and read lock on the root page, and no lock
+ * set on the metadata page. Return the root page's buffer.
*/
return rootbuf;
}
@@ -299,7 +299,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -406,9 +406,9 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check
- * that the page is still free. (For example, an already-free page
- * could have been re-used between the time the last VACUUM scanned
- * it and the time the VACUUM made its FSM updates.)
+ * that the page is still free. (For example, an already-free
+ * page could have been re-used between the time the last VACUUM
+ * scanned it and the time the VACUUM made its FSM updates.)
*/
for (;;)
{
@@ -431,10 +431,10 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* Extend the relation by one page.
*
- * We have to use a lock to ensure no one else is extending the rel at
- * the same time, else we will both try to initialize the same new
- * page. We can skip locking for new or temp relations, however,
- * since no one else could be accessing them.
+ * We have to use a lock to ensure no one else is extending the rel
+ * at the same time, else we will both try to initialize the same
+ * new page. We can skip locking for new or temp relations,
+ * however, since no one else could be accessing them.
*/
needLock = !(rel->rd_isnew || rel->rd_istemp);
@@ -444,8 +444,8 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
buf = ReadBuffer(rel, P_NEW);
/*
- * Release the file-extension lock; it's now OK for someone else to
- * extend the relation some more.
+ * Release the file-extension lock; it's now OK for someone else
+ * to extend the relation some more.
*/
if (needLock)
UnlockPage(rel, 0, ExclusiveLock);
@@ -484,7 +484,7 @@ _bt_relbuf(Relation rel, Buffer buf)
* and a pin on the buffer.
*
* NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here; the real I/O happens later. This is okay since we are not
+ * dirty here; the real I/O happens later. This is okay since we are not
* relying on write ordering anyway. The WAL mechanism is responsible for
* guaranteeing correctness after a crash.
*/
@@ -534,13 +534,14 @@ _bt_page_recyclable(Page page)
BTPageOpaque opaque;
/*
- * It's possible to find an all-zeroes page in an index --- for example,
- * a backend might successfully extend the relation one page and then
- * crash before it is able to make a WAL entry for adding the page.
- * If we find a zeroed page then reclaim it.
+ * It's possible to find an all-zeroes page in an index --- for
+ * example, a backend might successfully extend the relation one page
+ * and then crash before it is able to make a WAL entry for adding the
+ * page. If we find a zeroed page then reclaim it.
*/
if (PageIsNew(page))
return true;
+
/*
* Otherwise, recycle if deleted and too old to have any processes
* interested in it.
@@ -565,7 +566,7 @@ _bt_page_recyclable(Page page)
* mistake. On exit, metapage data is correct and we no longer have
* a pin or lock on the metapage.
*
- * Actually this is not used for splitting on-the-fly anymore. It's only used
+ * Actually this is not used for splitting on-the-fly anymore. It's only used
* in nbtsort.c at the completion of btree building, where we know we have
* sole access to the index anyway.
*/
@@ -623,7 +624,7 @@ _bt_metaproot(Relation rel, BlockNumber rootbknum, uint32 level)
/*
* Delete item(s) from a btree page.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -646,9 +647,7 @@ _bt_delitems(Relation rel, Buffer buf,
* adjusting item numbers for previous deletions.
*/
for (i = nitems - 1; i >= 0; i--)
- {
PageIndexTupleDelete(page, itemnos[i]);
- }
/* XLOG stuff */
if (!rel->rd_istemp)
@@ -666,8 +665,8 @@ _bt_delitems(Relation rel, Buffer buf,
rdata[0].next = &(rdata[1]);
/*
- * The target-offsets array is not in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * The target-offsets array is not in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the offsets
* array need not be stored too.
*/
rdata[1].buffer = buf;
@@ -701,7 +700,7 @@ _bt_delitems(Relation rel, Buffer buf,
* may currently be trying to follow links leading to the page; they have to
* be allowed to use its right-link to recover. See nbtree/README.
*
- * On entry, the target buffer must be pinned and read-locked. This lock and
+ * On entry, the target buffer must be pinned and read-locked. This lock and
* pin will be dropped before exiting.
*
* Returns the number of pages successfully deleted (zero on failure; could
@@ -714,7 +713,7 @@ _bt_delitems(Relation rel, Buffer buf,
int
_bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
{
- BlockNumber target,
+ BlockNumber target,
leftsib,
rightsib,
parent;
@@ -740,17 +739,18 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it,
+ * We can never delete rightmost pages nor root pages. While at it,
* check that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
_bt_relbuf(rel, buf);
return 0;
}
+
/*
* Save info about page, including a copy of its high key (it must
* have one, being non-rightmost).
@@ -760,12 +760,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
leftsib = opaque->btpo_prev;
itemid = PageGetItemId(page, P_HIKEY);
targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
+
/*
* We need to get an approximate pointer to the page's parent page.
- * Use the standard search mechanism to search for the page's high key;
- * this will give us a link to either the current parent or someplace
- * to its left (if there are multiple equal high keys). To avoid
- * deadlocks, we'd better drop the target page lock first.
+ * Use the standard search mechanism to search for the page's high
+ * key; this will give us a link to either the current parent or
+ * someplace to its left (if there are multiple equal high keys). To
+ * avoid deadlocks, we'd better drop the target page lock first.
*/
_bt_relbuf(rel, buf);
/* we need a scan key to do our search, so build one */
@@ -775,9 +776,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
&lbuf, BT_READ);
/* don't need a pin on that either */
_bt_relbuf(rel, lbuf);
+
/*
* If we are trying to delete an interior page, _bt_search did more
- * than we needed. Locate the stack item pointing to our parent level.
+ * than we needed. Locate the stack item pointing to our parent
+ * level.
*/
ilevel = 0;
for (;;)
@@ -789,10 +792,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
stack = stack->bts_parent;
ilevel++;
}
+
/*
* We have to lock the pages we need to modify in the standard order:
- * moving right, then up. Else we will deadlock against other writers.
- *
+ * moving right, then up. Else we will deadlock against other
+ * writers.
+ *
* So, we need to find and write-lock the current left sibling of the
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if
@@ -823,21 +828,24 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
else
lbuf = InvalidBuffer;
+
/*
- * Next write-lock the target page itself. It should be okay to take just
- * a write lock not a superexclusive lock, since no scans would stop on an
- * empty page.
+ * Next write-lock the target page itself. It should be okay to take
+ * just a write lock not a superexclusive lock, since no scans would
+ * stop on an empty page.
*/
buf = _bt_getbuf(rel, target, BT_WRITE);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * Check page is still empty etc, else abandon deletion. The empty check
- * is necessary since someone else might have inserted into it while
- * we didn't have it locked; the others are just for paranoia's sake.
+ * Check page is still empty etc, else abandon deletion. The empty
+ * check is necessary since someone else might have inserted into it
+ * while we didn't have it locked; the others are just for paranoia's
+ * sake.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
_bt_relbuf(rel, buf);
if (BufferIsValid(lbuf))
@@ -846,14 +854,17 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
if (opaque->btpo_prev != leftsib)
elog(ERROR, "left link changed unexpectedly");
+
/*
* And next write-lock the (current) right sibling.
*/
rightsib = opaque->btpo_next;
rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
+
/*
* Next find and write-lock the current parent of the target page.
- * This is essentially the same as the corresponding step of splitting.
+ * This is essentially the same as the corresponding step of
+ * splitting.
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
target, P_HIKEY);
@@ -863,10 +874,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
RelationGetRelationName(rel));
parent = stack->bts_blkno;
poffset = stack->bts_offset;
+
/*
* If the target is the rightmost child of its parent, then we can't
- * delete, unless it's also the only child --- in which case the parent
- * changes to half-dead status.
+ * delete, unless it's also the only child --- in which case the
+ * parent changes to half-dead status.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -893,12 +905,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
if (OffsetNumberNext(P_FIRSTDATAKEY(opaque)) == maxoff)
parent_one_child = true;
}
+
/*
* If we are deleting the next-to-last page on the target's level,
- * then the rightsib is a candidate to become the new fast root.
- * (In theory, it might be possible to push the fast root even further
- * down, but the odds of doing so are slim, and the locking considerations
- * daunting.)
+ * then the rightsib is a candidate to become the new fast root. (In
+ * theory, it might be possible to push the fast root even further
+ * down, but the odds of doing so are slim, and the locking
+ * considerations daunting.)
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -914,12 +927,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
metapg = BufferGetPage(metabuf);
metad = BTPageGetMeta(metapg);
+
/*
* The expected case here is btm_fastlevel == targetlevel+1;
- * if the fastlevel is <= targetlevel, something is wrong, and we
- * choose to overwrite it to fix it.
+ * if the fastlevel is <= targetlevel, something is wrong, and
+ * we choose to overwrite it to fix it.
*/
- if (metad->btm_fastlevel > targetlevel+1)
+ if (metad->btm_fastlevel > targetlevel + 1)
{
/* no update wanted */
_bt_relbuf(rel, metabuf);
@@ -937,9 +951,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
* Update parent. The normal case is a tad tricky because we want to
- * delete the target's downlink and the *following* key. Easiest way is
- * to copy the right sibling's downlink over the target downlink, and then
- * delete the following item.
+ * delete the target's downlink and the *following* key. Easiest way
+ * is to copy the right sibling's downlink over the target downlink,
+ * and then delete the following item.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -950,7 +964,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
else
{
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;
itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@@ -968,8 +982,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
/*
- * Update siblings' side-links. Note the target page's side-links will
- * continue to point to the siblings.
+ * Update siblings' side-links. Note the target page's side-links
+ * will continue to point to the siblings.
*/
if (BufferIsValid(lbuf))
{
@@ -1096,10 +1110,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
_bt_wrtbuf(rel, lbuf);
/*
- * If parent became half dead, recurse to try to delete it. Otherwise,
- * if right sibling is empty and is now the last child of the parent,
- * recurse to try to delete it. (These cases cannot apply at the same
- * time, though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to try to delete it.
+ * Otherwise, if right sibling is empty and is now the last child of
+ * the parent, recurse to try to delete it. (These cases cannot apply
+ * at the same time, though the second case might itself recurse to
+ * the first.)
*/
if (parent_half_dead)
{
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 3c814725fef..7d0dea4e788 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.103 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.104 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -580,19 +580,20 @@ btbulkdelete(PG_FUNCTION_ARGS)
/*
* The outer loop iterates over index leaf pages, the inner over items
- * on a leaf page. We issue just one _bt_delitems() call per page,
- * so as to minimize WAL traffic.
+ * on a leaf page. We issue just one _bt_delitems() call per page, so
+ * as to minimize WAL traffic.
*
- * Note that we exclusive-lock every leaf page containing data items,
- * in sequence left to right. It sounds attractive to only exclusive-lock
- * those containing items we need to delete, but unfortunately that
- * is not safe: we could then pass a stopped indexscan, which could
- * in rare cases lead to deleting the item it needs to find when it
- * resumes. (See _bt_restscan --- this could only happen if an indexscan
- * stops on a deletable item and then a page split moves that item
- * into a page further to its right, which the indexscan will have no
- * pin on.) We can skip obtaining exclusive lock on empty pages
- * though, since no indexscan could be stopped on those.
+ * Note that we exclusive-lock every leaf page containing data items, in
+ * sequence left to right. It sounds attractive to only
+ * exclusive-lock those containing items we need to delete, but
+ * unfortunately that is not safe: we could then pass a stopped
+ * indexscan, which could in rare cases lead to deleting the item it
+ * needs to find when it resumes. (See _bt_restscan --- this could
+ * only happen if an indexscan stops on a deletable item and then a
+ * page split moves that item into a page further to its right, which
+ * the indexscan will have no pin on.) We can skip obtaining
+ * exclusive lock on empty pages though, since no indexscan could be
+ * stopped on those.
*/
buf = _bt_get_endpoint(rel, 0, false);
if (BufferIsValid(buf)) /* check for empty index */
@@ -604,7 +605,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
OffsetNumber offnum,
minoff,
maxoff;
- BlockNumber nextpage;
+ BlockNumber nextpage;
CHECK_FOR_INTERRUPTS();
@@ -622,12 +623,14 @@ btbulkdelete(PG_FUNCTION_ARGS)
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
+
/*
- * Recompute minoff/maxoff, both of which could have changed
- * while we weren't holding the lock.
+ * Recompute minoff/maxoff, both of which could have
+ * changed while we weren't holding the lock.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
+
/*
* Scan over all items to see which ones need deleted
* according to the callback function.
@@ -640,7 +643,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
ItemPointer htup;
btitem = (BTItem) PageGetItem(page,
- PageGetItemId(page, offnum));
+ PageGetItemId(page, offnum));
htup = &(btitem->bti_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -651,6 +654,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples += 1;
}
}
+
/*
* If we need to delete anything, do it and write the buffer;
* else just release the buffer.
@@ -662,9 +666,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
_bt_wrtbuf(rel, buf);
}
else
- {
_bt_relbuf(rel, buf);
- }
/* And advance to next page, if any */
if (nextpage == P_NONE)
break;
@@ -712,7 +714,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/* No point in remembering more than MaxFSMPages pages */
maxFreePages = MaxFSMPages;
if ((BlockNumber) maxFreePages > num_pages)
- maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
+ maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
freePages = (BlockNumber *) palloc(maxFreePages * sizeof(BlockNumber));
nFreePages = 0;
@@ -728,10 +730,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* after we start the scan will not be examined; this should be fine,
* since they can't possibly be empty.)
*/
- for (blkno = BTREE_METAPAGE+1; blkno < num_pages; blkno++)
+ for (blkno = BTREE_METAPAGE + 1; blkno < num_pages; blkno++)
{
- Buffer buf;
- Page page;
+ Buffer buf;
+ Page page;
BTPageOpaque opaque;
buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -753,7 +755,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page))
{
/* Empty, try to delete */
- int ndel;
+ int ndel;
/* Run pagedel in a temp context to avoid memory leakage */
MemoryContextReset(mycontext);
@@ -768,7 +770,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During VACUUM FULL it's okay to recycle deleted pages
* immediately, since there can be no other transactions
- * scanning the index. Note that we will only recycle the
+ * scanning the index. Note that we will only recycle the
* current page and not any parent pages that _bt_pagedel
* might have recursed to; this seems reasonable in the name
* of simplicity. (Trying to do otherwise would mean we'd
@@ -787,16 +789,16 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
}
/*
- * During VACUUM FULL, we truncate off any recyclable pages at the
- * end of the index. In a normal vacuum it'd be unsafe to do this
- * except by acquiring exclusive lock on the index and then rechecking
- * all the pages; doesn't seem worth it.
+ * During VACUUM FULL, we truncate off any recyclable pages at the end
+ * of the index. In a normal vacuum it'd be unsafe to do this except
+ * by acquiring exclusive lock on the index and then rechecking all
+ * the pages; doesn't seem worth it.
*/
if (info->vacuum_full && nFreePages > 0)
{
- BlockNumber new_pages = num_pages;
+ BlockNumber new_pages = num_pages;
- while (nFreePages > 0 && freePages[nFreePages-1] == new_pages-1)
+ while (nFreePages > 0 && freePages[nFreePages - 1] == new_pages - 1)
{
new_pages--;
pages_deleted--;
@@ -810,9 +812,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* Okay to truncate.
*
* First, flush any shared buffers for the blocks we intend to
- * delete. FlushRelationBuffers is a bit more than we need for
- * this, since it will also write out dirty buffers for blocks we
- * aren't deleting, but it's the closest thing in bufmgr's API.
+ * delete. FlushRelationBuffers is a bit more than we need
+ * for this, since it will also write out dirty buffers for
+ * blocks we aren't deleting, but it's the closest thing in
+ * bufmgr's API.
*/
i = FlushRelationBuffers(rel, new_pages);
if (i < 0)
@@ -822,7 +825,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* Do the physical truncation.
*/
new_pages = smgrtruncate(DEFAULT_SMGR, rel, new_pages);
- rel->rd_nblocks = new_pages; /* update relcache immediately */
+ rel->rd_nblocks = new_pages; /* update relcache
+ * immediately */
rel->rd_targblock = InvalidBlockNumber;
num_pages = new_pages;
}
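The while loop reindented above is the whole truncation test: only free pages that form a contiguous run at the very end of the index can be handed back to the filesystem. A self-contained sketch of the same arithmetic, with invented names, might be:

    #include <stdio.h>

    /*
     * Given a sorted list of recyclable page numbers and the current relation
     * size, compute how far the relation can be truncated: only pages that form
     * a contiguous run at the very end can be given back to the filesystem.
     */
    static unsigned truncate_point(const unsigned *freePages, int nFreePages,
                                   unsigned num_pages)
    {
        unsigned new_pages = num_pages;

        while (nFreePages > 0 && freePages[nFreePages - 1] == new_pages - 1)
        {
            new_pages--;
            nFreePages--;
        }
        return new_pages;
    }

    int main(void)
    {
        /* pages 3, 7, 8, 9 are free in a 10-page index: can truncate to 7 pages */
        unsigned freePages[] = {3, 7, 8, 9};
        printf("truncate to %u pages\n", truncate_point(freePages, 4, 10));
        return 0;
    }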
@@ -856,7 +860,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* and so no deletion can have occurred on that page.
*
* On entry, we have a pin but no read lock on the buffer that contained
- * the index tuple we stopped the scan on. On exit, we have pin and read
+ * the index tuple we stopped the scan on. On exit, we have pin and read
* lock on the buffer that now contains that index tuple, and the scandesc's
* current position is updated to point at it.
*/
@@ -877,8 +881,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
- * Reacquire read lock on the buffer. (We should still have
- * a reference-count pin on it, so need not get that.)
+ * Reacquire read lock on the buffer. (We should still have a
+ * reference-count pin on it, so need not get that.)
*/
LockBuffer(buf, BT_READ);
@@ -921,11 +925,11 @@ _bt_restscan(IndexScanDesc scan)
/*
* The item we're looking for moved right at least one page, so
- * move right. We are careful here to pin and read-lock the next
- * non-dead page before releasing the current one. This ensures that
- * a concurrent btbulkdelete scan cannot pass our position --- if it
- * did, it might be able to reach and delete our target item before
- * we can find it again.
+ * move right. We are careful here to pin and read-lock the next
+ * non-dead page before releasing the current one. This ensures
+ * that a concurrent btbulkdelete scan cannot pass our position
+ * --- if it did, it might be able to reach and delete our target
+ * item before we can find it again.
*/
if (P_RIGHTMOST(opaque))
elog(ERROR, "failed to re-find previous key in \"%s\"",
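The comment above explains the hand-over-hand rule _bt_restscan relies on: pin and lock the next page before letting go of the current one, so a left-to-right btbulkdelete sweep can never overtake the scan. A toy single-threaded model of that ordering (a plain flag standing in for pin plus read lock, invented types throughout) could be:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Toy right-linked page with a "locked" flag standing in for pin + read lock. */
    typedef struct ToyPage {
        int key_wanted_here;          /* nonzero if the sought item lives here */
        bool locked;
        struct ToyPage *right;
    } ToyPage;

    static void lock_page(ToyPage *p)   { assert(!p->locked); p->locked = true; }
    static void unlock_page(ToyPage *p) { assert(p->locked);  p->locked = false; }

    /*
     * Step right until the wanted item is found.  The next page is locked
     * before the current one is released, so at every instant we hold a lock
     * somewhere in the chain and a left-to-right sweeper cannot overtake us.
     */
    static ToyPage *find_moved_item(ToyPage *cur)
    {
        lock_page(cur);
        while (!cur->key_wanted_here)
        {
            ToyPage *next = cur->right;
            assert(next != NULL);     /* real code errors out if rightmost */
            lock_page(next);          /* acquire next... */
            unlock_page(cur);         /* ...then release current */
            cur = next;
        }
        return cur;                   /* returned still locked, like the real code */
    }

    int main(void)
    {
        ToyPage c = {1, false, NULL};
        ToyPage b = {0, false, &c};
        ToyPage a = {0, false, &b};
        printf("found on page %p\n", (void *) find_moved_item(&a));
        return 0;
    }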
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 457914adf73..80abe195cea 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.77 2003/07/29 22:18:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.78 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,8 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
/*
* Race -- the page we just grabbed may have split since we read
- * its pointer in the parent (or metapage). If it has, we may need
- * to move right to its new sibling. Do that.
+ * its pointer in the parent (or metapage). If it has, we may
+ * need to move right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@@ -87,14 +87,14 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
- * We need to save the location of the index entry we chose in
- * the parent page on a stack. In case we split the tree, we'll
- * use the stack to work back up to the parent page. We also save
- * the actual downlink (TID) to uniquely identify the index entry,
- * in case it moves right while we're working lower in the
- * tree. See the paper by Lehman and Yao for how this is detected
- * and handled. (We use the child link to disambiguate duplicate
- * keys in the index -- Lehman and Yao disallow duplicate keys.)
+ * We need to save the location of the index entry we chose in the
+ * parent page on a stack. In case we split the tree, we'll use
+ * the stack to work back up to the parent page. We also save the
+ * actual downlink (TID) to uniquely identify the index entry, in
+ * case it moves right while we're working lower in the tree. See
+ * the paper by Lehman and Yao for how this is detected and
+ * handled. (We use the child link to disambiguate duplicate keys
+ * in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -151,8 +151,8 @@ _bt_moveright(Relation rel,
* might not need to move right; have to scan the page first anyway.)
* It could even have split more than once, so scan as far as needed.
*
- * We also have to move right if we followed a link that brought us to
- * a dead page.
+ * We also have to move right if we followed a link that brought us to a
+ * dead page.
*/
while (!P_RIGHTMOST(opaque) &&
(P_IGNORE(opaque) ||
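The reflowed comments in _bt_search and _bt_moveright describe the Lehman and Yao recovery step: after following a possibly stale downlink, keep moving right past dead pages and past pages whose key range ends below the scan key. A minimal sketch of that loop over invented toy pages (not the real BTPageOpaque machinery) might read:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy page: highkey is the upper bound of keys that belong on the page;
     * the rightmost page has no high key, modelled here as a NULL right link. */
    typedef struct ToyPage {
        int blkno;
        int highkey;
        bool ignore;                  /* half-dead or deleted page */
        struct ToyPage *right;
    } ToyPage;

    /*
     * After following a (possibly stale) downlink, move right until we reach a
     * live page whose key range can contain scankey.  This is the Lehman & Yao
     * fix for racing against concurrent page splits.
     */
    static ToyPage *move_right(ToyPage *page, int scankey)
    {
        while (page->right != NULL && (page->ignore || scankey > page->highkey))
            page = page->right;
        return page;
    }

    int main(void)
    {
        ToyPage r3 = {3, 0, false, NULL};         /* rightmost */
        ToyPage r2 = {2, 200, true, &r3};         /* dead page: skip it */
        ToyPage r1 = {1, 100, false, &r2};
        printf("key 150 lands on block %d\n", move_right(&r1, 150)->blkno);
        return 0;
    }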
@@ -599,8 +599,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* At this point we are positioned at the first item >= scan key, or
* possibly at the end of a page on which all the existing items are
- * less than the scan key and we know that everything on later
- * pages is greater than or equal to scan key.
+ * less than the scan key and we know that everything on later pages
+ * is greater than or equal to scan key.
*
* We could step forward in the latter case, but that'd be a waste of
* time if we want to scan backwards. So, it's now time to examine
@@ -851,7 +851,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
}
}
- else /* backwards scan */
+ else
+/* backwards scan */
{
if (offnum > P_FIRSTDATAKEY(opaque))
offnum = OffsetNumberPrev(offnum);
@@ -860,9 +861,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* Walk left to the next page with data. This is much more
* complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to it,
- * plus the possibility that the page we were on gets deleted
- * after we leave it. See nbtree/README for details.
+ * that the page to our left splits while we are in flight to
+ * it, plus the possibility that the page we were on gets
+ * deleted after we leave it. See nbtree/README for details.
*/
for (;;)
{
@@ -877,10 +878,11 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
* Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and not empty. Else loop back
- * and do it all again.
+ * Done if it's not half-dead and not empty. Else loop
+ * back and do it all again.
*/
if (!P_IGNORE(opaque))
{
@@ -946,17 +948,18 @@ _bt_walk_left(Relation rel, Buffer buf)
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * If this isn't the page we want, walk right till we find
- * what we want --- but go no more than four hops (an
- * arbitrary limit). If we don't find the correct page by then,
- * the most likely bet is that the original page got deleted
- * and isn't in the sibling chain at all anymore, not that its
- * left sibling got split more than four times.
+ * If this isn't the page we want, walk right till we find what we
+ * want --- but go no more than four hops (an arbitrary limit).
+ * If we don't find the correct page by then, the most likely bet
+ * is that the original page got deleted and isn't in the sibling
+ * chain at all anymore, not that its left sibling got split more
+ * than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE
- * here, because half-dead pages are still in the sibling
- * chain. Caller must reject half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here,
+ * because half-dead pages are still in the sibling chain. Caller
+ * must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
@@ -983,8 +986,8 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page
- * (there must be one); that is the page that has acquired the
+ * It was deleted. Move right to first nondeleted page (there
+ * must be one); that is the page that has acquired the
* deleted one's keyspace, so stepping left from it will take
* us where we want to be.
*/
@@ -1001,18 +1004,18 @@ _bt_walk_left(Relation rel, Buffer buf)
if (!P_ISDELETED(opaque))
break;
}
+
/*
- * Now return to top of loop, resetting obknum to
- * point to this nondeleted page, and try again.
+ * Now return to top of loop, resetting obknum to point to
+ * this nondeleted page, and try again.
*/
}
else
{
/*
- * It wasn't deleted; the explanation had better be
- * that the page to the left got split or deleted.
- * Without this check, we'd go into an infinite loop
- * if there's anything wrong.
+ * It wasn't deleted; the explanation had better be that the
+ * page to the left got split or deleted. Without this check,
+ * we'd go into an infinite loop if there's anything wrong.
*/
if (opaque->btpo_prev == lblkno)
elog(ERROR, "could not find left sibling in \"%s\"",
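The long comment above captures _bt_walk_left's defensive strategy: the left link may be stale, so after following it, walk right a bounded number of hops looking for the page whose right link points back at the starting page. A simplified standalone version, with toy page structs rather than real buffers, could be:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct ToyPage {
        int blkno;
        bool deleted;
        struct ToyPage *left, *right;
    } ToyPage;

    /*
     * Try to step left from 'start'.  Because the left sibling can split (or the
     * page we came from can be deleted) while we are "in flight", following the
     * stale left link may land us on the wrong page; in that case walk right a
     * bounded number of hops looking for the page whose right link points back
     * at where we started.  Returns NULL if we give up (caller would retry).
     */
    static ToyPage *walk_left(ToyPage *start)
    {
        ToyPage *p = start->left;
        int tries = 0;

        while (p != NULL)
        {
            if (!p->deleted && p->right == start)
                return p;                 /* found the true left sibling */
            if (++tries > 4)
                return NULL;              /* arbitrary limit, same as the C code */
            p = p->right;
        }
        return NULL;
    }

    int main(void)
    {
        /* b split into b and b2 after a cached the left link to b */
        ToyPage a = {3, false, NULL, NULL};
        ToyPage b2 = {2, false, NULL, &a};
        ToyPage b = {1, false, NULL, &b2};
        a.left = &b;                      /* stale: true left sibling is now b2 */
        printf("left sibling is block %d\n", walk_left(&a)->blkno);
        return 0;
    }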
@@ -1028,7 +1031,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
@@ -1045,8 +1048,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
/*
* If we are looking for a leaf page, okay to descend from fast root;
- * otherwise better descend from true root. (There is no point in being
- * smarter about intermediate levels.)
+ * otherwise better descend from true root. (There is no point in
+ * being smarter about intermediate levels.)
*/
if (level == 0)
buf = _bt_getroot(rel, BT_READ);
@@ -1066,9 +1069,9 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
{
/*
* If we landed on a deleted page, step right to find a live page
- * (there must be one). Also, if we want the rightmost page,
- * step right if needed to get to it (this could happen if the
- * page split since we obtained a pointer to it).
+ * (there must be one). Also, if we want the rightmost page, step
+ * right if needed to get to it (this could happen if the page
+ * split since we obtained a pointer to it).
*/
while (P_IGNORE(opaque) ||
(rightmost && !P_RIGHTMOST(opaque)))
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 92a73021f66..f8eb671df71 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.73 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.74 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef struct BTPageState
static void _bt_blnewpage(Relation index, Buffer *buf, Page *page,
- uint32 level);
+ uint32 level);
static BTPageState *_bt_pagestate(Relation index, uint32 level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
@@ -469,7 +469,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
oopaque->btpo_next = BufferGetBlockNumber(nbuf);
nopaque->btpo_prev = BufferGetBlockNumber(obuf);
- nopaque->btpo_next = P_NONE; /* redundant */
+ nopaque->btpo_next = P_NONE; /* redundant */
}
/*
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index a1a52571fe1..35e5ae6ccb8 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.3 2003/02/23 22:43:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,10 +29,10 @@
typedef struct bt_incomplete_split
{
RelFileNode node; /* the index */
- BlockNumber leftblk; /* left half of split */
- BlockNumber rightblk; /* right half of split */
+ BlockNumber leftblk; /* left half of split */
+ BlockNumber rightblk; /* right half of split */
bool is_root; /* we split the root */
-} bt_incomplete_split;
+} bt_incomplete_split;
static List *incomplete_splits;
@@ -107,7 +107,7 @@ _bt_restore_page(Page page, char *from, int len)
}
static void
-_bt_restore_meta(Relation reln, XLogRecPtr lsn,
+_bt_restore_meta(Relation reln, XLogRecPtr lsn,
BlockNumber root, uint32 level,
BlockNumber fastroot, uint32 fastlevel)
{
@@ -172,7 +172,7 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (!redo || !(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@@ -183,13 +183,11 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo)
{
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
if (PageAddItem(page, (Item) datapos, datalen,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");
@@ -204,13 +202,9 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
elog(PANIC, "btree_insert_undo: bad page LSN");
if (!P_ISLEAF(pageop))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
- {
elog(PANIC, "btree_insert_undo: unimplemented");
- }
}
}
@@ -226,8 +220,8 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo && !isleaf && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
}
@@ -238,9 +232,9 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
{
xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
Relation reln;
- BlockNumber targetblk;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber targetblk;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -338,9 +332,7 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
elog(PANIC, "btree_split_redo: uninitialized next right page");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -357,8 +349,8 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
if (redo && xlrec->level > 0 && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
@@ -422,10 +414,10 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
xl_btree_delete_page *xlrec = (xl_btree_delete_page *) XLogRecGetData(record);
Relation reln;
- BlockNumber parent;
- BlockNumber target;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber parent;
+ BlockNumber target;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -451,9 +443,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized parent page");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
OffsetNumber poffset;
@@ -469,7 +459,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
ItemId itemid;
BTItem btitem;
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;
itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@@ -494,9 +484,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized right sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -520,9 +508,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized left sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -799,116 +785,116 @@ btree_desc(char *buf, uint8 xl_info, char *rec)
switch (info)
{
case XLOG_BTREE_INSERT_LEAF:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_INSERT_UPPER:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert_upper: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert_upper: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_INSERT_META:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert_meta: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert_meta: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_SPLIT_L:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_l: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_l: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_R:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_r: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_r: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_L_ROOT:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_l_root: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_l_root: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_R_ROOT:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_r_root: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_r_root: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_DELETE:
- {
- xl_btree_delete *xlrec = (xl_btree_delete *) rec;
+ {
+ xl_btree_delete *xlrec = (xl_btree_delete *) rec;
- sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
- break;
- }
+ sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
+ xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
+ break;
+ }
case XLOG_BTREE_DELETE_PAGE:
case XLOG_BTREE_DELETE_PAGE_META:
- {
- xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
+ {
+ xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
- strcat(buf, "delete_page: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
- xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "delete_page: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
+ xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_NEWROOT:
- {
- xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
+ {
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
- sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->rootblk, xlrec->level);
- break;
- }
+ sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->rootblk, xlrec->level);
+ break;
+ }
case XLOG_BTREE_NEWMETA:
- {
- xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
+ {
+ xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
- sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->meta.root, xlrec->meta.level,
- xlrec->meta.fastroot, xlrec->meta.fastlevel);
- break;
- }
+ sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->meta.root, xlrec->meta.level,
+ xlrec->meta.fastroot, xlrec->meta.fastlevel);
+ break;
+ }
case XLOG_BTREE_NEWPAGE:
- {
- xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
+ {
+ xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
- sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->blkno);
- break;
- }
+ sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->blkno);
+ break;
+ }
default:
strcat(buf, "UNKNOWN");
break;
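The reindented switch above is btree_desc, which appends a one-line description of each WAL record type to a caller-supplied buffer. A stripped-down sketch of the same pattern, using made-up record types rather than the real xl_btree_* structs, might look like:

    #include <stdio.h>
    #include <string.h>

    typedef enum { REC_INSERT_LEAF, REC_DELETE, REC_NEWROOT } RecType;

    typedef struct {
        RecType type;
        unsigned block;
        unsigned rootblk;
        unsigned level;
    } ToyRecord;

    /*
     * Append a human-readable description of a WAL record to 'buf', one case
     * per record type, the same shape the rm_desc callbacks use.
     */
    static void toy_desc(char *buf, const ToyRecord *rec)
    {
        switch (rec->type)
        {
            case REC_INSERT_LEAF:
                sprintf(buf + strlen(buf), "insert: blk %u", rec->block);
                break;
            case REC_DELETE:
                sprintf(buf + strlen(buf), "delete: blk %u", rec->block);
                break;
            case REC_NEWROOT:
                sprintf(buf + strlen(buf), "newroot: root %u lev %u",
                        rec->rootblk, rec->level);
                break;
            default:
                strcat(buf, "UNKNOWN");
                break;
        }
    }

    int main(void)
    {
        char buf[128] = "btree: ";
        ToyRecord rec = {REC_NEWROOT, 0, 3, 2};
        toy_desc(buf, &rec);
        puts(buf);
        return 0;
    }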
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 6358d622e1f..4362835d700 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.46 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,10 +109,10 @@ rtrescan(PG_FUNCTION_ARGS)
s->numberOfKeys * sizeof(ScanKeyData));
/*
- * Scans on internal pages use different operators than they
- * do on leaf pages. For example, if the user wants all boxes
- * that exactly match (x1,y1,x2,y2), then on internal pages we
- * need to find all boxes that contain (x1,y1,x2,y2).
+ * Scans on internal pages use different operators than they do on
+ * leaf pages. For example, if the user wants all boxes that
+ * exactly match (x1,y1,x2,y2), then on internal pages we need to
+ * find all boxes that contain (x1,y1,x2,y2).
*/
for (i = 0; i < s->numberOfKeys; i++)
{
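The comment above notes that an exact-match request on leaf pages has to become a containment test on internal pages, since an internal entry stands for the union of everything beneath it. A toy sketch of rewriting the strategy per level, with invented strategy names rather than rtree's real strategy numbers, could be:

    #include <stdio.h>

    /* Toy strategy numbers: on leaf pages we test exact equality of a box,
     * on internal pages we must test containment instead, because an internal
     * entry stands for the union of everything below it. */
    typedef enum { STRAT_SAME, STRAT_CONTAINS } Strategy;

    typedef struct { Strategy leaf; Strategy internal; } ToyScanKey;

    static void init_scankey(ToyScanKey *key, Strategy user_strategy)
    {
        key->leaf = user_strategy;
        /* an exact-match request becomes a containment test one level up */
        key->internal = (user_strategy == STRAT_SAME) ? STRAT_CONTAINS
                                                      : user_strategy;
    }

    int main(void)
    {
        ToyScanKey key;
        init_scankey(&key, STRAT_SAME);
        printf("leaf=%d internal=%d\n", key.leaf, key.internal);
        return 0;
    }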
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 3653d05bc1e..6741e5436d5 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.16 2003/06/11 22:37:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.17 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@
static SlruCtlData ClogCtlData;
static SlruCtl ClogCtl = &ClogCtlData;
-
+
static int ZeroCLOGPage(int pageno, bool writeXlog);
static bool CLOGPagePrecedes(int page1, int page2);
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index 59af2808026..444d2b97d7d 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -3,7 +3,7 @@
*
* Resource managers definition
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.10 2003/02/21 00:06:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.11 2003/08/04 00:43:15 momjian Exp $
*/
#include "postgres.h"
@@ -19,7 +19,7 @@
#include "commands/sequence.h"
-RmgrData RmgrTable[RM_MAX_ID+1] = {
+RmgrData RmgrTable[RM_MAX_ID + 1] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc, NULL, NULL},
{"Transaction", xact_redo, xact_undo, xact_desc, NULL, NULL},
{"Storage", smgr_redo, smgr_undo, smgr_desc, NULL, NULL},
@@ -32,7 +32,7 @@ RmgrData RmgrTable[RM_MAX_ID+1] = {
{"Reserved 9", NULL, NULL, NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc, NULL, NULL},
{"Btree", btree_redo, btree_undo, btree_desc,
- btree_xlog_startup, btree_xlog_cleanup},
+ btree_xlog_startup, btree_xlog_cleanup},
{"Hash", hash_redo, hash_undo, hash_desc, NULL, NULL},
{"Rtree", rtree_redo, rtree_undo, rtree_desc, NULL, NULL},
{"Gist", gist_redo, gist_undo, gist_desc, NULL, NULL},
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 5129dd3c7e5..1c290f2cf57 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.3 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef enum
SLRU_PAGE_CLEAN, /* page is valid and not dirty */
SLRU_PAGE_DIRTY, /* page is valid but needs write */
SLRU_PAGE_WRITE_IN_PROGRESS /* page is being written out */
-} SlruPageStatus;
+} SlruPageStatus;
/*
* Shared-memory state
@@ -117,7 +117,7 @@ typedef struct SlruSharedData
* swapping out the latest page.
*/
int latest_page_number;
-} SlruSharedData;
+} SlruSharedData;
typedef SlruSharedData *SlruShared;
@@ -145,7 +145,7 @@ typedef enum
SLRU_SEEK_FAILED,
SLRU_READ_FAILED,
SLRU_WRITE_FAILED
-} SlruErrorCause;
+} SlruErrorCause;
static SlruErrorCause slru_errcause;
static int slru_errno;
@@ -166,9 +166,9 @@ SimpleLruShmemSize(void)
{
return MAXALIGN(sizeof(SlruSharedData)) + BLCKSZ * NUM_CLOG_BUFFERS
#ifdef EXEC_BACKEND
- + MAXALIGN(sizeof(SlruLockData))
+ + MAXALIGN(sizeof(SlruLockData))
#endif
- ;
+ ;
}
void
@@ -183,12 +183,14 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
shared = (SlruShared) ptr;
#ifdef EXEC_BACKEND
+
/*
* Locks are in shared memory
*/
- locks = (SlruLock)(ptr + MAXALIGN(sizeof(SlruSharedData)) +
- BLCKSZ * NUM_CLOG_BUFFERS);
+ locks = (SlruLock) (ptr + MAXALIGN(sizeof(SlruSharedData)) +
+ BLCKSZ * NUM_CLOG_BUFFERS);
#else
+
/*
* Locks are in private memory
*/
@@ -199,7 +201,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
if (!IsUnderPostmaster)
- /* Initialize locks and shared memory area */
+ /* Initialize locks and shared memory area */
{
char *bufptr;
int slotno;
@@ -210,8 +212,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
memset(shared, 0, sizeof(SlruSharedData));
- bufptr = (char *)shared + MAXALIGN(sizeof(SlruSharedData));
-
+ bufptr = (char *) shared + MAXALIGN(sizeof(SlruSharedData));
+
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
locks->BufferLocks[slotno] = LWLockAssign();
@@ -247,7 +249,7 @@ int
SimpleLruZeroPage(SlruCtl ctl, int pageno)
{
int slotno;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Find a suitable buffer slot for the page */
slotno = SlruSelectLRUPage(ctl, pageno);
@@ -285,7 +287,7 @@ SimpleLruZeroPage(SlruCtl ctl, int pageno)
char *
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid, bool forwrite)
{
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Outer loop handles restart if we lose the buffer to someone else */
for (;;)
@@ -383,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno)
{
int pageno;
bool ok;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Do nothing if page does not need writing */
if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
@@ -539,13 +541,13 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno)
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that. (Note:
* it might seem that it'd be okay to create files only when
- * SimpleLruZeroPage is called for the first page of a segment. However,
- * if after a crash and restart the REDO logic elects to replay the
- * log from a checkpoint before the latest one, then it's possible
- * that we will get commands to set transaction status of transactions
- * that have already been truncated from the commit log. Easiest way
- * to deal with that is to accept references to nonexistent files here
- * and in SlruPhysicalReadPage.)
+ * SimpleLruZeroPage is called for the first page of a segment.
+ * However, if after a crash and restart the REDO logic elects to
+ * replay the log from a checkpoint before the latest one, then it's
+ * possible that we will get commands to set transaction status of
+ * transactions that have already been truncated from the commit log.
+ * Easiest way to deal with that is to accept references to
+ * nonexistent files here and in SlruPhysicalReadPage.)
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
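The rewrapped comment explains why SlruPhysicalWritePage tolerates a missing segment file: after crash recovery, REDO can try to set the status of transactions whose segment was already truncated away, so the file is simply created on demand. A hedged standalone sketch of that open-or-create step (plain POSIX calls and an invented path, not the backend's BasicOpenFile) might be:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    /*
     * Open a status-segment file for writing, creating it if it does not exist.
     * Tolerating a missing file matters after crash recovery: REDO may try to
     * set the status of transactions whose segment was already truncated away.
     */
    static int open_segment(const char *path)
    {
        int fd = open(path, O_RDWR, S_IRUSR | S_IWUSR);

        if (fd < 0 && errno == ENOENT)
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
        return fd;                        /* caller reports the error if still < 0 */
    }

    int main(void)
    {
        int fd = open_segment("/tmp/slru_demo_segment");

        if (fd < 0)
            perror("open_segment");
        else
        {
            printf("opened fd %d\n", fd);
            close(fd);
        }
        return 0;
    }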
@@ -608,37 +610,37 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
case SLRU_OPEN_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("open of file \"%s\" failed: %m",
path)));
break;
case SLRU_CREATE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("creation of file \"%s\" failed: %m",
path)));
break;
case SLRU_SEEK_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("lseek of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("lseek of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("read of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("read of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("write of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("write of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
default:
/* can't get here, we trust */
@@ -665,6 +667,7 @@ static int
SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
SlruShared shared = (SlruShared) ctl->shared;
+
/* Outer loop handles restart after I/O */
for (;;)
{
@@ -689,7 +692,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
return slotno;
if (shared->page_lru_count[slotno] > bestcount &&
- shared->page_number[slotno] != shared->latest_page_number)
+ shared->page_number[slotno] != shared->latest_page_number)
{
bestslot = slotno;
bestcount = shared->page_lru_count[slotno];
@@ -705,12 +708,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it
* out, but it's possible in the worst case to have selected a
- * read-busy page. In that case we use SimpleLruReadPage to wait for
- * the read to complete.
+ * read-busy page. In that case we use SimpleLruReadPage to wait
+ * for the read to complete.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[bestslot],
- InvalidTransactionId, false);
+ InvalidTransactionId, false);
else
SimpleLruWritePage(ctl, bestslot);
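The hunks above touch SlruSelectLRUPage, whose job is to pick a victim buffer slot: reuse a slot already holding the page, otherwise an empty slot, otherwise the least recently used slot that is not holding the latest page. A compact toy model of that selection, with invented arrays in place of the shared-memory state, could be:

    #include <stdio.h>

    #define NSLOTS 8

    typedef enum { SLOT_EMPTY, SLOT_CLEAN, SLOT_DIRTY } SlotStatus;

    typedef struct {
        SlotStatus status[NSLOTS];
        int  page_number[NSLOTS];
        int  lru_count[NSLOTS];       /* bigger = less recently used */
        int  latest_page_number;      /* never evict this one */
    } ToySlru;

    /*
     * Pick a buffer slot for 'pageno': reuse a slot already holding it, else an
     * empty slot, else the least recently used slot that is not holding the
     * latest page (evicting the head of the log would defeat the cache).
     */
    static int select_lru_slot(const ToySlru *s, int pageno)
    {
        int bestslot = -1, bestcount = -1;

        for (int slot = 0; slot < NSLOTS; slot++)
        {
            if (s->status[slot] != SLOT_EMPTY && s->page_number[slot] == pageno)
                return slot;
            if (s->status[slot] == SLOT_EMPTY)
                return slot;
            if (s->lru_count[slot] > bestcount &&
                s->page_number[slot] != s->latest_page_number)
            {
                bestslot = slot;
                bestcount = s->lru_count[slot];
            }
        }
        return bestslot;              /* real code writes or waits on it first */
    }

    int main(void)
    {
        ToySlru s = {{0}};
        for (int i = 0; i < NSLOTS; i++)
        {
            s.status[i] = SLOT_CLEAN;
            s.page_number[i] = i;
            s.lru_count[i] = i;
        }
        s.latest_page_number = 7;     /* slot 7 is oldest but protected */
        printf("evict slot %d\n", select_lru_slot(&s, 42));
        return 0;
    }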
@@ -747,10 +750,11 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint)
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
SimpleLruWritePage(ctl, slotno);
+
/*
- * When called during a checkpoint,
- * we cannot assert that the slot is clean now, since another
- * process might have re-dirtied it already. That's okay.
+ * When called during a checkpoint, we cannot assert that the slot
+ * is clean now, since another process might have re-dirtied it
+ * already. That's okay.
*/
Assert(checkpoint ||
shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
@@ -792,10 +796,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
CreateCheckPoint(false, true);
/*
- * Scan shared memory and remove any pages preceding the cutoff
- * page, to ensure we won't rewrite them later. (Any dirty pages
- * should have been flushed already during the checkpoint, we're just
- * being extra careful here.)
+ * Scan shared memory and remove any pages preceding the cutoff page,
+ * to ensure we won't rewrite them later. (Any dirty pages should
+ * have been flushed already during the checkpoint, we're just being
+ * extra careful here.)
*/
LWLockAcquire(ctl->locks->ControlLock, LW_EXCLUSIVE);
@@ -870,7 +874,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (cldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not open directory \"%s\": %m", ctl->Dir)));
errno = 0;
while ((clde = readdir(cldir)) != NULL)
@@ -898,7 +902,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (errno)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not read directory \"%s\": %m", ctl->Dir)));
closedir(cldir);
return found;
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 40b41519a93..550f2ae924b 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.150 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -92,7 +92,7 @@
* AbortTransactionBlock
*
* These are invoked only in response to a user "BEGIN WORK", "COMMIT",
- * or "ROLLBACK" command. The tricky part about these functions
+ * or "ROLLBACK" command. The tricky part about these functions
* is that they are called within the postgres main loop, in between
* the StartTransactionCommand() and CommitTransactionCommand().
*
@@ -197,8 +197,8 @@ static TransactionStateData CurrentTransactionStateData = {
0, /* scan command id */
0x0, /* start time */
TRANS_DEFAULT, /* transaction state */
- TBLOCK_DEFAULT /* transaction block state from
- the client perspective */
+ TBLOCK_DEFAULT /* transaction block state from the client
+ * perspective */
};
TransactionState CurrentTransactionState = &CurrentTransactionStateData;
@@ -359,7 +359,7 @@ GetCurrentTransactionStartTimeUsec(int *msec)
* TransactionIdIsCurrentTransactionId
*
* During bootstrap, we cheat and say "it's not my transaction ID" even though
- * it is. Along with transam.c's cheat to say that the bootstrap XID is
+ * it is. Along with transam.c's cheat to say that the bootstrap XID is
* already committed, this causes the tqual.c routines to see previously
* inserted tuples as committed, which is what we need during bootstrap.
*/
@@ -561,13 +561,13 @@ RecordTransactionCommit(void)
/*
* We must mark the transaction committed in clog if its XID
- * appears either in permanent rels or in local temporary rels.
- * We test this by seeing if we made transaction-controlled
- * entries *OR* local-rel tuple updates. Note that if we made
- * only the latter, we have not emitted an XLOG record for our
- * commit, and so in the event of a crash the clog update might be
- * lost. This is okay because no one else will ever care whether
- * we committed.
+ * appears either in permanent rels or in local temporary rels. We
+ * test this by seeing if we made transaction-controlled entries
+ * *OR* local-rel tuple updates. Note that if we made only the
+ * latter, we have not emitted an XLOG record for our commit, and
+ * so in the event of a crash the clog update might be lost. This
+ * is okay because no one else will ever care whether we
+ * committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@@ -755,9 +755,9 @@ AtAbort_Memory(void)
{
/*
* Make sure we are in a valid context (not a child of
- * TopTransactionContext...). Note that it is possible for this
- * code to be called when we aren't in a transaction at all; go
- * directly to TopMemoryContext in that case.
+ * TopTransactionContext...). Note that it is possible for this code
+ * to be called when we aren't in a transaction at all; go directly to
+ * TopMemoryContext in that case.
*/
if (TopTransactionContext != NULL)
{
@@ -891,8 +891,8 @@ CommitTransaction(void)
DeferredTriggerEndXact();
/*
- * Similarly, let ON COMMIT management do its thing before we start
- * to commit.
+ * Similarly, let ON COMMIT management do its thing before we start to
+ * commit.
*/
PreCommit_on_commit_actions();
@@ -953,10 +953,10 @@ CommitTransaction(void)
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is:
- * release resources visible to other backends (eg, files, buffer pins);
- * then release locks; then release backend-local resources. We want
- * to release locks at the point where any backend waiting for us will
- * see our transaction as being fully cleaned up.
+ * release resources visible to other backends (eg, files, buffer
+ * pins); then release locks; then release backend-local resources.
+ * We want to release locks at the point where any backend waiting for
+ * us will see our transaction as being fully cleaned up.
*/
smgrDoPendingDeletes(true);
@@ -1064,7 +1064,7 @@ AbortTransaction(void)
}
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering.
*/
@@ -1194,8 +1194,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TopTransactionContext before returning. This
- * is already done if we called StartTransaction, otherwise not.
+ * We must switch to TopTransactionContext before returning. This is
+ * already done if we called StartTransaction, otherwise not.
*/
Assert(TopTransactionContext != NULL);
MemoryContextSwitchTo(TopTransactionContext);
@@ -1370,9 +1370,10 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s cannot run inside a transaction block",
stmtType)));
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1381,8 +1382,8 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (!MemoryContextContains(QueryContext, stmtNode))
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
- errmsg("%s cannot be executed from a function", stmtType)));
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
elog(ERROR, "cannot prevent transaction chain");
@@ -1414,6 +1415,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
*/
if (IsTransactionBlock())
return;
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1423,7 +1425,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
return;
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s may only be used in BEGIN/END transaction blocks",
stmtType)));
}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 0ceb8951cbe..45a2743ba97 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.120 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.121 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1046,8 +1046,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@@ -1162,8 +1162,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
@@ -1266,7 +1266,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace < SizeOfXLogRecord) /* buffer is full */
+ if (freespace < SizeOfXLogRecord) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@@ -1449,8 +1449,8 @@ XLogFileInit(uint32 log, uint32 seg,
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
@@ -1563,14 +1563,14 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
}
return (fd);
@@ -1621,8 +1621,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (xldir == NULL)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLogDir)));
sprintf(lastoff, "%08X%08X", log, seg);
@@ -1654,15 +1654,15 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
true))
{
ereport(LOG,
- (errmsg("recycled transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("recycled transaction log file \"%s\"",
+ xlde->d_name)));
}
else
{
/* No need for any more future segments... */
ereport(LOG,
- (errmsg("removing transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
}
}
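The two messages reindented above come from MoveOfflineLogs, which either recycles an old WAL segment by renaming it into a future slot or removes it once enough future segments exist. A loose sketch of just that decision, with made-up bookkeeping standing in for InstallXLogFileSegment, might be:

    #include <stdio.h>

    /*
     * For each no-longer-needed WAL segment decide whether to recycle it
     * (rename it into a future segment slot so it need not be re-created and
     * re-zeroed) or to delete it outright once enough future segments exist.
     */
    static const char *handle_old_segment(int future_slots_wanted,
                                          int *future_slots_present)
    {
        if (*future_slots_present < future_slots_wanted)
        {
            (*future_slots_present)++;
            return "recycled";            /* real code: rename() into place */
        }
        return "removed";                 /* real code: unlink() */
    }

    int main(void)
    {
        int present = 1;
        for (int seg = 0; seg < 4; seg++)
            printf("segment %d: %s\n", seg,
                   handle_old_segment(3, &present));
        return 0;
    }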
@@ -1672,8 +1672,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (errno)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not read transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not read transaction log directory \"%s\": %m",
+ XLogDir)));
closedir(xldir);
}
@@ -1746,8 +1746,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(record->xl_crc, crc))
{
ereport(emode,
- (errmsg("bad resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return (false);
}
@@ -1769,8 +1769,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(cbuf, crc))
{
ereport(emode,
- (errmsg("bad checksum of backup block %d in record at %X/%X",
- i + 1, recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad checksum of backup block %d in record at %X/%X",
+ i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
@@ -1931,7 +1931,7 @@ got_record:;
{
ereport(emode,
(errmsg("invalid resource manager id %u at %X/%X",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
nextRecord = NULL;
@@ -2063,7 +2063,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
{
ereport(emode,
(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
- hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+ hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
readId, readSeg, readOff)));
return false;
}
@@ -2084,7 +2084,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_sui > lastReadSUI + 512)
{
ereport(emode,
- /* translator: SUI = startup id */
+ /* translator: SUI = startup id */
(errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
hdr->xlp_sui, lastReadSUI,
readId, readSeg, readOff)));
@@ -2235,8 +2235,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
- " but the server was compiled with PG_CONTROL_VERSION %d.",
- ControlFile->pg_control_version, PG_CONTROL_VERSION),
+ " but the server was compiled with PG_CONTROL_VERSION %d.",
+ ControlFile->pg_control_version, PG_CONTROL_VERSION),
errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC64(crc);
@@ -2265,75 +2265,75 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
- " but the server was compiled with CATALOG_VERSION_NO %d.",
- ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+ " but the server was compiled with CATALOG_VERSION_NO %d.",
+ ControlFile->catalog_version_no, CATALOG_VERSION_NO),
errhint("It looks like you need to initdb.")));
if (ControlFile->blcksz != BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with BLCKSZ %d,"
- " but the server was compiled with BLCKSZ %d.",
- ControlFile->blcksz, BLCKSZ),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with BLCKSZ %d,"
+ " but the server was compiled with BLCKSZ %d.",
+ ControlFile->blcksz, BLCKSZ),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
- " but the server was compiled with RELSEG_SIZE %d.",
+ " but the server was compiled with RELSEG_SIZE %d.",
ControlFile->relseg_size, RELSEG_SIZE),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with NAMEDATALEN %d,"
- " but the server was compiled with NAMEDATALEN %d.",
+ " but the server was compiled with NAMEDATALEN %d.",
ControlFile->nameDataLen, NAMEDATALEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
- " but the server was compiled with FUNC_MAX_ARGS %d.",
+ " but the server was compiled with FUNC_MAX_ARGS %d.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
- " but the server was compiled with HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
- " but the server was compiled without HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
- " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+ " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_collate),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_collate),
+ errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_ctype),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_ctype),
+ errhint("It looks like you need to initdb or install locale support.")));
/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
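The long run of reindented ereport calls above all follow one pattern in ReadControlFile: compare a value the cluster was initialized with against the value this binary was compiled with, and refuse to start on a mismatch. A small self-contained sketch of that pattern, using two invented constants rather than the real pg_control fields, could be:

    #include <stdio.h>

    /* Compile-time values of this server build (toy stand-ins). */
    #define MY_BLCKSZ       8192
    #define MY_NAMEDATALEN  64

    /* What the on-disk control data claims the cluster was initialized with. */
    typedef struct {
        int blcksz;
        int nameDataLen;
    } ToyControlFile;

    /*
     * Refuse to start if the cluster was initialized with different
     * compile-time constants than this binary was built with; each mismatch
     * gets its own message so the user knows whether to recompile or re-initdb.
     */
    static int check_compat(const ToyControlFile *cf)
    {
        int ok = 1;

        if (cf->blcksz != MY_BLCKSZ)
        {
            fprintf(stderr, "cluster initialized with BLCKSZ %d, server built with %d\n",
                    cf->blcksz, MY_BLCKSZ);
            ok = 0;
        }
        if (cf->nameDataLen != MY_NAMEDATALEN)
        {
            fprintf(stderr, "cluster initialized with NAMEDATALEN %d, server built with %d\n",
                    cf->nameDataLen, MY_NAMEDATALEN);
            ok = 0;
        }
        return ok;
    }

    int main(void)
    {
        ToyControlFile cf = {8192, 32};
        printf("compatible: %s\n", check_compat(&cf) ? "yes" : "no");
        return 0;
    }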
@@ -2602,10 +2602,10 @@ StartupXLOG(void)
str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at %s",
- str_time(ControlFile->time)),
- errhint("This probably means that some data is corrupted and"
- " you will have to use the last backup for recovery.")));
+ (errmsg("database system was interrupted while in recovery at %s",
+ str_time(ControlFile->time)),
+ errhint("This probably means that some data is corrupted and"
+ " you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
@@ -2637,12 +2637,12 @@ StartupXLOG(void)
checkPointLoc = ControlFile->prevCheckPoint;
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record")));
+ (errmsg("could not locate a valid checkpoint record")));
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2665,11 +2665,12 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
/*
- * If it was a shutdown checkpoint, then any following WAL entries were
- * created under the next StartUpID; if it was a regular checkpoint then
- * any following WAL entries were created under the same StartUpID.
- * We must replay WAL entries using the same StartUpID they were created
- * under, so temporarily adopt that SUI (see also xlog_redo()).
+ * If it was a shutdown checkpoint, then any following WAL entries
+ * were created under the next StartUpID; if it was a regular
+ * checkpoint then any following WAL entries were created under the
+ * same StartUpID. We must replay WAL entries using the same StartUpID
+ * they were created under, so temporarily adopt that SUI (see also
+ * xlog_redo()).
*/
if (wasShutdown)
ThisStartUpID = checkPoint.ThisStartUpID + 1;
@@ -2690,7 +2691,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo/undo record in shutdown checkpoint")));
+ (errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -2699,7 +2700,7 @@ StartupXLOG(void)
/* REDO */
if (InRecovery)
{
- int rmid;
+ int rmid;
ereport(LOG,
(errmsg("database system was not properly shut down; "
@@ -2791,8 +2792,8 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the
- * LastRec record spans, not the one it starts in. The last block
- * is indeed the one we want to use.
+ * LastRec record spans, not the one it starts in. The last block is
+ * indeed the one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2818,11 +2819,12 @@ StartupXLOG(void)
else
{
/*
- * Whenever Write.LogwrtResult points to exactly the end of a page,
- * Write.curridx must point to the *next* page (see XLogWrite()).
+ * Whenever Write.LogwrtResult points to exactly the end of a
+ * page, Write.curridx must point to the *next* page (see
+ * XLogWrite()).
*
- * Note: it might seem we should do AdvanceXLInsertBuffer() here,
- * but we can't since we haven't yet determined the correct StartUpID
+ * Note: it might seem we should do AdvanceXLInsertBuffer() here, but
+ * we can't since we haven't yet determined the correct StartUpID
* to put into the new page's header. The first actual attempt to
* insert a log record will advance the insert state.
*/
@@ -2859,7 +2861,7 @@ StartupXLOG(void)
if (InRecovery)
{
- int rmid;
+ int rmid;
/*
* Allow resource managers to do any required cleanup.
@@ -2885,14 +2887,15 @@ StartupXLOG(void)
ThisStartUpID = ControlFile->checkPointCopy.ThisStartUpID;
/*
- * Perform a new checkpoint to update our recovery activity to disk.
+ * Perform a new checkpoint to update our recovery activity to
+ * disk.
*
* Note that we write a shutdown checkpoint. This is correct since
- * the records following it will use SUI one more than what is shown
- * in the checkpoint's ThisStartUpID.
+ * the records following it will use SUI one more than what is
+ * shown in the checkpoint's ThisStartUpID.
*
- * In case we had to use the secondary checkpoint, make sure that
- * it will still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -2907,10 +2910,10 @@ StartupXLOG(void)
else
{
/*
- * If we are not doing recovery, then we saw a checkpoint with nothing
- * after it, and we can safely use StartUpID equal to one more than
- * the checkpoint's SUI. But just for paranoia's sake, check against
- * pg_control too.
+ * If we are not doing recovery, then we saw a checkpoint with
+ * nothing after it, and we can safely use StartUpID equal to one
+ * more than the checkpoint's SUI. But just for paranoia's sake,
+ * check against pg_control too.
*/
ThisStartUpID = checkPoint.ThisStartUpID;
if (ThisStartUpID < ControlFile->checkPointCopy.ThisStartUpID)
@@ -2923,7 +2926,8 @@ StartupXLOG(void)
PreallocXlogFiles(EndOfLog);
/*
- * Advance StartUpID to one more than the highest value used previously.
+ * Advance StartUpID to one more than the highest value used
+ * previously.
*/
ThisStartUpID++;
XLogCtl->ThisStartUpID = ThisStartUpID;
@@ -2973,9 +2977,9 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (!XRecOffIsValid(RecPtr.xrecoff))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint link in control file",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
@@ -2984,34 +2988,34 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (record == NULL)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_rmid != RM_XLOG_ID)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
- (errmsg("invalid resource manager id in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid resource manager id in %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
record->xl_info != XLOG_CHECKPOINT_ONLINE)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid xl_info in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_len != sizeof(CheckPoint))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid length of %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
return record;
@@ -3112,10 +3116,11 @@ CreateCheckPoint(bool shutdown, bool force)
if (MyXactMadeXLogEntry)
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("checkpoint cannot be made inside transaction block")));
+ errmsg("checkpoint cannot be made inside transaction block")));
/*
- * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
+ * Acquire CheckpointLock to ensure only one checkpoint happens at a
+ * time.
*
* The CheckpointLock can be held for quite a while, which is not good
* because we won't respond to a cancel/die request while waiting for
@@ -3149,14 +3154,15 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
/*
- * If this isn't a shutdown or forced checkpoint, and we have not inserted
- * any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
- * when the system is idle. That wastes log space, and more importantly it
- * exposes us to possible loss of both current and previous checkpoint
- * records if the machine crashes just as we're writing the update.
- * (Perhaps it'd make even more sense to checkpoint only when the previous
- * checkpoint record is in a different xlog page?)
+ * If this isn't a shutdown or forced checkpoint, and we have not
+ * inserted any XLOG records since the start of the last checkpoint,
+ * skip the checkpoint. The idea here is to avoid inserting duplicate
+ * checkpoints when the system is idle. That wastes log space, and
+ * more importantly it exposes us to possible loss of both current and
+ * previous checkpoint records if the machine crashes just as we're
+ * writing the update. (Perhaps it'd make even more sense to
+ * checkpoint only when the previous checkpoint record is in a
+ * different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must
@@ -3204,12 +3210,13 @@ CreateCheckPoint(bool shutdown, bool force)
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock AND the info_lck.
*
- * Note: if we fail to complete the checkpoint, RedoRecPtr will be
- * left pointing past where it really needs to point. This is okay;
- * the only consequence is that XLogInsert might back up whole buffers
- * that it didn't really need to. We can't postpone advancing RedoRecPtr
- * because XLogInserts that happen while we are dumping buffers must
- * assume that their buffer changes are not included in the checkpoint.
+ * Note: if we fail to complete the checkpoint, RedoRecPtr will be left
+ * pointing past where it really needs to point. This is okay; the
+ * only consequence is that XLogInsert might back up whole buffers
+ * that it didn't really need to. We can't postpone advancing
+ * RedoRecPtr because XLogInserts that happen while we are dumping
+ * buffers must assume that their buffer changes are not included in
+ * the checkpoint.
*/
{
/* use volatile pointer to prevent code rearrangement */
@@ -3538,15 +3545,15 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
}
@@ -3570,16 +3577,16 @@ issue_xlog_fsync(void)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fdatasync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fdatasync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN:
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index b02fa775ded..328f2ab9b38 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.163 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.164 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -238,7 +238,7 @@ BootstrapMain(int argc, char *argv[])
*
* If we are running under the postmaster, this is done already.
*/
- if (!IsUnderPostmaster /* when exec || ExecBackend */)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
MemoryContextInit();
/*
@@ -247,7 +247,7 @@ BootstrapMain(int argc, char *argv[])
/* Set defaults, to be overriden by explicit options below */
dbname = NULL;
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
InitializeGUCOptions();
potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA
@@ -285,22 +285,22 @@ BootstrapMain(int argc, char *argv[])
xlogop = atoi(optarg);
break;
case 'p':
- {
- /* indicates fork from postmaster */
+ {
+ /* indicates fork from postmaster */
#ifdef EXEC_BACKEND
- char *p;
-
- sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
- p = strchr(optarg, ',');
- if (p)
- p = strchr(p+1, ',');
- if (p)
- dbname = strdup(p+1);
+ char *p;
+
+ sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
+ p = strchr(optarg, ',');
+ if (p)
+ p = strchr(p + 1, ',');
+ if (p)
+ dbname = strdup(p + 1);
#else
- dbname = strdup(optarg);
+ dbname = strdup(optarg);
#endif
- break;
- }
+ break;
+ }
case 'B':
SetConfigOption("shared_buffers", optarg, PGC_POSTMASTER, PGC_S_ARGV);
break;
@@ -346,12 +346,10 @@ BootstrapMain(int argc, char *argv[])
usage();
- if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */)
- {
+ if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */ )
AttachSharedMemoryAndSemaphores();
- }
-
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
if (!potential_DataDir)
{
@@ -473,8 +471,8 @@ BootstrapMain(int argc, char *argv[])
/*
* In NOP mode, all we really want to do is create shared memory and
- * semaphores (just to prove we can do it with the current GUC settings).
- * So, quit now.
+ * semaphores (just to prove we can do it with the current GUC
+ * settings). So, quit now.
*/
if (xlogop == BS_XLOG_NOP)
proc_exit(0);
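
The re-indented "-p" case above splits an option of the form "segid,segaddr,dbname" under EXEC_BACKEND: sscanf picks off the numeric shared-memory fields, and the database name is whatever follows the second comma. A standalone sketch of that split follows; the variable names and the example argument string are hypothetical, and note that the input format accepted by the %p conversion is implementation-defined.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	/* Example of the "-p" argument shape handled in the hunk above. */
	const char *optarg_example = "42,0xdeadbeef,template1";

	int			seg_id = 0;
	void	   *seg_addr = NULL;
	char	   *dbname = NULL;
	const char *p;

	/* Leading numeric fields, as in the backend's sscanf call. */
	sscanf(optarg_example, "%d,%p,", &seg_id, &seg_addr);

	/* The database name is everything after the second comma. */
	p = strchr(optarg_example, ',');
	if (p)
		p = strchr(p + 1, ',');
	if (p)
		dbname = strdup(p + 1);

	printf("seg id: %d, dbname: %s\n", seg_id, dbname ? dbname : "(none)");
	free(dbname);
	return 0;
}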
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 77b1d3b2d7f..7ace67de6b2 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.85 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.86 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* See acl.h.
@@ -97,37 +97,40 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
if (grantee->username)
{
- aclitem.ai_grantee = get_usesysid(grantee->username);
+ aclitem. ai_grantee = get_usesysid(grantee->username);
+
idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
- aclitem.ai_grantee = get_grosysid(grantee->groupname);
+ aclitem. ai_grantee = get_grosysid(grantee->groupname);
+
idtype = ACL_IDTYPE_GID;
}
else
{
- aclitem.ai_grantee = ACL_ID_WORLD;
+ aclitem. ai_grantee = ACL_ID_WORLD;
+
idtype = ACL_IDTYPE_WORLD;
}
/*
* Grant options can only be granted to individual users, not
- * groups or public. The reason is that if a user would
- * re-grant a privilege that he held through a group having a
- * grant option, and later the user is removed from the group,
- * the situation is impossible to clean up.
+ * groups or public. The reason is that if a user would re-grant
+ * a privilege that he held through a group having a grant option,
+ * and later the user is removed from the group, the situation is
+ * impossible to clean up.
*/
if (is_grant && idtype != ACL_IDTYPE_UID && grant_option)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to individual users")));
- aclitem.ai_grantor = GetUserId();
+ aclitem. ai_grantor = GetUserId();
ACLITEM_SET_PRIVS_IDTYPE(aclitem,
- (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
- (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
+ (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
+ (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
idtype);
new_acl = aclinsert3(new_acl, &aclitem, modechg, behavior);
@@ -247,7 +250,7 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -346,7 +349,7 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -443,7 +446,7 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -543,7 +546,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -619,7 +622,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
pg_namespace_tuple = (Form_pg_namespace) GETSTRUCT(tuple);
if (stmt->is_grant
- && !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
+ && !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
&& pg_namespace_aclcheck(HeapTupleGetOid(tuple), GetUserId(), ACL_GRANT_OPTION_FOR(privileges)) != ACLCHECK_OK)
aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_NAMESPACE,
nspname);
@@ -640,7 +643,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -805,7 +808,7 @@ in_group(AclId uid, AclId gid)
static AclResult
aclcheck(Acl *acl, AclId userid, AclMode mode)
{
- AclItem *aidat;
+ AclItem *aidat;
int i,
num;
@@ -833,10 +836,10 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
if (aidat[i].ai_privs & mode)
return ACLCHECK_OK;
}
-
+
/*
- * See if he has the permission via any group (do this in a
- * separate pass to avoid expensive(?) lookups in pg_group)
+ * See if he has the permission via any group (do this in a separate
+ * pass to avoid expensive(?) lookups in pg_group)
*/
for (i = 0; i < num; i++)
if (ACLITEM_GET_IDTYPE(aidat[i]) == ACL_IDTYPE_GID
@@ -856,7 +859,7 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
* supply strings that might be already quoted.
*/
-static const char * const no_priv_msg[MAX_ACL_KIND] =
+static const char *const no_priv_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("permission denied for relation %s"),
@@ -878,7 +881,7 @@ static const char * const no_priv_msg[MAX_ACL_KIND] =
gettext_noop("permission denied for conversion %s")
};
-static const char * const not_owner_msg[MAX_ACL_KIND] =
+static const char *const not_owner_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("must be owner of relation %s"),
@@ -972,7 +975,7 @@ pg_class_aclcheck(Oid table_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", table_oid)));
+ errmsg("relation with OID %u does not exist", table_oid)));
/*
* Deny anyone permission to update a system catalog unless
@@ -1124,7 +1127,7 @@ pg_proc_aclcheck(Oid proc_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
aclDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proacl,
&isNull);
@@ -1179,7 +1182,7 @@ pg_language_aclcheck(Oid lang_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("language with OID %u does not exist", lang_oid)));
+ errmsg("language with OID %u does not exist", lang_oid)));
aclDatum = SysCacheGetAttr(LANGOID, tuple, Anum_pg_language_lanacl,
&isNull);
@@ -1288,7 +1291,7 @@ pg_class_ownercheck(Oid class_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", class_oid)));
+ errmsg("relation with OID %u does not exist", class_oid)));
owner_id = ((Form_pg_class) GETSTRUCT(tuple))->relowner;
@@ -1344,7 +1347,7 @@ pg_oper_ownercheck(Oid oper_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator with OID %u does not exist", oper_oid)));
+ errmsg("operator with OID %u does not exist", oper_oid)));
owner_id = ((Form_pg_operator) GETSTRUCT(tuple))->oprowner;
@@ -1372,7 +1375,7 @@ pg_proc_ownercheck(Oid proc_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
owner_id = ((Form_pg_proc) GETSTRUCT(tuple))->proowner;
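
The aclcheck() comment re-wrapped above explains the lookup order: first any entry granted to the world or directly to the user, then, in a separate pass that defers the comparatively expensive pg_group lookups, any entry granted to a group the user belongs to. A simplified standalone model of that two-pass scan, with hypothetical types standing in for AclItem and a fake in_group() test:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for AclItem and its idtype values. */
enum id_type
{
	ID_USER, ID_GROUP, ID_WORLD
};

struct acl_item
{
	enum id_type idtype;
	int			id;			/* user or group id; ignored for ID_WORLD */
	unsigned	privs;		/* bitmask of granted privileges */
};

/* Pretend membership test (the real code consults pg_group). */
static bool
in_group(int uid, int gid)
{
	return uid == 10 && gid == 100;
}

static bool
acl_allows(const struct acl_item *acl, int n, int uid, unsigned mode)
{
	int			i;

	/* Pass 1: world entries and entries granted directly to the user. */
	for (i = 0; i < n; i++)
		if ((acl[i].idtype == ID_WORLD ||
			 (acl[i].idtype == ID_USER && acl[i].id == uid)) &&
			(acl[i].privs & mode))
			return true;

	/* Pass 2: group entries, checked separately to defer membership lookups. */
	for (i = 0; i < n; i++)
		if (acl[i].idtype == ID_GROUP &&
			in_group(uid, acl[i].id) &&
			(acl[i].privs & mode))
			return true;

	return false;
}

int
main(void)
{
	struct acl_item acl[] = {
		{ID_USER, 7, 0x1},		/* one privilege bit for user 7 */
		{ID_GROUP, 100, 0x2},	/* another bit for group 100 */
	};

	printf("user 10 via group: %s\n",
		   acl_allows(acl, 2, 10, 0x2) ? "allowed" : "denied");
	printf("user 7 direct:     %s\n",
		   acl_allows(acl, 2, 7, 0x1) ? "allowed" : "denied");
	return 0;
}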
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 2cdf4bc229c..251fb82d813 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.28 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.29 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,8 +93,8 @@ static Oid object_classes[MAX_OCLASS];
static void findAutoDeletableObjects(const ObjectAddress *object,
- ObjectAddresses *oktodelete,
- Relation depRel);
+ ObjectAddresses *oktodelete,
+ Relation depRel);
static bool recursiveDeletion(const ObjectAddress *object,
DropBehavior behavior,
int msglevel,
@@ -102,11 +102,11 @@ static bool recursiveDeletion(const ObjectAddress *object,
ObjectAddresses *oktodelete,
Relation depRel);
static bool deleteDependentObjects(const ObjectAddress *object,
- const char *objDescription,
- DropBehavior behavior,
- int msglevel,
- ObjectAddresses *oktodelete,
- Relation depRel);
+ const char *objDescription,
+ DropBehavior behavior,
+ int msglevel,
+ ObjectAddresses *oktodelete,
+ Relation depRel);
static void doDeletion(const ObjectAddress *object);
static bool find_expr_references_walker(Node *node,
find_expr_references_context *context);
@@ -118,7 +118,7 @@ static void add_object_address(ObjectClasses oclass, Oid objectId, int32 subId,
static void add_exact_object_address(const ObjectAddress *object,
ObjectAddresses *addrs);
static bool object_address_present(const ObjectAddress *object,
- ObjectAddresses *addrs);
+ ObjectAddresses *addrs);
static void term_object_addresses(ObjectAddresses *addrs);
static void init_object_classes(void);
static ObjectClasses getObjectClass(const ObjectAddress *object);
@@ -158,9 +158,9 @@ performDeletion(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted silently,
- * even if the actual deletion pass first reaches one of them via a
- * non-auto dependency.
+ * dependencies from the target object. These should be deleted
+ * silently, even if the actual deletion pass first reaches one of
+ * them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -170,8 +170,8 @@ performDeletion(const ObjectAddress *object,
NULL, &oktodelete, depRel))
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because other objects depend on it",
- objDescription),
+ errmsg("cannot drop %s because other objects depend on it",
+ objDescription),
errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
term_object_addresses(&oktodelete);
@@ -184,7 +184,7 @@ performDeletion(const ObjectAddress *object,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -212,9 +212,9 @@ deleteWhatDependsOn(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted silently,
- * even if the actual deletion pass first reaches one of them via a
- * non-auto dependency.
+ * dependencies from the target object. These should be deleted
+ * silently, even if the actual deletion pass first reaches one of
+ * them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -266,9 +266,9 @@ findAutoDeletableObjects(const ObjectAddress *object,
ObjectAddress otherObject;
/*
- * If this object is already in oktodelete, then we already visited it;
- * don't do so again (this prevents infinite recursion if there's a loop
- * in pg_depend). Otherwise, add it.
+ * If this object is already in oktodelete, then we already visited
+ * it; don't do so again (this prevents infinite recursion if there's
+ * a loop in pg_depend). Otherwise, add it.
*/
if (object_address_present(object, oktodelete))
return;
@@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
/*
* Scan pg_depend records that link to this object, showing the things
- * that depend on it. For each one that is AUTO or INTERNAL, visit the
- * referencing object.
+ * that depend on it. For each one that is AUTO or INTERNAL, visit
+ * the referencing object.
*
* When dropping a whole object (subId = 0), find pg_depend records for
* its sub-objects too.
@@ -319,6 +319,7 @@ findAutoDeletableObjects(const ObjectAddress *object,
findAutoDeletableObjects(&otherObject, oktodelete, depRel);
break;
case DEPENDENCY_PIN:
+
/*
* For a PIN dependency we just ereport immediately; there
* won't be any others to examine, and we aren't ever
@@ -461,11 +462,11 @@ recursiveDeletion(const ObjectAddress *object,
char *otherObjDesc = getObjectDescription(&otherObject);
ereport(ERROR,
- (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because %s requires it",
- objDescription, otherObjDesc),
- errhint("You may drop %s instead.",
- otherObjDesc)));
+ (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
+ errmsg("cannot drop %s because %s requires it",
+ objDescription, otherObjDesc),
+ errhint("You may drop %s instead.",
+ otherObjDesc)));
}
/*
@@ -559,10 +560,9 @@ recursiveDeletion(const ObjectAddress *object,
/*
* Step 2: scan pg_depend records that link to this object, showing
* the things that depend on it. Recursively delete those things.
- * Note it's important to delete the dependent objects
- * before the referenced one, since the deletion routines might do
- * things like try to update the pg_class record when deleting a check
- * constraint.
+ * Note it's important to delete the dependent objects before the
+ * referenced one, since the deletion routines might do things like
+ * try to update the pg_class record when deleting a check constraint.
*/
if (!deleteDependentObjects(object, objDescription,
behavior, msglevel,
@@ -674,11 +674,12 @@ deleteDependentObjects(const ObjectAddress *object,
switch (foundDep->deptype)
{
case DEPENDENCY_NORMAL:
+
/*
* Perhaps there was another dependency path that would
- * have allowed silent deletion of the otherObject, had
- * we only taken that path first.
- * In that case, act like this link is AUTO, too.
+ * have allowed silent deletion of the otherObject, had we
+ * only taken that path first. In that case, act like this
+ * link is AUTO, too.
*/
if (object_address_present(&otherObject, oktodelete))
ereport(DEBUG2,
@@ -872,7 +873,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
@@ -1001,7 +1002,7 @@ find_expr_references_walker(Node *node,
else if (rte->rtekind == RTE_JOIN)
{
/* Scan join output column to add references to join inputs */
- List *save_rtables;
+ List *save_rtables;
/* We must make the context appropriate for join's level */
save_rtables = context->rtables;
@@ -1026,7 +1027,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, OpExpr))
{
- OpExpr *opexpr = (OpExpr *) node;
+ OpExpr *opexpr = (OpExpr *) node;
add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@@ -1034,7 +1035,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, DistinctExpr))
{
- DistinctExpr *distinctexpr = (DistinctExpr *) node;
+ DistinctExpr *distinctexpr = (DistinctExpr *) node;
add_object_address(OCLASS_OPERATOR, distinctexpr->opno, 0,
&context->addrs);
@@ -1042,7 +1043,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@@ -1066,7 +1067,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
List *opid;
foreach(opid, sublink->operOids)
@@ -1092,7 +1093,8 @@ find_expr_references_walker(Node *node,
* Add whole-relation refs for each plain relation mentioned in
* the subquery's rtable. (Note: query_tree_walker takes care of
* recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
- * to do that here. But keep it from looking at join alias lists.)
+ * to do that here. But keep it from looking at join alias
+ * lists.)
*/
foreach(rtable, query->rtable)
{
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 15dbc50a13d..c8a411646fa 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.249 2003/07/29 17:21:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.250 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -418,8 +418,8 @@ CheckAttributeType(const char *attname, Oid atttypid)
* Warn user, but don't fail, if column to be created has UNKNOWN type
* (usually as a result of a 'retrieve into' - jolly)
*
- * Refuse any attempt to create a pseudo-type column or one that uses
- * a standalone composite type. (Eventually we should probably refuse
+ * Refuse any attempt to create a pseudo-type column or one that uses a
+ * standalone composite type. (Eventually we should probably refuse
* all references to complex types, but for now there's still some
* Berkeley-derived code that thinks it can do this...)
*/
@@ -439,7 +439,7 @@ CheckAttributeType(const char *attname, Oid atttypid)
}
else if (att_typtype == 'c')
{
- Oid typrelid = get_typ_typrelid(atttypid);
+ Oid typrelid = get_typ_typrelid(atttypid);
if (get_rel_relkind(typrelid) == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
@@ -975,12 +975,13 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
attStruct->attisdropped = true;
/*
- * Set the type OID to invalid. A dropped attribute's type link cannot
- * be relied on (once the attribute is dropped, the type might be too).
- * Fortunately we do not need the type row --- the only really essential
- * information is the type's typlen and typalign, which are preserved in
- * the attribute's attlen and attalign. We set atttypid to zero here
- * as a means of catching code that incorrectly expects it to be valid.
+ * Set the type OID to invalid. A dropped attribute's type link
+ * cannot be relied on (once the attribute is dropped, the type might
+ * be too). Fortunately we do not need the type row --- the only
+ * really essential information is the type's typlen and typalign,
+ * which are preserved in the attribute's attlen and attalign. We set
+ * atttypid to zero here as a means of catching code that incorrectly
+ * expects it to be valid.
*/
attStruct->atttypid = InvalidOid;
@@ -1401,7 +1402,7 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
' ',
' ',
' ',
- InvalidOid, /* no associated index */
+ InvalidOid, /* no associated index */
expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
ccsrc); /* Source form check constraint */
@@ -1568,8 +1569,8 @@ AddRelationRawConstraints(Relation rel,
if (strcmp(cdef2->name, ccname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("CHECK constraint \"%s\" already exists",
- ccname)));
+ errmsg("CHECK constraint \"%s\" already exists",
+ ccname)));
}
}
else
@@ -1639,7 +1640,7 @@ AddRelationRawConstraints(Relation rel,
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in CHECK constraint")));
+ errmsg("cannot use sub-select in CHECK constraint")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
@@ -1750,7 +1751,7 @@ cookDefault(ParseState *pstate,
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use column references in DEFAULT clause")));
+ errmsg("cannot use column references in DEFAULT clause")));
/*
* It can't return a set either.
@@ -1773,9 +1774,9 @@ cookDefault(ParseState *pstate,
errmsg("cannot use aggregate in DEFAULT clause")));
/*
- * Coerce the expression to the correct type and typmod, if given. This
- * should match the parser's processing of non-defaulted expressions ---
- * see updateTargetListEntry().
+ * Coerce the expression to the correct type and typmod, if given.
+ * This should match the parser's processing of non-defaulted
+ * expressions --- see updateTargetListEntry().
*/
if (OidIsValid(atttypid))
{
@@ -1793,7 +1794,7 @@ cookDefault(ParseState *pstate,
attname,
format_type_be(atttypid),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return expr;
@@ -1952,7 +1953,7 @@ RelationTruncateIndexes(Oid heapId)
/*
* index_build will close both the heap and index relations (but
- * not give up the locks we hold on them). We're done with this
+ * not give up the locks we hold on them). We're done with this
* index, but we must re-open the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index cb5a78c3dac..0b03c630b55 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.212 2003/07/21 01:59:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.213 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -65,8 +65,8 @@
/* non-export function prototypes */
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
- IndexInfo *indexInfo,
- Oid *classObjectId);
+ IndexInfo *indexInfo,
+ Oid *classObjectId);
static void UpdateRelationRelation(Relation indexRelation);
static void InitializeAttributeOids(Relation indexRelation,
int numatts, Oid indexoid);
@@ -124,7 +124,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* For simple index columns, we copy the pg_attribute row from the
- * parent relation and modify it as necessary. For expressions we
+ * parent relation and modify it as necessary. For expressions we
* have to cons up a pg_attribute row the hard way.
*/
for (i = 0; i < numatts; i++)
@@ -149,7 +149,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* here we are indexing on a system attribute (-1...-n)
*/
from = SystemAttributeDefinition(atnum,
- heapRelation->rd_rel->relhasoids);
+ heapRelation->rd_rel->relhasoids);
}
else
{
@@ -162,8 +162,8 @@ ConstructTupleDescriptor(Relation heapRelation,
}
/*
- * now that we've determined the "from", let's copy the tuple desc
- * data...
+ * now that we've determined the "from", let's copy the tuple
+ * desc data...
*/
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
@@ -185,7 +185,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/* Expressional index */
Node *indexkey;
- if (indexprs == NIL) /* shouldn't happen */
+ if (indexprs == NIL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexprs);
indexprs = lnext(indexprs);
@@ -197,7 +197,8 @@ ConstructTupleDescriptor(Relation heapRelation,
sprintf(NameStr(to->attname), "pg_expression_%d", i + 1);
/*
- * Lookup the expression type in pg_type for the type length etc.
+ * Lookup the expression type in pg_type for the type length
+ * etc.
*/
keyType = exprType(indexkey);
tuple = SearchSysCache(TYPEOID,
@@ -534,7 +535,7 @@ index_create(Oid heapRelationId,
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared indexes cannot be created after initdb")));
+ errmsg("shared indexes cannot be created after initdb")));
if (get_relname_relid(indexRelationName, namespaceId))
ereport(ERROR,
@@ -668,7 +669,7 @@ index_create(Oid heapRelationId,
' ',
' ',
' ',
- InvalidOid, /* no associated index */
+ InvalidOid, /* no associated index */
NULL, /* no check constraint */
NULL,
NULL);
@@ -709,7 +710,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Expressions)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Expressions,
+ (Node *) indexInfo->ii_Expressions,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -719,7 +720,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Predicate)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Predicate,
+ (Node *) indexInfo->ii_Predicate,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -831,8 +832,8 @@ index_drop(Oid indexId)
/*
* We are presently too lazy to attempt to compute the new correct
- * value of relhasindex (the next VACUUM will fix it if necessary).
- * So there is no need to update the pg_class tuple for the owning
+ * value of relhasindex (the next VACUUM will fix it if necessary). So
+ * there is no need to update the pg_class tuple for the owning
* relation. But we must send out a shared-cache-inval notice on the
* owning relation to ensure other backends update their relcache
* lists of indexes.
@@ -958,7 +959,7 @@ FormIndexDatum(IndexInfo *indexInfo,
if (indexprs == NIL)
elog(ERROR, "wrong number of index expressions");
iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexprs),
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
indexprs = lnext(indexprs);
@@ -1160,7 +1161,7 @@ setNewRelfilenode(Relation relation)
if (!in_place_upd)
{
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
}
else
@@ -1170,7 +1171,7 @@ setNewRelfilenode(Relation relation)
ScanKeyEntryInitialize(&key[0], 0,
ObjectIdAttributeNumber,
F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
+ ObjectIdGetDatum(RelationGetRelid(relation)));
pg_class_scan = heap_beginscan(pg_class, SnapshotNow, 1, key);
tuple = heap_getnext(pg_class_scan, ForwardScanDirection);
@@ -1325,9 +1326,9 @@ UpdateStats(Oid relid, double reltuples)
}
/*
- * Update statistics in pg_class, if they changed. (Avoiding an
- * unnecessary update is not just a tiny performance improvement;
- * it also reduces the window wherein concurrent CREATE INDEX commands
+ * Update statistics in pg_class, if they changed. (Avoiding an
+ * unnecessary update is not just a tiny performance improvement; it
+ * also reduces the window wherein concurrent CREATE INDEX commands
* may conflict.)
*/
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
@@ -1338,8 +1339,9 @@ UpdateStats(Oid relid, double reltuples)
if (in_place_upd)
{
/*
- * At bootstrap time, we don't need to worry about concurrency or
- * visibility of changes, so we cheat. Also cheat if REINDEX.
+ * At bootstrap time, we don't need to worry about concurrency
+ * or visibility of changes, so we cheat. Also cheat if
+ * REINDEX.
*/
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
rd_rel->relpages = (int32) relpages;
@@ -1367,7 +1369,7 @@ UpdateStats(Oid relid, double reltuples)
/*
* We shouldn't have to do this, but we do... Modify the reldesc in
* place with the new values so that the cache contains the latest
- * copy. (XXX is this really still necessary? The relcache will get
+ * copy. (XXX is this really still necessary? The relcache will get
* fixed at next CommandCounterIncrement, so why bother here?)
*/
whichRel->rd_rel->relpages = (int32) relpages;
@@ -1454,8 +1456,8 @@ IndexBuildHeapScan(Relation heapRelation,
heapDescriptor = RelationGetDescr(heapRelation);
/*
- * Need an EState for evaluation of index expressions
- * and partial-index predicates.
+ * Need an EState for evaluation of index expressions and
+ * partial-index predicates.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -1463,7 +1465,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
- * be in a slot of a TupleTable. Likewise if there are any expressions.
+ * be in a slot of a TupleTable. Likewise if there are any
+ * expressions.
*/
if (indexInfo->ii_Predicate != NIL || indexInfo->ii_Expressions != NIL)
{
@@ -1741,15 +1744,15 @@ reindex_index(Oid indexId, bool force, bool inplace)
* it's a nailed-in-cache index, we must do inplace processing because
* the relcache can't cope with changing its relfilenode.
*
- * In either of these cases, we are definitely processing a system
- * index, so we'd better be ignoring system indexes.
+ * In either of these cases, we are definitely processing a system index,
+ * so we'd better be ignoring system indexes.
*/
if (iRel->rd_rel->relisshared)
{
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the target relation %u is shared", indexId)));
+ errmsg("the target relation %u is shared", indexId)));
inplace = true;
}
if (iRel->rd_isnailed)
@@ -1757,7 +1760,7 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the target relation %u is nailed", indexId)));
+ errmsg("the target relation %u is nailed", indexId)));
inplace = true;
}
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 28b9859677d..6a39fc69016 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.55 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.56 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,7 +164,7 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
}
if (relation->schemaname)
@@ -217,7 +217,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
}
if (newRelation->istemp)
@@ -226,7 +226,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("TEMP tables may not specify a schema name")));
+ errmsg("TEMP tables may not specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@@ -1057,7 +1057,7 @@ OpclassIsVisible(Oid opcid)
Oid
ConversionGetConid(const char *conname)
{
- Oid conid;
+ Oid conid;
List *lptr;
recomputeNamespacePath();
@@ -1115,11 +1115,11 @@ ConversionIsVisible(Oid conid)
/*
* If it is in the path, it might still not be visible; it could
* be hidden by another conversion of the same name earlier in the
- * path. So we must do a slow check to see if this conversion would
- * be found by ConversionGetConid.
+ * path. So we must do a slow check to see if this conversion
+ * would be found by ConversionGetConid.
*/
char *conname = NameStr(conform->conname);
-
+
visible = (ConversionGetConid(conname) == conid);
}
@@ -1164,13 +1164,13 @@ DeconstructQualifiedName(List *names,
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1281,8 +1281,8 @@ makeRangeVarFromNameList(List *names)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper relation name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper relation name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1720,8 +1720,8 @@ RemoveTempRelations(Oid tempNamespaceId)
/*
* We want to get rid of everything in the target namespace, but not
- * the namespace itself (deleting it only to recreate it later would be
- * a waste of cycles). We do this by finding everything that has a
+ * the namespace itself (deleting it only to recreate it later would
+ * be a waste of cycles). We do this by finding everything that has a
* dependency on the namespace.
*/
object.classId = get_system_catalog_relid(NamespaceRelationName);
@@ -1797,7 +1797,7 @@ assign_search_path(const char *newval, bool doit, bool interactive)
0, 0, 0))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", curname)));
+ errmsg("schema \"%s\" does not exist", curname)));
}
}
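
DeconstructQualifiedName() and makeRangeVarFromNameList() above accept at most three dotted components (catalog.schema.relation), reject longer names, and accept a catalog component only when it names the current database. The standalone sketch below models just that dispatch; the splitter, buffer sizes, and example names are hypothetical.

#include <stdio.h>
#include <string.h>

/* Hypothetical splitter: fills parts[] from a dotted name, returns count. */
static int
split_name(const char *qualified, char parts[][64], int max)
{
	int			n = 0;
	const char *start = qualified;
	const char *dot;

	while (n < max && (dot = strchr(start, '.')) != NULL)
	{
		snprintf(parts[n++], 64, "%.*s", (int) (dot - start), start);
		start = dot + 1;
	}
	if (n < max)
		snprintf(parts[n++], 64, "%s", start);
	return n;
}

int
main(void)
{
	const char *current_db = "mydb";
	const char *name = "mydb.public.accounts";
	char		parts[4][64];
	int			n = split_name(name, parts, 4);
	const char *schema = NULL;
	const char *relation = NULL;

	switch (n)
	{
		case 1:
			relation = parts[0];
			break;
		case 2:
			schema = parts[0];
			relation = parts[1];
			break;
		case 3:
			/* Cross-database references are not implemented. */
			if (strcmp(parts[0], current_db) != 0)
			{
				fprintf(stderr, "cross-database references are not implemented\n");
				return 1;
			}
			schema = parts[1];
			relation = parts[2];
			break;
		default:
			fprintf(stderr, "improper qualified name (too many dotted names)\n");
			return 1;
	}

	printf("schema=%s relation=%s\n", schema ? schema : "(default)", relation);
	return 0;
}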
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 6837f3b9225..779468ce21f 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.61 2003/07/21 01:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.62 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
#include "utils/syscache.h"
-static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
- Oid *rettype);
+static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
+ Oid *rettype);
/*
@@ -79,7 +79,7 @@ AggregateCreate(const char *aggName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition datatype"),
errdetail("An aggregate using ANYARRAY or ANYELEMENT as "
- "trans type must have one of them as its base type.")));
+ "trans type must have one of them as its base type.")));
/* handle transfn */
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -99,8 +99,8 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic)
* must exactly match declared transtype.
*
- * In the non-polymorphic-transtype case, it might be okay to allow
- * a rettype that's binary-coercible to transtype, but I'm not quite
+ * In the non-polymorphic-transtype case, it might be okay to allow a
+ * rettype that's binary-coercible to transtype, but I'm not quite
* convinced that it's either safe or useful. When transtype is
* polymorphic we *must* demand exact equality.
*/
@@ -151,9 +151,9 @@ AggregateCreate(const char *aggName,
Assert(OidIsValid(finaltype));
/*
- * If finaltype (i.e. aggregate return type) is polymorphic,
- * basetype must be polymorphic also, else parser will fail to deduce
- * result type. (Note: given the previous test on transtype and basetype,
+ * If finaltype (i.e. aggregate return type) is polymorphic, basetype
+ * must be polymorphic also, else parser will fail to deduce result
+ * type. (Note: given the previous test on transtype and basetype,
* this cannot happen, unless someone has snuck a finalfn definition
* into the catalogs that itself violates the rule against polymorphic
* result with no polymorphic input.)
@@ -163,8 +163,8 @@ AggregateCreate(const char *aggName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result datatype"),
- errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
- "must have one of them as its base type.")));
+ errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
+ "must have one of them as its base type.")));
/*
* Everything looks okay. Try to create the pg_proc entry for the
@@ -278,21 +278,21 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
if (retset)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s returns a set",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
/*
- * If the given type(s) are all polymorphic, there's nothing we
- * can check. Otherwise, enforce consistency, and possibly refine
- * the result type.
+ * If the given type(s) are all polymorphic, there's nothing we can
+ * check. Otherwise, enforce consistency, and possibly refine the
+ * result type.
*/
if ((input_types[0] == ANYARRAYOID || input_types[0] == ANYELEMENTOID) &&
(nargs == 1 ||
- (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
+ (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
{
/* nothing to check here */
}
@@ -305,8 +305,8 @@ lookup_agg_function(List *fnName,
}
/*
- * func_get_detail will find functions requiring run-time argument type
- * coercion, but nodeAgg.c isn't prepared to deal with that
+ * func_get_detail will find functions requiring run-time argument
+ * type coercion, but nodeAgg.c isn't prepared to deal with that
*/
if (true_oid_array[0] != ANYARRAYOID &&
true_oid_array[0] != ANYELEMENTOID &&
@@ -314,7 +314,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
if (nargs == 2 &&
true_oid_array[1] != ANYARRAYOID &&
@@ -323,7 +323,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
return fnOid;
}
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 89adeb57b99..ae8b7dec038 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.14 2003/07/21 01:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.15 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -195,7 +195,7 @@ CreateConstraintEntry(const char *constraintName,
/*
* Register auto dependency from constraint to owning domain
*/
- ObjectAddress domobject;
+ ObjectAddress domobject;
domobject.classId = RelOid_pg_type;
domobject.objectId = domainId;
@@ -234,8 +234,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(indexRelId))
{
/*
- * Register normal dependency on the unique index that supports
- * a foreign-key constraint.
+ * Register normal dependency on the unique index that supports a
+ * foreign-key constraint.
*/
ObjectAddress relobject;
@@ -438,8 +438,8 @@ RemoveConstraintById(Oid conId)
Relation rel;
/*
- * If the constraint is for a relation, open and exclusive-lock the
- * relation it's for.
+ * If the constraint is for a relation, open and exclusive-lock
+ * the relation it's for.
*/
rel = heap_open(con->conrelid, AccessExclusiveLock);
@@ -463,7 +463,7 @@ RemoveConstraintById(Oid conId)
con->conrelid);
classForm = (Form_pg_class) GETSTRUCT(relTup);
- if (classForm->relchecks == 0) /* should not happen */
+ if (classForm->relchecks == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has relchecks = 0",
RelationGetRelationName(rel));
classForm->relchecks--;
@@ -483,16 +483,15 @@ RemoveConstraintById(Oid conId)
else if (OidIsValid(con->contypid))
{
/*
- * XXX for now, do nothing special when dropping a domain constraint
+ * XXX for now, do nothing special when dropping a domain
+ * constraint
*
* Probably there should be some form of locking on the domain type,
* but we have no such concept at the moment.
*/
}
else
- {
elog(ERROR, "constraint %u is not of a known type", conId);
- }
/* Fry the constraint itself */
simple_heap_delete(conDesc, &tup->t_self);
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index 70bd294297d..5c10fa7b28c 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.13 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.14 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,9 +76,9 @@ ConversionCreate(const char *conname, Oid connamespace,
contoencoding))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("default conversion for %s to %s already exists",
- pg_encoding_to_char(conforencoding),
- pg_encoding_to_char(contoencoding))));
+ errmsg("default conversion for %s to %s already exists",
+ pg_encoding_to_char(conforencoding),
+ pg_encoding_to_char(contoencoding))));
}
/* open pg_conversion */
@@ -147,7 +147,7 @@ ConversionDrop(Oid conversionOid, DropBehavior behavior)
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
- NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
+ NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
ReleaseSysCache(tuple);
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index da3e2a46920..141d3a142a7 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.81 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.82 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -409,7 +409,7 @@ OperatorCreate(const char *operatorName,
if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("at least one of leftarg or rightarg must be specified")));
+ errmsg("at least one of leftarg or rightarg must be specified")));
if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId)))
{
@@ -417,11 +417,11 @@ OperatorCreate(const char *operatorName,
if (commutatorName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have commutators")));
+ errmsg("only binary operators can have commutators")));
if (joinName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have join selectivity")));
+ errmsg("only binary operators can have join selectivity")));
if (canHash)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index d8ff4a5225a..2c11a17db5e 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.102 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.103 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -94,7 +94,7 @@ ProcedureCreate(const char *procedureName,
*/
if (returnType == ANYARRAYOID || returnType == ANYELEMENTOID)
{
- bool genericParam = false;
+ bool genericParam = false;
for (i = 0; i < parameterCount; i++)
{
@@ -231,7 +231,7 @@ ProcedureCreate(const char *procedureName,
returnsSet != oldproc->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("cannot change return type of existing function"),
+ errmsg("cannot change return type of existing function"),
errhint("Use DROP FUNCTION first.")));
/* Can't change aggregate status, either */
@@ -339,8 +339,8 @@ ProcedureCreate(const char *procedureName,
*
* This is normally applied during function definition, but in the case
* of a function with polymorphic arguments, we instead apply it during
- * function execution startup. The rettype is then the actual resolved
- * output type of the function, rather than the declared type. (Therefore,
+ * function execution startup. The rettype is then the actual resolved
+ * output type of the function, rather than the declared type. (Therefore,
* we should never see ANYARRAY or ANYELEMENT as rettype.)
*/
void
@@ -366,7 +366,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errdetail("Function's final statement must be a SELECT.")));
return;
}
@@ -395,9 +395,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
/*
* Count the non-junk entries in the result targetlist.
@@ -421,7 +421,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT must return exactly one column.")));
+ errdetail("Final SELECT must return exactly one column.")));
restype = ((TargetEntry *) lfirst(tlist))->resdom->restype;
if (!IsBinaryCoercible(restype, rettype))
@@ -481,7 +481,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT returns too many columns.")));
+ errdetail("Final SELECT returns too many columns.")));
attr = reln->rd_att->attrs[colindex - 1];
} while (attr->attisdropped);
rellogcols++;
@@ -538,8 +538,8 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type %s is not supported for SQL functions",
- format_type_be(rettype))));
+ errmsg("return type %s is not supported for SQL functions",
+ format_type_be(rettype))));
}
@@ -684,8 +684,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL functions cannot have arguments of type %s",
- format_type_be(proc->proargtypes[i]))));
+ errmsg("SQL functions cannot have arguments of type %s",
+ format_type_be(proc->proargtypes[i]))));
}
}
@@ -696,13 +696,13 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
prosrc = DatumGetCString(DirectFunctionCall1(textout, tmp));
/*
- * We can't do full prechecking of the function definition if there are
- * any polymorphic input types, because actual datatypes of expression
- * results will be unresolvable. The check will be done at runtime
- * instead.
+ * We can't do full prechecking of the function definition if there
+ * are any polymorphic input types, because actual datatypes of
+ * expression results will be unresolvable. The check will be done at
+ * runtime instead.
*
- * We can run the text through the raw parser though; this will at
- * least catch silly syntactic errors.
+ * We can run the text through the raw parser though; this will at least
+ * catch silly syntactic errors.
*/
if (!haspolyarg)
{
@@ -712,9 +712,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
check_sql_fn_retval(proc->prorettype, functyptype, querytree_list);
}
else
- {
querytree_list = pg_parse_query(prosrc);
- }
ReleaseSysCache(tuple);
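The rewrapped comments in pg_proc.c describe when check_sql_fn_retval runs: normally at CREATE FUNCTION time, but for SQL functions with polymorphic arguments only the raw parser runs up front and the return type is checked at execution startup. A small illustration with invented names, using the single-quoted function bodies of this era:

    CREATE FUNCTION first_elem(anyarray) RETURNS anyelement
        AS 'SELECT $1[1]' LANGUAGE sql;
    -- Creation only catches syntactic errors; the actual result type is
    -- resolved per call, e.g. to integer here:
    SELECT first_elem(ARRAY[3, 2, 1]);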
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 7980e6afadf..d578644e681 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.88 2003/07/21 01:59:11 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.89 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -359,7 +359,8 @@ TypeCreate(const char *typeName,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
- Oid relationOid, /* only for 'c'atalog types */
+ Oid relationOid, /* only for 'c'atalog
+ * types */
char relationKind, /* ditto */
Oid inputProcedure,
Oid outputProcedure,
@@ -426,13 +427,13 @@ GenerateTypeDependencies(Oid typeNamespace,
/*
* If the type is a rowtype for a relation, mark it as internally
- * dependent on the relation, *unless* it is a stand-alone
- * composite type relation. For the latter case, we have to
- * reverse the dependency.
+ * dependent on the relation, *unless* it is a stand-alone composite
+ * type relation. For the latter case, we have to reverse the
+ * dependency.
*
- * In the former case, this allows the type to be auto-dropped when
- * the relation is, and not otherwise. And in the latter, of
- * course we get the opposite effect.
+ * In the former case, this allows the type to be auto-dropped when the
+ * relation is, and not otherwise. And in the latter, of course we get
+ * the opposite effect.
*/
if (OidIsValid(relationOid))
{
@@ -447,11 +448,11 @@ GenerateTypeDependencies(Oid typeNamespace,
}
/*
- * If the type is an array type, mark it auto-dependent on the
- * base type. (This is a compromise between the typical case
- * where the array type is automatically generated and the case
- * where it is manually created: we'd prefer INTERNAL for the
- * former case and NORMAL for the latter.)
+ * If the type is an array type, mark it auto-dependent on the base
+ * type. (This is a compromise between the typical case where the
+ * array type is automatically generated and the case where it is
+ * manually created: we'd prefer INTERNAL for the former case and
+ * NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
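GenerateTypeDependencies, per the reflowed comments, makes a table's rowtype internally dependent on the table but reverses the dependency for stand-alone composite types. At the SQL level, roughly:

    CREATE TABLE t (a int);
    DROP TABLE t;        -- the rowtype t is auto-dropped with the table

    CREATE TYPE complex AS (r float8, i float8);
    DROP TYPE complex;   -- the hidden relation depends on the type, so this drops both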
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 1d9b25b5b0a..5a57d5c5c77 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.12 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.13 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -256,16 +256,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function %s(*) already exists in schema \"%s\"",
- newname,
- get_namespace_name(namespaceOid))));
+ errmsg("function %s(*) already exists in schema \"%s\"",
+ newname,
+ get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes),
+ procForm->proargtypes),
get_namespace_name(namespaceOid))));
}
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index b377635099e..4fd43871e97 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.4 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.5 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,52 +79,52 @@ ExecRenameStmt(RenameStmt *stmt)
case OBJECT_TABLE:
case OBJECT_COLUMN:
case OBJECT_TRIGGER:
- {
- Oid relid;
+ {
+ Oid relid;
- CheckRelationOwnership(stmt->relation, true);
+ CheckRelationOwnership(stmt->relation, true);
- relid = RangeVarGetRelid(stmt->relation, false);
+ relid = RangeVarGetRelid(stmt->relation, false);
- switch (stmt->renameType)
- {
- case OBJECT_TABLE:
+ switch (stmt->renameType)
{
- /*
- * RENAME TABLE requires that we (still) hold
- * CREATE rights on the containing namespace, as
- * well as ownership of the table.
- */
- Oid namespaceId = get_rel_namespace(relid);
- AclResult aclresult;
-
- aclresult = pg_namespace_aclcheck(namespaceId,
- GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
-
- renamerel(relid, stmt->newname);
- break;
- }
- case OBJECT_COLUMN:
- renameatt(relid,
- stmt->subname, /* old att name */
- stmt->newname, /* new att name */
+ case OBJECT_TABLE:
+ {
+ /*
+ * RENAME TABLE requires that we (still) hold
+ * CREATE rights on the containing namespace,
+ * as well as ownership of the table.
+ */
+ Oid namespaceId = get_rel_namespace(relid);
+ AclResult aclresult;
+
+ aclresult = pg_namespace_aclcheck(namespaceId,
+ GetUserId(),
+ ACL_CREATE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
+ get_namespace_name(namespaceId));
+
+ renamerel(relid, stmt->newname);
+ break;
+ }
+ case OBJECT_COLUMN:
+ renameatt(relid,
+ stmt->subname, /* old att name */
+ stmt->newname, /* new att name */
interpretInhOption(stmt->relation->inhOpt), /* recursive? */
- false); /* recursing already? */
- break;
- case OBJECT_TRIGGER:
- renametrig(relid,
- stmt->subname, /* old att name */
- stmt->newname); /* new att name */
- break;
- default:
- /*can't happen*/;
+ false); /* recursing already? */
+ break;
+ case OBJECT_TRIGGER:
+ renametrig(relid,
+ stmt->subname, /* old att name */
+ stmt->newname); /* new att name */
+ break;
+ default:
+ /* can't happen */ ;
+ }
+ break;
}
- break;
- }
default:
elog(ERROR, "unrecognized rename stmt type: %d",
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 5277884f1f8..dac2d5d7bbd 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.56 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.57 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -220,9 +220,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Silently ignore tables that are temp tables of other backends ---
- * trying to analyze these is rather pointless, since their
- * contents are probably not up-to-date on disk. (We don't throw a
- * warning here; it would just lead to chatter during a database-wide
+ * trying to analyze these is rather pointless, since their contents
+ * are probably not up-to-date on disk. (We don't throw a warning
+ * here; it would just lead to chatter during a database-wide
* ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index dafea7c8695..69085740cc5 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.96 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.97 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -603,10 +603,10 @@ Async_NotifyHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it off
- * while messing with the NOTIFY state. (We would have to save
- * and restore it anyway, because PGSemaphore operations inside
- * ProcessIncomingNotify() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it
+ * off while messing with the NOTIFY state. (We would have to
+ * save and restore it anyway, because PGSemaphore operations
+ * inside ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;
@@ -639,7 +639,8 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if
+ * needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 18f6bfcf6b5..23e03443fc5 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.112 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.113 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,12 +58,12 @@ typedef struct
*/
typedef struct
{
- Oid tableOid;
- Oid indexOid;
-} RelToCluster;
+ Oid tableOid;
+ Oid indexOid;
+} RelToCluster;
-static void cluster_rel(RelToCluster *rv, bool recheck);
+static void cluster_rel(RelToCluster * rv, bool recheck);
static Oid make_new_heap(Oid OIDOldHeap, const char *NewName);
static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
static List *get_indexattr_list(Relation OldHeap, Oid OldIndex);
@@ -74,7 +74,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
/*---------------------------------------------------------------------------
- * This cluster code allows for clustering multiple tables at once. Because
+ * This cluster code allows for clustering multiple tables at once. Because
* of this, we cannot just run everything on a single transaction, or we
* would be forced to acquire exclusive locks on all the tables being
* clustered, simultaneously --- very likely leading to deadlock.
@@ -82,17 +82,17 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
* To solve this we follow a similar strategy to VACUUM code,
* clustering each relation in a separate transaction. For this to work,
* we need to:
- * - provide a separate memory context so that we can pass information in
- * a way that survives across transactions
- * - start a new transaction every time a new relation is clustered
- * - check for validity of the information on to-be-clustered relations,
- * as someone might have deleted a relation behind our back, or
- * clustered one on a different index
- * - end the transaction
+ * - provide a separate memory context so that we can pass information in
+ * a way that survives across transactions
+ * - start a new transaction every time a new relation is clustered
+ * - check for validity of the information on to-be-clustered relations,
+ * as someone might have deleted a relation behind our back, or
+ * clustered one on a different index
+ * - end the transaction
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation being specified without index. In that case,
+ * We also allow a relation being specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -103,10 +103,10 @@ cluster(ClusterStmt *stmt)
if (stmt->relation != NULL)
{
/* This is the single-relation case. */
- Oid tableOid,
- indexOid = InvalidOid;
- Relation rel;
- RelToCluster rvtc;
+ Oid tableOid,
+ indexOid = InvalidOid;
+ Relation rel;
+ RelToCluster rvtc;
/* Find and lock the table */
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
@@ -123,10 +123,10 @@ cluster(ClusterStmt *stmt)
List *index;
/* We need to find the index that has indisclustered set. */
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index indexForm;
+ HeapTuple idxtuple;
+ Form_pg_index indexForm;
indexOid = lfirsto(index);
idxtuple = SearchSysCache(INDEXRELID,
@@ -152,14 +152,17 @@ cluster(ClusterStmt *stmt)
}
else
{
- /* The index is expected to be in the same namespace as the relation. */
+ /*
+ * The index is expected to be in the same namespace as the
+ * relation.
+ */
indexOid = get_relname_relid(stmt->indexname,
rel->rd_rel->relnamespace);
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("index \"%s\" for table \"%s\" does not exist",
- stmt->indexname, stmt->relation->relname)));
+ errmsg("index \"%s\" for table \"%s\" does not exist",
+ stmt->indexname, stmt->relation->relname)));
}
/* All other checks are done in cluster_rel() */
@@ -175,16 +178,16 @@ cluster(ClusterStmt *stmt)
else
{
/*
- * This is the "multi relation" case. We need to cluster all tables
- * that have some index with indisclustered set.
+ * This is the "multi relation" case. We need to cluster all
+ * tables that have some index with indisclustered set.
*/
- MemoryContext cluster_context;
- List *rv,
- *rvs;
+ MemoryContext cluster_context;
+ List *rv,
+ *rvs;
/*
- * We cannot run this form of CLUSTER inside a user transaction block;
- * we'd be holding locks way too long.
+ * We cannot run this form of CLUSTER inside a user transaction
+ * block; we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");
@@ -201,8 +204,8 @@ cluster(ClusterStmt *stmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
- * cluster_context.
+ * Build the list of relations to cluster. Note that this lives
+ * in cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -210,13 +213,14 @@ cluster(ClusterStmt *stmt)
CommitTransactionCommand();
/* Ok, now that we've got them all, cluster them one by one */
- foreach (rv, rvs)
+ foreach(rv, rvs)
{
- RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
+ RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
/* Start a new transaction for each relation. */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
cluster_rel(rvtc, true);
CommitTransactionCommand();
}
@@ -244,7 +248,7 @@ cluster(ClusterStmt *stmt)
* them incrementally while we load the table.
*/
static void
-cluster_rel(RelToCluster *rvtc, bool recheck)
+cluster_rel(RelToCluster * rvtc, bool recheck)
{
Relation OldHeap,
OldIndex;
@@ -256,14 +260,14 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to
* check that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests.
- * We *must* skip the one on indisclustered since it would reject an
+ * If this is a single-transaction CLUSTER, we can skip these tests. We
+ * *must* skip the one on indisclustered since it would reject an
* attempt to cluster a not-previously-clustered index.
*/
if (recheck)
{
- HeapTuple tuple;
- Form_pg_index indexForm;
+ HeapTuple tuple;
+ Form_pg_index indexForm;
/*
* Check if the relation and index still exist before opening them
@@ -319,10 +323,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
RelationGetRelationName(OldHeap))));
/*
- * Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
- * seqscan pass over the table to copy the missing rows, but that seems
- * expensive and tedious.
+ * Disallow clustering on incomplete indexes (those that might not
+ * index every row of the relation). We could relax this by making a
+ * separate seqscan pass over the table to copy the missing rows, but
+ * that seems expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
@@ -334,7 +338,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
/*
* If the AM doesn't index nulls, then it's a partial index unless
- * we can prove all the rows are non-null. Note we only need look
+ * we can prove all the rows are non-null. Note we only need look
* at the first column; multicolumn-capable AMs are *required* to
* index nulls in columns after the first.
*/
@@ -347,7 +351,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster when index access method does not handle nulls"),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@@ -382,7 +386,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temp tables of other processes")));
+ errmsg("cannot cluster temp tables of other processes")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -397,7 +401,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* rebuild_relation: rebuild an existing relation
*
* This is shared code between CLUSTER and TRUNCATE. In the TRUNCATE
- * case, the new relation is built and left empty. In the CLUSTER case,
+ * case, the new relation is built and left empty. In the CLUSTER case,
* it is filled with data read from the old relation in the order specified
* by the index.
*
@@ -432,6 +436,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);
OIDNewHeap = make_new_heap(tableOid, NewHeapName);
+
/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
@@ -754,8 +759,8 @@ swap_relfilenodes(Oid r1, Oid r2)
/* swap size statistics too, since new rel has freshly-updated stats */
{
- int4 swap_pages;
- float4 swap_tuples;
+ int4 swap_pages;
+ float4 swap_tuples;
swap_pages = relform1->relpages;
relform1->relpages = relform2->relpages;
@@ -857,20 +862,20 @@ swap_relfilenodes(Oid r1, Oid r2)
static List *
get_tables_to_cluster(MemoryContext cluster_context)
{
- Relation indRelation;
- HeapScanDesc scan;
- ScanKeyData entry;
- HeapTuple indexTuple;
- Form_pg_index index;
- MemoryContext old_context;
- RelToCluster *rvtc;
- List *rvs = NIL;
+ Relation indRelation;
+ HeapScanDesc scan;
+ ScanKeyData entry;
+ HeapTuple indexTuple;
+ Form_pg_index index;
+ MemoryContext old_context;
+ RelToCluster *rvtc;
+ List *rvs = NIL;
/*
* Get all indexes that have indisclustered set and are owned by
- * appropriate user. System relations or nailed-in relations cannot ever
- * have indisclustered set, because CLUSTER will refuse to set it when
- * called with one of them as argument.
+ * appropriate user. System relations or nailed-in relations cannot
+ * ever have indisclustered set, because CLUSTER will refuse to set it
+ * when called with one of them as argument.
*/
indRelation = relation_openr(IndexRelationName, AccessShareLock);
ScanKeyEntryInitialize(&entry, 0,
@@ -886,8 +891,8 @@ get_tables_to_cluster(MemoryContext cluster_context)
continue;
/*
- * We have to build the list in a different memory context so
- * it will survive the cross-transaction processing
+ * We have to build the list in a different memory context so it
+ * will survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);
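The reflowed header comment in cluster.c covers the three invocation forms and why the multi-table form uses one transaction per relation. In the syntax of this era, with invented table and index names:

    CLUSTER;                         -- every table with an indisclustered index,
                                     -- each in its own transaction; not allowed
                                     -- inside a transaction block
    CLUSTER onek;                    -- reuses the previously clustered index
    CLUSTER onek_unique1 ON onek;    -- names the index explicitly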
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index ecd50bdb367..e0ebba0df96 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.67 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.68 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,8 +383,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- attrname, RelationGetRelationName(relation))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ attrname, RelationGetRelationName(relation))));
/* Create the comment using the relation's oid */
@@ -418,16 +418,17 @@ CommentDatabase(List *qualname, char *comment)
database = strVal(lfirst(qualname));
/*
- * We cannot currently support cross-database comments (since other DBs
- * cannot see pg_description of this database). So, we reject attempts
- * to comment on a database other than the current one. Someday this
- * might be improved, but it would take a redesigned infrastructure.
+ * We cannot currently support cross-database comments (since other
+ * DBs cannot see pg_description of this database). So, we reject
+ * attempts to comment on a database other than the current one.
+ * Someday this might be improved, but it would take a redesigned
+ * infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
- * of the database. Erroring out would prevent pg_restore from completing
- * (which is really pg_restore's fault, but for now we will work around
- * the problem here). Consensus is that the best fix is to treat wrong
- * database name as a WARNING not an ERROR.
+ * of the database. Erroring out would prevent pg_restore from
+ * completing (which is really pg_restore's fault, but for now we will
+ * work around the problem here). Consensus is that the best fix is
+ * to treat wrong database name as a WARNING not an ERROR.
*/
/* First get the database OID */
@@ -569,7 +570,7 @@ CommentRule(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple rules \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
@@ -811,8 +812,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, RelationGetRelationName(relation))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, RelationGetRelationName(relation))));
oid = HeapTupleGetOid(triggertuple);
@@ -891,7 +892,7 @@ CommentConstraint(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("relation \"%s\" has multiple constraints named \"%s\"",
- RelationGetRelationName(relation), conName)));
+ RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
@@ -902,8 +903,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for relation \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
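Two behaviors described by the rewrapped comment.c comments, sketched with invented object names:

    -- Naming a database other than the current one is only warned about,
    -- so pg_restore can finish loading a dump made under the old name:
    COMMENT ON DATABASE some_other_db IS 'comment carried over from a dump';
    -- When several rules share a name, the relation must be specified too:
    COMMENT ON RULE my_rule ON my_table IS 'disambiguated by table name';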
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index b917c527aca..e9afb956246 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.9 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.10 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname)
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("conversion \"%s\" already exists in schema \"%s\"",
- newname, get_namespace_name(namespaceOid))));
+ errmsg("conversion \"%s\" already exists in schema \"%s\"",
+ newname, get_namespace_name(namespaceOid))));
/* must be owner */
- if (!superuser() &&
+ if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tup))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index fa91439a579..5c7238de8dc 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.205 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.206 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ typedef enum CopyDest
COPY_FILE, /* to/from file */
COPY_OLD_FE, /* to/from frontend (old protocol) */
COPY_NEW_FE /* to/from frontend (new protocol) */
-} CopyDest;
+} CopyDest;
/*
* Represents the type of data returned by CopyReadAttribute()
@@ -82,17 +82,17 @@ typedef enum EolType
EOL_NL,
EOL_CR,
EOL_CRNL
-} EolType;
+} EolType;
/* non-export function prototypes */
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static char *CopyReadAttribute(const char *delim, CopyReadResult *result);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
- Oid typelem, bool *isnull);
+ Oid typelem, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
@@ -136,6 +136,7 @@ static void CopySendChar(char c);
static void CopySendEndOfRow(bool binary);
static void CopyGetData(void *databuf, int datasize);
static int CopyGetChar(void);
+
#define CopyGetEof() (fe_eof)
static int CopyPeekChar(void);
static void CopyDonePeek(int c, bool pickup);
@@ -155,14 +156,14 @@ SendCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'H');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -200,14 +201,14 @@ ReceiveCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'G');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -289,7 +290,7 @@ CopySendData(void *databuf, int datasize)
/* no hope of recovering connection sync, so FATAL */
ereport(FATAL,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("connection lost during COPY to stdout")));
+ errmsg("connection lost during COPY to stdout")));
}
break;
case COPY_NEW_FE:
@@ -378,7 +379,7 @@ CopyGetData(void *databuf, int datasize)
case COPY_NEW_FE:
while (datasize > 0 && !fe_eof)
{
- int avail;
+ int avail;
while (copy_msgbuf->cursor >= copy_msgbuf->len)
{
@@ -389,24 +390,24 @@ CopyGetData(void *databuf, int datasize)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
if (pq_getmessage(copy_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
switch (mtype)
{
- case 'd': /* CopyData */
+ case 'd': /* CopyData */
break;
- case 'c': /* CopyDone */
+ case 'c': /* CopyDone */
/* COPY IN correctly terminated by frontend */
fe_eof = true;
return;
- case 'f': /* CopyFail */
+ case 'f': /* CopyFail */
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
- pq_getmsgstring(copy_msgbuf))));
+ pq_getmsgstring(copy_msgbuf))));
break;
default:
ereport(ERROR,
@@ -421,7 +422,7 @@ CopyGetData(void *databuf, int datasize)
avail = datasize;
pq_copymsgbytes(copy_msgbuf, databuf, avail);
databuf = (void *) ((char *) databuf + avail);
- datasize =- avail;
+ datasize = -avail;
}
break;
}
@@ -430,7 +431,7 @@ CopyGetData(void *databuf, int datasize)
static int
CopyGetChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -448,16 +449,16 @@ CopyGetChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -479,7 +480,7 @@ CopyGetChar(void)
static int
CopyPeekChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -497,16 +498,16 @@ CopyPeekChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -524,7 +525,7 @@ CopyDonePeek(int c, bool pickup)
switch (copy_dest)
{
case COPY_FILE:
- if (!pickup)
+ if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, copy_file);
@@ -537,7 +538,11 @@ CopyDonePeek(int c, bool pickup)
/* We want to pick it up */
(void) pq_getbyte();
}
- /* If we didn't want to pick it up, just leave it where it sits */
+
+ /*
+ * If we didn't want to pick it up, just leave it where it
+ * sits
+ */
break;
case COPY_NEW_FE:
if (!pickup)
@@ -737,7 +742,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
/*
* Presently, only single-character delimiter strings are supported.
@@ -791,8 +796,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy to non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy to non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -810,8 +815,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- filename)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -841,8 +846,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy from non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy from non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -863,7 +868,7 @@ DoCopy(const CopyStmt *stmt)
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask((mode_t) 022);
copy_file = AllocateFile(filename, PG_BINARY_W);
@@ -872,8 +877,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for writing: %m",
- filename)));
+ errmsg("could not open file \"%s\" for writing: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -955,8 +960,8 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Create a temporary memory context that we can reset once per row
- * to recover palloc'd memory. This avoids any problems with leaks
+ * Create a temporary memory context that we can reset once per row to
+ * recover palloc'd memory. This avoids any problems with leaks
* inside datatype output routines, and should be faster than retail
* pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
*/
@@ -1040,9 +1045,9 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
if (isnull)
{
if (!binary)
- CopySendString(null_print); /* null indicator */
+ CopySendString(null_print); /* null indicator */
else
- CopySendInt32(-1); /* null marker */
+ CopySendInt32(-1); /* null marker */
}
else
{
@@ -1060,7 +1065,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(elements[attnum - 1])));
+ ObjectIdGetDatum(elements[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@@ -1199,7 +1204,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
/* attribute is NOT to be copied from input */
/* use default value if one exists */
- Node *defexpr = build_column_default(rel, i + 1);
+ Node *defexpr = build_column_default(rel, i + 1);
if (defexpr != NULL)
{
@@ -1219,10 +1224,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Easiest way to do this is to use parse_coerce.c to set up
* an expression that checks the constraints. (At present,
- * the expression might contain a length-coercion-function call
- * and/or CoerceToDomain nodes.) The bottom of the expression
- * is a Param node so that we can fill in the actual datum during
- * the data input loop.
+ * the expression might contain a length-coercion-function
+ * call and/or CoerceToDomain nodes.) The bottom of the
+ * expression is a Param node so that we can fill in the
+ * actual datum during the data input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
@@ -1241,11 +1246,11 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Check BEFORE STATEMENT insertion triggers. It's debateable
- * whether we should do this for COPY, since it's not really an
- * "INSERT" statement as such. However, executing these triggers
- * maintains consistency with the EACH ROW triggers that we already
- * fire on COPY.
+ * Check BEFORE STATEMENT insertion triggers. It's debateable whether
+ * we should do this for COPY, since it's not really an "INSERT"
+ * statement as such. However, executing these triggers maintains
+ * consistency with the EACH ROW triggers that we already fire on
+ * COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);
@@ -1276,13 +1281,13 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unrecognized critical flags in COPY file header")));
+ errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
tmp = CopyGetInt32();
if (CopyGetEof() || tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (missing length)")));
+ errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
@@ -1290,7 +1295,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (CopyGetEof())
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (wrong length)")));
+ errmsg("invalid COPY file header (wrong length)")));
}
}
@@ -1418,9 +1423,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Complain if there are more fields on the input line.
*
- * Special case: if we're reading a zero-column table, we
- * won't yet have called CopyReadAttribute() at all; so do that
- * and check we have an empty line. Fortunately we can keep that
+ * Special case: if we're reading a zero-column table, we won't
+ * yet have called CopyReadAttribute() at all; so do that and
+ * check we have an empty line. Fortunately we can keep that
* silly corner case out of the main line of execution.
*/
if (result == NORMAL_ATTR)
@@ -1431,7 +1436,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR || *string != '\0')
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
if (result == END_OF_FILE)
{
/* EOF at start of line: all is well */
@@ -1442,7 +1447,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
else
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
}
/*
@@ -1475,8 +1480,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
- &oid_in_function,
- oid_in_element,
+ &oid_in_function,
+ oid_in_element,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@@ -1531,9 +1536,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
prmdata->isnull = (nulls[i] == 'n');
/*
- * Execute the constraint expression. Allow the expression
- * to replace the value (consider e.g. a timestamp precision
- * restriction).
+ * Execute the constraint expression. Allow the
+ * expression to replace the value (consider e.g. a
+ * timestamp precision restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
@@ -1674,11 +1679,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
- errhint("Use \"\\r\" to represent carriage return.")));
- /* Check for \r\n on first line, _and_ handle \r\n. */
+ errhint("Use \"\\r\" to represent carriage return.")));
+ /* Check for \r\n on first line, _and_ handle \r\n. */
if (copy_lineno == 1 || eol_type == EOL_CRNL)
{
- int c2 = CopyPeekChar();
+ int c2 = CopyPeekChar();
+
if (c2 == '\n')
{
CopyDonePeek(c2, true); /* eat newline */
@@ -1690,9 +1696,13 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
if (eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("literal carriage return found in data"),
+ errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
- /* if we got here, it is the first line and we didn't get \n, so put it back */
+
+ /*
+ * if we got here, it is the first line and we didn't
+ * get \n, so put it back
+ */
CopyDonePeek(c2, false);
eol_type = EOL_CR;
}
@@ -1802,12 +1812,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
c = CopyGetChar();
if (c == '\n')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker does not match previous newline style")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker does not match previous newline style")));
if (c != '\r')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker corrupt")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker corrupt")));
}
c = CopyGetChar();
if (c != '\r' && c != '\n')
@@ -1816,21 +1826,20 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
errmsg("end-of-copy marker corrupt")));
if ((eol_type == EOL_NL && c != '\n') ||
(eol_type == EOL_CRNL && c != '\n') ||
- (eol_type == EOL_CR && c != '\r'))
+ (eol_type == EOL_CR && c != '\r'))
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("end-of-copy marker does not match previous newline style")));
+
/*
- * In protocol version 3, we should ignore anything after
- * \. up to the protocol end of copy data. (XXX maybe
- * better not to treat \. as special?)
+ * In protocol version 3, we should ignore anything
+ * after \. up to the protocol end of copy data. (XXX
+ * maybe better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
while (c != EOF)
- {
c = CopyGetChar();
- }
}
*result = END_OF_FILE;
goto copy_eof;
@@ -2045,8 +2054,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
if (intMember(attnum, attnums))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" specified more than once",
- name)));
+ errmsg("attribute \"%s\" specified more than once",
+ name)));
attnums = lappendi(attnums, attnum);
}
}
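Most of the copy.c hunks reflow comments around the text-format reader: the newline style must stay consistent, a literal carriage return in the data is rejected, and the end-of-copy marker must match the newline style already seen. The user-facing commands involved, for reference:

    COPY onek FROM '/tmp/onek.data';   -- server-side file, superuser only
    \copy onek from 'onek.data'        -- psql's client-side form, usable by anyone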
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 70678b26b08..547f3fb2f3f 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.119 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.120 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,7 +200,7 @@ createdb(const CreatedbStmt *stmt)
if (dbpath != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use an alternate location on this platform")));
+ errmsg("cannot use an alternate location on this platform")));
#endif
/*
@@ -260,8 +260,8 @@ createdb(const CreatedbStmt *stmt)
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
@@ -345,7 +345,7 @@ createdb(const CreatedbStmt *stmt)
/* Make the symlink, if needed */
if (alt_loc)
{
-#ifdef HAVE_SYMLINK /* already throws error above */
+#ifdef HAVE_SYMLINK /* already throws error above */
if (symlink(alt_loc, nominal_loc) != 0)
#endif
ereport(ERROR,
@@ -450,7 +450,7 @@ dropdb(const char *dbname)
char *nominal_loc;
char dbpath[MAXPGPATH];
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
@@ -503,8 +503,8 @@ dropdb(const char *dbname)
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- dbname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ dbname)));
/*
* Find the database's tuple by OID (should be unique).
@@ -577,10 +577,13 @@ dropdb(const char *dbname)
void
RenameDatabase(const char *oldname, const char *newname)
{
- HeapTuple tup, newtup;
+ HeapTuple tup,
+ newtup;
Relation rel;
- SysScanDesc scan, scan2;
- ScanKeyData key, key2;
+ SysScanDesc scan,
+ scan2;
+ ScanKeyData key,
+ key2;
/*
* Obtain AccessExclusiveLock so that no new session gets started
@@ -610,15 +613,14 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. Might
- * not be necessary, but it's consistent with other database
- * operations.
+ * Make sure the database does not have active sessions. Might not be
+ * necessary, but it's consistent with other database operations.
*/
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- oldname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ oldname)));
/* make sure the new name doesn't exist */
ScanKeyEntryInitialize(&key2, 0, Anum_pg_database_datname,
@@ -651,10 +653,10 @@ RenameDatabase(const char *oldname, const char *newname)
heap_close(rel, NoLock);
/*
- * Force dirty buffers out to disk, so that newly-connecting
- * backends will see the renamed database in pg_database right
- * away. (They'll see an uncommitted tuple, but they don't care;
- * see GetRawDatabaseInfo.)
+ * Force dirty buffers out to disk, so that newly-connecting backends
+ * will see the renamed database in pg_database right away. (They'll
+ * see an uncommitted tuple, but they don't care; see
+ * GetRawDatabaseInfo.)
*/
BufferSync();
}
@@ -671,7 +673,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
newtuple;
Relation rel;
ScanKeyData scankey;
- SysScanDesc scan;
+ SysScanDesc scan;
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
@@ -689,9 +691,9 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
errmsg("database \"%s\" does not exist", stmt->dbname)));
if (!(superuser()
- || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
+ || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
- stmt->dbname);
+ stmt->dbname);
MemSet(repl_repl, ' ', sizeof(repl_repl));
repl_repl[Anum_pg_database_datconfig - 1] = 'r';
@@ -750,7 +752,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
Relation relation;
ScanKeyData scanKey;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple tuple;
bool gottuple;
@@ -862,7 +864,7 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
#ifndef ALLOW_ABSOLUTE_DBPATHS
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("absolute paths are not allowed as database locations")));
+ errmsg("absolute paths are not allowed as database locations")));
#endif
prefix = dbpath;
}
@@ -874,8 +876,8 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
if (!var)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("postmaster environment variable \"%s\" not found",
- dbpath)));
+ errmsg("postmaster environment variable \"%s\" not found",
+ dbpath)));
if (!is_absolute_path(var))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
@@ -955,7 +957,7 @@ get_database_oid(const char *dbname)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
Oid oid;
@@ -993,7 +995,7 @@ get_database_name(Oid dbid)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
char *result;
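RenameDatabase, whose declarations and comments are reindented above, backs the statement sketched below; per the code it refuses to rename the current database or one with other active sessions, and flushes dirty buffers so newly connecting backends see the new name at once:

    ALTER DATABASE olddb RENAME TO newdb;   -- illustrative names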
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index bf0c95a75ef..c924dcc7b77 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.82 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.83 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -51,7 +51,8 @@ case_translate_language_name(const char *input, char *output)
{
int i;
- MemSet(output, 0, NAMEDATALEN); /* ensure result Name is zero-filled */
+ MemSet(output, 0, NAMEDATALEN); /* ensure result Name is
+ * zero-filled */
for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index bc137b0eaca..916c1ff772f 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.111 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.112 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,11 +45,11 @@ typedef struct ExplainState
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
TupOutputState *tstate);
-static double elapsed_time(struct timeval *starttime);
+static double elapsed_time(struct timeval * starttime);
static void explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
- Plan *outer_plan,
- int indent, ExplainState *es);
+ Plan *plan, PlanState * planstate,
+ Plan *outer_plan,
+ int indent, ExplainState *es);
static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
@@ -58,8 +58,8 @@ static void show_upper_qual(List *qual, const char *qlabel,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
static void show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols,
- const char *qlabel,
- StringInfo str, int indent, ExplainState *es);
+ const char *qlabel,
+ StringInfo str, int indent, ExplainState *es);
static Node *make_ors_ands_explicit(List *orclauses);
/*
@@ -255,8 +255,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
}
/*
- * Close down the query and free resources. Include time for this
- * in the total runtime.
+ * Close down the query and free resources. Include time for this in
+ * the total runtime.
*/
gettimeofday(&starttime, NULL);
@@ -282,7 +282,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
/* Compute elapsed time in seconds since given gettimeofday() timestamp */
static double
-elapsed_time(struct timeval *starttime)
+elapsed_time(struct timeval * starttime)
{
struct timeval endtime;
@@ -313,7 +313,7 @@ elapsed_time(struct timeval *starttime)
*/
static void
explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
+ Plan *plan, PlanState * planstate,
Plan *outer_plan,
int indent, ExplainState *es)
{
@@ -542,8 +542,8 @@ explain_outNode(StringInfo str,
/*
* If the expression is still a function call, we can get
* the real name of the function. Otherwise, punt (this
- * can happen if the optimizer simplified away the function
- * call, for example).
+ * can happen if the optimizer simplified away the
+ * function call, for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
@@ -583,15 +583,13 @@ explain_outNode(StringInfo str,
double nloops = planstate->instrument->nloops;
appendStringInfo(str, " (actual time=%.2f..%.2f rows=%.0f loops=%.0f)",
- 1000.0 * planstate->instrument->startup / nloops,
- 1000.0 * planstate->instrument->total / nloops,
+ 1000.0 * planstate->instrument->startup / nloops,
+ 1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
else if (es->printAnalyze)
- {
appendStringInfo(str, " (never executed)");
- }
}
appendStringInfoChar(str, '\n');
@@ -709,7 +707,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->initPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -807,7 +805,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->subPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -865,7 +863,7 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
*/
if (outer_plan)
{
- Relids varnos = pull_varnos(node);
+ Relids varnos = pull_varnos(node);
if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
@@ -1037,9 +1035,7 @@ make_ors_ands_explicit(List *orclauses)
FastListInit(&args);
foreach(orptr, orclauses)
- {
FastAppend(&args, make_ands_explicit(lfirst(orptr)));
- }
return (Node *) make_orclause(FastListValue(&args));
}
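The instrumentation printout adjusted above divides startup and total time by the number of loops, so EXPLAIN ANALYZE reports per-loop figures:

    EXPLAIN ANALYZE SELECT * FROM onek WHERE unique1 < 10;
    -- "actual time=a..b rows=r loops=n" gives a and b averaged per loop;
    -- multiply by loops for the node's total contribution.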
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 7a6a3775d64..181f52e1143 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.31 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.32 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -80,8 +80,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot return shell type %s",
- TypeNameToString(returnType))));
+ errmsg("SQL function cannot return shell type %s",
+ TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -147,8 +147,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (parameterCount >= FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("functions cannot have more than %d arguments",
- FUNC_MAX_ARGS)));
+ errmsg("functions cannot have more than %d arguments",
+ FUNC_MAX_ARGS)));
toid = LookupTypeName(t);
if (OidIsValid(toid))
@@ -159,8 +159,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot accept shell type %s",
- TypeNameToString(t))));
+ errmsg("SQL function cannot accept shell type %s",
+ TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -330,8 +330,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized function attribute \"%s\" ignored",
- param->defname)));
+ errmsg("unrecognized function attribute \"%s\" ignored",
+ param->defname)));
}
}
@@ -558,7 +558,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
- errhint("Use DROP AGGREGATE to drop aggregate functions.")));
+ errhint("Use DROP AGGREGATE to drop aggregate functions.")));
if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
@@ -664,7 +664,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
namespaceOid = procForm->pronamespace;
@@ -728,7 +728,7 @@ SetFunctionReturnType(Oid funcOid, Oid newRetType)
elog(ERROR, "cache lookup failed for function %u", funcOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
- if (procForm->prorettype != OPAQUEOID) /* caller messed up */
+ if (procForm->prorettype != OPAQUEOID) /* caller messed up */
elog(ERROR, "function %u doesn't return OPAQUE", funcOid);
/* okay to overwrite copied tuple */
@@ -815,7 +815,7 @@ CreateCast(CreateCastStmt *stmt)
if (sourcetypeid == targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("source data type and target data type are the same")));
+ errmsg("source data type and target data type are the same")));
/* No shells, no pseudo-types allowed */
if (!get_typisdefined(sourcetypeid))
@@ -878,10 +878,11 @@ CreateCast(CreateCastStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));
+
/*
* Restricting the volatility of a cast function may or may not be
* a good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -892,7 +893,7 @@ CreateCast(CreateCastStmt *stmt)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must not be an aggregate function")));
+ errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -902,12 +903,12 @@ CreateCast(CreateCastStmt *stmt)
}
else
{
- int16 typ1len;
- int16 typ2len;
- bool typ1byval;
- bool typ2byval;
- char typ1align;
- char typ2align;
+ int16 typ1len;
+ int16 typ2len;
+ bool typ1byval;
+ bool typ2byval;
+ char typ1align;
+ char typ2align;
/* indicates binary coercibility */
funcid = InvalidOid;
@@ -924,7 +925,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Also, insist that the types match as to size, alignment, and
* pass-by-value attributes; this provides at least a crude check
- * that they have similar representations. A pair of types that
+ * that they have similar representations. A pair of types that
* fail this test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
@@ -958,9 +959,9 @@ CreateCast(CreateCastStmt *stmt)
relation = heap_openr(CastRelationName, RowExclusiveLock);
/*
- * Check for duplicate. This is just to give a friendly error message,
- * the unique index would catch it anyway (so no need to sweat about
- * race conditions).
+ * Check for duplicate. This is just to give a friendly error
+ * message, the unique index would catch it anyway (so no need to
+ * sweat about race conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
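The CreateCast() hunks above carry the comment about binary (function-less) casts: the two types must agree on length, pass-by-value-ness, and alignment, as fetched with get_typlenbyvalalign(), or the cast is rejected as a crude representation check. A self-contained sketch of that comparison, with made-up type properties standing in for the catalog lookups:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical per-type storage properties, standing in for what
     * get_typlenbyvalalign() returns in the hunk above. */
    struct TypeRep {
        short len;     /* typlen: -1 for varlena, else fixed size */
        bool  byval;   /* pass-by-value? */
        char  align;   /* alignment code */
    };

    /* The crude check described in the comment: a function-less cast is only
     * allowed if both types store their values identically. */
    static bool same_representation(struct TypeRep a, struct TypeRep b)
    {
        return a.len == b.len && a.byval == b.byval && a.align == b.align;
    }

    int main(void)
    {
        struct TypeRep int4 = {4, true, 'i'};    /* illustrative values */
        struct TypeRep text = {-1, false, 'i'};

        printf("int4 vs int4: %s\n", same_representation(int4, int4) ? "ok" : "rejected");
        printf("int4 vs text: %s\n", same_representation(int4, text) ? "ok" : "rejected");
        return 0;
    }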
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 4cd66fd1b5d..5e3cec954d3 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.103 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.104 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,11 +44,11 @@
/* non-export function prototypes */
static void CheckPredicate(List *predList);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
/*
@@ -157,8 +157,8 @@ DefineIndex(RangeVar *heapRelation,
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support UNIQUE indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support UNIQUE indexes",
+ accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -192,16 +192,16 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Check that all of the attributes in a primary key are marked
- * as not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as not
+ * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
if (primary)
{
- List *keys;
+ List *keys;
foreach(keys, attributeList)
{
- IndexElem *key = (IndexElem *) lfirst(keys);
+ IndexElem *key = (IndexElem *) lfirst(keys);
HeapTuple atttuple;
if (!key->name)
@@ -216,15 +216,16 @@ DefineIndex(RangeVar *heapRelation,
atttuple = SearchSysCacheAttName(relationId, key->name);
if (HeapTupleIsValid(atttuple))
{
- if (! ((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
+ if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/*
* Try to make it NOT NULL.
*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
* to child tables? Currently, since the PRIMARY KEY
- * itself doesn't cascade, we don't cascade the notnull
- * constraint either; but this is pretty debatable.
+ * itself doesn't cascade, we don't cascade the
+ * notnull constraint either; but this is pretty
+ * debatable.
*/
AlterTableAlterColumnSetNotNull(relationId, false,
key->name);
@@ -236,8 +237,8 @@ DefineIndex(RangeVar *heapRelation,
/* This shouldn't happen if parser did its job ... */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ key->name)));
}
}
}
@@ -248,7 +249,7 @@ DefineIndex(RangeVar *heapRelation,
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
- indexInfo->ii_Expressions = NIL; /* for now */
+ indexInfo->ii_Expressions = NIL; /* for now */
indexInfo->ii_ExpressionsState = NIL;
indexInfo->ii_Predicate = cnfPred;
indexInfo->ii_PredicateState = NIL;
@@ -308,7 +309,7 @@ CheckPredicate(List *predList)
if (contain_mutable_functions((Node *) predList))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("functions in index predicate must be marked IMMUTABLE")));
+ errmsg("functions in index predicate must be marked IMMUTABLE")));
}
static void
@@ -351,7 +352,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
else if (attribute->expr && IsA(attribute->expr, Var))
{
/* Tricky tricky, he wrote (column) ... treat as simple attr */
- Var *var = (Var *) attribute->expr;
+ Var *var = (Var *) attribute->expr;
indexInfo->ii_KeyAttrNumbers[attn] = var->varattno;
atttype = get_atttype(relId, var->varattno);
@@ -360,30 +361,30 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
{
/* Index expression */
Assert(attribute->expr != NULL);
- indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
+ indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
attribute->expr);
atttype = exprType(attribute->expr);
/*
- * We don't currently support generation of an actual query plan
- * for an index expression, only simple scalar expressions;
- * hence these restrictions.
+ * We don't currently support generation of an actual query
+ * plan for an index expression, only simple scalar
+ * expressions; hence these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in index expression")));
+ errmsg("cannot use sub-select in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate in index expression")));
+ errmsg("cannot use aggregate in index expression")));
/*
* A expression using mutable functions is probably wrong,
- * since if you aren't going to get the same result for the same
- * data every time, it's not clear what the index entries mean at
- * all.
+ * since if you aren't going to get the same result for the
+ * same data every time, it's not clear what the index entries
+ * mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
@@ -413,21 +414,20 @@ GetIndexOpClass(List *opclass, Oid attrType,
opInputType;
/*
- * Release 7.0 removed network_ops, timespan_ops, and
- * datetime_ops, so we ignore those opclass names
- * so the default *_ops is used. This can be
- * removed in some later release. bjm 2000/02/07
+ * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
+ * we ignore those opclass names so the default *_ops is used. This
+ * can be removed in some later release. bjm 2000/02/07
*
- * Release 7.1 removes lztext_ops, so suppress that too
- * for a while. tgl 2000/07/30
+ * Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
+ * 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops,
- * so suppress that too for awhile. I'm starting to
- * think we need a better approach. tgl 2000/10/01
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
+ * too for awhile. I'm starting to think we need a better approach.
+ * tgl 2000/10/01
*/
if (length(opclass) == 1)
{
- char *claname = strVal(lfirst(opclass));
+ char *claname = strVal(lfirst(opclass));
if (strcmp(claname, "network_ops") == 0 ||
strcmp(claname, "timespan_ops") == 0 ||
@@ -499,8 +499,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opclass), format_type_be(attrType))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
@@ -607,7 +607,7 @@ ReindexIndex(RangeVar *indexRelation, bool force /* currently unused */ )
tuple = SearchSysCache(RELOID,
ObjectIdGetDatum(indOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", indOid);
if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
@@ -785,7 +785,8 @@ ReindexDatabase(const char *dbname, bool force, bool all)
for (i = 0; i < relcnt; i++)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
if (reindex_relation(relids[i], force))
ereport(NOTICE,
(errmsg("relation %u was reindexed", relids[i])));
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 60b041466f1..52792bc31ab 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.15 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.16 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,13 +103,13 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Currently, we require superuser privileges to create an opclass.
* This seems necessary because we have no way to validate that the
* offered set of operators and functions are consistent with the AM's
- * expectations. It would be nice to provide such a check someday,
- * if it can be done without solving the halting problem :-(
+ * expectations. It would be nice to provide such a check someday, if
+ * it can be done without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create an operator class")));
+ errmsg("must be superuser to create an operator class")));
/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
@@ -157,8 +157,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (operators[item->number - 1] != InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- item->number)));
+ errmsg("operator number %d appears more than once",
+ item->number)));
if (item->args != NIL)
{
TypeName *typeName1 = (TypeName *) lfirst(item->args);
@@ -211,7 +211,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("storage type specified more than once")));
+ errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
@@ -532,7 +532,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
opcID = HeapTupleGetOid(tuple);
@@ -681,7 +681,7 @@ RenameOpClass(List *name, const char *access_method, const char *newname)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);
namespaceOid = ((Form_pg_opclass) GETSTRUCT(tup))->opcnamespace;
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 6a4d479c121..ddc088fe2f5 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.10 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.11 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -103,7 +103,7 @@ DefineOperator(List *names, List *parameters)
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
@@ -111,7 +111,7 @@ DefineOperator(List *names, List *parameters)
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index cf4a0638717..aa5a5b9ea61 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,17 +4,17 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
- *
+ *
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.19 2003/08/01 13:53:36 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.20 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@
* Execute SQL DECLARE CURSOR command.
*/
void
-PerformCursorOpen(DeclareCursorStmt *stmt)
+PerformCursorOpen(DeclareCursorStmt * stmt)
{
List *rewritten;
Query *query;
@@ -64,7 +64,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* The query has been through parse analysis, but not rewriting or
* planning as yet. Note that the grammar ensured we have a SELECT
- * query, so we are not expecting rule rewriting to do anything strange.
+ * query, so we are not expecting rule rewriting to do anything
+ * strange.
*/
rewritten = QueryRewrite((Query *) stmt->query);
if (length(rewritten) != 1 || !IsA(lfirst(rewritten), Query))
@@ -86,8 +87,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
plan = planner(query, true, stmt->options);
/*
- * Create a portal and copy the query and plan into its memory context.
- * (If a duplicate cursor name already exists, warn and drop it.)
+ * Create a portal and copy the query and plan into its memory
+ * context. (If a duplicate cursor name already exists, warn and drop
+ * it.)
*/
portal = CreatePortal(stmt->portalname, true, false);
@@ -98,7 +100,7 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
- "SELECT", /* cursor's query is always a SELECT */
+ "SELECT", /* cursor's query is always a SELECT */
makeList1(query),
makeList1(plan),
PortalGetHeapMemory(portal));
@@ -108,9 +110,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* Set up options for portal.
*
- * If the user didn't specify a SCROLL type, allow or disallow
- * scrolling based on whether it would require any additional
- * runtime overhead to do so.
+ * If the user didn't specify a SCROLL type, allow or disallow scrolling
+ * based on whether it would require any additional runtime overhead
+ * to do so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
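The cursorOptions hunk above shows the defaulting step: only when the user gave neither SCROLL nor NO SCROLL does the code pick a default based on whether scrolling would add runtime overhead. A small sketch of that flag logic; the CURSOR_OPT_* values and the cheap_scroll test below are assumptions for illustration, since the real constants and the planner check are not part of this hunk.

    #include <stdio.h>

    /* Illustrative flag bits; the real CURSOR_OPT_* values live in backend
     * headers and are not shown here. */
    #define CURSOR_OPT_SCROLL     0x0002
    #define CURSOR_OPT_NO_SCROLL  0x0004

    /* Mimics the defaulting above: if the user said neither SCROLL nor
     * NO SCROLL, pick a default from a hypothetical "no extra overhead"
     * flag standing in for the planner's test. */
    static int resolve_cursor_options(int options, int cheap_scroll)
    {
        if (!(options & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
            options |= cheap_scroll ? CURSOR_OPT_SCROLL : CURSOR_OPT_NO_SCROLL;
        return options;
    }

    int main(void)
    {
        printf("explicit SCROLL kept: %#x\n",
               resolve_cursor_options(CURSOR_OPT_SCROLL, 0));
        printf("default, cheap plan:  %#x\n", resolve_cursor_options(0, 1));
        printf("default, costly plan: %#x\n", resolve_cursor_options(0, 0));
        return 0;
    }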
@@ -129,8 +131,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
Assert(portal->strategy == PORTAL_ONE_SELECT);
/*
- * We're done; the query won't actually be run until PerformPortalFetch
- * is called.
+ * We're done; the query won't actually be run until
+ * PerformPortalFetch is called.
*/
}
@@ -169,7 +171,7 @@ PerformPortalFetch(FetchStmt *stmt,
/* FIXME: shouldn't this be an ERROR? */
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("portal \"%s\" does not exist", stmt->portalname)));
+ errmsg("portal \"%s\" does not exist", stmt->portalname)));
if (completionTag)
strcpy(completionTag, stmt->ismove ? "MOVE 0" : "FETCH 0");
return;
@@ -219,7 +221,7 @@ PerformPortalClose(const char *name)
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", name),
- errfunction("PerformPortalClose"))); /* for ecpg */
+ errfunction("PerformPortalClose"))); /* for ecpg */
return;
}
@@ -249,7 +251,8 @@ PortalCleanup(Portal portal, bool isError)
/*
* Shut down executor, if still running. We skip this during error
* abort, since other mechanisms will take care of releasing executor
- * resources, and we can't be sure that ExecutorEnd itself wouldn't fail.
+ * resources, and we can't be sure that ExecutorEnd itself wouldn't
+ * fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
@@ -271,14 +274,14 @@ PortalCleanup(Portal portal, bool isError)
void
PersistHoldablePortal(Portal portal)
{
- QueryDesc *queryDesc = PortalGetQueryDesc(portal);
+ QueryDesc *queryDesc = PortalGetQueryDesc(portal);
MemoryContext savePortalContext;
MemoryContext saveQueryContext;
MemoryContext oldcxt;
/*
- * If we're preserving a holdable portal, we had better be
- * inside the transaction that originally created it.
+ * If we're preserving a holdable portal, we had better be inside the
+ * transaction that originally created it.
*/
Assert(portal->createXact == GetCurrentTransactionId());
Assert(queryDesc != NULL);
@@ -321,9 +324,8 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in
- * the tuplestore, so that subsequent backward FETCHs can be
- * processed.
+ * Rewind the executor: we need to store the entire result set in the
+ * tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);
@@ -351,17 +353,17 @@ PersistHoldablePortal(Portal portal)
/*
* Reset the position in the result set: ideally, this could be
* implemented by just skipping straight to the tuple # that we need
- * to be at, but the tuplestore API doesn't support that. So we
- * start at the beginning of the tuplestore and iterate through it
- * until we reach where we need to be. FIXME someday?
+ * to be at, but the tuplestore API doesn't support that. So we start
+ * at the beginning of the tuplestore and iterate through it until we
+ * reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
if (!portal->atEnd)
{
- long store_pos;
+ long store_pos;
- if (portal->posOverflow) /* oops, cannot trust portalPos */
+ if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));
@@ -370,8 +372,8 @@ PersistHoldablePortal(Portal portal)
for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
{
- HeapTuple tup;
- bool should_free;
+ HeapTuple tup;
+ bool should_free;
tup = tuplestore_gettuple(portal->holdStore, true,
&should_free);
@@ -389,8 +391,8 @@ PersistHoldablePortal(Portal portal)
/*
* We can now release any subsidiary memory of the portal's heap
* context; we'll never use it again. The executor already dropped
- * its context, but this will clean up anything that glommed onto
- * the portal's heap via PortalContext.
+ * its context, but this will clean up anything that glommed onto the
+ * portal's heap via PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index cd58d7fc7b6..d0fabd1ad31 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.21 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.22 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(EState *estate,
- List *params, List *argtypes);
+ List *params, List *argtypes);
/*
* Implements the 'PREPARE' utility statement.
@@ -90,12 +90,12 @@ PrepareQuery(PrepareStmt *stmt)
/* Rewrite the query. The result could be 0, 1, or many queries. */
query_list = QueryRewrite(stmt->query);
- /* Generate plans for queries. Snapshot is already set. */
+ /* Generate plans for queries. Snapshot is already set. */
plan_list = pg_plan_queries(query_list, false);
/* Save the results. */
StorePreparedStatement(stmt->name,
- NULL, /* text form not available */
+ NULL, /* text form not available */
commandTag,
query_list,
plan_list,
@@ -131,8 +131,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
@@ -144,15 +144,15 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
portal = CreateNewPortal();
/*
- * For CREATE TABLE / AS EXECUTE, make a copy of the stored query
- * so that we can modify its destination (yech, but this has
- * always been ugly). For regular EXECUTE we can just use the
- * stored query where it sits, since the executor is read-only.
+ * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
+ * that we can modify its destination (yech, but this has always been
+ * ugly). For regular EXECUTE we can just use the stored query where
+ * it sits, since the executor is read-only.
*/
if (stmt->into)
{
MemoryContext oldContext;
- Query *query;
+ Query *query;
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
@@ -208,11 +208,11 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
static ParamListInfo
EvaluateParams(EState *estate, List *params, List *argtypes)
{
- int nargs = length(argtypes);
- ParamListInfo paramLI;
- List *exprstates;
- List *l;
- int i = 0;
+ int nargs = length(argtypes);
+ ParamListInfo paramLI;
+ List *exprstates;
+ List *l;
+ int i = 0;
/* Parser should have caught this error, but check for safety */
if (length(params) != nargs)
@@ -229,7 +229,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
@@ -273,7 +273,7 @@ InitQueryHashTable(void)
* to the hash entry, so the caller can dispose of their copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
- * or possibly NULL, so it need not be copied. Note that commandTag should
+ * or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@@ -367,9 +367,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError)
if (prepared_queries)
{
/*
- * We can't just use the statement name as supplied by the user: the
- * hash package is picky enough that it needs to be NULL-padded out to
- * the appropriate length to work correctly.
+ * We can't just use the statement name as supplied by the user:
+ * the hash package is picky enough that it needs to be
+ * NULL-padded out to the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt_name, sizeof(key));
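The FetchPreparedStatement() hunk above explains why the statement name cannot be used directly as a hash key: the hash package expects a fixed-length key, so the name must be NUL-padded out to that length before lookup. A standalone sketch of that key preparation; STMT_NAME_KEYSIZE is an illustrative size, not the backend's actual key length.

    #include <stdio.h>
    #include <string.h>

    #define STMT_NAME_KEYSIZE 64    /* illustrative fixed key length */

    /* Mimics the lookup-key preparation above: zero the whole buffer first so
     * the bytes past the end of the name are NULs, then copy the name in.  A
     * fixed-size hash key compares byte-for-byte, so the padding matters. */
    static void build_key(char *key, const char *stmt_name)
    {
        memset(key, 0, STMT_NAME_KEYSIZE);          /* MemSet in the hunk */
        strncpy(key, stmt_name, STMT_NAME_KEYSIZE); /* overlong names would be
                                                     * truncated, unterminated */
    }

    int main(void)
    {
        char key[STMT_NAME_KEYSIZE];

        build_key(key, "my_prepared_stmt");
        printf("key starts with: %s\n", key);
        return 0;
    }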
@@ -412,9 +412,9 @@ FetchPreparedStatementParams(const char *stmt_name)
* Note: the result is created or copied into current memory context.
*/
TupleDesc
-FetchPreparedStatementResultDesc(PreparedStatement *stmt)
+FetchPreparedStatementResultDesc(PreparedStatement * stmt)
{
- Query *query;
+ Query *query;
switch (ChoosePortalStrategy(stmt->query_list))
{
@@ -476,7 +476,7 @@ DropPreparedStatement(const char *stmt_name, bool showError)
void
ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
{
- ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
+ ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
PreparedStatement *entry;
List *l,
*query_list,
@@ -499,8 +499,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
@@ -510,8 +510,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
/* Explain each query */
foreach(l, query_list)
{
- Query *query = (Query *) lfirst(l);
- Plan *plan = (Plan *) lfirst(plan_list);
+ Query *query = (Query *) lfirst(l);
+ Plan *plan = (Plan *) lfirst(plan_list);
bool is_last_query;
plan_list = lnext(plan_list);
@@ -533,7 +533,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("prepared statement is not a SELECT")));
+ errmsg("prepared statement is not a SELECT")));
/* Copy the query so we can modify it */
query = copyObject(query);
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 69000b29bc7..b0a4702a715 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.47 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.48 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create procedural language")));
+ errmsg("must be superuser to create procedural language")));
/*
* Translate the language name and check that this language doesn't
@@ -85,7 +85,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER.
*/
@@ -183,7 +183,7 @@ DropProceduralLanguage(DropPLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to drop procedural language")));
+ errmsg("must be superuser to drop procedural language")));
/*
* Translate the language name, check that this language exist and is
@@ -225,7 +225,7 @@ DropProceduralLanguageById(Oid langOid)
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(langOid),
0, 0, 0);
- if (!HeapTupleIsValid(langTup)) /* should not happen */
+ if (!HeapTupleIsValid(langTup)) /* should not happen */
elog(ERROR, "cache lookup failed for language %u", langOid);
simple_heap_delete(rel, &langTup->t_self);
@@ -266,7 +266,7 @@ RenameLanguage(const char *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename procedural language")));
+ errmsg("must be superuser to rename procedural language")));
/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 5ad81634f41..4eb285daa33 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.14 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.15 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* Create the schema's namespace */
namespaceId = NamespaceCreate(schemaName, owner_userid);
@@ -215,7 +215,7 @@ RemoveSchemaById(Oid schemaOid)
tup = SearchSysCache(NAMESPACEOID,
ObjectIdGetDatum(schemaOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for namespace %u", schemaOid);
simple_heap_delete(relation, &tup->t_self);
@@ -248,9 +248,9 @@ RenameSchema(const char *oldname, const char *newname)
/* make sure the new name doesn't exist */
if (HeapTupleIsValid(
- SearchSysCache(NAMESPACENAME,
- CStringGetDatum(newname),
- 0, 0, 0)))
+ SearchSysCache(NAMESPACENAME,
+ CStringGetDatum(newname),
+ 0, 0, 0)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_SCHEMA),
errmsg("schema \"%s\" already exists", newname)));
@@ -270,7 +270,7 @@ RenameSchema(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 7ce7810fbca..01544a015b3 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.99 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.100 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static SeqTable seqtab = NULL; /* Head of list of SeqTable items */
static void init_sequence(RangeVar *relation,
- SeqTable *p_elm, Relation *p_rel);
+ SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, Form_pg_sequence new);
static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
@@ -97,10 +97,10 @@ DefineSequence(CreateSeqStmt *seq)
/* Values are NULL (or false) by default */
new.last_value = 0;
new.increment_by = 0;
- new.max_value = 0;
+ new.max_value = 0;
new.min_value = 0;
new.cache_value = 0;
- new.is_cycled = false;
+ new.is_cycled = false;
/* Check and set values */
init_params(seq->options, &new);
@@ -299,10 +299,10 @@ DefineSequence(CreateSeqStmt *seq)
/*
* AlterSequence
*
- * Modify the defition of a sequence relation
+ * Modify the defition of a sequence relation
*/
void
-AlterSequence(AlterSeqStmt *stmt)
+AlterSequence(AlterSeqStmt * stmt)
{
SeqTable elm;
Relation seqrel;
@@ -324,7 +324,7 @@ AlterSequence(AlterSeqStmt *stmt)
page = BufferGetPage(buf);
new.increment_by = seq->increment_by;
- new.max_value = seq->max_value;
+ new.max_value = seq->max_value;
new.min_value = seq->min_value;
new.cache_value = seq->cache_value;
new.is_cycled = seq->is_cycled;
@@ -346,9 +346,9 @@ AlterSequence(AlterSeqStmt *stmt)
}
/* save info in local cache */
- elm->last = new.last_value; /* last returned number */
- elm->cached = new.last_value; /* last cached number (forget cached
- * values) */
+ elm->last = new.last_value; /* last returned number */
+ elm->cached = new.last_value; /* last cached number (forget
+ * cached values) */
START_CRIT_SECTION();
@@ -494,9 +494,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MAXVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MAXVALUE (%s)",
+ sequence->relname, buf)));
}
next = minv;
}
@@ -517,9 +517,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MINVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MINVALUE (%s)",
+ sequence->relname, buf)));
}
next = maxv;
}
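The nextval() hunks above show the errors raised when a non-cycling sequence runs past MAXVALUE or MINVALUE, and the wrap to the opposite bound when the sequence does cycle. A toy sketch of that behaviour; it ignores the arithmetic-overflow edge cases and uses plain int64_t in place of the backend's sequence machinery, so only the cycle/error decision is taken from the hunk.

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    /* Advance a value by incby within [minv, maxv]; cycle or fail at the
     * bounds, mirroring the branches shown above. */
    static int advance(int64_t *value, int64_t incby,
                       int64_t minv, int64_t maxv, int cycle)
    {
        int64_t next = *value + incby;

        if (incby > 0 && next > maxv)
        {
            if (!cycle)
            {
                fprintf(stderr, "nextval: reached MAXVALUE (%" PRId64 ")\n", maxv);
                return -1;
            }
            next = minv;            /* wrap around, ascending */
        }
        else if (incby < 0 && next < minv)
        {
            if (!cycle)
            {
                fprintf(stderr, "nextval: reached MINVALUE (%" PRId64 ")\n", minv);
                return -1;
            }
            next = maxv;            /* wrap around, descending */
        }
        *value = next;
        return 0;
    }

    int main(void)
    {
        int64_t v = 3;

        while (advance(&v, 1, 1, 5, 1) == 0 && v != 3)
            printf("%" PRId64 "\n", v);   /* prints 4 5 1 2 */
        return 0;
    }

With bounds 1..5 and cycling enabled, the loop prints 4 5 1 2 before arriving back at the starting value.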
@@ -895,9 +895,9 @@ init_params(List *options, Form_pg_sequence new)
errmsg("conflicting or redundant options")));
increment_by = defel;
}
+
/*
- * start is for a new sequence
- * restart is for alter
+ * start is for a new sequence restart is for alter
*/
else if (strcmp(defel->defname, "start") == 0 ||
strcmp(defel->defname, "restart") == 0)
@@ -963,9 +963,9 @@ init_params(List *options, Form_pg_sequence new)
|| (max_value != (DefElem *) NULL && !max_value->arg))
{
if (new->increment_by > 0)
- new->max_value = SEQ_MAXVALUE; /* ascending seq */
+ new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
- new->max_value = -1; /* descending seq */
+ new->max_value = -1; /* descending seq */
}
else if (max_value != (DefElem *) NULL)
new->max_value = defGetInt64(max_value);
@@ -975,9 +975,9 @@ init_params(List *options, Form_pg_sequence new)
|| (min_value != (DefElem *) NULL && !min_value->arg))
{
if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
+ new->min_value = 1; /* ascending seq */
else
- new->min_value = SEQ_MINVALUE; /* descending seq */
+ new->min_value = SEQ_MINVALUE; /* descending seq */
}
else if (min_value != (DefElem *) NULL)
new->min_value = defGetInt64(min_value);
@@ -996,7 +996,7 @@ init_params(List *options, Form_pg_sequence new)
}
/* START WITH */
- if (new->last_value == 0 && last_value == (DefElem *) NULL)
+ if (new->last_value == 0 && last_value == (DefElem *) NULL)
{
if (new->increment_by > 0)
new->last_value = new->min_value; /* ascending seq */
@@ -1015,8 +1015,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be less than MINVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be less than MINVALUE (%s)",
+ bufs, bufm)));
}
if (new->last_value > new->max_value)
{
@@ -1027,8 +1027,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
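The init_params() hunks above show the defaults applied when MAXVALUE, MINVALUE, or START are omitted: an ascending sequence gets the range [1, SEQ_MAXVALUE] and starts at its MINVALUE, a descending one gets [SEQ_MINVALUE, -1]. Only the ascending START default is visible in the hunk, so the descending branch below (starting at MAXVALUE) is an assumption made for symmetry, and the SEQ_MAXVALUE/SEQ_MINVALUE definitions are illustrative rather than the backend's.

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    #define SEQ_MAXVALUE INT64_C(0x7FFFFFFFFFFFFFFF)   /* illustrative bound */
    #define SEQ_MINVALUE (-SEQ_MAXVALUE)

    struct seq_params {
        int64_t increment_by;
        int64_t min_value;
        int64_t max_value;
        int64_t last_value;     /* the START value */
    };

    /* Sketch of the defaulting shown above. */
    static void default_bounds(struct seq_params *p)
    {
        if (p->increment_by > 0)
        {
            p->max_value = SEQ_MAXVALUE;   /* ascending seq */
            p->min_value = 1;
            p->last_value = p->min_value;  /* START defaults to MINVALUE */
        }
        else
        {
            p->max_value = -1;             /* descending seq */
            p->min_value = SEQ_MINVALUE;
            p->last_value = p->max_value;  /* assumed symmetric default */
        }
    }

    int main(void)
    {
        struct seq_params up = {.increment_by = 1};
        struct seq_params down = {.increment_by = -1};

        default_bounds(&up);
        default_bounds(&down);
        printf("ascending:  start=%" PRId64 " max=%" PRId64 "\n",
               up.last_value, up.max_value);
        printf("descending: start=%" PRId64 " min=%" PRId64 "\n",
               down.last_value, down.min_value);
        return 0;
    }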
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index b3108053d9d..6e503fdac54 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.76 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.77 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,18 +57,19 @@
*/
typedef struct OnCommitItem
{
- Oid relid; /* relid of relation */
- OnCommitAction oncommit; /* what to do at end of xact */
+ Oid relid; /* relid of relation */
+ OnCommitAction oncommit; /* what to do at end of xact */
/*
* If this entry was created during this xact, it should be deleted at
* xact abort. Conversely, if this entry was deleted during this
* xact, it should be removed at xact commit. We leave deleted
- * entries in the list until commit so that we can roll back if needed.
+ * entries in the list until commit so that we can roll back if
+ * needed.
*/
bool created_in_cur_xact;
bool deleted_in_cur_xact;
-} OnCommitItem;
+} OnCommitItem;
static List *on_commits = NIL;
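The OnCommitItem comment above states the lifecycle rule: an entry created in the current transaction is dropped if that transaction aborts, and an entry deleted in the current transaction is dropped once it commits, deletions being kept in the list until then so they can be rolled back. A sketch encoding just that stated rule; the list management and the relid/oncommit fields are omitted, and this is not the backend's end-of-transaction code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirror of the OnCommitItem bookkeeping above (other fields omitted). */
    struct OnCommitItem {
        bool created_in_cur_xact;
        bool deleted_in_cur_xact;
    };

    /* Does the entry survive the end of the current transaction? */
    static bool entry_survives(const struct OnCommitItem *item, bool committed)
    {
        if (committed)
            return !item->deleted_in_cur_xact;  /* deletions finalized at commit */
        return !item->created_in_cur_xact;      /* creations undone at abort */
    }

    int main(void)
    {
        struct OnCommitItem created = {.created_in_cur_xact = true};
        struct OnCommitItem deleted = {.deleted_in_cur_xact = true};

        printf("created entry after abort:  %d\n", entry_survives(&created, false));
        printf("deleted entry after commit: %d\n", entry_survives(&deleted, true));
        printf("deleted entry after abort:  %d\n", entry_survives(&deleted, false));
        return 0;
    }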
@@ -82,14 +83,14 @@ static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterTableAddCheckConstraint(Relation rel, Constraint *constr);
static void AlterTableAddForeignKeyConstraint(Relation rel,
- FkConstraint *fkconstraint);
+ FkConstraint *fkconstraint);
static int transformColumnNameList(Oid relId, List *colList,
- int16 *attnums, Oid *atttypids);
+ int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
- List **attnamelist,
- int16 *attnums, Oid *atttypids);
-static Oid transformFkeyCheckAttrs(Relation pkrel,
- int numattrs, int16 *attnums);
+ List **attnamelist,
+ int16 *attnums, Oid *atttypids);
+static Oid transformFkeyCheckAttrs(Relation pkrel,
+ int numattrs, int16 *attnums);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
@@ -206,8 +207,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (strcmp(check[i].ccname, cdef->name) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("duplicate CHECK constraint name \"%s\"",
- cdef->name)));
+ errmsg("duplicate CHECK constraint name \"%s\"",
+ cdef->name)));
}
check[ncheck].ccname = cdef->name;
}
@@ -399,7 +400,7 @@ TruncateRelation(const RangeVar *relation)
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temp tables of other processes")));
+ errmsg("cannot truncate temp tables of other processes")));
/*
* Don't allow truncate on tables which are referenced by foreign keys
@@ -435,8 +436,8 @@ TruncateRelation(const RangeVar *relation)
heap_close(fkeyRel, AccessShareLock);
/*
- * Do the real work using the same technique as cluster, but
- * without the data-copying portion
+ * Do the real work using the same technique as cluster, but without
+ * the data-copying portion
*/
rebuild_relation(rel, InvalidOid);
@@ -570,8 +571,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation \"%s\"",
- parent->relname)));
+ errmsg("cannot inherit from temporary relation \"%s\"",
+ parent->relname)));
/*
* We should have an UNDER permission flag for this, but for now,
@@ -652,7 +653,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- format_type_be(attribute->atttypid))));
+ format_type_be(attribute->atttypid))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
def->is_not_null |= attribute->attnotnull;
@@ -803,11 +804,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
def->typename->typmod != newdef->typename->typmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("attribute \"%s\" has a type conflict",
- attributeName),
+ errmsg("attribute \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- TypeNameToString(newdef->typename))));
+ TypeNameToString(newdef->typename))));
/* Mark the column as locally defined */
def->is_local = true;
/* Merge of NOT NULL constraints = OR 'em together */
@@ -1230,8 +1231,8 @@ renameatt(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- newattname, RelationGetRelationName(targetrelation))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
@@ -1257,7 +1258,7 @@ renameatt(Oid myrelid,
/*
* Scan through index columns to see if there's any simple index
- * entries for this attribute. We ignore expressional entries.
+ * entries for this attribute. We ignore expressional entries.
*/
indextup = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -1270,6 +1271,7 @@ renameatt(Oid myrelid,
{
if (attnum != indexform->indkey[i])
continue;
+
/*
* Found one, rename it.
*/
@@ -1279,6 +1281,7 @@ renameatt(Oid myrelid,
0, 0);
if (!HeapTupleIsValid(atttup))
continue; /* should we raise an error? */
+
/*
* Update the (copied) attribute tuple.
*/
@@ -1366,7 +1369,7 @@ renamerel(Oid myrelid, const char *newrelname)
reltup = SearchSysCacheCopy(RELOID,
PointerGetDatum(myrelid),
0, 0, 0);
- if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
+ if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", myrelid);
if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
@@ -1743,7 +1746,7 @@ AlterTableAddColumn(Oid myrelid,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- get_rel_name(childrelid), colDef->colname)));
+ get_rel_name(childrelid), colDef->colname)));
/*
* XXX if we supported NOT NULL or defaults, would need to do
@@ -1782,7 +1785,7 @@ AlterTableAddColumn(Oid myrelid,
if (find_inheritance_children(myrelid) != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute must be added to child tables too")));
+ errmsg("attribute must be added to child tables too")));
}
/*
@@ -1801,14 +1804,14 @@ AlterTableAddColumn(Oid myrelid,
if (colDef->raw_default || colDef->cooked_default)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("adding columns with defaults is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
+ errmsg("adding columns with defaults is not implemented"),
+ errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
if (colDef->is_not_null)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("adding NOT NULL columns is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
+ errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
pgclass = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1829,8 +1832,8 @@ AlterTableAddColumn(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- colDef->colname, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ colDef->colname, RelationGetRelationName(rel))));
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
@@ -2014,8 +2017,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2057,8 +2060,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (indexStruct->indkey[i] == attnum)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute \"%s\" is in a primary key",
- colName)));
+ errmsg("attribute \"%s\" is in a primary key",
+ colName)));
}
}
@@ -2158,8 +2161,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2286,8 +2289,8 @@ AlterTableAlterColumnDefault(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2450,8 +2453,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum < 0)
@@ -2476,8 +2479,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("column datatype %s can only have storage \"plain\"",
- format_type_be(attrtuple->atttypid))));
+ errmsg("column datatype %s can only have storage \"plain\"",
+ format_type_be(attrtuple->atttypid))));
}
simple_heap_update(attrelation, &tuple->t_self, tuple);
@@ -2573,7 +2576,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
(errmsg("table \"%s\" is already WITHOUT OIDS",
RelationGetRelationName(rel))));
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
return;
}
@@ -2601,8 +2604,8 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
attrel = heap_open(RelOid_pg_attribute, RowExclusiveLock);
/*
- * Oids are being removed from the relation, so we need
- * to remove the oid pg_attribute record relating.
+ * Oids are being removed from the relation, so we need to remove
+ * the oid pg_attribute record relating.
*/
atttup = SearchSysCache(ATTNUM,
ObjectIdGetDatum(myrelid),
@@ -2621,7 +2624,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
}
/*
@@ -2663,8 +2666,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Can't drop a system attribute */
/* XXX perhaps someday allow dropping OID? */
@@ -2712,7 +2715,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
childatt->attinhcount--;
@@ -2731,9 +2734,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
}
/*
- * Propagate to children if desired. Unlike most other ALTER routines,
- * we have to do this one level of recursion at a time; we can't use
- * find_all_inheritors to do it in one pass.
+ * Propagate to children if desired. Unlike most other ALTER
+ * routines, we have to do this one level of recursion at a time; we
+ * can't use find_all_inheritors to do it in one pass.
*/
if (recurse)
{
@@ -2763,7 +2766,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
@@ -2882,18 +2885,18 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetNamespace(rel),
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
constr->name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetRelid(rel),
+ RelationGetNamespace(rel),
&counter);
/*
@@ -2923,14 +2926,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
if (fkconstraint->constr_name)
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
+ RelationGetRelid(rel),
RelationGetNamespace(rel),
fkconstraint->constr_name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
fkconstraint->constr_name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
fkconstraint->constr_name = GenerateConstraintName(CONSTRAINT_RELATION,
@@ -2959,7 +2962,7 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
/*
* Add a check constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -2979,13 +2982,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
Node *expr;
/*
- * We need to make a parse state and range
- * table to allow us to do transformExpr()
+ * We need to make a parse state and range table to allow us to do
+ * transformExpr()
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(pstate,
RelationGetRelid(rel),
- makeAlias(RelationGetRelationName(rel), NIL),
+ makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, true, true);
@@ -3006,8 +3009,8 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (length(pstate->p_rtable) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("CHECK constraint may only reference relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("CHECK constraint may only reference relation \"%s\"",
+ RelationGetRelationName(rel))));
/*
* No subplans or aggregates, either...
@@ -3070,15 +3073,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (!successful)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("CHECK constraint \"%s\" is violated at some row(s)",
- constr->name)));
+ errmsg("CHECK constraint \"%s\" is violated at some row(s)",
+ constr->name)));
/*
- * Call AddRelationRawConstraints to do
- * the real adding -- It duplicates some
- * of the above, but does not check the
- * validity of the constraint against
- * tuples already in the table.
+ * Call AddRelationRawConstraints to do the real adding -- It
+ * duplicates some of the above, but does not check the validity of
+ * the constraint against tuples already in the table.
*/
AddRelationRawConstraints(rel, NIL, makeList1(constr));
}
@@ -3086,7 +3087,7 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -3106,12 +3107,11 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
Oid constrOid;
/*
- * Grab an exclusive lock on the pk table, so that
- * someone doesn't delete rows out from under us.
- * (Although a lesser lock would do for that purpose,
- * we'll need exclusive lock anyway to add triggers to
- * the pk table; trying to start with a lesser lock
- * will just create a risk of deadlock.)
+ * Grab an exclusive lock on the pk table, so that someone doesn't
+ * delete rows out from under us. (Although a lesser lock would do for
+ * that purpose, we'll need exclusive lock anyway to add triggers to
+ * the pk table; trying to start with a lesser lock will just create a
+ * risk of deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);
@@ -3152,8 +3152,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
errmsg("cannot reference temporary table from permanent table constraint")));
/*
- * Look up the referencing attributes to make sure they
- * exist, and record their attnums and type OIDs.
+ * Look up the referencing attributes to make sure they exist, and
+ * record their attnums and type OIDs.
*/
for (i = 0; i < INDEX_MAX_KEYS; i++)
{
@@ -3166,10 +3166,10 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkattnum, fktypoid);
/*
- * If the attribute list for the referenced table was omitted,
- * lookup the definition of the primary key and use it. Otherwise,
- * validate the supplied attribute list. In either case, discover
- * the index OID and the attnums and type OIDs of the attributes.
+ * If the attribute list for the referenced table was omitted, lookup
+ * the definition of the primary key and use it. Otherwise, validate
+ * the supplied attribute list. In either case, discover the index
+ * OID and the attnums and type OIDs of the attributes.
*/
if (fkconstraint->pk_attrs == NIL)
{
@@ -3208,8 +3208,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
}
/*
- * Check that the constraint is satisfied by existing
- * rows (we can skip this during table creation).
+ * Check that the constraint is satisfied by existing rows (we can
+ * skip this during table creation).
*/
if (!fkconstraint->skip_validation)
validateForeignKeyConstraint(fkconstraint, rel, pkrel);
@@ -3225,7 +3225,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
RelationGetRelid(rel),
fkattnum,
numfks,
- InvalidOid, /* not a domain constraint */
+ InvalidOid, /* not a domain
+ * constraint */
RelationGetRelid(pkrel),
pkattnum,
numpks,
@@ -3233,7 +3234,7 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkconstraint->fk_del_action,
fkconstraint->fk_matchtype,
indexOid,
- NULL, /* no check constraint */
+ NULL, /* no check constraint */
NULL,
NULL);
@@ -3276,8 +3277,8 @@ transformColumnNameList(Oid relId, List *colList,
if (attnum >= INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
- errmsg("cannot have more than %d keys in a foreign key",
- INDEX_MAX_KEYS)));
+ errmsg("cannot have more than %d keys in a foreign key",
+ INDEX_MAX_KEYS)));
attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
ReleaseSysCache(atttuple);
@@ -3291,7 +3292,7 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Used when the column list in the REFERENCES specification
+ * for the pkrel. Used when the column list in the REFERENCES specification
* is omitted.
*/
static int
@@ -3339,12 +3340,12 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
+ RelationGetRelationName(pkrel))));
/*
- * Now build the list of PK attributes from the indkey definition
- * (we assume a primary key cannot have expressional elements)
+ * Now build the list of PK attributes from the indkey definition (we
+ * assume a primary key cannot have expressional elements)
*/
*attnamelist = NIL;
for (i = 0; i < indexStruct->indnatts; i++)
@@ -3389,7 +3390,8 @@ transformFkeyCheckAttrs(Relation pkrel,
{
HeapTuple indexTuple;
Form_pg_index indexStruct;
- int i, j;
+ int i,
+ j;
indexoid = lfirsto(indexoidscan);
indexTuple = SearchSysCache(INDEXRELID,
@@ -3453,7 +3455,7 @@ transformFkeyCheckAttrs(Relation pkrel,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
errmsg("there is no UNIQUE constraint matching given keys for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ RelationGetRelationName(pkrel))));
freeList(indexoidlist);
@@ -3969,17 +3971,17 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
void
AlterTableClusterOn(Oid relOid, const char *indexName)
{
- Relation rel,
- pg_index;
- List *index;
- Oid indexOid;
- HeapTuple indexTuple;
- Form_pg_index indexForm;
-
+ Relation rel,
+ pg_index;
+ List *index;
+ Oid indexOid;
+ HeapTuple indexTuple;
+ Form_pg_index indexForm;
+
rel = heap_open(relOid, AccessExclusiveLock);
indexOid = get_relname_relid(indexName, rel->rd_rel->relnamespace);
-
+
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -3994,36 +3996,37 @@ AlterTableClusterOn(Oid relOid, const char *indexName)
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
/*
- * If this is the same index the relation was previously
- * clustered on, no need to do anything.
+ * If this is the same index the relation was previously clustered on,
+ * no need to do anything.
*/
if (indexForm->indisclustered)
{
ereport(NOTICE,
- (errmsg("table \"%s\" is already being clustered on index \"%s\"",
- NameStr(rel->rd_rel->relname), indexName)));
+ (errmsg("table \"%s\" is already being clustered on index \"%s\"",
+ NameStr(rel->rd_rel->relname), indexName)));
ReleaseSysCache(indexTuple);
heap_close(rel, NoLock);
return;
}
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
-
+
/*
* Now check each index in the relation and set the bit where needed.
*/
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index idxForm;
-
+ HeapTuple idxtuple;
+ Form_pg_index idxForm;
+
indexOid = lfirsto(index);
idxtuple = SearchSysCacheCopy(INDEXRELID,
- ObjectIdGetDatum(indexOid),
+ ObjectIdGetDatum(indexOid),
0, 0, 0);
if (!HeapTupleIsValid(idxtuple))
elog(ERROR, "cache lookup failed for index %u", indexOid);
idxForm = (Form_pg_index) GETSTRUCT(idxtuple);
+
/*
* Unset the bit if set. We know it's wrong because we checked
* this earlier.
@@ -4100,7 +4103,7 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared relations cannot be toasted after initdb")));
+ errmsg("shared relations cannot be toasted after initdb")));
/*
* Is it already toasted?
@@ -4331,12 +4334,12 @@ needs_toast_table(Relation rel)
void
register_on_commit_action(Oid relid, OnCommitAction action)
{
- OnCommitItem *oc;
+ OnCommitItem *oc;
MemoryContext oldcxt;
/*
- * We needn't bother registering the relation unless there is an ON COMMIT
- * action we need to take.
+ * We needn't bother registering the relation unless there is an ON
+ * COMMIT action we need to take.
*/
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
@@ -4366,7 +4369,7 @@ remove_on_commit_action(Oid relid)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (oc->relid == relid)
{
@@ -4389,7 +4392,7 @@ PreCommit_on_commit_actions(void)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
/* Ignore entry if already dropped in this xact */
if (oc->deleted_in_cur_xact)
@@ -4403,23 +4406,25 @@ PreCommit_on_commit_actions(void)
break;
case ONCOMMIT_DELETE_ROWS:
heap_truncate(oc->relid);
- CommandCounterIncrement(); /* XXX needed? */
+ CommandCounterIncrement(); /* XXX needed? */
break;
case ONCOMMIT_DROP:
- {
- ObjectAddress object;
+ {
+ ObjectAddress object;
- object.classId = RelOid_pg_class;
- object.objectId = oc->relid;
- object.objectSubId = 0;
- performDeletion(&object, DROP_CASCADE);
- /*
- * Note that table deletion will call remove_on_commit_action,
- * so the entry should get marked as deleted.
- */
- Assert(oc->deleted_in_cur_xact);
- break;
- }
+ object.classId = RelOid_pg_class;
+ object.objectId = oc->relid;
+ object.objectSubId = 0;
+ performDeletion(&object, DROP_CASCADE);
+
+ /*
+ * Note that table deletion will call
+ * remove_on_commit_action, so the entry should get
+ * marked as deleted.
+ */
+ Assert(oc->deleted_in_cur_xact);
+ break;
+ }
}
}
}
@@ -4442,7 +4447,7 @@ AtEOXact_on_commit_actions(bool isCommit)
l = on_commits;
while (l != NIL)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (isCommit ? oc->deleted_in_cur_xact :
oc->created_in_cur_xact)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 6e5b38804ff..d3e969c7e4f 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.153 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.154 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,17 +41,17 @@
static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx);
static HeapTuple GetTupleForTrigger(EState *estate,
- ResultRelInfo *relinfo,
- ItemPointer tid,
- CommandId cid,
- TupleTableSlot **newSlot);
+ ResultRelInfo *relinfo,
+ ItemPointer tid,
+ CommandId cid,
+ TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
FmgrInfo *finfo,
MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
+ bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context);
@@ -97,18 +97,19 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
else if (stmt->isconstraint)
{
/*
- * If this trigger is a constraint (and a foreign key one)
- * then we really need a constrrelid. Since we don't have one,
- * we'll try to generate one from the argument information.
+ * If this trigger is a constraint (and a foreign key one) then we
+ * really need a constrrelid. Since we don't have one, we'll try
+ * to generate one from the argument information.
*
- * This is really just a workaround for a long-ago pg_dump bug
- * that omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
- * commands. We don't want to bomb out completely here if we can't
- * determine the correct relation, because that would prevent loading
- * the dump file. Instead, NOTICE here and ERROR in the trigger.
+ * This is really just a workaround for a long-ago pg_dump bug that
+ * omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
+ * commands. We don't want to bomb out completely here if we
+ * can't determine the correct relation, because that would
+ * prevent loading the dump file. Instead, NOTICE here and ERROR
+ * in the trigger.
*/
- bool needconstrrelid = false;
- void *elem = NULL;
+ bool needconstrrelid = false;
+ void *elem = NULL;
if (strncmp(strVal(llast(stmt->funcname)), "RI_FKey_check_", 14) == 0)
{
@@ -265,8 +266,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
found++;
}
systable_endscan(tgscan);
@@ -280,7 +281,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -480,8 +481,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, get_rel_name(relid))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, get_rel_name(relid))));
if (!pg_class_ownercheck(relid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
@@ -576,7 +577,7 @@ RemoveTriggerById(Oid trigOid)
elog(ERROR, "cache lookup failed for relation %u", relid);
classForm = (Form_pg_class) GETSTRUCT(tuple);
- if (classForm->reltriggers == 0) /* should not happen */
+ if (classForm->reltriggers == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has reltriggers = 0",
RelationGetRelationName(rel));
classForm->reltriggers--;
@@ -650,8 +651,8 @@ renametrig(Oid relid,
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
@@ -693,8 +694,8 @@ renametrig(Oid relid,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ oldname, RelationGetRelationName(targetrel))));
}
systable_endscan(tgscan);
@@ -762,7 +763,7 @@ RelationBuildTriggers(Relation relation)
build->tgoid = HeapTupleGetOid(htup);
build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname)));
+ NameGetDatum(&pg_trigger->tgname)));
build->tgfoid = pg_trigger->tgfoid;
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
@@ -927,8 +928,8 @@ CopyTriggerDesc(TriggerDesc *trigdesc)
trigger->tgname = pstrdup(trigger->tgname);
if (trigger->tgnargs > 0)
{
- char **newargs;
- int16 j;
+ char **newargs;
+ int16 j;
newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
for (j = 0; j < trigger->tgnargs; j++)
@@ -1101,7 +1102,7 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
return false;
return true;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* Call a trigger function.
@@ -1166,10 +1167,10 @@ ExecCallTriggerFunc(TriggerData *trigdata,
void
ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1190,10 +1191,10 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1209,7 +1210,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1242,8 +1243,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1279,10 +1280,10 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1303,10 +1304,10 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1322,7 +1323,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1361,8 +1362,8 @@ ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1408,10 +1409,10 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1432,10 +1433,10 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1451,7 +1452,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1498,8 +1499,8 @@ ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
for (i = 0; i < ntrigs; i++)
{
@@ -1639,19 +1640,20 @@ ltrmark:;
* ----------
*/
-typedef struct DeferredTriggersData {
- /* Internal data is held in a per-transaction memory context */
- MemoryContext deftrig_cxt;
- /* ALL DEFERRED or ALL IMMEDIATE */
- bool deftrig_all_isset;
- bool deftrig_all_isdeferred;
- /* Per trigger state */
- List *deftrig_trigstates;
- /* List of pending deferred triggers. Previous comment below */
- DeferredTriggerEvent deftrig_events;
- DeferredTriggerEvent deftrig_events_imm;
- DeferredTriggerEvent deftrig_event_tail;
-} DeferredTriggersData;
+typedef struct DeferredTriggersData
+{
+ /* Internal data is held in a per-transaction memory context */
+ MemoryContext deftrig_cxt;
+ /* ALL DEFERRED or ALL IMMEDIATE */
+ bool deftrig_all_isset;
+ bool deftrig_all_isdeferred;
+ /* Per trigger state */
+ List *deftrig_trigstates;
+ /* List of pending deferred triggers. Previous comment below */
+ DeferredTriggerEvent deftrig_events;
+ DeferredTriggerEvent deftrig_events_imm;
+ DeferredTriggerEvent deftrig_event_tail;
+} DeferredTriggersData;
/* ----------
* deftrig_events, deftrig_event_tail:
@@ -1661,8 +1663,8 @@ typedef struct DeferredTriggersData {
* Because this can grow pretty large, we don't use separate List nodes,
* but instead thread the list through the dte_next fields of the member
* nodes. Saves just a few bytes per entry, but that adds up.
- *
- * deftrig_events_imm holds the tail pointer as of the last
+ *
+ * deftrig_events_imm holds the tail pointer as of the last
* deferredTriggerInvokeEvents call; we can use this to avoid rescanning
* entries unnecessarily. It is NULL if deferredTriggerInvokeEvents
* hasn't run since the last state change.
@@ -1674,7 +1676,7 @@ typedef struct DeferredTriggersData {
typedef DeferredTriggersData *DeferredTriggers;
-static DeferredTriggers deferredTriggers;
+static DeferredTriggers deferredTriggers;
/* ----------
* deferredTriggerCheckState()
@@ -1783,7 +1785,7 @@ deferredTriggerAddEvent(DeferredTriggerEvent event)
*/
static void
DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context)
{
Oid tgoid = event->dte_item[itemno].dti_tgoid;
@@ -1817,7 +1819,7 @@ DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
*/
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
- (event->dte_event & TRIGGER_EVENT_ROW);
+ (event->dte_event & TRIGGER_EVENT_ROW);
LocTriggerData.tg_relation = rel;
LocTriggerData.tg_trigger = NULL;
@@ -1899,12 +1901,12 @@ deferredTriggerInvokeEvents(bool immediate_only)
* are going to discard the whole event queue on return anyway, so no
* need to bother with "retail" pfree's.
*
- * If immediate_only is true, we need only scan from where the end of
- * the queue was at the previous deferredTriggerInvokeEvents call;
- * any non-deferred events before that point are already fired.
- * (But if the deferral state changes, we must reset the saved position
- * to the beginning of the queue, so as to process all events once with
- * the new states. See DeferredTriggerSetState.)
+ * If immediate_only is true, we need only scan from where the end of the
+ * queue was at the previous deferredTriggerInvokeEvents call; any
+ * non-deferred events before that point are already fired. (But if
+ * the deferral state changes, we must reset the saved position to the
+ * beginning of the queue, so as to process all events once with the
+ * new states. See DeferredTriggerSetState.)
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1916,9 +1918,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * If immediate_only is true, then the only events that could need firing
- * are those since deftrig_events_imm. (But if deftrig_events_imm is
- * NULL, we must scan the entire list.)
+ * If immediate_only is true, then the only events that could need
+ * firing are those since deftrig_events_imm. (But if
+ * deftrig_events_imm is NULL, we must scan the entire list.)
*/
if (immediate_only && deferredTriggers->deftrig_events_imm != NULL)
{
@@ -1984,17 +1986,18 @@ deferredTriggerInvokeEvents(bool immediate_only)
rel = heap_open(event->dte_relid, NoLock);
/*
- * Copy relation's trigger info so that we have a stable
- * copy no matter what the called triggers do.
+ * Copy relation's trigger info so that we have a
+ * stable copy no matter what the called triggers do.
*/
trigdesc = CopyTriggerDesc(rel->trigdesc);
- if (trigdesc == NULL) /* should not happen */
+ if (trigdesc == NULL) /* should not happen */
elog(ERROR, "relation %u has no triggers",
event->dte_relid);
/*
- * Allocate space to cache fmgr lookup info for triggers.
+ * Allocate space to cache fmgr lookup info for
+ * triggers.
*/
finfo = (FmgrInfo *)
palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -2089,21 +2092,23 @@ void
DeferredTriggerBeginXact(void)
{
/*
- * This will be changed to a special context when
- * the nested transactions project moves forward.
+ * This will be changed to a special context when the nested
+ * transactions project moves forward.
*/
MemoryContext cxt = TopTransactionContext;
+
deferredTriggers = (DeferredTriggers) MemoryContextAlloc(TopTransactionContext,
- sizeof(DeferredTriggersData));
+ sizeof(DeferredTriggersData));
/*
* Create the per transaction memory context
*/
deferredTriggers->deftrig_cxt = AllocSetContextCreate(cxt,
- "DeferredTriggerXact",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "DeferredTriggerXact",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+
/*
* If unspecified, constraints default to IMMEDIATE, per SQL
*/
@@ -2174,7 +2179,7 @@ DeferredTriggerAbortXact(void)
* Ignore call if we aren't in a transaction.
*/
if (deferredTriggers == NULL)
- return;
+ return;
/*
* Forget everything we know about deferred triggers.
@@ -2255,7 +2260,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (strlen(cname) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("unnamed constraints cannot be set explicitly")));
+ errmsg("unnamed constraints cannot be set explicitly")));
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2304,7 +2309,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (!found)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" does not exist", cname)));
+ errmsg("constraint \"%s\" does not exist", cname)));
}
heap_close(tgrel, AccessShareLock);
@@ -2349,9 +2354,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* CONSTRAINTS command applies retroactively. This happens "for free"
* since we have already made the necessary modifications to the
* constraints, and deferredTriggerEndQuery() is called by
- * finish_xact_command(). But we must reset deferredTriggerInvokeEvents'
- * tail pointer to make it rescan the entire list, in case some deferred
- * events are now immediately invokable.
+ * finish_xact_command(). But we must reset
+ * deferredTriggerInvokeEvents' tail pointer to make it rescan the
+ * entire list, in case some deferred events are now immediately
+ * invokable.
*/
deferredTriggers->deftrig_events_imm = NULL;
}
@@ -2416,7 +2422,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
*/
for (i = 0; i < ntriggers; i++)
{
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
+ Trigger *trigger = &trigdesc->triggers[tgindx[i]];
if (trigger->tgenabled)
n_enabled_triggers++;
@@ -2455,7 +2461,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
ev_item = &(new_event->dte_item[i]);
ev_item->dti_tgoid = trigger->tgoid;
- ev_item->dti_state =
+ ev_item->dti_state =
((trigger->tgdeferrable) ?
TRIGGER_DEFERRED_DEFERRABLE : 0) |
((trigger->tginitdeferred) ?
@@ -2464,9 +2470,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
if (row_trigger && (trigdesc->n_before_row[event] > 0))
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
else if (!row_trigger && (trigdesc->n_before_statement[event] > 0))
- {
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
- }
}
MemoryContextSwitchTo(oldcxt);
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 275143c1517..57bc7c5f71f 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.40 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.41 2003/08/04 00:43:17 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -66,11 +66,11 @@
/* result structure for get_rels_with_domain() */
typedef struct
{
- Relation rel; /* opened and locked relation */
- int natts; /* number of attributes of interest */
- int *atts; /* attribute numbers */
+ Relation rel; /* opened and locked relation */
+ int natts; /* number of attributes of interest */
+ int *atts; /* attribute numbers */
/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
-} RelToCheck;
+} RelToCheck;
static Oid findTypeInputFunction(List *procname, Oid typeOid);
@@ -80,9 +80,9 @@ static Oid findTypeSendFunction(List *procname, Oid typeOid);
static List *get_rels_with_domain(Oid domainOid, LOCKMODE lockmode);
static void domainOwnerCheck(HeapTuple tup, TypeName *typename);
static char *domainAddConstraint(Oid domainOid, Oid domainNamespace,
- Oid baseTypeOid,
- int typMod, Constraint *constr,
- int *counter, char *domainName);
+ Oid baseTypeOid,
+ int typMod, Constraint *constr,
+ int *counter, char *domainName);
/*
@@ -105,7 +105,7 @@ DefineType(List *names, List *parameters)
bool byValue = false;
char delimiter = DEFAULT_TYPDELIM;
char alignment = 'i'; /* default alignment */
- char storage = 'p'; /* default TOAST storage method */
+ char storage = 'p'; /* default TOAST storage method */
Oid inputOid;
Oid outputOid;
Oid receiveOid = InvalidOid;
@@ -237,8 +237,8 @@ DefineType(List *names, List *parameters)
/*
* Look to see if type already exists (presumably as a shell; if not,
- * TypeCreate will complain). If it doesn't, create it as a shell,
- * so that the OID is known for use in the I/O function definitions.
+ * TypeCreate will complain). If it doesn't, create it as a shell, so
+ * that the OID is known for use in the I/O function definitions.
*/
typoid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(typeName),
@@ -492,7 +492,7 @@ DefineDomain(CreateDomainStmt *stmt)
List *listptr;
Oid basetypeoid;
Oid domainoid;
- Form_pg_type baseType;
+ Form_pg_type baseType;
int counter = 0;
/* Convert list of names to a name and namespace */
@@ -508,10 +508,11 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer. (This test is presently
- * useless since the parser will have truncated the name to fit. But
- * leave it here since we may someday support arrays of domains, in
- * which case we'll be back to needing to enforce NAMEDATALEN-2.)
+ * prefix. So they can be one character longer. (This test is
+ * presently useless since the parser will have truncated the name to
+ * fit. But leave it here since we may someday support arrays of
+ * domains, in which case we'll be back to needing to enforce
+ * NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
@@ -581,8 +582,8 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;
/*
- * Run through constraints manually to avoid the additional
- * processing conducted by DefineRelation() and friends.
+ * Run through constraints manually to avoid the additional processing
+ * conducted by DefineRelation() and friends.
*/
foreach(listptr, schema)
{
@@ -594,7 +595,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -606,6 +607,7 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
case CONSTR_DEFAULT:
+
/*
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
@@ -643,7 +645,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
@@ -652,41 +654,42 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
- break;
+ break;
+
+ case CONSTR_CHECK:
- case CONSTR_CHECK:
/*
- * Check constraints are handled after domain creation, as they
- * require the Oid of the domain
+ * Check constraints are handled after domain creation, as
+ * they require the Oid of the domain
*/
- break;
+ break;
/*
* All else are error cases
*/
- case CONSTR_UNIQUE:
- ereport(ERROR,
+ case CONSTR_UNIQUE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
- break;
+ errmsg("UNIQUE constraints not supported for domains")));
+ break;
- case CONSTR_PRIMARY:
- ereport(ERROR,
+ case CONSTR_PRIMARY:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PRIMARY KEY constraints not supported for domains")));
- break;
+ break;
- case CONSTR_ATTR_DEFERRABLE:
- case CONSTR_ATTR_NOT_DEFERRABLE:
- case CONSTR_ATTR_DEFERRED:
- case CONSTR_ATTR_IMMEDIATE:
- ereport(ERROR,
+ case CONSTR_ATTR_DEFERRABLE:
+ case CONSTR_ATTR_NOT_DEFERRABLE:
+ case CONSTR_ATTR_DEFERRED:
+ case CONSTR_ATTR_IMMEDIATE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("deferrability constraints not supported for domains")));
- break;
+ break;
default:
elog(ERROR, "unrecognized constraint subtype: %d",
@@ -715,15 +718,16 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
defaultValueBin, /* default type value (binary) */
- byValue, /* passed by value */
- alignment, /* required alignment */
- storage, /* TOAST strategy */
- stmt->typename->typmod, /* typeMod value */
- typNDims, /* Array dimensions for base type */
- typNotNull); /* Type NOT NULL */
+ byValue, /* passed by value */
+ alignment, /* required alignment */
+ storage, /* TOAST strategy */
+ stmt->typename->typmod, /* typeMod value */
+ typNDims, /* Array dimensions for base type */
+ typNotNull); /* Type NOT NULL */
/*
- * Process constraints which refer to the domain ID returned by TypeCreate
+ * Process constraints which refer to the domain ID returned by
+ * TypeCreate
*/
foreach(listptr, schema)
{
@@ -733,16 +737,16 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
domainAddConstraint(domainoid, domainNamespace,
basetypeoid, stmt->typename->typmod,
constr, &counter, domainName);
- break;
+ break;
- /* Other constraint types were fully processed above */
+ /* Other constraint types were fully processed above */
default:
- break;
+ break;
}
}
@@ -834,8 +838,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING;
- * if we see this, we issue a NOTICE and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
+ * see this, we issue a NOTICE and fix up the pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -874,9 +878,10 @@ findTypeInputFunction(List *procname, Oid typeOid)
(errmsg("changing argument type of function %s from OPAQUE to CSTRING",
NameListToString(procname))));
SetFunctionArgType(procOid, 0, CSTRINGOID);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -905,8 +910,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a NOTICE and fix up the
- * pg_proc entry.
+ * type name; if we see this, we issue a NOTICE and fix up the pg_proc
+ * entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -940,12 +945,13 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(NOTICE,
- (errmsg("changing argument type of function %s from OPAQUE to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from OPAQUE to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -1050,7 +1056,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("composite type must have at least one attribute")));
+ errmsg("composite type must have at least one attribute")));
/*
* now create the parameters for keys/inheritance etc. All of them are
@@ -1072,7 +1078,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
/*
* AlterDomainDefault
*
- * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
+ * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
*/
void
AlterDomainDefault(List *names, Node *defaultRaw)
@@ -1083,12 +1089,12 @@ AlterDomainDefault(List *names, Node *defaultRaw)
ParseState *pstate;
Relation rel;
char *defaultValue;
- Node *defaultExpr = NULL; /* NULL if no default specified */
+ Node *defaultExpr = NULL; /* NULL if no default specified */
Datum new_record[Natts_pg_type];
char new_record_nulls[Natts_pg_type];
char new_record_repl[Natts_pg_type];
HeapTuple newtuple;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1113,7 +1119,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Setup new tuple */
@@ -1129,9 +1135,10 @@ AlterDomainDefault(List *names, Node *defaultRaw)
{
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);
+
/*
- * Cook the colDef->raw_expr into an expression. Note:
- * Name is strictly for error message
+ * Cook the colDef->raw_expr into an expression. Note: Name is
+ * strictly for error message
*/
defaultExpr = cookDefault(pstate, defaultRaw,
typTup->typbasetype,
@@ -1139,27 +1146,29 @@ AlterDomainDefault(List *names, Node *defaultRaw)
NameStr(typTup->typname));
/*
- * Expression must be stored as a nodeToString result, but
- * we also require a valid textual representation (mainly
- * to make life easier for pg_dump).
+ * Expression must be stored as a nodeToString result, but we also
+ * require a valid textual representation (mainly to make life
+ * easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(NameStr(typTup->typname),
- InvalidOid),
+ deparse_context_for(NameStr(typTup->typname),
+ InvalidOid),
false, false);
+
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(
- nodeToString(defaultExpr)));
+ CStringGetDatum(
+ nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultValue));
+ CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
- else /* Default is NULL, drop it */
+ else
+/* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
@@ -1168,7 +1177,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
}
newtuple = heap_modifytuple(tup, rel,
- new_record, new_record_nulls, new_record_repl);
+ new_record, new_record_nulls, new_record_repl);
simple_heap_update(rel, &tup->t_self, newtuple);
@@ -1178,7 +1187,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
typTup->typrelid,
- 0, /* relation kind is n/a */
+ 0, /* relation kind is n/a */
typTup->typinput,
typTup->typoutput,
typTup->typreceive,
@@ -1186,7 +1195,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
typTup->typelem,
typTup->typbasetype,
defaultExpr,
- true); /* Rebuild is true */
+ true); /* Rebuild is true */
/* Clean up */
heap_close(rel, NoLock);
@@ -1196,7 +1205,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/*
* AlterDomainNotNull
*
- * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
+ * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
*/
void
AlterDomainNotNull(List *names, bool notNull)
@@ -1205,7 +1214,7 @@ AlterDomainNotNull(List *names, bool notNull)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1231,7 +1240,7 @@ AlterDomainNotNull(List *names, bool notNull)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Is the domain already set to the desired constraint? */
@@ -1248,15 +1257,15 @@ AlterDomainNotNull(List *names, bool notNull)
/* Adding a NOT NULL constraint requires checking existing columns */
if (notNull)
{
- List *rels;
- List *rt;
+ List *rels;
+ List *rt;
/* Fetch relation list with attributes based on this domain */
/* ShareLock is sufficient to prevent concurrent data changes */
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1268,14 +1277,14 @@ AlterDomainNotNull(List *names, bool notNull)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1284,7 +1293,7 @@ AlterDomainNotNull(List *names, bool notNull)
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains NULL values",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
}
heap_endscan(scan);
@@ -1295,7 +1304,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's
+ * Okay to update pg_type row. We can scribble on typTup because it's
* a copy.
*/
typTup->typnotnull = notNull;
@@ -1321,7 +1330,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
Oid domainoid;
HeapTuple tup;
Relation rel;
- Form_pg_type typTup;
+ Form_pg_type typTup;
Relation conrel;
SysScanDesc conscan;
ScanKeyData key[1];
@@ -1350,7 +1359,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Grab an appropriate lock on the pg_constraint relation */
@@ -1403,15 +1412,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
- List *rels;
- List *rt;
- EState *estate;
+ Form_pg_type typTup;
+ List *rels;
+ List *rt;
+ EState *estate;
ExprContext *econtext;
- char *ccbin;
- Expr *expr;
- ExprState *exprstate;
- int counter = 0;
+ char *ccbin;
+ Expr *expr;
+ ExprState *exprstate;
+ int counter = 0;
Constraint *constr;
/* Make a TypeName so we can use standard type lookup machinery */
@@ -1438,14 +1447,14 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Check for unsupported constraint types */
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1469,20 +1478,20 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
errmsg("use ALTER DOMAIN .. [ SET | DROP ] NOT NULL instead")));
break;
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
/* processed below */
- break;
+ break;
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
+ errmsg("UNIQUE constraints not supported for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("PRIMARY KEY constraints not supported for domains")));
+ errmsg("PRIMARY KEY constraints not supported for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1501,18 +1510,18 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
}
/*
- * Since all other constraint types throw errors, this must be
- * a check constraint. First, process the constraint expression
- * and add an entry to pg_constraint.
+ * Since all other constraint types throw errors, this must be a check
+ * constraint. First, process the constraint expression and add an
+ * entry to pg_constraint.
*/
ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
typTup->typbasetype, typTup->typtypmod,
- constr, &counter, NameStr(typTup->typname));
+ constr, &counter, NameStr(typTup->typname));
/*
- * Test all values stored in the attributes based on the domain
- * the constraint is being added to.
+ * Test all values stored in the attributes based on the domain the
+ * constraint is being added to.
*/
expr = (Expr *) stringToNode(ccbin);
@@ -1528,7 +1537,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1540,15 +1549,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
- Datum conResult;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
+ Datum conResult;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1564,7 +1573,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains values that violate the new constraint",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
ResetExprContext(econtext);
@@ -1610,7 +1619,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
static List *
get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
{
- List *result = NIL;
+ List *result = NIL;
Relation depRel;
ScanKeyData key[2];
SysScanDesc depScan;
@@ -1634,10 +1643,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
{
- Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
+ Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
RelToCheck *rtc = NULL;
List *rellist;
- Form_pg_attribute pg_att;
+ Form_pg_attribute pg_att;
int ptr;
/* Ignore dependees that aren't user columns of tables */
@@ -1675,10 +1684,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
}
/*
- * Confirm column has not been dropped, and is of the expected type.
- * This defends against an ALTER DROP COLUMN occuring just before
- * we acquired lock ... but if the whole table were dropped, we'd
- * still have a problem.
+ * Confirm column has not been dropped, and is of the expected
+ * type. This defends against an ALTER DROP COLUMN occuring just
+ * before we acquired lock ... but if the whole table were
+ * dropped, we'd still have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
@@ -1687,16 +1696,16 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
- * order; this is just a hack to improve predictability of regression
- * test output ...
+ * Okay, add column to result. We store the columns in
+ * column-number order; this is just a hack to improve
+ * predictability of regression test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));
ptr = rtc->natts++;
- while (ptr > 0 && rtc->atts[ptr-1] > pg_depend->objsubid)
+ while (ptr > 0 && rtc->atts[ptr - 1] > pg_depend->objsubid)
{
- rtc->atts[ptr] = rtc->atts[ptr-1];
+ rtc->atts[ptr] = rtc->atts[ptr - 1];
ptr--;
}
rtc->atts[ptr] = pg_depend->objsubid;
@@ -1719,7 +1728,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
static void
domainOwnerCheck(HeapTuple tup, TypeName *typename)
{
- Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
+ Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
/* Check that this is actually a domain */
if (typTup->typtype != 'd')
@@ -1746,7 +1755,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
char *ccsrc;
char *ccbin;
ParseState *pstate;
- CoerceToDomainValue *domVal;
+ CoerceToDomainValue *domVal;
/*
* Assign or validate constraint name
@@ -1759,8 +1768,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for domain \"%s\" already exists",
- constr->name, domainName)));
+ errmsg("constraint \"%s\" for domain \"%s\" already exists",
+ constr->name, domainName)));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_DOMAIN,
@@ -1775,10 +1784,10 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE
- * in the expression. Note that it will appear to have the type of the
- * base type, not the domain. This seems correct since within the
- * check expression, we should not assume the input value can be considered
- * a member of the domain.
+ * in the expression. Note that it will appear to have the type of
+ * the base type, not the domain. This seems correct since within the
+ * check expression, we should not assume the input value can be
+ * considered a member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
@@ -1841,13 +1850,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Store the constraint in pg_constraint
*/
- CreateConstraintEntry(constr->name, /* Constraint Name */
- domainNamespace, /* namespace */
+ CreateConstraintEntry(constr->name, /* Constraint Name */
+ domainNamespace, /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
- InvalidOid, /* not a relation constraint */
- NULL,
+ InvalidOid, /* not a relation constraint */
+ NULL,
0,
domainOid, /* domain constraint */
InvalidOid, /* Foreign key fields */
@@ -1857,13 +1866,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
' ',
' ',
InvalidOid,
- expr, /* Tree form check constraint */
+ expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
- ccsrc); /* Source form check constraint */
+ ccsrc); /* Source form check constraint */
/*
- * Return the compiled constraint expression so the calling routine can
- * perform any additional required tests.
+ * Return the compiled constraint expression so the calling routine
+ * can perform any additional required tests.
*/
return ccbin;
}
@@ -1893,7 +1902,7 @@ GetDomainConstraints(Oid typeOid)
Form_pg_type typTup;
ScanKeyData key[1];
SysScanDesc scan;
-
+
tup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typeOid),
0, 0, 0);
@@ -1915,17 +1924,20 @@ GetDomainConstraints(Oid typeOid)
while (HeapTupleIsValid(conTup = systable_getnext(scan)))
{
- Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
- Datum val;
- bool isNull;
- Expr *check_expr;
+ Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
+ Datum val;
+ bool isNull;
+ Expr *check_expr;
DomainConstraintState *r;
/* Ignore non-CHECK constraints (presently, shouldn't be any) */
if (c->contype != CONSTRAINT_CHECK)
continue;
- /* Not expecting conbin to be NULL, but we'll test for it anyway */
+ /*
+ * Not expecting conbin to be NULL, but we'll test for it
+ * anyway
+ */
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
if (isNull)
@@ -1945,8 +1957,8 @@ GetDomainConstraints(Oid typeOid)
r->check_expr = ExecInitExpr(check_expr, NULL);
/*
- * use lcons() here because constraints of lower domains should
- * be applied earlier.
+ * use lcons() here because constraints of lower domains
+ * should be applied earlier.
*/
result = lcons(r, result);
}
@@ -2003,7 +2015,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
Oid typeOid;
Relation rel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 36416a5232f..117eef1e750 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.122 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.123 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,12 +146,12 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_group and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_group and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -212,7 +212,7 @@ write_group_file(Relation grel)
if (usename[j] != '\0')
{
ereport(LOG,
- (errmsg("invalid user name \"%s\"", usename)));
+ (errmsg("invalid user name \"%s\"", usename)));
continue;
}
@@ -245,7 +245,7 @@ write_group_file(Relation grel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -294,12 +294,12 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_shadow and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_shadow and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(urel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -376,7 +376,7 @@ write_user_file(Relation urel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -430,10 +430,10 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
Relation urel = NULL;
Relation grel = NULL;
- if (! (user_file_update_needed || group_file_update_needed))
+ if (!(user_file_update_needed || group_file_update_needed))
return;
- if (! isCommit)
+ if (!isCommit)
{
user_file_update_needed = false;
group_file_update_needed = false;
@@ -441,12 +441,12 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
}
/*
- * We use ExclusiveLock to ensure that only one backend writes the flat
- * file(s) at a time. That's sufficient because it's okay to allow plain
- * reads of the tables in parallel. There is some chance of a deadlock
- * here (if we were triggered by a user update of pg_shadow or pg_group,
- * which likely won't have gotten a strong enough lock), so get the locks
- * we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the
+ * flat file(s) at a time. That's sufficient because it's okay to
+ * allow plain reads of the tables in parallel. There is some chance
+ * of a deadlock here (if we were triggered by a user update of
+ * pg_shadow or pg_group, which likely won't have gotten a strong
+ * enough lock), so get the locks we need before writing anything.
*/
if (user_file_update_needed)
urel = heap_openr(ShadowRelationName, ExclusiveLock);
@@ -1088,7 +1088,7 @@ DropUser(DropUserStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("user \"%s\" cannot be dropped", user),
- errdetail("The user owns database \"%s\".", dbname)));
+ errdetail("The user owns database \"%s\".", dbname)));
}
heap_endscan(scan);
@@ -1172,10 +1172,10 @@ RenameUser(const char *oldname, const char *newname)
errmsg("user \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the session user
- * somewhere, so renaming it could cause confusion. On the other
- * hand, there may not be an actual problem besides a little
- * confusion, so think about this and decide.
+ * XXX Client applications probably store the session user somewhere,
+ * so renaming it could cause confusion. On the other hand, there may
+ * not be an actual problem besides a little confusion, so think about
+ * this and decide.
*/
if (((Form_pg_shadow) GETSTRUCT(tup))->usesysid == GetSessionUserId())
ereport(ERROR,
@@ -1221,14 +1221,14 @@ CheckPgUserAclNotNull(void)
htup = SearchSysCache(RELOID,
ObjectIdGetDatum(RelOid_pg_shadow),
0, 0, 0);
- if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
+ if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
elog(ERROR, "cache lookup failed for relation %u", RelOid_pg_shadow);
if (heap_attisnull(htup, Anum_pg_class_relacl))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("before using passwords you must revoke permissions on %s",
- ShadowRelationName),
+ errmsg("before using passwords you must revoke permissions on %s",
+ ShadowRelationName),
errdetail("This restriction is to prevent unprivileged users from reading the passwords."),
errhint("Try 'REVOKE ALL ON \"%s\" FROM PUBLIC'.",
ShadowRelationName)));
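A standalone C sketch (not backend code) of the ordering point made in the AtEOXact_UpdatePasswordFile() hunk above: decide which relations need rewriting, take every lock first, and only then write, so one flat file is never being written while we still wait on another lock. The Rel type and helper names below are illustrative stand-ins, not PostgreSQL APIs.

#include <stdbool.h>

typedef struct
{
	const char *name;
	bool		locked;
} Rel;

static void
lock_exclusive(Rel *rel)
{
	/* stand-in for heap_openr(..., ExclusiveLock) */
	rel->locked = true;
}

static void
rewrite_flat_file(Rel *rel)
{
	/* stand-in for write_user_file()/write_group_file(); assumes rel->locked */
	(void) rel;
}

static void
update_password_files(bool user_needed, bool group_needed,
					  Rel *shadow, Rel *group)
{
	/* acquire all the locks we will need before writing anything */
	if (user_needed)
		lock_exclusive(shadow);
	if (group_needed)
		lock_exclusive(group);

	/* only now do the actual writes */
	if (user_needed)
		rewrite_flat_file(shadow);
	if (group_needed)
		rewrite_flat_file(group);
}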
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index e73ace27c27..9dc0d9a8996 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.257 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.258 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -287,24 +287,25 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
- if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If we vacuumed, use new transaction for analyze.
- * Otherwise, we can use the outer transaction, but we still
- * need to call analyze_rel in a memory context that will be
- * cleaned up on return (else we leak memory while processing
- * multiple tables).
+ * If we vacuumed, use new transaction for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (vacstmt->vacuum)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
@@ -734,7 +735,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/* Begin a transaction for vacuuming this relation */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
/*
* Check for user-requested abort. Note we want this to be inside a
@@ -812,7 +814,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp tables */
+ return true; /* assume no long-lived data in temp
+ * tables */
}
/*
@@ -860,7 +863,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*/
if (toast_relid != InvalidOid)
{
- if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+ if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
result = false; /* failed to vacuum the TOAST table? */
}
@@ -1087,8 +1090,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (PageIsNew(page))
{
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1314,7 +1317,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move destination.
+ * vacuuming; this is to keep us from using it as a move
+ * destination.
*/
if (notup)
{
@@ -1382,9 +1386,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead tuples cannot be removed yet.\n"
- "Nonremovable tuples range from %lu to %lu bytes long.\n"
+ "Nonremovable tuples range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable tuples) is %.0f bytes.\n"
+ "Total free space (including removable tuples) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
@@ -2380,8 +2384,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
- * vacuuming. So do it here and ignore the relatively small amount
- * of processing that occurs below.
+ * vacuuming. So do it here and ignore the relatively small amount of
+ * processing that occurs below.
*/
ereport(elevel,
(errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
@@ -2735,7 +2739,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -2752,7 +2756,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
@@ -2837,13 +2841,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small bits
- * of space. Although FSM would discard pages with little free space
- * anyway, it's important to do this prefiltering because (a) it reduces
- * the time spent holding the FSM lock in RecordRelationFreeSpace, and
- * (b) FSM uses the number of pages reported as a statistic for guiding
- * space management. If we didn't threshold our reports the same way
- * vacuumlazy.c does, we'd be skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small
+ * bits of space. Although FSM would discard pages with little free
+ * space anyway, it's important to do this prefiltering because (a) it
+ * reduces the time spent holding the FSM lock in
+ * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+ * reported as a statistic for guiding space management. If we didn't
+ * threshold our reports the same way vacuumlazy.c does, we'd be
+ * skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
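A standalone sketch of the prefiltering the vac_update_fsm() comment above describes: only pages whose free space meets the average-request-size threshold are passed along, so vacuum.c and vacuumlazy.c report on the same basis. The PageFree type and filter_pages() helper are illustrative, not backend APIs.

#include <stddef.h>

typedef struct
{
	unsigned	blkno;
	size_t		avail;
} PageFree;

/* Copy qualifying entries to out[]; returns how many survived the filter. */
static int
filter_pages(const PageFree *in, int n, size_t threshold, PageFree *out)
{
	int			kept = 0;
	int			i;

	for (i = 0; i < n; i++)
	{
		if (in[i].avail >= threshold)
			out[kept++] = in[i];
	}
	return kept;
}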
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index f0be98a23ed..65af960be8e 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.29 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.30 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@ typedef struct LVRelStats
bool fs_is_heap; /* are we using heap organization? */
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
- PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
+ PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
} LVRelStats;
@@ -162,7 +162,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);
/* Update shared free space map with final free space info */
@@ -659,7 +659,7 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -966,16 +966,18 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular reduces
- * the amount of time we spend holding the FSM lock when we finally call
- * RecordRelationFreeSpace. Since the FSM will probably drop pages with
- * little free space anyway, there's no point in making this really small.
+ * uselessly small entries early saves cycles, and in particular
+ * reduces the amount of time we spend holding the FSM lock when we
+ * finally call RecordRelationFreeSpace. Since the FSM will probably
+ * drop pages with little free space anyway, there's no point in
+ * making this really small.
*
- * XXX Is it worth trying to measure average tuple size, and using that to
- * adjust the threshold? Would be worthwhile if FSM has no stats yet
- * for this relation. But changing the threshold as we scan the rel
- * might lead to bizarre behavior, too. Also, it's probably better if
- * vacuum.c has the same thresholding behavior as we do here.
+ * XXX Is it worth trying to measure average tuple size, and using that
+ * to adjust the threshold? Would be worthwhile if FSM has no stats
+ * yet for this relation. But changing the threshold as we scan the
+ * rel might lead to bizarre behavior, too. Also, it's probably
+ * better if vacuum.c has the same thresholding behavior as we do
+ * here.
*/
if (avail < vacrelstats->threshold)
return;
@@ -996,7 +998,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*----------
* The rest of this routine works with "heap" organization of the
* free space arrays, wherein we maintain the heap property
- * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
+ * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
* In particular, the zero'th element always has the smallest available
* space and can be discarded to make room for a new page with more space.
* See Knuth's discussion of heap-based priority queues, sec 5.2.3;
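A standalone sketch of the bounded min-heap that the lazy_record_free_space() comments above describe, with the stated property avail[(j-1) div 2] <= avail[j]: the zero'th element always holds the smallest available space, so a page with more free space can evict it and be sifted back into place. FreeSpaceEntry loosely mirrors the blkno/avail pairs in the diff; the helper names are illustrative, not backend APIs.

#include <stddef.h>

typedef struct
{
	unsigned	blkno;
	size_t		avail;
} FreeSpaceEntry;

static void
sift_down(FreeSpaceEntry *heap, int n, int i)
{
	for (;;)
	{
		int			l = 2 * i + 1;
		int			r = l + 1;
		int			smallest = i;
		FreeSpaceEntry tmp;

		if (l < n && heap[l].avail < heap[smallest].avail)
			smallest = l;
		if (r < n && heap[r].avail < heap[smallest].avail)
			smallest = r;
		if (smallest == i)
			break;				/* heap property restored */
		tmp = heap[i];
		heap[i] = heap[smallest];
		heap[smallest] = tmp;
		i = smallest;
	}
}

/* Replace the root (smallest avail) if the new page has more free space. */
static void
maybe_replace_min(FreeSpaceEntry *heap, int n, unsigned blkno, size_t avail)
{
	if (n > 0 && avail > heap[0].avail)
	{
		heap[0].blkno = blkno;
		heap[0].avail = avail;
		sift_down(heap, n, 0);
	}
}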
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index e0b041636e6..07dfca13c84 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.85 2003/07/29 00:03:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.86 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
* to duplicate the test in AC_STRUCT_TIMEZONE.
*/
#ifdef HAVE_TZNAME
-#ifndef tzname /* For SGI. */
+#ifndef tzname /* For SGI. */
extern char *tzname[];
#endif
#endif
@@ -273,12 +273,11 @@ static void
clear_tz(void)
{
/*
- * unsetenv() works fine, but is BSD, not POSIX, and is not
- * available under Solaris, among others. Apparently putenv()
- * called as below clears the process-specific environment
- * variables. Other reasonable arguments to putenv() (e.g.
- * "TZ=", "TZ", "") result in a core dump (under Linux
- * anyway). - thomas 1998-01-26
+ * unsetenv() works fine, but is BSD, not POSIX, and is not available
+ * under Solaris, among others. Apparently putenv() called as below
+ * clears the process-specific environment variables. Other
+ * reasonable arguments to putenv() (e.g. "TZ=", "TZ", "") result in a
+ * core dump (under Linux anyway). - thomas 1998-01-26
*/
if (tzbuf[0] == 'T')
{
@@ -298,14 +297,14 @@ clear_tz(void)
*
* If tzname[1] is a nonempty string, *or* the global timezone variable is
* not zero, then tzset must have recognized the TZ value as something
- * different from UTC. Return true.
+ * different from UTC. Return true.
*
* Otherwise, check to see if the TZ name is a known spelling of "UTC"
* (ie, appears in our internal tables as a timezone equivalent to UTC).
* If so, accept it.
*
* This will reject nonstandard spellings of UTC unless tzset() chose to
- * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
+ * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
* do so, but on other systems we may be tightening the spec a little.
*
* Another problem is that on some platforms (eg HPUX), if tzset thinks the
@@ -337,8 +336,8 @@ tzset_succeeded(const char *tz)
return true;
/*
- * Check for known spellings of "UTC". Note we must downcase the input
- * before passing it to DecodePosixTimezone().
+ * Check for known spellings of "UTC". Note we must downcase the
+ * input before passing it to DecodePosixTimezone().
*/
StrNCpy(tztmp, tz, sizeof(tztmp));
for (cp = tztmp; *cp; cp++)
@@ -368,7 +367,7 @@ tz_acceptable(void)
/*
* To detect leap-second timekeeping, compute the time_t value for
- * local midnight, 2000-01-01. Insist that this be a multiple of 60;
+ * local midnight, 2000-01-01. Insist that this be a multiple of 60;
* any partial-minute offset has to be due to leap seconds.
*/
MemSet(&tt, 0, sizeof(tt));
@@ -399,7 +398,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
*/
if (!have_saved_tz)
{
- char *orig_tz = getenv("TZ");
+ char *orig_tz = getenv("TZ");
if (orig_tz)
StrNCpy(orig_tzbuf, orig_tz, sizeof(orig_tzbuf));
@@ -434,9 +433,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport, which is not desirable for GUC. We did what we could to
- * guard against this in flatten_set_variable_args, but a string
- * coming in from postgresql.conf might contain anything.
+ * ereport, which is not desirable for GUC. We did what we could
+ * to guard against this in flatten_set_variable_args, but a
+ * string coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
CStringGetDatum(val),
@@ -455,7 +454,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - interval->time;
+ CTimeZone = -interval->time;
HasCTZSet = true;
}
pfree(interval);
@@ -471,22 +470,22 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - hours * 3600;
+ CTimeZone = -hours * 3600;
HasCTZSet = true;
}
}
else if (strcasecmp(value, "UNKNOWN") == 0)
{
/*
- * UNKNOWN is the value shown as the "default" for TimeZone
- * in guc.c. We interpret it as meaning the original TZ
- * inherited from the environment. Note that if there is an
- * original TZ setting, we will return that rather than UNKNOWN
- * as the canonical spelling.
+ * UNKNOWN is the value shown as the "default" for TimeZone in
+ * guc.c. We interpret it as meaning the original TZ
+ * inherited from the environment. Note that if there is an
+ * original TZ setting, we will return that rather than
+ * UNKNOWN as the canonical spelling.
*/
if (doit)
{
- bool ok;
+ bool ok;
/* Revert to original setting of TZ, whatever it was */
if (orig_tzbuf[0])
@@ -516,14 +515,14 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
- * hope of checking it. So, save the old value in case we have
- * to back out. Note that it's possible the old setting is in
- * tzbuf, so we'd better copy it.
+ * hope of checking it. So, save the old value in case we
+ * have to back out. Note that it's possible the old setting
+ * is in tzbuf, so we'd better copy it.
*/
- char save_tzbuf[TZBUF_LEN];
- char *save_tz;
- bool known,
- acceptable;
+ char save_tzbuf[TZBUF_LEN];
+ char *save_tz;
+ bool known,
+ acceptable;
save_tz = getenv("TZ");
if (save_tz)
@@ -563,8 +562,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
{
ereport(interactive ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timezone \"%s\" appears to use leap seconds",
- value),
+ errmsg("timezone \"%s\" appears to use leap seconds",
+ value),
errdetail("PostgreSQL does not support leap seconds")));
return NULL;
}
@@ -609,7 +608,7 @@ show_timezone(void)
Interval interval;
interval.month = 0;
- interval.time = - CTimeZone;
+ interval.time = -CTimeZone;
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
IntervalPGetDatum(&interval)));
@@ -703,16 +702,16 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
/*
* Note: if we are in startup phase then SetClientEncoding may not be
* able to really set the encoding. In this case we will assume that
- * the encoding is okay, and InitializeClientEncoding() will fix things
- * once initialization is complete.
+ * the encoding is okay, and InitializeClientEncoding() will fix
+ * things once initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (interactive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName())));
+ errmsg("conversion between %s and %s is not supported",
+ value, GetDatabaseEncodingName())));
return NULL;
}
return value;
@@ -758,12 +757,12 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
/* not a saved ID, so look it up */
HeapTuple userTup;
- if (! IsTransactionState())
+ if (!IsTransactionState())
{
/*
* Can't do catalog lookups, so fail. The upshot of this is
- * that session_authorization cannot be set in postgresql.conf,
- * which seems like a good thing anyway.
+ * that session_authorization cannot be set in
+ * postgresql.conf, which seems like a good thing anyway.
*/
return NULL;
}
@@ -782,7 +781,7 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
usesysid = ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid;
is_superuser = ((Form_pg_shadow) GETSTRUCT(userTup))->usesuper;
-
+
ReleaseSysCache(userTup);
}
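A standalone illustration of the sign flip marked "Here we change from SQL to Unix sign convention" in the assign_timezone() hunks above: the SQL-style offset is negated, and hours are scaled to seconds, before being stored in CTimeZone. The helper below is illustrative only, not a backend function.

/* Mirror of the diff's "CTimeZone = -hours * 3600" arithmetic. */
static long
sql_hours_to_ctimezone(int hours_east)
{
	return -(long) hours_east * 3600;
}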
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index e12ae0af686..9c3b372b3f7 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.75 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.76 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -190,8 +190,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change datatype of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change datatype of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 61974827b3b..9267d362ddd 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.72 2003/07/21 17:05:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.73 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,7 +48,7 @@
* ----------------------------------------------------------------
*/
void
-ExecReScan(PlanState *node, ExprContext *exprCtxt)
+ExecReScan(PlanState * node, ExprContext *exprCtxt)
{
/* If collecting timing stats, update them */
if (node->instrument)
@@ -61,7 +61,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
foreach(lst, node->initPlan)
{
- SubPlanState *sstate = (SubPlanState *) lfirst(lst);
+ SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;
if (splan->plan->extParam != NULL) /* don't care about child
@@ -72,7 +72,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
}
foreach(lst, node->subPlan)
{
- SubPlanState *sstate = (SubPlanState *) lfirst(lst);
+ SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;
if (splan->plan->extParam != NULL)
@@ -177,7 +177,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
* Marks the current scan position.
*/
void
-ExecMarkPos(PlanState *node)
+ExecMarkPos(PlanState * node)
{
switch (nodeTag(node))
{
@@ -218,7 +218,7 @@ ExecMarkPos(PlanState *node)
* restores the scan position previously saved with ExecMarkPos()
*/
void
-ExecRestrPos(PlanState *node)
+ExecRestrPos(PlanState * node)
{
switch (nodeTag(node))
{
@@ -302,16 +302,16 @@ ExecSupportsBackwardScan(Plan *node)
return false;
case T_Append:
- {
- List *l;
-
- foreach(l, ((Append *) node)->appendplans)
{
- if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
- return false;
+ List *l;
+
+ foreach(l, ((Append *) node)->appendplans)
+ {
+ if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
+ return false;
+ }
+ return true;
}
- return true;
- }
case T_SeqScan:
case T_IndexScan:
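A standalone sketch of the rule the reindented T_Append case above encodes: a node can be scanned backward only if every one of its children can. PlanNode below is an illustrative stand-in for the planner's Plan nodes, not an executor type.

#include <stdbool.h>

typedef struct PlanNode
{
	struct PlanNode **children;
	int			nchildren;
	bool		scannable_backward;	/* capability of a leaf node */
} PlanNode;

static bool
supports_backward_scan(const PlanNode *node)
{
	int			i;

	if (node->nchildren == 0)
		return node->scannable_backward;

	/* an internal node qualifies only if all children qualify */
	for (i = 0; i < node->nchildren; i++)
	{
		if (!supports_backward_scan(node->children[i]))
			return false;
	}
	return true;
}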
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 3f9c6d0d47d..8b0962ba9be 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.4 2003/07/21 17:05:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.5 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -249,7 +249,7 @@ execTuplesHashPrepare(TupleDesc tupdesc,
eq_function = oprfuncid(optup);
ReleaseSysCache(optup);
hash_function = get_op_hash_function(eq_opr);
- if (!OidIsValid(hash_function)) /* should not happen */
+ if (!OidIsValid(hash_function)) /* should not happen */
elog(ERROR, "could not find hash function for hash operator %u",
eq_opr);
fmgr_info(eq_function, &(*eqfunctions)[i]);
@@ -289,8 +289,8 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
int nbuckets, Size entrysize,
MemoryContext tablecxt, MemoryContext tempcxt)
{
- TupleHashTable hashtable;
- Size tabsize;
+ TupleHashTable hashtable;
+ Size tabsize;
Assert(nbuckets > 0);
Assert(entrysize >= sizeof(TupleHashEntryData));
@@ -411,9 +411,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
* Iterator state must be initialized with ResetTupleHashIterator() macro.
*/
TupleHashEntry
-ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator *state)
+ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator * state)
{
- TupleHashEntry entry;
+ TupleHashEntry entry;
entry = state->next_entry;
while (entry == NULL)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index ded748d5bf8..ae58bb130f7 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.212 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.213 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static void initResultRelInfo(ResultRelInfo *resultRelInfo,
Index resultRelationIndex,
List *rangeTable,
CmdType operation);
-static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
+static TupleTableSlot *ExecutePlan(EState *estate, PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
@@ -87,7 +87,7 @@ static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
static void ExecCheckXactReadOnly(Query *parsetree, CmdType operation);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
- evalPlanQual *priorepq);
+ evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
/* end of local decls */
@@ -100,7 +100,7 @@ static void EvalPlanQualStop(evalPlanQual *epq);
* query plan
*
* Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * clear why we bother to separate the two functions, but...). The tupDesc
* field of the QueryDesc is filled in to describe the tuples that will be
* returned, and the internal fields (estate and planstate) are set up.
*
@@ -122,8 +122,8 @@ ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
Assert(queryDesc->estate == NULL);
/*
- * If the transaction is read-only, we need to check if any writes
- * are planned to non-temporary tables.
+ * If the transaction is read-only, we need to check if any writes are
+ * planned to non-temporary tables.
*/
if (!explainOnly)
ExecCheckXactReadOnly(queryDesc->parsetree, queryDesc->operation);
@@ -362,8 +362,8 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
/*
* Otherwise, only plain-relation RTEs need to be checked here.
- * Function RTEs are checked by init_fcache when the function is prepared
- * for execution. Join and special RTEs need no checks.
+ * Function RTEs are checked by init_fcache when the function is
+ * prepared for execution. Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@@ -435,7 +435,7 @@ ExecCheckXactReadOnly(Query *parsetree, CmdType operation)
if (operation == CMD_DELETE || operation == CMD_INSERT
|| operation == CMD_UPDATE)
{
- List *lp;
+ List *lp;
foreach(lp, parsetree->rtable)
{
@@ -474,9 +474,9 @@ static void
InitPlan(QueryDesc *queryDesc, bool explainOnly)
{
CmdType operation = queryDesc->operation;
- Query *parseTree = queryDesc->parsetree;
- Plan *plan = queryDesc->plantree;
- EState *estate = queryDesc->estate;
+ Query *parseTree = queryDesc->parsetree;
+ Plan *plan = queryDesc->plantree;
+ EState *estate = queryDesc->estate;
PlanState *planstate;
List *rangeTable;
Relation intoRelationDesc;
@@ -484,8 +484,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
TupleDesc tupType;
/*
- * Do permissions checks. It's sufficient to examine the query's
- * top rangetable here --- subplan RTEs will be checked during
+ * Do permissions checks. It's sufficient to examine the query's top
+ * rangetable here --- subplan RTEs will be checked during
* ExecInitSubPlan().
*/
ExecCheckRTPerms(parseTree->rtable, operation);
@@ -570,10 +570,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
if (operation == CMD_SELECT && parseTree->into != NULL)
{
do_select_into = true;
+
/*
- * For now, always create OIDs in SELECT INTO; this is for backwards
- * compatibility with pre-7.3 behavior. Eventually we might want
- * to allow the user to choose.
+ * For now, always create OIDs in SELECT INTO; this is for
+ * backwards compatibility with pre-7.3 behavior. Eventually we
+ * might want to allow the user to choose.
*/
estate->es_force_oids = true;
}
@@ -640,12 +641,12 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries need a
- * filter if there are any junk attrs in the tlist. INSERT and SELECT
- * INTO also need a filter if the top plan node is a scan node that's not
- * doing projection (else we'll be scribbling on the scan tuple!) UPDATE
- * and DELETE always need a filter, since there's always a junk 'ctid'
- * attribute present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries
+ * need a filter if there are any junk attrs in the tlist. INSERT and
+ * SELECT INTO also need a filter if the top plan node is a scan node
+ * that's not doing projection (else we'll be scribbling on the scan
+ * tuple!) UPDATE and DELETE always need a filter, since there's
+ * always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -752,8 +753,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* If doing SELECT INTO, initialize the "into" relation. We must wait
- * till now so we have the "clean" result tuple type to create the
- * new table from.
+ * till now so we have the "clean" result tuple type to create the new
+ * table from.
*
* If EXPLAIN, skip creating the "into" relation.
*/
@@ -795,16 +796,16 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
FreeTupleDesc(tupdesc);
/*
- * Advance command counter so that the newly-created
- * relation's catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's
+ * catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into
- * relation. Note that AlterTableCreateToastTable ends
- * with CommandCounterIncrement(), so that the TOAST table
- * will be visible for insertion.
+ * If necessary, create a TOAST table for the into relation. Note
+ * that AlterTableCreateToastTable ends with
+ * CommandCounterIncrement(), so that the TOAST table will be
+ * visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);
@@ -841,19 +842,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change toast relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
}
@@ -894,7 +895,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
* ----------------------------------------------------------------
*/
void
-ExecEndPlan(PlanState *planstate, EState *estate)
+ExecEndPlan(PlanState * planstate, EState *estate)
{
ResultRelInfo *resultRelInfo;
int i;
@@ -964,18 +965,18 @@ ExecEndPlan(PlanState *planstate, EState *estate)
*/
static TupleTableSlot *
ExecutePlan(EState *estate,
- PlanState *planstate,
+ PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
DestReceiver *dest)
{
- JunkFilter *junkfilter;
- TupleTableSlot *slot;
- ItemPointer tupleid = NULL;
- ItemPointerData tuple_ctid;
- long current_tuple_count;
- TupleTableSlot *result;
+ JunkFilter *junkfilter;
+ TupleTableSlot *slot;
+ ItemPointer tupleid = NULL;
+ ItemPointerData tuple_ctid;
+ long current_tuple_count;
+ TupleTableSlot *result;
/*
* initialize local variables
@@ -1199,7 +1200,7 @@ lnext: ;
/*
* check our tuple count.. if we've processed the proper number
- * then quit, else loop again and process more tuples. Zero
+ * then quit, else loop again and process more tuples. Zero
* numberTuples means no limit.
*/
current_tuple_count++;
@@ -1309,7 +1310,7 @@ ExecInsert(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1686,13 +1687,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value for attribute \"%s\" violates NOT NULL constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}
if (constr->num_check > 0)
{
- const char *failed;
+ const char *failed;
if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
ereport(ERROR,
@@ -1884,10 +1885,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
* integrated with the Param mechanism somehow, so that the upper plan
* nodes know that their children's outputs have changed.
*
- * Note that the stack of free evalPlanQual nodes is quite useless at
- * the moment, since it only saves us from pallocing/releasing the
+ * Note that the stack of free evalPlanQual nodes is quite useless at the
+ * moment, since it only saves us from pallocing/releasing the
* evalPlanQual nodes themselves. But it will be useful once we
- * implement ReScan instead of end/restart for re-using PlanQual nodes.
+ * implement ReScan instead of end/restart for re-using PlanQual
+ * nodes.
*/
if (endNode)
{
@@ -1898,10 +1900,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
/*
* Initialize new recheck query.
*
- * Note: if we were re-using PlanQual plans via ExecReScan, we'd need
- * to instead copy down changeable state from the top plan (including
- * es_result_relation_info, es_junkFilter) and reset locally changeable
- * state in the epq (including es_param_exec_vals, es_evTupleNull).
+ * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
+ * instead copy down changeable state from the top plan (including
+ * es_result_relation_info, es_junkFilter) and reset locally
+ * changeable state in the epq (including es_param_exec_vals,
+ * es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);
@@ -2016,9 +2019,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
/*
* The epqstates share the top query's copy of unchanging state such
- * as the snapshot, rangetable, result-rel info, and external Param info.
- * They need their own copies of local state, including a tuple table,
- * es_param_exec_vals, etc.
+ * as the snapshot, rangetable, result-rel info, and external Param
+ * info. They need their own copies of local state, including a tuple
+ * table, es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
@@ -2036,11 +2039,11 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
epqstate->es_instrument = estate->es_instrument;
epqstate->es_force_oids = estate->es_force_oids;
epqstate->es_topPlan = estate->es_topPlan;
+
/*
- * Each epqstate must have its own es_evTupleNull state, but
- * all the stack entries share es_evTuple state. This allows
- * sub-rechecks to inherit the value being examined by an
- * outer recheck.
+ * Each epqstate must have its own es_evTupleNull state, but all the
+ * stack entries share es_evTuple state. This allows sub-rechecks to
+ * inherit the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index f73f2d71852..1c34e1d1a4b 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.37 2003/07/21 17:05:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.38 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -226,7 +226,7 @@ ExecInitNode(Plan *node, EState *estate)
subps = NIL;
foreach(subp, node->initPlan)
{
- SubPlan *subplan = (SubPlan *) lfirst(subp);
+ SubPlan *subplan = (SubPlan *) lfirst(subp);
SubPlanState *sstate;
Assert(IsA(subplan, SubPlan));
@@ -237,9 +237,9 @@ ExecInitNode(Plan *node, EState *estate)
result->initPlan = subps;
/*
- * Initialize any subPlans present in this node. These were found
- * by ExecInitExpr during initialization of the PlanState. Note we
- * must do this after initializing initPlans, in case their arguments
+ * Initialize any subPlans present in this node. These were found by
+ * ExecInitExpr during initialization of the PlanState. Note we must
+ * do this after initializing initPlans, in case their arguments
* contain subPlans (is that actually possible? perhaps not).
*/
subps = NIL;
@@ -268,7 +268,7 @@ ExecInitNode(Plan *node, EState *estate)
* ----------------------------------------------------------------
*/
TupleTableSlot *
-ExecProcNode(PlanState *node)
+ExecProcNode(PlanState * node)
{
TupleTableSlot *result;
@@ -280,7 +280,7 @@ ExecProcNode(PlanState *node)
if (node == NULL)
return NULL;
- if (node->chgParam != NULL) /* something changed */
+ if (node->chgParam != NULL) /* something changed */
ExecReScan(node, NULL); /* let ReScan handle this */
if (node->instrument)
@@ -484,7 +484,7 @@ ExecCountSlotsNode(Plan *node)
* ----------------------------------------------------------------
*/
void
-ExecEndNode(PlanState *node)
+ExecEndNode(PlanState * node)
{
List *subp;
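A standalone sketch of the recursion the ExecInitNode() comment above describes: initializing a node initializes its children in turn, so the resulting state tree has the same shape as the underlying plan tree. Plan and PlanStateSketch here are illustrative stand-ins, not the executor's own types.

#include <stdlib.h>

typedef struct Plan
{
	struct Plan *left;
	struct Plan *right;
} Plan;

typedef struct PlanStateSketch
{
	const Plan *plan;			/* the plan node this state mirrors */
	struct PlanStateSketch *left;
	struct PlanStateSketch *right;
} PlanStateSketch;

static PlanStateSketch *
init_node(const Plan *plan)
{
	PlanStateSketch *state;

	if (plan == NULL)
		return NULL;

	state = calloc(1, sizeof(*state));
	state->plan = plan;
	/* recurse: children are initialized the same way */
	state->left = init_node(plan->left);
	state->right = init_node(plan->right);
	return state;
}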
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 891019f0ae7..d509122f29b 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.138 2003/08/01 00:15:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.139 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,55 +50,55 @@
/* static function decls */
-static Datum ExecEvalAggref(AggrefExprState *aggref,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalArrayRef(ArrayRefExprState *astate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalAggref(AggrefExprState * aggref,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalArrayRef(ArrayRefExprState * astate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalParam(Param *expression, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalFunc(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
+static Datum ExecEvalOper(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
+static Datum ExecEvalDistinct(FuncExprState * fcache, ExprContext *econtext,
bool *isNull);
-static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
- ExprContext *econtext, bool *isNull);
+static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
+ ExprContext *econtext, bool *isNull);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
-static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
+static Datum ExecEvalNot(BoolExprState * notclause, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalArray(ArrayExprState *astate,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalNullTest(GenericExprState *nstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalBooleanTest(GenericExprState *bstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate,
+static Datum ExecEvalArray(ArrayExprState * astate,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalCoalesce(CoalesceExprState * coalesceExpr,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalNullIf(FuncExprState * nullIfExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalNullTest(GenericExprState * nstate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalBooleanTest(GenericExprState * bstate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalCoerceToDomain(CoerceToDomainState * cstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
- ExprContext *econtext, bool *isNull);
-static Datum ExecEvalFieldSelect(GenericExprState *fstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalCoerceToDomainValue(CoerceToDomainValue * conVal,
+ ExprContext *econtext, bool *isNull);
+static Datum ExecEvalFieldSelect(GenericExprState * fstate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
/*----------
@@ -127,7 +127,7 @@ static Datum ExecEvalFieldSelect(GenericExprState *fstate,
*----------
*/
static Datum
-ExecEvalArrayRef(ArrayRefExprState *astate,
+ExecEvalArrayRef(ArrayRefExprState * astate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -301,7 +301,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalAggref(AggrefExprState *aggref, ExprContext *econtext, bool *isNull)
+ExecEvalAggref(AggrefExprState * aggref, ExprContext *econtext, bool *isNull)
{
if (econtext->ecxt_aggvalues == NULL) /* safety check */
elog(ERROR, "no aggregates in this expression context");
@@ -382,8 +382,8 @@ ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull)
*
* XXX this is a horrid crock: since the pointer to the slot might live
* longer than the current evaluation context, we are forced to copy
- * the tuple and slot into a long-lived context --- we use
- * the econtext's per-query memory which should be safe enough. This
+ * the tuple and slot into a long-lived context --- we use the
+ * econtext's per-query memory which should be safe enough. This
* represents a serious memory leak if many such tuples are processed
* in one command, however. We ought to redesign the representation
* of whole-tuple datums so that this is not necessary.
@@ -439,7 +439,8 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
{
/*
* PARAM_EXEC params (internal executor parameters) are stored in
- * the ecxt_param_exec_vals array, and can be accessed by array index.
+ * the ecxt_param_exec_vals array, and can be accessed by array
+ * index.
*/
ParamExecData *prm;
@@ -457,9 +458,9 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
else
{
/*
- * All other parameter types must be sought in ecxt_param_list_info.
- * NOTE: The last entry in the param array is always an
- * entry with kind == PARAM_INVALID.
+ * All other parameter types must be sought in
+ * ecxt_param_list_info. NOTE: The last entry in the param array
+ * is always an entry with kind == PARAM_INVALID.
*/
ParamListInfo paramList = econtext->ecxt_param_list_info;
char *thisParamName = expression->paramname;
@@ -488,8 +489,8 @@ ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull)
}
if (!matchFound)
paramList++;
- } /* while */
- } /* if */
+ } /* while */
+ } /* if */
if (!matchFound)
{
@@ -605,7 +606,7 @@ GetAttributeByName(TupleTableSlot *slot, char *attname, bool *isNull)
* init_fcache - initialize a FuncExprState node during first use
*/
void
-init_fcache(Oid foid, FuncExprState *fcache, MemoryContext fcacheCxt)
+init_fcache(Oid foid, FuncExprState * fcache, MemoryContext fcacheCxt)
{
AclResult aclresult;
@@ -678,7 +679,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo,
* Evaluate the arguments to a function and then the function itself.
*/
Datum
-ExecMakeFunctionResult(FuncExprState *fcache,
+ExecMakeFunctionResult(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -881,7 +882,7 @@ ExecMakeFunctionResult(FuncExprState *fcache,
* object. (If function returns an empty set, we just return NULL instead.)
*/
Tuplestorestate *
-ExecMakeTableFunctionResult(ExprState *funcexpr,
+ExecMakeTableFunctionResult(ExprState * funcexpr,
ExprContext *econtext,
TupleDesc expectedDesc,
TupleDesc *returnDesc)
@@ -899,14 +900,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
bool returnsTuple = false;
/*
- * Normally the passed expression tree will be a FuncExprState, since the
- * grammar only allows a function call at the top level of a table
- * function reference. However, if the function doesn't return set then
- * the planner might have replaced the function call via constant-folding
- * or inlining. So if we see any other kind of expression node, execute
- * it via the general ExecEvalExpr() code; the only difference is that
- * we don't get a chance to pass a special ReturnSetInfo to any functions
- * buried in the expression.
+ * Normally the passed expression tree will be a FuncExprState, since
+ * the grammar only allows a function call at the top level of a table
+ * function reference. However, if the function doesn't return set
+ * then the planner might have replaced the function call via
+ * constant-folding or inlining. So if we see any other kind of
+ * expression node, execute it via the general ExecEvalExpr() code;
+ * the only difference is that we don't get a chance to pass a special
+ * ReturnSetInfo to any functions buried in the expression.
*/
if (funcexpr && IsA(funcexpr, FuncExprState) &&
IsA(funcexpr->expr, FuncExpr))
@@ -924,7 +925,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
*/
if (fcache->func.fn_oid == InvalidOid)
{
- FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
+ FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
init_fcache(func->funcid, fcache, econtext->ecxt_per_query_memory);
}
@@ -933,9 +934,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* Evaluate the function's argument list.
*
* Note: ideally, we'd do this in the per-tuple context, but then the
- * argument values would disappear when we reset the context in the
- * inner loop. So do it in caller context. Perhaps we should make a
- * separate context just to hold the evaluated arguments?
+ * argument values would disappear when we reset the context in
+ * the inner loop. So do it in caller context. Perhaps we should
+ * make a separate context just to hold the evaluated arguments?
*/
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &(fcache->func);
@@ -990,7 +991,8 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
rsinfo.setDesc = NULL;
/*
- * Switch to short-lived context for calling the function or expression.
+ * Switch to short-lived context for calling the function or
+ * expression.
*/
callerContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
@@ -1004,9 +1006,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
HeapTuple tuple;
/*
- * reset per-tuple memory context before each call of the
- * function or expression. This cleans up any local memory the
- * function may leak when called.
+ * reset per-tuple memory context before each call of the function
+ * or expression. This cleans up any local memory the function may
+ * leak when called.
*/
ResetExprContext(econtext);
@@ -1157,7 +1159,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalFunc(FuncExprState *fcache,
+ExecEvalFunc(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1167,7 +1169,7 @@ ExecEvalFunc(FuncExprState *fcache,
*/
if (fcache->func.fn_oid == InvalidOid)
{
- FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
+ FuncExpr *func = (FuncExpr *) fcache->xprstate.expr;
init_fcache(func->funcid, fcache, econtext->ecxt_per_query_memory);
}
@@ -1180,7 +1182,7 @@ ExecEvalFunc(FuncExprState *fcache,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalOper(FuncExprState *fcache,
+ExecEvalOper(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1190,7 +1192,7 @@ ExecEvalOper(FuncExprState *fcache,
*/
if (fcache->func.fn_oid == InvalidOid)
{
- OpExpr *op = (OpExpr *) fcache->xprstate.expr;
+ OpExpr *op = (OpExpr *) fcache->xprstate.expr;
init_fcache(op->opfuncid, fcache, econtext->ecxt_per_query_memory);
}
@@ -1210,7 +1212,7 @@ ExecEvalOper(FuncExprState *fcache,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalDistinct(FuncExprState *fcache,
+ExecEvalDistinct(FuncExprState * fcache,
ExprContext *econtext,
bool *isNull)
{
@@ -1242,7 +1244,7 @@ ExecEvalDistinct(FuncExprState *fcache,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM does not support set arguments")));
+ errmsg("IS DISTINCT FROM does not support set arguments")));
Assert(fcinfo.nargs == 2);
if (fcinfo.argnull[0] && fcinfo.argnull[1])
@@ -1272,11 +1274,11 @@ ExecEvalDistinct(FuncExprState *fcache,
*
* Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean,
* and we combine the results across all array elements using OR and AND
- * (for ANY and ALL respectively). Of course we short-circuit as soon as
+ * (for ANY and ALL respectively). Of course we short-circuit as soon as
* the result is known.
*/
static Datum
-ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
+ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
ExprContext *econtext, bool *isNull)
{
ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) sstate->fxprstate.xprstate.expr;
@@ -1310,12 +1312,12 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
if (argDone != ExprSingleResult)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("op ANY/ALL (array) does not support set arguments")));
+ errmsg("op ANY/ALL (array) does not support set arguments")));
Assert(fcinfo.nargs == 2);
/*
- * If the array is NULL then we return NULL --- it's not very meaningful
- * to do anything else, even if the operator isn't strict.
+ * If the array is NULL then we return NULL --- it's not very
+ * meaningful to do anything else, even if the operator isn't strict.
*/
if (fcinfo.argnull[1])
{
@@ -1334,6 +1336,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
nitems = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));
if (nitems <= 0)
return BoolGetDatum(!useOr);
+
/*
* If the scalar is NULL, and the function is strict, return NULL.
* This is just to avoid having to test for strictness inside the
@@ -1347,8 +1350,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
}
/*
- * We arrange to look up info about the element type only
- * once per series of calls, assuming the element type doesn't change
+ * We arrange to look up info about the element type only once per
+ * series of calls, assuming the element type doesn't change
* underneath us.
*/
if (sstate->element_type != ARR_ELEMTYPE(arr))
@@ -1370,8 +1373,8 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
s = (char *) ARR_DATA_PTR(arr);
for (i = 0; i < nitems; i++)
{
- Datum elt;
- Datum thisresult;
+ Datum elt;
+ Datum thisresult;
/* Get array element */
elt = fetch_att(s, typbyval, typlen);
@@ -1394,7 +1397,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
{
result = BoolGetDatum(true);
resultnull = false;
- break; /* needn't look at any more elements */
+ break; /* needn't look at any more elements */
}
}
else
@@ -1403,7 +1406,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
{
result = BoolGetDatum(false);
resultnull = false;
- break; /* needn't look at any more elements */
+ break; /* needn't look at any more elements */
}
}
}
@@ -1428,7 +1431,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, bool *isNull)
+ExecEvalNot(BoolExprState * notclause, ExprContext *econtext, bool *isNull)
{
ExprState *clause;
Datum expr_value;
@@ -1456,7 +1459,7 @@ ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, bool *isNull)
+ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@@ -1504,7 +1507,7 @@ ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull)
+ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext, bool *isNull)
{
List *clauses;
List *clause;
@@ -1552,7 +1555,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, bool *isNull)
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
+ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
List *clauses;
@@ -1610,22 +1613,22 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
+ExecEvalArray(ArrayExprState * astate, ExprContext *econtext,
bool *isNull)
{
- ArrayExpr *arrayExpr = (ArrayExpr *) astate->xprstate.expr;
+ ArrayExpr *arrayExpr = (ArrayExpr *) astate->xprstate.expr;
ArrayType *result;
- List *element;
- Oid element_type = arrayExpr->element_typeid;
- int ndims = arrayExpr->ndims;
- int dims[MAXDIM];
- int lbs[MAXDIM];
+ List *element;
+ Oid element_type = arrayExpr->element_typeid;
+ int ndims = arrayExpr->ndims;
+ int dims[MAXDIM];
+ int lbs[MAXDIM];
if (ndims == 1)
{
- int nelems;
- Datum *dvalues;
- int i = 0;
+ int nelems;
+ Datum *dvalues;
+ int i = 0;
nelems = length(astate->elements);
@@ -1683,7 +1686,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
/* loop through and get data area from each element */
foreach(element, astate->elements)
{
- ExprState *e = (ExprState *) lfirst(element);
+ ExprState *e = (ExprState *) lfirst(element);
bool eisnull;
Datum arraydatum;
ArrayType *array;
@@ -1718,8 +1721,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
elem_ndims * sizeof(int)) != 0)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("multidimensional arrays must have array "
- "expressions with matching dimensions")));
+ errmsg("multidimensional arrays must have array "
+ "expressions with matching dimensions")));
}
elem_ndatabytes = ARR_SIZE(array) - ARR_OVERHEAD(elem_ndims);
@@ -1767,16 +1770,16 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
+ExecEvalCoalesce(CoalesceExprState * coalesceExpr, ExprContext *econtext,
bool *isNull)
{
- List *arg;
+ List *arg;
/* Simply loop through until something NOT NULL is found */
foreach(arg, coalesceExpr->args)
{
- ExprState *e = (ExprState *) lfirst(arg);
- Datum value;
+ ExprState *e = (ExprState *) lfirst(arg);
+ Datum value;
value = ExecEvalExpr(e, econtext, isNull, NULL);
if (!*isNull)
@@ -1787,7 +1790,7 @@ ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
*isNull = true;
return (Datum) 0;
}
-
+
/* ----------------------------------------------------------------
* ExecEvalNullIf
*
@@ -1797,7 +1800,7 @@ ExecEvalCoalesce(CoalesceExprState *coalesceExpr, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalNullIf(FuncExprState *fcache, ExprContext *econtext,
+ExecEvalNullIf(FuncExprState * fcache, ExprContext *econtext,
bool *isNull)
{
Datum result;
@@ -1856,7 +1859,7 @@ ExecEvalNullIf(FuncExprState *fcache, ExprContext *econtext,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalNullTest(GenericExprState *nstate,
+ExecEvalNullTest(GenericExprState * nstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1901,7 +1904,7 @@ ExecEvalNullTest(GenericExprState *nstate,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalBooleanTest(GenericExprState *bstate,
+ExecEvalBooleanTest(GenericExprState * bstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -1987,7 +1990,7 @@ ExecEvalBooleanTest(GenericExprState *bstate,
* datum) otherwise throw an error.
*/
static Datum
-ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
+ExecEvalCoerceToDomain(CoerceToDomainState * cstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone)
{
CoerceToDomain *ctest = (CoerceToDomain *) cstate->xprstate.expr;
@@ -2009,43 +2012,44 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
if (*isNull)
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
- errmsg("domain %s does not allow NULL values",
- format_type_be(ctest->resulttype))));
+ errmsg("domain %s does not allow NULL values",
+ format_type_be(ctest->resulttype))));
break;
case DOM_CONSTRAINT_CHECK:
- {
- Datum conResult;
- bool conIsNull;
- Datum save_datum;
- bool save_isNull;
-
- /*
- * Set up value to be returned by CoerceToDomainValue nodes.
- * We must save and restore prior setting of econtext's
- * domainValue fields, in case this node is itself within
- * a check expression for another domain.
- */
- save_datum = econtext->domainValue_datum;
- save_isNull = econtext->domainValue_isNull;
+ {
+ Datum conResult;
+ bool conIsNull;
+ Datum save_datum;
+ bool save_isNull;
- econtext->domainValue_datum = result;
- econtext->domainValue_isNull = *isNull;
+ /*
+ * Set up value to be returned by CoerceToDomainValue
+ * nodes. We must save and restore prior setting of
+ * econtext's domainValue fields, in case this node is
+ * itself within a check expression for another
+ * domain.
+ */
+ save_datum = econtext->domainValue_datum;
+ save_isNull = econtext->domainValue_isNull;
- conResult = ExecEvalExpr(con->check_expr,
- econtext, &conIsNull, NULL);
+ econtext->domainValue_datum = result;
+ econtext->domainValue_isNull = *isNull;
- if (!conIsNull &&
- !DatumGetBool(conResult))
- ereport(ERROR,
- (errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("value for domain %s violates CHECK constraint \"%s\"",
- format_type_be(ctest->resulttype),
- con->name)));
- econtext->domainValue_datum = save_datum;
- econtext->domainValue_isNull = save_isNull;
+ conResult = ExecEvalExpr(con->check_expr,
+ econtext, &conIsNull, NULL);
- break;
- }
+ if (!conIsNull &&
+ !DatumGetBool(conResult))
+ ereport(ERROR,
+ (errcode(ERRCODE_CHECK_VIOLATION),
+ errmsg("value for domain %s violates CHECK constraint \"%s\"",
+ format_type_be(ctest->resulttype),
+ con->name)));
+ econtext->domainValue_datum = save_datum;
+ econtext->domainValue_isNull = save_isNull;
+
+ break;
+ }
default:
elog(ERROR, "unrecognized constraint type: %d",
(int) con->constrainttype);
@@ -2063,7 +2067,7 @@ ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext,
* Return the value stored by CoerceToDomain.
*/
static Datum
-ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
+ExecEvalCoerceToDomainValue(CoerceToDomainValue * conVal,
ExprContext *econtext, bool *isNull)
{
*isNull = econtext->domainValue_isNull;
@@ -2077,7 +2081,7 @@ ExecEvalCoerceToDomainValue(CoerceToDomainValue *conVal,
* ----------------------------------------------------------------
*/
static Datum
-ExecEvalFieldSelect(GenericExprState *fstate,
+ExecEvalFieldSelect(GenericExprState * fstate,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -2141,7 +2145,7 @@ ExecEvalFieldSelect(GenericExprState *fstate,
* ----------------------------------------------------------------
*/
Datum
-ExecEvalExpr(ExprState *expression,
+ExecEvalExpr(ExprState * expression,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -2308,7 +2312,7 @@ ExecEvalExpr(ExprState *expression,
* Same as above, but get into the right allocation context explicitly.
*/
Datum
-ExecEvalExprSwitchContext(ExprState *expression,
+ExecEvalExprSwitchContext(ExprState * expression,
ExprContext *econtext,
bool *isNull,
ExprDoneCond *isDone)
@@ -2327,7 +2331,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* ExecInitExpr: prepare an expression tree for execution
*
* This function builds and returns an ExprState tree paralleling the given
- * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
+ * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr
* for execution. Because the Expr tree itself is read-only as far as
* ExecInitExpr and ExecEvalExpr are concerned, several different executions
* of the same plan tree can occur concurrently.
@@ -2337,7 +2341,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* the same as the per-query context of the associated ExprContext.
*
* Any Aggref and SubPlan nodes found in the tree are added to the lists
- * of such nodes held by the parent PlanState. Otherwise, we do very little
+ * of such nodes held by the parent PlanState. Otherwise, we do very little
* initialization here other than building the state-node tree. Any nontrivial
* work associated with initializing runtime info for a node should happen
* during the first actual evaluation of that node. (This policy lets us
@@ -2356,7 +2360,7 @@ ExecEvalExprSwitchContext(ExprState *expression,
* This case should usually come through ExecPrepareExpr, not directly here.
*/
ExprState *
-ExecInitExpr(Expr *node, PlanState *parent)
+ExecInitExpr(Expr *node, PlanState * parent)
{
ExprState *state;
@@ -2373,7 +2377,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_Aggref:
{
- Aggref *aggref = (Aggref *) node;
+ Aggref *aggref = (Aggref *) node;
AggrefExprState *astate = makeNode(AggrefExprState);
if (parent && IsA(parent, AggState))
@@ -2389,8 +2393,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Complain if the aggregate's argument contains any
* aggregates; nested agg functions are semantically
- * nonsensical. (This should have been caught earlier,
- * but we defend against it here anyway.)
+ * nonsensical. (This should have been caught
+ * earlier, but we defend against it here anyway.)
*/
if (naggs != aggstate->numaggs)
ereport(ERROR,
@@ -2433,41 +2437,41 @@ ExecInitExpr(Expr *node, PlanState *parent)
fstate->args = (List *)
ExecInitExpr((Expr *) funcexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_OpExpr:
{
- OpExpr *opexpr = (OpExpr *) node;
+ OpExpr *opexpr = (OpExpr *) node;
FuncExprState *fstate = makeNode(FuncExprState);
fstate->args = (List *)
ExecInitExpr((Expr *) opexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_DistinctExpr:
{
- DistinctExpr *distinctexpr = (DistinctExpr *) node;
+ DistinctExpr *distinctexpr = (DistinctExpr *) node;
FuncExprState *fstate = makeNode(FuncExprState);
fstate->args = (List *)
ExecInitExpr((Expr *) distinctexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
case T_ScalarArrayOpExpr:
{
- ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
ScalarArrayOpExprState *sstate = makeNode(ScalarArrayOpExprState);
sstate->fxprstate.args = (List *)
ExecInitExpr((Expr *) opexpr->args, parent);
- sstate->fxprstate.func.fn_oid = InvalidOid; /* not initialized */
- sstate->element_type = InvalidOid; /* ditto */
+ sstate->fxprstate.func.fn_oid = InvalidOid; /* not initialized */
+ sstate->element_type = InvalidOid; /* ditto */
state = (ExprState *) sstate;
}
break;
@@ -2484,7 +2488,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
case T_SubPlan:
{
/* Keep this in sync with ExecInitExprInitPlan, below */
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
SubPlanState *sstate = makeNode(SubPlanState);
if (!parent)
@@ -2492,7 +2496,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* Here we just add the SubPlanState nodes to
- * parent->subPlan. The subplans will be initialized later.
+ * parent->subPlan. The subplans will be initialized
+ * later.
*/
parent->subPlan = lcons(sstate, parent->subPlan);
sstate->sub_estate = NULL;
@@ -2508,7 +2513,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_FieldSelect:
{
- FieldSelect *fselect = (FieldSelect *) node;
+ FieldSelect *fselect = (FieldSelect *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(fselect->arg, parent);
@@ -2517,7 +2522,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_RelabelType:
{
- RelabelType *relabel = (RelabelType *) node;
+ RelabelType *relabel = (RelabelType *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(relabel->arg, parent);
@@ -2552,10 +2557,10 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_ArrayExpr:
{
- ArrayExpr *arrayexpr = (ArrayExpr *) node;
+ ArrayExpr *arrayexpr = (ArrayExpr *) node;
ArrayExprState *astate = makeNode(ArrayExprState);
- FastList outlist;
- List *inlist;
+ FastList outlist;
+ List *inlist;
FastListInit(&outlist);
foreach(inlist, arrayexpr->elements)
@@ -2585,8 +2590,8 @@ ExecInitExpr(Expr *node, PlanState *parent)
FastListInit(&outlist);
foreach(inlist, coalesceexpr->args)
{
- Expr *e = (Expr *) lfirst(inlist);
- ExprState *estate;
+ Expr *e = (Expr *) lfirst(inlist);
+ ExprState *estate;
estate = ExecInitExpr(e, parent);
FastAppend(&outlist, estate);
@@ -2602,7 +2607,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
fstate->args = (List *)
ExecInitExpr((Expr *) nullifexpr->args, parent);
- fstate->func.fn_oid = InvalidOid; /* not initialized */
+ fstate->func.fn_oid = InvalidOid; /* not initialized */
state = (ExprState *) fstate;
}
break;
@@ -2617,7 +2622,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_BooleanTest:
{
- BooleanTest *btest = (BooleanTest *) node;
+ BooleanTest *btest = (BooleanTest *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(btest->arg, parent);
@@ -2626,7 +2631,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_CoerceToDomain:
{
- CoerceToDomain *ctest = (CoerceToDomain *) node;
+ CoerceToDomain *ctest = (CoerceToDomain *) node;
CoerceToDomainState *cstate = makeNode(CoerceToDomainState);
cstate->arg = ExecInitExpr(ctest->arg, parent);
@@ -2636,7 +2641,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
break;
case T_TargetEntry:
{
- TargetEntry *tle = (TargetEntry *) node;
+ TargetEntry *tle = (TargetEntry *) node;
GenericExprState *gstate = makeNode(GenericExprState);
gstate->arg = ExecInitExpr(tle->expr, parent);
@@ -2673,12 +2678,12 @@ ExecInitExpr(Expr *node, PlanState *parent)
/*
* ExecInitExprInitPlan --- initialize a subplan expr that's being handled
- * as an InitPlan. This is identical to ExecInitExpr's handling of a regular
+ * as an InitPlan. This is identical to ExecInitExpr's handling of a regular
* subplan expr, except we do NOT want to add the node to the parent's
* subplan list.
*/
SubPlanState *
-ExecInitExprInitPlan(SubPlan *node, PlanState *parent)
+ExecInitExprInitPlan(SubPlan *node, PlanState * parent)
{
SubPlanState *sstate = makeNode(SubPlanState);
@@ -2704,7 +2709,7 @@ ExecInitExprInitPlan(SubPlan *node, PlanState *parent)
* This differs from ExecInitExpr in that we don't assume the caller is
* already running in the EState's per-query context. Also, we apply
* fix_opfuncids() to the passed expression tree to be sure it is ready
- * to run. (In ordinary Plan trees the planner will have fixed opfuncids,
+ * to run. (In ordinary Plan trees the planner will have fixed opfuncids,
* but callers outside the executor will not have done this.)
*/
ExprState *
@@ -2988,8 +2993,8 @@ ExecTargetList(List *targetlist,
if (itemIsDone[resind] == ExprEndResult)
{
/*
- * Oh dear, this item is returning an empty
- * set. Guess we can't make a tuple after all.
+ * Oh dear, this item is returning an empty set.
+ * Guess we can't make a tuple after all.
*/
*isDone = ExprEndResult;
break;
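
[Editorial sketch, not part of the commit] The largest reflowed hunk in execQual.c above is the DOM_CONSTRAINT_CHECK branch of ExecEvalCoerceToDomain, whose comment describes saving and restoring the econtext's domainValue fields around evaluation of the check expression, so that a domain nested inside another domain's CHECK expression does not clobber its caller's value. A minimal standalone sketch of that save/install/evaluate/restore pattern follows; EvalContext, eval_expr and check_domain_value are illustrative stand-ins, not backend APIs.

    /* Illustrative only: mimics the domain CHECK save/restore pattern above. */
    typedef struct EvalContext
    {
        long    domain_value;
        int     domain_is_null;
    } EvalContext;

    static int
    check_domain_value(EvalContext *cxt, long value, int is_null,
                       int (*eval_expr) (EvalContext *))
    {
        long    save_value = cxt->domain_value;     /* save prior setting */
        int     save_is_null = cxt->domain_is_null;
        int     ok;

        cxt->domain_value = value;                  /* expose value under test */
        cxt->domain_is_null = is_null;

        ok = eval_expr(cxt);                        /* may recurse into another domain */

        cxt->domain_value = save_value;             /* restore for the caller */
        cxt->domain_is_null = save_is_null;
        return ok;
    }
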
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 9352c79d81e..35007cf0cc0 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.23 2003/02/03 15:07:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.24 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ static bool tlist_matches_tupdesc(List *tlist, Index varno, TupleDesc tupdesc);
* ----------------------------------------------------------------
*/
TupleTableSlot *
-ExecScan(ScanState *node,
+ExecScan(ScanState * node,
ExecScanAccessMtd accessMtd) /* function returning a tuple */
{
EState *estate;
@@ -134,9 +134,10 @@ ExecScan(ScanState *node,
if (projInfo)
{
/*
- * Form a projection tuple, store it in the result tuple slot
- * and return it --- unless we find we can project no tuples
- * from this scan tuple, in which case continue scan.
+ * Form a projection tuple, store it in the result tuple
+ * slot and return it --- unless we find we can project no
+ * tuples from this scan tuple, in which case continue
+ * scan.
*/
resultSlot = ExecProject(projInfo, &isDone);
if (isDone != ExprEndResult)
@@ -175,13 +176,13 @@ ExecScan(ScanState *node,
* ExecAssignScanType must have been called already.
*/
void
-ExecAssignScanProjectionInfo(ScanState *node)
+ExecAssignScanProjectionInfo(ScanState * node)
{
- Scan *scan = (Scan *) node->ps.plan;
+ Scan *scan = (Scan *) node->ps.plan;
if (tlist_matches_tupdesc(scan->plan.targetlist,
scan->scanrelid,
- node->ss_ScanTupleSlot->ttc_tupleDescriptor))
+ node->ss_ScanTupleSlot->ttc_tupleDescriptor))
node->ps.ps_ProjInfo = NULL;
else
ExecAssignProjectionInfo(&node->ps);
@@ -190,13 +191,13 @@ ExecAssignScanProjectionInfo(ScanState *node)
static bool
tlist_matches_tupdesc(List *tlist, Index varno, TupleDesc tupdesc)
{
- int numattrs = tupdesc->natts;
- int attrno;
+ int numattrs = tupdesc->natts;
+ int attrno;
for (attrno = 1; attrno <= numattrs; attrno++)
{
Form_pg_attribute att_tup = tupdesc->attrs[attrno - 1];
- Var *var;
+ Var *var;
if (tlist == NIL)
return false; /* tlist too short */
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 976c152236c..c2145b7eca8 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.67 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.68 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -482,7 +482,7 @@ ExecSetSlotDescriptorIsNew(TupleTableSlot *slot, /* slot to change */
* ----------------
*/
void
-ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
+ExecInitResultTupleSlot(EState *estate, PlanState * planstate)
{
INIT_SLOT_DEFS;
INIT_SLOT_ALLOC;
@@ -494,7 +494,7 @@ ExecInitResultTupleSlot(EState *estate, PlanState *planstate)
* ----------------
*/
void
-ExecInitScanTupleSlot(EState *estate, ScanState *scanstate)
+ExecInitScanTupleSlot(EState *estate, ScanState * scanstate)
{
INIT_SLOT_DEFS;
INIT_SLOT_ALLOC;
@@ -807,7 +807,7 @@ do_text_output_multiline(TupOutputState *tstate, char *text)
if (eol)
*eol++ = '\0';
else
- eol = text + strlen(text);
+ eol = text +strlen(text);
do_tup_output(tstate, &text);
text = eol;
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index ca4ff192f44..f87708d3927 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.100 2003/05/28 16:03:56 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.101 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -166,8 +166,8 @@ CreateExecutorState(void)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Make the EState node within the per-query context. This way,
- * we don't need a separate pfree() operation for it at shutdown.
+ * Make the EState node within the per-query context. This way, we
+ * don't need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(qcontext);
@@ -248,6 +248,7 @@ FreeExecutorState(EState *estate)
FreeExprContext((ExprContext *) lfirst(estate->es_exprcontexts));
/* FreeExprContext removed the list link for us */
}
+
/*
* Free the per-query memory context, thereby releasing all working
* memory, including the EState node itself.
@@ -310,10 +311,10 @@ CreateExprContext(EState *estate)
econtext->ecxt_callbacks = NULL;
/*
- * Link the ExprContext into the EState to ensure it is shut down
- * when the EState is freed. Because we use lcons(), shutdowns will
- * occur in reverse order of creation, which may not be essential
- * but can't hurt.
+ * Link the ExprContext into the EState to ensure it is shut down when
+ * the EState is freed. Because we use lcons(), shutdowns will occur
+ * in reverse order of creation, which may not be essential but can't
+ * hurt.
*/
estate->es_exprcontexts = lcons(econtext, estate->es_exprcontexts);
@@ -377,14 +378,14 @@ MakePerTupleExprContext(EState *estate)
/* ----------------
* ExecAssignExprContext
*
- * This initializes the ps_ExprContext field. It is only necessary
+ * This initializes the ps_ExprContext field. It is only necessary
* to do this for nodes which use ExecQual or ExecProject
- * because those routines require an econtext. Other nodes that
+ * because those routines require an econtext. Other nodes that
* don't have to evaluate expressions don't need to do this.
* ----------------
*/
void
-ExecAssignExprContext(EState *estate, PlanState *planstate)
+ExecAssignExprContext(EState *estate, PlanState * planstate)
{
planstate->ps_ExprContext = CreateExprContext(estate);
}
@@ -394,7 +395,7 @@ ExecAssignExprContext(EState *estate, PlanState *planstate)
* ----------------
*/
void
-ExecAssignResultType(PlanState *planstate,
+ExecAssignResultType(PlanState * planstate,
TupleDesc tupDesc, bool shouldFree)
{
TupleTableSlot *slot = planstate->ps_ResultTupleSlot;
@@ -407,7 +408,7 @@ ExecAssignResultType(PlanState *planstate,
* ----------------
*/
void
-ExecAssignResultTypeFromOuterPlan(PlanState *planstate)
+ExecAssignResultTypeFromOuterPlan(PlanState * planstate)
{
PlanState *outerPlan;
TupleDesc tupDesc;
@@ -423,7 +424,7 @@ ExecAssignResultTypeFromOuterPlan(PlanState *planstate)
* ----------------
*/
void
-ExecAssignResultTypeFromTL(PlanState *planstate)
+ExecAssignResultTypeFromTL(PlanState * planstate)
{
bool hasoid = false;
TupleDesc tupDesc;
@@ -445,9 +446,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
* each of the child plans of the topmost Append plan. So, this is
* ugly but it works, for now ...
*
- * SELECT INTO is also pretty grotty, because we don't yet have the
- * INTO relation's descriptor at this point; we have to look aside
- * at a flag set by InitPlan().
+ * SELECT INTO is also pretty grotty, because we don't yet have the INTO
+ * relation's descriptor at this point; we have to look aside at a
+ * flag set by InitPlan().
*/
if (planstate->state->es_force_oids)
hasoid = true;
@@ -465,9 +466,9 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
}
/*
- * ExecTypeFromTL needs the parse-time representation of the tlist, not
- * a list of ExprStates. This is good because some plan nodes don't
- * bother to set up planstate->targetlist ...
+ * ExecTypeFromTL needs the parse-time representation of the tlist,
+ * not a list of ExprStates. This is good because some plan nodes
+ * don't bother to set up planstate->targetlist ...
*/
tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid);
ExecAssignResultType(planstate, tupDesc, true);
@@ -478,7 +479,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate)
* ----------------
*/
TupleDesc
-ExecGetResultType(PlanState *planstate)
+ExecGetResultType(PlanState * planstate)
{
TupleTableSlot *slot = planstate->ps_ResultTupleSlot;
@@ -524,7 +525,7 @@ ExecBuildProjectionInfo(List *targetList,
* ----------------
*/
void
-ExecAssignProjectionInfo(PlanState *planstate)
+ExecAssignProjectionInfo(PlanState * planstate)
{
planstate->ps_ProjInfo =
ExecBuildProjectionInfo(planstate->targetlist,
@@ -543,7 +544,7 @@ ExecAssignProjectionInfo(PlanState *planstate)
* ----------------
*/
void
-ExecFreeExprContext(PlanState *planstate)
+ExecFreeExprContext(PlanState * planstate)
{
ExprContext *econtext;
@@ -575,7 +576,7 @@ ExecFreeExprContext(PlanState *planstate)
* ----------------
*/
TupleDesc
-ExecGetScanType(ScanState *scanstate)
+ExecGetScanType(ScanState * scanstate)
{
TupleTableSlot *slot = scanstate->ss_ScanTupleSlot;
@@ -587,7 +588,7 @@ ExecGetScanType(ScanState *scanstate)
* ----------------
*/
void
-ExecAssignScanType(ScanState *scanstate,
+ExecAssignScanType(ScanState * scanstate,
TupleDesc tupDesc, bool shouldFree)
{
TupleTableSlot *slot = scanstate->ss_ScanTupleSlot;
@@ -600,7 +601,7 @@ ExecAssignScanType(ScanState *scanstate,
* ----------------
*/
void
-ExecAssignScanTypeFromOuterPlan(ScanState *scanstate)
+ExecAssignScanTypeFromOuterPlan(ScanState * scanstate)
{
PlanState *outerPlan;
TupleDesc tupDesc;
@@ -795,8 +796,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* We will use the EState's per-tuple context for evaluating
- * predicates and index expressions (creating it if it's not
- * already there).
+ * predicates and index expressions (creating it if it's not already
+ * there).
*/
econtext = GetPerTupleExprContext(estate);
@@ -841,8 +842,8 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
/*
* FormIndexDatum fills in its datum and null parameters with
- * attribute information taken from the given heap tuple.
- * It also computes any expressions needed.
+ * attribute information taken from the given heap tuple. It also
+ * computes any expressions needed.
*/
FormIndexDatum(indexInfo,
heapTuple,
@@ -878,7 +879,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
* Add changed parameters to a plan node's chgParam set
*/
void
-UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
+UpdateChangedParamSet(PlanState * node, Bitmapset * newchg)
{
Bitmapset *parmset;
@@ -887,6 +888,7 @@ UpdateChangedParamSet(PlanState *node, Bitmapset *newchg)
* Don't include anything else into its chgParam set.
*/
parmset = bms_intersect(node->plan->allParam, newchg);
+
/*
* Keep node->chgParam == NULL if there's not actually any members;
* this allows the simplest possible tests in executor node files.
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index c8df7ccb83c..ebc3cbcac37 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.69 2003/07/28 18:33:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.70 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
/*
- * We have an execution_state record for each query in a function. Each
- * record contains a querytree and plantree for its query. If the query
+ * We have an execution_state record for each query in a function. Each
+ * record contains a querytree and plantree for its query. If the query
* is currently in F_EXEC_RUN state then there's a QueryDesc too.
*/
typedef enum
@@ -83,7 +83,7 @@ static void postquel_start(execution_state *es, SQLFunctionCachePtr fcache);
static TupleTableSlot *postquel_getnext(execution_state *es);
static void postquel_end(execution_state *es);
static void postquel_sub_params(SQLFunctionCachePtr fcache,
- FunctionCallInfo fcinfo);
+ FunctionCallInfo fcinfo);
static Datum postquel_execute(execution_state *es,
FunctionCallInfo fcinfo,
SQLFunctionCachePtr fcache);
@@ -177,11 +177,11 @@ init_sql_fcache(FmgrInfo *finfo)
if (rettype == ANYARRAYOID || rettype == ANYELEMENTOID)
{
rettype = get_fn_expr_rettype(finfo);
- if (rettype == InvalidOid) /* this probably should not happen */
+ if (rettype == InvalidOid) /* this probably should not happen */
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("could not determine actual result type for function declared %s",
- format_type_be(procedureStruct->prorettype))));
+ format_type_be(procedureStruct->prorettype))));
}
/* Now look up the actual result type */
@@ -226,7 +226,7 @@ init_sql_fcache(FmgrInfo *finfo)
fcache->funcSlot = NULL;
/*
- * Parse and plan the queries. We need the argument type info to pass
+ * Parse and plan the queries. We need the argument type info to pass
* to the parser.
*/
nargs = procedureStruct->pronargs;
@@ -234,7 +234,7 @@ init_sql_fcache(FmgrInfo *finfo)
if (nargs > 0)
{
- int argnum;
+ int argnum;
argOidVect = (Oid *) palloc(nargs * sizeof(Oid));
memcpy(argOidVect,
@@ -243,7 +243,7 @@ init_sql_fcache(FmgrInfo *finfo)
/* Resolve any polymorphic argument types */
for (argnum = 0; argnum < nargs; argnum++)
{
- Oid argtype = argOidVect[argnum];
+ Oid argtype = argOidVect[argnum];
if (argtype == ANYARRAYOID || argtype == ANYELEMENTOID)
{
@@ -309,7 +309,7 @@ postquel_getnext(execution_state *es)
/*
* If it's the function's last command, and it's a SELECT, fetch one
- * row at a time so we can return the results. Otherwise just run it
+ * row at a time so we can return the results. Otherwise just run it
* to completion.
*/
if (LAST_POSTQUEL_COMMAND(es) && es->qd->operation == CMD_SELECT)
@@ -655,14 +655,14 @@ sql_exec_error_callback(void *arg)
/*
* Try to determine where in the function we failed. If there is a
* query with non-null QueryDesc, finger it. (We check this rather
- * than looking for F_EXEC_RUN state, so that errors during ExecutorStart
- * or ExecutorEnd are blamed on the appropriate query; see postquel_start
- * and postquel_end.)
+ * than looking for F_EXEC_RUN state, so that errors during
+ * ExecutorStart or ExecutorEnd are blamed on the appropriate query;
+ * see postquel_start and postquel_end.)
*/
if (fcache)
{
execution_state *es;
- int query_num;
+ int query_num;
es = fcache->func_state;
query_num = 1;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index f0537cf5d90..5cf448e772a 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -45,7 +45,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.112 2003/08/01 00:15:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.113 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -173,12 +173,12 @@ typedef struct AggStatePerGroupData
* later input value. Only the first non-NULL input will be
* auto-substituted.
*/
-} AggStatePerGroupData;
+} AggStatePerGroupData;
/*
* To implement hashed aggregation, we need a hashtable that stores a
* representative tuple and an array of AggStatePerGroup structs for each
- * distinct set of GROUP BY column values. We compute the hash key from
+ * distinct set of GROUP BY column values. We compute the hash key from
* the GROUP BY columns.
*/
typedef struct AggHashEntryData *AggHashEntry;
@@ -188,27 +188,27 @@ typedef struct AggHashEntryData
TupleHashEntryData shared; /* common header for hash table entries */
/* per-aggregate transition status array - must be last! */
AggStatePerGroupData pergroup[1]; /* VARIABLE LENGTH ARRAY */
-} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
+} AggHashEntryData; /* VARIABLE LENGTH STRUCT */
static void initialize_aggregates(AggState *aggstate,
- AggStatePerAgg peragg,
- AggStatePerGroup pergroup);
+ AggStatePerAgg peragg,
+ AggStatePerGroup pergroup);
static void advance_transition_function(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate,
- Datum newVal, bool isNull);
+ AggStatePerAgg peraggstate,
+ AggStatePerGroup pergroupstate,
+ Datum newVal, bool isNull);
static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup);
static void process_sorted_aggregate(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate);
+ AggStatePerAgg peraggstate,
+ AggStatePerGroup pergroupstate);
static void finalize_aggregate(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate,
- Datum *resultVal, bool *resultIsNull);
+ AggStatePerAgg peraggstate,
+ AggStatePerGroup pergroupstate,
+ Datum *resultVal, bool *resultIsNull);
static void build_hash_table(AggState *aggstate);
static AggHashEntry lookup_hash_entry(AggState *aggstate,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
static void agg_fill_hash_table(AggState *aggstate);
static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
@@ -231,7 +231,7 @@ initialize_aggregates(AggState *aggstate,
{
AggStatePerAgg peraggstate = &peragg[aggno];
AggStatePerGroup pergroupstate = &pergroup[aggno];
- Aggref *aggref = peraggstate->aggref;
+ Aggref *aggref = peraggstate->aggref;
/*
* Start a fresh sort operation for each DISTINCT aggregate.
@@ -265,18 +265,18 @@ initialize_aggregates(AggState *aggstate,
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(peraggstate->initValue,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
MemoryContextSwitchTo(oldContext);
}
pergroupstate->transValueIsNull = peraggstate->initValueIsNull;
/*
- * If the initial value for the transition state doesn't exist in the
- * pg_aggregate table then we will let the first non-NULL value
- * returned from the outer procNode become the initial value. (This is
- * useful for aggregates like max() and min().) The noTransValue flag
- * signals that we still need to do this.
+ * If the initial value for the transition state doesn't exist in
+ * the pg_aggregate table then we will let the first non-NULL
+ * value returned from the outer procNode become the initial
+ * value. (This is useful for aggregates like max() and min().)
+ * The noTransValue flag signals that we still need to do this.
*/
pergroupstate->noTransValue = peraggstate->initValueIsNull;
}
@@ -299,8 +299,8 @@ advance_transition_function(AggState *aggstate,
if (peraggstate->transfn.fn_strict)
{
/*
- * For a strict transfn, nothing happens at a NULL input
- * tuple; we just keep the prior transValue.
+ * For a strict transfn, nothing happens at a NULL input tuple; we
+ * just keep the prior transValue.
*/
if (isNull)
return;
@@ -314,12 +314,13 @@ advance_transition_function(AggState *aggstate,
* here is OK.)
*
* We must copy the datum into aggcontext if it is pass-by-ref.
- * We do not need to pfree the old transValue, since it's NULL.
+ * We do not need to pfree the old transValue, since it's
+ * NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
pergroupstate->transValueIsNull = false;
pergroupstate->noTransValue = false;
MemoryContextSwitchTo(oldContext);
@@ -363,12 +364,12 @@ advance_transition_function(AggState *aggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
- * If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if transfn returned a pointer to its
- * first input, we don't need to do anything.
+ * If pass-by-ref datatype, must copy the new value into aggcontext
+ * and pfree the prior transValue. But if transfn returned a pointer
+ * to its first input, we don't need to do anything.
*/
if (!peraggstate->transtypeByVal &&
- DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
+ DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->transValue))
{
if (!fcinfo.isnull)
{
@@ -388,7 +389,7 @@ advance_transition_function(AggState *aggstate,
}
/*
- * Advance all the aggregates for one input tuple. The input tuple
+ * Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_scantuple, so that it is accessible
* to ExecEvalExpr. pergroup is the array of per-group structs to use
* (this might be in a hashtable entry).
@@ -467,8 +468,8 @@ process_sorted_aggregate(AggState *aggstate,
continue;
/*
- * Clear and select the working context for evaluation of
- * the equality function and transition function.
+ * Clear and select the working context for evaluation of the
+ * equality function and transition function.
*/
MemoryContextReset(workcontext);
oldContext = MemoryContextSwitchTo(workcontext);
@@ -570,9 +571,9 @@ finalize_aggregate(AggState *aggstate,
static void
build_hash_table(AggState *aggstate)
{
- Agg *node = (Agg *) aggstate->ss.ps.plan;
- MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
- Size entrysize;
+ Agg *node = (Agg *) aggstate->ss.ps.plan;
+ MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
+ Size entrysize;
Assert(node->aggstrategy == AGG_HASHED);
Assert(node->numGroups > 0);
@@ -622,9 +623,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *slot)
* the appropriate attribute for each aggregate function use (Aggref
* node) appearing in the targetlist or qual of the node. The number
* of tuples to aggregate over depends on whether grouped or plain
- * aggregation is selected. In grouped aggregation, we produce a result
+ * aggregation is selected. In grouped aggregation, we produce a result
* row for each group; in plain aggregation there's a single result row
- * for the whole query. In either case, the value of each aggregate is
+ * for the whole query. In either case, the value of each aggregate is
* stored in the expression context to be used when ExecProject evaluates
* the result tuple.
*/
@@ -641,9 +642,7 @@ ExecAgg(AggState *node)
return agg_retrieve_hash_table(node);
}
else
- {
return agg_retrieve_direct(node);
- }
}
/*
@@ -736,7 +735,7 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot,
InvalidBuffer,
true);
- aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
+ aggstate->grp_firstTuple = NULL; /* don't keep two pointers */
/* set up for first advance_aggregates call */
tmpcontext->ecxt_scantuple = firstSlot;
@@ -773,7 +772,7 @@ agg_retrieve_direct(AggState *aggstate)
firstSlot->ttc_tupleDescriptor,
node->numCols, node->grpColIdx,
aggstate->eqfunctions,
- tmpcontext->ecxt_per_tuple_memory))
+ tmpcontext->ecxt_per_tuple_memory))
{
/*
* Save the first input tuple of the next group.
@@ -806,15 +805,15 @@ agg_retrieve_direct(AggState *aggstate)
* anything), create a dummy all-nulls input tuple for use by
* ExecProject. 99.44% of the time this is a waste of cycles,
* because ordinarily the projected output tuple's targetlist
- * cannot contain any direct (non-aggregated) references to
- * input columns, so the dummy tuple will not be referenced.
- * However there are special cases where this isn't so --- in
- * particular an UPDATE involving an aggregate will have a
- * targetlist reference to ctid. We need to return a null for
- * ctid in that situation, not coredump.
+ * cannot contain any direct (non-aggregated) references to input
+ * columns, so the dummy tuple will not be referenced. However
+ * there are special cases where this isn't so --- in particular
+ * an UPDATE involving an aggregate will have a targetlist
+ * reference to ctid. We need to return a null for ctid in that
+ * situation, not coredump.
*
- * The values returned for the aggregates will be the initial
- * values of the transition functions.
+ * The values returned for the aggregates will be the initial values
+ * of the transition functions.
*/
if (TupIsNull(firstSlot))
{
@@ -872,7 +871,7 @@ agg_fill_hash_table(AggState *aggstate)
{
PlanState *outerPlan;
ExprContext *tmpcontext;
- AggHashEntry entry;
+ AggHashEntry entry;
TupleTableSlot *outerslot;
/*
@@ -883,8 +882,8 @@ agg_fill_hash_table(AggState *aggstate)
tmpcontext = aggstate->tmpcontext;
/*
- * Process each outer-plan tuple, and then fetch the next one,
- * until we exhaust the outer plan.
+ * Process each outer-plan tuple, and then fetch the next one, until
+ * we exhaust the outer plan.
*/
for (;;)
{
@@ -921,8 +920,8 @@ agg_retrieve_hash_table(AggState *aggstate)
bool *aggnulls;
AggStatePerAgg peragg;
AggStatePerGroup pergroup;
- TupleHashTable hashtable;
- AggHashEntry entry;
+ TupleHashTable hashtable;
+ AggHashEntry entry;
TupleTableSlot *firstSlot;
TupleTableSlot *resultSlot;
int aggno;
@@ -1045,20 +1044,20 @@ ExecInitAgg(Agg *node, EState *estate)
aggstate->hashtable = NULL;
/*
- * Create expression contexts. We need two, one for per-input-tuple
- * processing and one for per-output-tuple processing. We cheat a little
- * by using ExecAssignExprContext() to build both.
+ * Create expression contexts. We need two, one for per-input-tuple
+ * processing and one for per-output-tuple processing. We cheat a
+ * little by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, &aggstate->ss.ps);
aggstate->tmpcontext = aggstate->ss.ps.ps_ExprContext;
ExecAssignExprContext(estate, &aggstate->ss.ps);
/*
- * We also need a long-lived memory context for holding hashtable
- * data structures and transition values. NOTE: the details of what
- * is stored in aggcontext and what is stored in the regular per-query
- * memory context are driven by a simple decision: we want to reset the
- * aggcontext in ExecReScanAgg to recover no-longer-wanted space.
+ * We also need a long-lived memory context for holding hashtable data
+ * structures and transition values. NOTE: the details of what is
+ * stored in aggcontext and what is stored in the regular per-query
+ * memory context are driven by a simple decision: we want to reset
+ * the aggcontext in ExecReScanAgg to recover no-longer-wanted space.
*/
aggstate->aggcontext =
AllocSetContextCreate(CurrentMemoryContext,
@@ -1079,10 +1078,10 @@ ExecInitAgg(Agg *node, EState *estate)
* initialize child expressions
*
* Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs
- * contain other agg calls in their arguments. This would make no sense
- * under SQL semantics anyway (and it's forbidden by the spec). Because
- * that is true, we don't need to worry about evaluating the aggs in any
- * particular order.
+ * contain other agg calls in their arguments. This would make no
+ * sense under SQL semantics anyway (and it's forbidden by the spec).
+ * Because that is true, we don't need to worry about evaluating the
+ * aggs in any particular order.
*/
aggstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->plan.targetlist,
@@ -1116,19 +1115,20 @@ ExecInitAgg(Agg *node, EState *estate)
if (numaggs <= 0)
{
/*
- * This is not an error condition: we might be using the Agg node just
- * to do hash-based grouping. Even in the regular case,
- * constant-expression simplification could optimize away all of the
- * Aggrefs in the targetlist and qual. So keep going, but force local
- * copy of numaggs positive so that palloc()s below don't choke.
+ * This is not an error condition: we might be using the Agg node
+ * just to do hash-based grouping. Even in the regular case,
+ * constant-expression simplification could optimize away all of
+ * the Aggrefs in the targetlist and qual. So keep going, but
+ * force local copy of numaggs positive so that palloc()s below
+ * don't choke.
*/
numaggs = 1;
}
/*
- * If we are grouping, precompute fmgr lookup data for inner loop.
- * We need both equality and hashing functions to do it by hashing,
- * but only equality if not hashing.
+ * If we are grouping, precompute fmgr lookup data for inner loop. We
+ * need both equality and hashing functions to do it by hashing, but
+ * only equality if not hashing.
*/
if (node->numCols > 0)
{
@@ -1146,8 +1146,8 @@ ExecInitAgg(Agg *node, EState *estate)
}
/*
- * Set up aggregate-result storage in the output expr context, and also
- * allocate my private per-agg working storage
+ * Set up aggregate-result storage in the output expr context, and
+ * also allocate my private per-agg working storage
*/
econtext = aggstate->ss.ps.ps_ExprContext;
econtext->ecxt_aggvalues = (Datum *) palloc0(sizeof(Datum) * numaggs);
@@ -1174,8 +1174,8 @@ ExecInitAgg(Agg *node, EState *estate)
* unchanging fields of the per-agg data. We also detect duplicate
* aggregates (for example, "SELECT sum(x) ... HAVING sum(x) > 0").
* When duplicates are detected, we only make an AggStatePerAgg struct
- * for the first one. The clones are simply pointed at the same result
- * entry by giving them duplicate aggno values.
+ * for the first one. The clones are simply pointed at the same
+ * result entry by giving them duplicate aggno values.
*/
aggno = -1;
foreach(alist, aggstate->aggs)
@@ -1425,9 +1425,9 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt)
if (((Agg *) node->ss.ps.plan)->aggstrategy == AGG_HASHED)
{
/*
- * In the hashed case, if we haven't yet built the hash table
- * then we can just return; nothing done yet, so nothing to undo.
- * If subnode's chgParam is not NULL then it will be re-scanned by
+ * In the hashed case, if we haven't yet built the hash table then
+ * we can just return; nothing done yet, so nothing to undo. If
+ * subnode's chgParam is not NULL then it will be re-scanned by
* ExecProcNode, else no reason to re-scan it at all.
*/
if (!node->table_filled)
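
[Editorial sketch, not part of the commit] Several of the reflowed nodeAgg.c comments above describe the strict-transition-function rule: a NULL input is ignored outright, and when no initial transition value exists in pg_aggregate the first non-NULL input is adopted as the initial value (the max()/min() case). A small self-contained sketch of that rule follows; AggGroup and advance_max are illustrative names, not backend code.

    /* Illustrative only: strict transition rule from the comments above,
     * shown for a max()-style aggregate over longs. */
    typedef struct AggGroup
    {
        long    trans_value;
        int     trans_is_null;      /* true until a value is accumulated */
    } AggGroup;

    static void
    advance_max(AggGroup *g, long input, int input_is_null)
    {
        if (input_is_null)
            return;                 /* strict: NULL inputs are ignored */

        if (g->trans_is_null)
        {
            /* first non-NULL input becomes the initial transition value */
            g->trans_value = input;
            g->trans_is_null = 0;
            return;
        }

        if (input > g->trans_value)
            g->trans_value = input;
    }
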
diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c
index e79d37fd857..7fc8caac2a9 100644
--- a/src/backend/executor/nodeAppend.c
+++ b/src/backend/executor/nodeAppend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.52 2003/02/09 00:30:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.53 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -355,7 +355,7 @@ ExecReScanAppend(AppendState *node, ExprContext *exprCtxt)
for (i = node->as_firstplan; i <= node->as_lastplan; i++)
{
- PlanState *subnode = node->appendplans[i];
+ PlanState *subnode = node->appendplans[i];
/*
* ExecReScan doesn't know about my subplans, so I have to do
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 69053708cda..a4eb9065c06 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.77 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.78 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -244,7 +244,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
i = 0;
foreach(ho, hashOperators)
{
- Oid hashfn;
+ Oid hashfn;
hashfn = get_op_hash_function(lfirsto(ho));
if (!OidIsValid(hashfn))
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index bc3ecdfeeda..a45e007aff2 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.53 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.54 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,8 +22,8 @@
#include "utils/memutils.h"
-static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState *node,
- HashJoinState *hjstate);
+static TupleTableSlot *ExecHashJoinOuterGetTuple(PlanState * node,
+ HashJoinState *hjstate);
static TupleTableSlot *ExecHashJoinGetSavedTuple(HashJoinState *hjstate,
BufFile *file,
TupleTableSlot *tupleSlot);
@@ -94,10 +94,10 @@ ExecHashJoin(HashJoinState *node)
/*
* If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched on
- * the previous try.
+ * outer tuple; so we can stop scanning the inner scan if we matched
+ * on the previous try.
*/
- if (node->js.jointype == JOIN_IN &&
+ if (node->js.jointype == JOIN_IN &&
node->hj_MatchedOuter)
node->hj_NeedNewOuter = true;
@@ -244,7 +244,10 @@ ExecHashJoin(HashJoinState *node)
}
}
- /* If we didn't return a tuple, may need to set NeedNewOuter */
+ /*
+ * If we didn't return a tuple, may need to set
+ * NeedNewOuter
+ */
if (node->js.jointype == JOIN_IN)
{
node->hj_NeedNewOuter = true;
@@ -365,7 +368,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
case JOIN_LEFT:
hjstate->hj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(hjstate)));
+ ExecGetResultType(innerPlanState(hjstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@@ -407,10 +410,10 @@ ExecInitHashJoin(HashJoin *node, EState *estate)
hjstate->hj_CurTuple = (HashJoinTuple) NULL;
/*
- * The planner already made a list of the inner hashkeys for us,
- * but we also need a list of the outer hashkeys, as well as a list
- * of the hash operator OIDs. Both lists of exprs must then be prepared
- * for execution.
+ * The planner already made a list of the inner hashkeys for us, but
+ * we also need a list of the outer hashkeys, as well as a list of the
+ * hash operator OIDs. Both lists of exprs must then be prepared for
+ * execution.
*/
hjstate->hj_InnerHashKeys = (List *)
ExecInitExpr((Expr *) hashNode->hashkeys,
@@ -496,7 +499,7 @@ ExecEndHashJoin(HashJoinState *node)
*/
static TupleTableSlot *
-ExecHashJoinOuterGetTuple(PlanState *node, HashJoinState *hjstate)
+ExecHashJoinOuterGetTuple(PlanState * node, HashJoinState *hjstate)
{
HashJoinTable hashtable = hjstate->hj_HashTable;
int curbatch = hashtable->curbatch;
@@ -701,11 +704,11 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
Assert(node->hj_HashTable != NULL);
/*
- * In a multi-batch join, we currently have to do rescans the hard way,
- * primarily because batch temp files may have already been released.
- * But if it's a single-batch join, and there is no parameter change
- * for the inner subnode, then we can just re-use the existing hash
- * table without rebuilding it.
+ * In a multi-batch join, we currently have to do rescans the hard
+ * way, primarily because batch temp files may have already been
+ * released. But if it's a single-batch join, and there is no
+ * parameter change for the inner subnode, then we can just re-use the
+ * existing hash table without rebuilding it.
*/
if (node->hj_HashTable->nbatch == 0 &&
((PlanState *) node)->righttree->chgParam == NULL)
@@ -718,6 +721,7 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_hashdone = false;
ExecHashTableDestroy(node->hj_HashTable);
node->hj_HashTable = NULL;
+
/*
* if chgParam of subnode is not null then plan will be re-scanned
* by first ExecProcNode.
@@ -736,8 +740,8 @@ ExecReScanHashJoin(HashJoinState *node, ExprContext *exprCtxt)
node->hj_MatchedOuter = false;
/*
- * if chgParam of subnode is not null then plan will be re-scanned
- * by first ExecProcNode.
+ * if chgParam of subnode is not null then plan will be re-scanned by
+ * first ExecProcNode.
*/
if (((PlanState *) node)->lefttree->chgParam == NULL)
ExecReScan(((PlanState *) node)->lefttree, exprCtxt);
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 017a378f9da..d01d4cfa7c7 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.80 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.81 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -290,7 +290,8 @@ ExecIndexReScan(IndexScanState *node, ExprContext *exprCtxt)
int j;
estate = node->ss.ps.state;
- econtext = node->iss_RuntimeContext; /* context for runtime keys */
+ econtext = node->iss_RuntimeContext; /* context for runtime
+ * keys */
numIndices = node->iss_NumIndices;
scanDescs = node->iss_ScanDescs;
scanKeys = node->iss_ScanKeys;
@@ -882,7 +883,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate)
reloid)));
indexstate->ss.ss_currentRelation = currentRelation;
- indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */
+ indexstate->ss.ss_currentScanDesc = NULL; /* no heap scan here */
/*
* get the scan type from the relation descriptor.
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index 4b1145e258c..7477bd43042 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.15 2003/07/21 17:05:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.16 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,17 +55,21 @@ ExecLimit(LimitState *node)
switch (node->lstate)
{
case LIMIT_INITIAL:
+
/*
* If backwards scan, just return NULL without changing state.
*/
if (!ScanDirectionIsForward(direction))
return NULL;
+
/*
- * First call for this scan, so compute limit/offset. (We can't do
- * this any earlier, because parameters from upper nodes may not
- * be set until now.) This also sets position = 0.
+ * First call for this scan, so compute limit/offset. (We
+ * can't do this any earlier, because parameters from upper
+ * nodes may not be set until now.) This also sets position =
+ * 0.
*/
recompute_limits(node);
+
/*
* Check for empty window; if so, treat like empty subplan.
*/
@@ -74,6 +78,7 @@ ExecLimit(LimitState *node)
node->lstate = LIMIT_EMPTY;
return NULL;
}
+
/*
* Fetch rows from subplan until we reach position > offset.
*/
@@ -83,8 +88,8 @@ ExecLimit(LimitState *node)
if (TupIsNull(slot))
{
/*
- * The subplan returns too few tuples for us to produce
- * any output at all.
+ * The subplan returns too few tuples for us to
+ * produce any output at all.
*/
node->lstate = LIMIT_EMPTY;
return NULL;
@@ -93,6 +98,7 @@ ExecLimit(LimitState *node)
if (++node->position > node->offset)
break;
}
+
/*
* Okay, we have the first tuple of the window.
*/
@@ -100,9 +106,10 @@ ExecLimit(LimitState *node)
break;
case LIMIT_EMPTY:
+
/*
* The subplan is known to return no tuples (or not more than
- * OFFSET tuples, in general). So we return no tuples.
+ * OFFSET tuples, in general). So we return no tuples.
*/
return NULL;
@@ -113,7 +120,8 @@ ExecLimit(LimitState *node)
* Forwards scan, so check for stepping off end of window.
* If we are at the end of the window, return NULL without
* advancing the subplan or the position variable; but
- * change the state machine state to record having done so.
+ * change the state machine state to record having done
+ * so.
*/
if (!node->noCount &&
node->position >= node->offset + node->count)
@@ -121,6 +129,7 @@ ExecLimit(LimitState *node)
node->lstate = LIMIT_WINDOWEND;
return NULL;
}
+
/*
* Get next tuple from subplan, if any.
*/
@@ -136,14 +145,16 @@ ExecLimit(LimitState *node)
else
{
/*
- * Backwards scan, so check for stepping off start of window.
- * As above, change only state-machine status if so.
+ * Backwards scan, so check for stepping off start of
+ * window. As above, change only state-machine status if
+ * so.
*/
if (node->position <= node->offset + 1)
{
node->lstate = LIMIT_WINDOWSTART;
return NULL;
}
+
/*
* Get previous tuple from subplan; there should be one!
*/
@@ -158,9 +169,11 @@ ExecLimit(LimitState *node)
case LIMIT_SUBPLANEOF:
if (ScanDirectionIsForward(direction))
return NULL;
+
/*
* Backing up from subplan EOF, so re-fetch previous tuple;
- * there should be one! Note previous tuple must be in window.
+ * there should be one! Note previous tuple must be in
+ * window.
*/
slot = ExecProcNode(outerPlan);
if (TupIsNull(slot))
@@ -173,9 +186,10 @@ ExecLimit(LimitState *node)
case LIMIT_WINDOWEND:
if (ScanDirectionIsForward(direction))
return NULL;
+
/*
- * Backing up from window end: simply re-return the last
- * tuple fetched from the subplan.
+ * Backing up from window end: simply re-return the last tuple
+ * fetched from the subplan.
*/
slot = node->subSlot;
node->lstate = LIMIT_INWINDOW;
@@ -185,6 +199,7 @@ ExecLimit(LimitState *node)
case LIMIT_WINDOWSTART:
if (!ScanDirectionIsForward(direction))
return NULL;
+
/*
* Advancing after having backed off window start: simply
* re-return the last tuple fetched from the subplan.
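
[Editorial sketch, not part of the commit] The nodeLimit.c hunks above reindent the LIMIT state machine, whose window test skips the first OFFSET rows and stops once position passes offset + count. A one-function sketch of that test, under the assumption of a 1-based position counter and a no-count flag for LIMIT ALL (in_limit_window is an illustrative name):

    /* Illustrative only: the windowing test implied by the hunks above. */
    static int
    in_limit_window(long position, long offset, long count, int no_count)
    {
        if (position <= offset)
            return 0;               /* still skipping OFFSET rows */
        if (!no_count && position > offset + count)
            return 0;               /* stepped past the end of the window */
        return 1;
    }
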
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 39968c65e0c..afd08e80945 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.42 2003/03/27 16:51:27 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.43 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,15 +79,15 @@ ExecMaterial(MaterialState *node)
{
/*
* When reversing direction at tuplestore EOF, the first
- * getheaptuple call will fetch the last-added tuple; but
- * we want to return the one before that, if possible.
- * So do an extra fetch.
+ * getheaptuple call will fetch the last-added tuple; but we
+ * want to return the one before that, if possible. So do an
+ * extra fetch.
*/
heapTuple = tuplestore_getheaptuple(tuplestorestate,
forward,
&should_free);
if (heapTuple == NULL)
- return NULL; /* the tuplestore must be empty */
+ return NULL; /* the tuplestore must be empty */
if (should_free)
heap_freetuple(heapTuple);
}
@@ -129,10 +129,11 @@ ExecMaterial(MaterialState *node)
}
heapTuple = outerslot->val;
should_free = false;
+
/*
* Append returned tuple to tuplestore, too. NOTE: because the
- * tuplestore is certainly in EOF state, its read position will move
- * forward over the added tuple. This is what we want.
+ * tuplestore is certainly in EOF state, its read position will
+ * move forward over the added tuple. This is what we want.
*/
tuplestore_puttuple(tuplestorestate, (void *) heapTuple);
}
@@ -293,8 +294,8 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
* If subnode is to be rescanned then we forget previous stored
* results; we have to re-read the subplan and re-store.
*
- * Otherwise we can just rewind and rescan the stored output.
- * The state of the subnode does not change.
+ * Otherwise we can just rewind and rescan the stored output. The state
+ * of the subnode does not change.
*/
if (((PlanState *) node)->lefttree->chgParam != NULL)
{
@@ -303,7 +304,5 @@ ExecMaterialReScan(MaterialState *node, ExprContext *exprCtxt)
node->eof_underlying = false;
}
else
- {
tuplestore_rescan((Tuplestorestate *) node->tuplestorestate);
- }
}
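
The nodeMaterial hunks describe appending each tuple pulled from the subplan to a tuplestore so that a later rescan can simply rewind instead of re-running the child. A minimal sketch of that idea, assuming a plain integer array in place of the Tuplestorestate (all names hypothetical, not part of the patch):

#include <stdio.h>

#define MAXROWS 100

static int  cache[MAXROWS];
static int  ncached = 0;        /* rows stored so far */
static int  readpos = 0;        /* current read position */
static int  child_eof = 0;

static int
child_next(int *out)            /* stand-in for the subplan */
{
    static int v = 0;

    if (v >= 5)
        return 0;
    *out = v * v;
    v++;
    return 1;
}

static int
material_next(int *out)
{
    if (readpos < ncached)      /* serve from the store */
    {
        *out = cache[readpos++];
        return 1;
    }
    if (child_eof || !child_next(out))
    {
        child_eof = 1;          /* remember underlying EOF */
        return 0;
    }
    cache[ncached++] = *out;    /* append to the store, too */
    readpos = ncached;
    return 1;
}

static void
material_rescan(void)
{
    readpos = 0;                /* just rewind; child state is unchanged */
}

int
main(void)
{
    int         v;

    while (material_next(&v))
        printf("%d ", v);
    material_rescan();          /* second pass needs no child re-read */
    while (material_next(&v))
        printf("%d ", v);
    printf("\n");
    return 0;
}
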
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 57249d7d970..081ce24cb19 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.58 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.59 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@ static bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
*/
static void
MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals,
- PlanState *parent)
+ PlanState * parent)
{
List *ltexprs,
*gtexprs,
@@ -358,9 +358,9 @@ ExecMergeJoin(MergeJoinState *node)
List *otherqual;
bool qualResult;
bool compareResult;
- PlanState *innerPlan;
+ PlanState *innerPlan;
TupleTableSlot *innerTupleSlot;
- PlanState *outerPlan;
+ PlanState *outerPlan;
TupleTableSlot *outerTupleSlot;
ExprContext *econtext;
bool doFillOuter;
@@ -644,7 +644,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedInner = true; /* do it only once */
+ node->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -720,7 +720,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedOuter = true; /* do it only once */
+ node->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1004,7 +1004,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedOuter = true; /* do it only once */
+ node->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1181,7 +1181,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedInner = true; /* do it only once */
+ node->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1266,7 +1266,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedInner = true; /* do it only once */
+ node->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1333,7 +1333,7 @@ ExecMergeJoin(MergeJoinState *node)
* tuple, and return it if it passes the non-join
* quals.
*/
- node->mj_MatchedOuter = true; /* do it only once */
+ node->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1462,12 +1462,12 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
case JOIN_LEFT:
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
break;
case JOIN_RIGHT:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
/*
* Can't handle right or full join with non-nil extra
@@ -1481,10 +1481,10 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate)
case JOIN_FULL:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(outerPlanState(mergestate)));
+ ExecGetResultType(outerPlanState(mergestate)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(mergestate)));
+ ExecGetResultType(innerPlanState(mergestate)));
/*
* Can't handle right or full join with non-nil extra
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index 2b69da95e82..66dbd8c063d 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.33 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.34 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,8 +103,8 @@ ExecNestLoop(NestLoopState *node)
/*
* If we're doing an IN join, we want to return at most one row per
- * outer tuple; so we can stop scanning the inner scan if we matched on
- * the previous try.
+ * outer tuple; so we can stop scanning the inner scan if we matched
+ * on the previous try.
*/
if (node->js.jointype == JOIN_IN &&
node->nl_MatchedOuter)
@@ -330,7 +330,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetResultType(innerPlanState(nlstate)));
+ ExecGetResultType(innerPlanState(nlstate)));
break;
default:
elog(ERROR, "unrecognized join type: %d",
@@ -404,7 +404,7 @@ ExecEndNestLoop(NestLoopState *node)
void
ExecReScanNestLoop(NestLoopState *node, ExprContext *exprCtxt)
{
- PlanState *outerPlan = outerPlanState(node);
+ PlanState *outerPlan = outerPlanState(node);
/*
* If outerPlan->chgParam is not null then plan will be automatically
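
The ExecNestLoop comment reflowed above notes that an IN join returns at most one row per outer tuple, so the inner scan can stop as soon as a match is found. An illustrative sketch of that early exit over two integer arrays (hypothetical code, not part of the patch):

#include <stdio.h>

int
main(void)
{
    int         outer[] = {1, 2, 3, 4};
    int         inner[] = {2, 2, 4, 4, 4};
    int         nouter = sizeof(outer) / sizeof(outer[0]);
    int         ninner = sizeof(inner) / sizeof(inner[0]);
    int         i,
                j;

    for (i = 0; i < nouter; i++)
    {
        for (j = 0; j < ninner; j++)
        {
            if (outer[i] == inner[j])
            {
                printf("%d\n", outer[i]);   /* at most one row per outer */
                break;                      /* matched: stop the inner scan */
            }
        }
    }
    return 0;
}
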
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index 9ea75eb3ce7..194ed192169 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.24 2002/12/15 16:17:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.25 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ ExecResult(ResultState *node)
{
TupleTableSlot *outerTupleSlot;
TupleTableSlot *resultSlot;
- PlanState *outerPlan;
+ PlanState *outerPlan;
ExprContext *econtext;
ExprDoneCond isDone;
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index 47d2e4eb497..4721fc5bf6a 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.43 2003/02/03 15:07:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.44 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
#include "executor/nodeSeqscan.h"
#include "parser/parsetree.h"
-static void InitScanRelation(SeqScanState *node, EState *estate);
-static TupleTableSlot *SeqNext(SeqScanState *node);
+static void InitScanRelation(SeqScanState * node, EState *estate);
+static TupleTableSlot *SeqNext(SeqScanState * node);
/* ----------------------------------------------------------------
* Scan Support
@@ -43,7 +43,7 @@ static TupleTableSlot *SeqNext(SeqScanState *node);
* ----------------------------------------------------------------
*/
static TupleTableSlot *
-SeqNext(SeqScanState *node)
+SeqNext(SeqScanState * node)
{
HeapTuple tuple;
HeapScanDesc scandesc;
@@ -123,7 +123,7 @@ SeqNext(SeqScanState *node)
*/
TupleTableSlot *
-ExecSeqScan(SeqScanState *node)
+ExecSeqScan(SeqScanState * node)
{
/*
* use SeqNext as access method
@@ -139,7 +139,7 @@ ExecSeqScan(SeqScanState *node)
* ----------------------------------------------------------------
*/
static void
-InitScanRelation(SeqScanState *node, EState *estate)
+InitScanRelation(SeqScanState * node, EState *estate)
{
Index relid;
List *rangeTable;
@@ -252,7 +252,7 @@ ExecCountSlotsSeqScan(SeqScan *node)
* ----------------------------------------------------------------
*/
void
-ExecEndSeqScan(SeqScanState *node)
+ExecEndSeqScan(SeqScanState * node)
{
Relation relation;
HeapScanDesc scanDesc;
@@ -302,7 +302,7 @@ ExecEndSeqScan(SeqScanState *node)
* ----------------------------------------------------------------
*/
void
-ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
+ExecSeqReScan(SeqScanState * node, ExprContext *exprCtxt)
{
EState *estate;
Index scanrelid;
@@ -332,7 +332,7 @@ ExecSeqReScan(SeqScanState *node, ExprContext *exprCtxt)
* ----------------------------------------------------------------
*/
void
-ExecSeqMarkPos(SeqScanState *node)
+ExecSeqMarkPos(SeqScanState * node)
{
HeapScanDesc scan;
@@ -347,7 +347,7 @@ ExecSeqMarkPos(SeqScanState *node)
* ----------------------------------------------------------------
*/
void
-ExecSeqRestrPos(SeqScanState *node)
+ExecSeqRestrPos(SeqScanState * node)
{
HeapScanDesc scan;
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index a42e8f18e26..0fe888c803c 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.51 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.52 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,13 +29,13 @@
#include "utils/lsyscache.h"
-static Datum ExecHashSubPlan(SubPlanState *node,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecScanSubPlan(SubPlanState *node,
- ExprContext *econtext,
- bool *isNull);
-static void buildSubPlanHash(SubPlanState *node);
+static Datum ExecHashSubPlan(SubPlanState * node,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecScanSubPlan(SubPlanState * node,
+ ExprContext *econtext,
+ bool *isNull);
+static void buildSubPlanHash(SubPlanState * node);
static bool findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot);
static bool tupleAllNulls(HeapTuple tuple);
@@ -45,11 +45,11 @@ static bool tupleAllNulls(HeapTuple tuple);
* ----------------------------------------------------------------
*/
Datum
-ExecSubPlan(SubPlanState *node,
+ExecSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
if (subplan->setParam != NIL)
elog(ERROR, "cannot set parent params from subquery");
@@ -64,11 +64,11 @@ ExecSubPlan(SubPlanState *node,
* ExecHashSubPlan: store subselect result in an in-memory hash table
*/
static Datum
-ExecHashSubPlan(SubPlanState *node,
+ExecHashSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
ExprContext *innerecontext = node->innerecontext;
TupleTableSlot *slot;
@@ -79,8 +79,8 @@ ExecHashSubPlan(SubPlanState *node,
elog(ERROR, "hashed subplan with direct correlation not supported");
/*
- * If first time through or we need to rescan the subplan, build
- * the hash table.
+ * If first time through or we need to rescan the subplan, build the
+ * hash table.
*/
if (node->hashtable == NULL || planstate->chgParam != NULL)
buildSubPlanHash(node);
@@ -94,19 +94,19 @@ ExecHashSubPlan(SubPlanState *node,
return BoolGetDatum(false);
/*
- * Evaluate lefthand expressions and form a projection tuple.
- * First we have to set the econtext to use (hack alert!).
+ * Evaluate lefthand expressions and form a projection tuple. First we
+ * have to set the econtext to use (hack alert!).
*/
node->projLeft->pi_exprContext = econtext;
slot = ExecProject(node->projLeft, NULL);
tup = slot->val;
/*
- * Note: because we are typically called in a per-tuple context,
- * we have to explicitly clear the projected tuple before returning.
- * Otherwise, we'll have a double-free situation: the per-tuple context
- * will probably be reset before we're called again, and then the tuple
- * slot will think it still needs to free the tuple.
+ * Note: because we are typically called in a per-tuple context, we
+ * have to explicitly clear the projected tuple before returning.
+ * Otherwise, we'll have a double-free situation: the per-tuple
+ * context will probably be reset before we're called again, and then
+ * the tuple slot will think it still needs to free the tuple.
*/
/*
@@ -116,20 +116,20 @@ ExecHashSubPlan(SubPlanState *node,
ResetExprContext(innerecontext);
/*
- * If the LHS is all non-null, probe for an exact match in the
- * main hash table. If we find one, the result is TRUE.
- * Otherwise, scan the partly-null table to see if there are any
- * rows that aren't provably unequal to the LHS; if so, the result
- * is UNKNOWN. (We skip that part if we don't care about UNKNOWN.)
- * Otherwise, the result is FALSE.
+ * If the LHS is all non-null, probe for an exact match in the main
+ * hash table. If we find one, the result is TRUE. Otherwise, scan
+ * the partly-null table to see if there are any rows that aren't
+ * provably unequal to the LHS; if so, the result is UNKNOWN. (We
+ * skip that part if we don't care about UNKNOWN.) Otherwise, the
+ * result is FALSE.
*
- * Note: the reason we can avoid a full scan of the main hash table
- * is that the combining operators are assumed never to yield NULL
- * when both inputs are non-null. If they were to do so, we might
- * need to produce UNKNOWN instead of FALSE because of an UNKNOWN
- * result in comparing the LHS to some main-table entry --- which
- * is a comparison we will not even make, unless there's a chance
- * match of hash keys.
+ * Note: the reason we can avoid a full scan of the main hash table is
+ * that the combining operators are assumed never to yield NULL when
+ * both inputs are non-null. If they were to do so, we might need to
+ * produce UNKNOWN instead of FALSE because of an UNKNOWN result in
+ * comparing the LHS to some main-table entry --- which is a
+ * comparison we will not even make, unless there's a chance match of
+ * hash keys.
*/
if (HeapTupleNoNulls(tup))
{
@@ -151,14 +151,14 @@ ExecHashSubPlan(SubPlanState *node,
}
/*
- * When the LHS is partly or wholly NULL, we can never return TRUE.
- * If we don't care about UNKNOWN, just return FALSE. Otherwise,
- * if the LHS is wholly NULL, immediately return UNKNOWN. (Since the
- * combining operators are strict, the result could only be FALSE if the
- * sub-select were empty, but we already handled that case.) Otherwise,
- * we must scan both the main and partly-null tables to see if there are
- * any rows that aren't provably unequal to the LHS; if so, the result is
- * UNKNOWN. Otherwise, the result is FALSE.
+ * When the LHS is partly or wholly NULL, we can never return TRUE. If
+ * we don't care about UNKNOWN, just return FALSE. Otherwise, if the
+ * LHS is wholly NULL, immediately return UNKNOWN. (Since the
+ * combining operators are strict, the result could only be FALSE if
+ * the sub-select were empty, but we already handled that case.)
+ * Otherwise, we must scan both the main and partly-null tables to see
+ * if there are any rows that aren't provably unequal to the LHS; if
+ * so, the result is UNKNOWN. Otherwise, the result is FALSE.
*/
if (node->hashnulls == NULL)
{
@@ -194,11 +194,11 @@ ExecHashSubPlan(SubPlanState *node,
* ExecScanSubPlan: default case where we have to rescan subplan each time
*/
static Datum
-ExecScanSubPlan(SubPlanState *node,
+ExecScanSubPlan(SubPlanState * node,
ExprContext *econtext,
bool *isNull)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
SubLinkType subLinkType = subplan->subLinkType;
bool useOr = subplan->useOr;
@@ -218,14 +218,14 @@ ExecScanSubPlan(SubPlanState *node,
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
/*
- * Set Params of this plan from parent plan correlation values.
- * (Any calculation we have to do is done in the parent econtext,
- * since the Param values don't need to have per-query lifetime.)
+ * Set Params of this plan from parent plan correlation values. (Any
+ * calculation we have to do is done in the parent econtext, since the
+ * Param values don't need to have per-query lifetime.)
*/
pvar = node->args;
foreach(lst, subplan->parParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(pvar != NIL);
@@ -241,23 +241,24 @@ ExecScanSubPlan(SubPlanState *node,
ExecReScan(planstate, NULL);
/*
- * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the result
- * is boolean as are the results of the combining operators. We combine
- * results within a tuple (if there are multiple columns) using OR
- * semantics if "useOr" is true, AND semantics if not. We then combine
- * results across tuples (if the subplan produces more than one) using OR
- * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK.
- * (MULTIEXPR_SUBLINK doesn't allow multiple tuples from the subplan.)
- * NULL results from the combining operators are handled according to
- * the usual SQL semantics for OR and AND. The result for no input
- * tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
- * MULTIEXPR_SUBLINK.
+ * For all sublink types except EXPR_SUBLINK and ARRAY_SUBLINK, the
+ * result is boolean as are the results of the combining operators. We
+ * combine results within a tuple (if there are multiple columns)
+ * using OR semantics if "useOr" is true, AND semantics if not. We
+ * then combine results across tuples (if the subplan produces more
+ * than one) using OR semantics for ANY_SUBLINK or AND semantics for
+ * ALL_SUBLINK. (MULTIEXPR_SUBLINK doesn't allow multiple tuples from
+ * the subplan.) NULL results from the combining operators are handled
+ * according to the usual SQL semantics for OR and AND. The result
+ * for no input tuples is FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK,
+ * NULL for MULTIEXPR_SUBLINK.
*
* For EXPR_SUBLINK we require the subplan to produce no more than one
- * tuple, else an error is raised. For ARRAY_SUBLINK we allow the subplan
- * to produce more than one tuple. In either case, if zero tuples are
- * produced, we return NULL. Assuming we get a tuple, we just use its
- * first column (there can be only one non-junk column in this case).
+ * tuple, else an error is raised. For ARRAY_SUBLINK we allow the
+ * subplan to produce more than one tuple. In either case, if zero
+ * tuples are produced, we return NULL. Assuming we get a tuple, we
+ * just use its first column (there can be only one non-junk column in
+ * this case).
*/
result = BoolGetDatum(subLinkType == ALL_SUBLINK);
*isNull = false;
@@ -311,8 +312,8 @@ ExecScanSubPlan(SubPlanState *node,
if (subLinkType == ARRAY_SUBLINK)
{
- Datum dvalue;
- bool disnull;
+ Datum dvalue;
+ bool disnull;
found = true;
/* stash away current value */
@@ -346,7 +347,8 @@ ExecScanSubPlan(SubPlanState *node,
bool expnull;
/*
- * Load up the Param representing this column of the sub-select.
+ * Load up the Param representing this column of the
+ * sub-select.
*/
prmdata = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(prmdata->execPlan == NULL);
@@ -432,8 +434,8 @@ ExecScanSubPlan(SubPlanState *node,
{
/*
* deal with empty subplan result. result/isNull were previously
- * initialized correctly for all sublink types except EXPR, ARRAY, and
- * MULTIEXPR; for those, return NULL.
+ * initialized correctly for all sublink types except EXPR, ARRAY,
+ * and MULTIEXPR; for those, return NULL.
*/
if (subLinkType == EXPR_SUBLINK ||
subLinkType == ARRAY_SUBLINK ||
@@ -459,9 +461,9 @@ ExecScanSubPlan(SubPlanState *node,
* buildSubPlanHash: load hash table by scanning subplan output.
*/
static void
-buildSubPlanHash(SubPlanState *node)
+buildSubPlanHash(SubPlanState * node)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
int ncols = length(node->exprs);
ExprContext *innerecontext = node->innerecontext;
@@ -474,19 +476,19 @@ buildSubPlanHash(SubPlanState *node)
Assert(!subplan->useOr);
/*
- * If we already had any hash tables, destroy 'em; then create
- * empty hash table(s).
+ * If we already had any hash tables, destroy 'em; then create empty
+ * hash table(s).
*
- * If we need to distinguish accurately between FALSE and UNKNOWN
- * (i.e., NULL) results of the IN operation, then we have to store
- * subplan output rows that are partly or wholly NULL. We store such
- * rows in a separate hash table that we expect will be much smaller
- * than the main table. (We can use hashing to eliminate partly-null
- * rows that are not distinct. We keep them separate to minimize the
- * cost of the inevitable full-table searches; see findPartialMatch.)
+ * If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
+ * NULL) results of the IN operation, then we have to store subplan
+ * output rows that are partly or wholly NULL. We store such rows in
+ * a separate hash table that we expect will be much smaller than the
+ * main table. (We can use hashing to eliminate partly-null rows that
+ * are not distinct. We keep them separate to minimize the cost of
+ * the inevitable full-table searches; see findPartialMatch.)
*
- * If it's not necessary to distinguish FALSE and UNKNOWN, then we
- * don't need to store subplan output rows that contain NULL.
+ * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
+ * need to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@@ -529,7 +531,8 @@ buildSubPlanHash(SubPlanState *node)
/*
* We are probably in a short-lived expression-evaluation context.
- * Switch to the child plan's per-query context for calling ExecProcNode.
+ * Switch to the child plan's per-query context for calling
+ * ExecProcNode.
*/
oldcontext = MemoryContextSwitchTo(node->sub_estate->es_query_cxt);
@@ -539,8 +542,9 @@ buildSubPlanHash(SubPlanState *node)
ExecReScan(planstate, NULL);
/*
- * Scan the subplan and load the hash table(s). Note that when there are
- * duplicate rows coming out of the sub-select, only one copy is stored.
+ * Scan the subplan and load the hash table(s). Note that when there
+ * are duplicate rows coming out of the sub-select, only one copy is
+ * stored.
*/
for (slot = ExecProcNode(planstate);
!TupIsNull(slot);
@@ -572,9 +576,9 @@ buildSubPlanHash(SubPlanState *node)
/*
* If result contains any nulls, store separately or not at all.
- * (Since we know the projection tuple has no junk columns, we
- * can just look at the overall hasnull info bit, instead of
- * groveling through the columns.)
+ * (Since we know the projection tuple has no junk columns, we can
+ * just look at the overall hasnull info bit, instead of groveling
+ * through the columns.)
*/
if (HeapTupleNoNulls(tup))
{
@@ -621,7 +625,7 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
HeapTuple tuple = slot->val;
TupleDesc tupdesc = slot->ttc_tupleDescriptor;
TupleHashIterator hashiter;
- TupleHashEntry entry;
+ TupleHashEntry entry;
ResetTupleHashIterator(&hashiter);
while ((entry = ScanTupleHashTable(hashtable, &hashiter)) != NULL)
@@ -643,8 +647,8 @@ findPartialMatch(TupleHashTable hashtable, TupleTableSlot *slot)
static bool
tupleAllNulls(HeapTuple tuple)
{
- int ncols = tuple->t_data->t_natts;
- int i;
+ int ncols = tuple->t_data->t_natts;
+ int i;
for (i = 1; i <= ncols; i++)
{
@@ -659,15 +663,15 @@ tupleAllNulls(HeapTuple tuple)
* ----------------------------------------------------------------
*/
void
-ExecInitSubPlan(SubPlanState *node, EState *estate)
+ExecInitSubPlan(SubPlanState * node, EState *estate)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
EState *sp_estate;
MemoryContext oldcontext;
/*
- * Do access checking on the rangetable entries in the subquery.
- * Here, we assume the subquery is a SELECT.
+ * Do access checking on the rangetable entries in the subquery. Here,
+ * we assume the subquery is a SELECT.
*/
ExecCheckRTPerms(subplan->rtable, CMD_SELECT);
@@ -690,9 +694,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* create an EState for the subplan
*
* The subquery needs its own EState because it has its own rangetable.
- * It shares our Param ID space, however. XXX if rangetable access were
- * done differently, the subquery could share our EState, which would
- * eliminate some thrashing about in this module...
+ * It shares our Param ID space, however. XXX if rangetable access
+ * were done differently, the subquery could share our EState, which
+ * would eliminate some thrashing about in this module...
*/
sp_estate = CreateExecutorState();
node->sub_estate = sp_estate;
@@ -721,9 +725,9 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* to set params for parent plan then mark parameters as needing
* evaluation.
*
- * Note that in the case of un-correlated subqueries we don't care
- * about setting parent->chgParam here: indices take care about
- * it, for others - it doesn't matter...
+ * Note that in the case of un-correlated subqueries we don't care about
+ * setting parent->chgParam here: indices take care about it, for
+ * others - it doesn't matter...
*/
if (subplan->setParam != NIL)
{
@@ -731,7 +735,7 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
prm->execPlan = node;
@@ -744,8 +748,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
*/
if (subplan->useHashTable)
{
- int ncols,
- i;
+ int ncols,
+ i;
TupleDesc tupDesc;
TupleTable tupTable;
TupleTableSlot *slot;
@@ -768,15 +772,16 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
ncols = length(node->exprs);
node->keyColIdx = (AttrNumber *) palloc(ncols * sizeof(AttrNumber));
for (i = 0; i < ncols; i++)
- node->keyColIdx[i] = i+1;
+ node->keyColIdx[i] = i + 1;
+
/*
* We use ExecProject to evaluate the lefthand and righthand
* expression lists and form tuples. (You might think that we
* could use the sub-select's output tuples directly, but that is
* not the case if we had to insert any run-time coercions of the
* sub-select's output datatypes; anyway this avoids storing any
- * resjunk columns that might be in the sub-select's output.)
- * Run through the combining expressions to build tlists for the
+ * resjunk columns that might be in the sub-select's output.) Run
+ * through the combining expressions to build tlists for the
* lefthand and righthand sides. We need both the ExprState list
* (for ExecProject) and the underlying parse Exprs (for
* ExecTypeFromTL).
@@ -791,7 +796,7 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
i = 1;
foreach(lexpr, node->exprs)
{
- FuncExprState *fstate = (FuncExprState *) lfirst(lexpr);
+ FuncExprState *fstate = (FuncExprState *) lfirst(lexpr);
OpExpr *opexpr = (OpExpr *) fstate->xprstate.expr;
ExprState *exstate;
Expr *expr;
@@ -834,34 +839,34 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
rightptlist = lappend(rightptlist, tle);
/* Lookup the combining function */
- fmgr_info(opexpr->opfuncid, &node->eqfunctions[i-1]);
- node->eqfunctions[i-1].fn_expr = (Node *) opexpr;
+ fmgr_info(opexpr->opfuncid, &node->eqfunctions[i - 1]);
+ node->eqfunctions[i - 1].fn_expr = (Node *) opexpr;
/* Lookup the associated hash function */
hashfn = get_op_hash_function(opexpr->opno);
if (!OidIsValid(hashfn))
elog(ERROR, "could not find hash function for hash operator %u",
opexpr->opno);
- fmgr_info(hashfn, &node->hashfunctions[i-1]);
+ fmgr_info(hashfn, &node->hashfunctions[i - 1]);
i++;
}
/*
- * Create a tupletable to hold these tuples. (Note: we never bother
- * to free the tupletable explicitly; that's okay because it will
- * never store raw disk tuples that might have associated buffer
- * pins. The only resource involved is memory, which will be
- * cleaned up by freeing the query context.)
+ * Create a tupletable to hold these tuples. (Note: we never
+ * bother to free the tupletable explicitly; that's okay because
+ * it will never store raw disk tuples that might have associated
+ * buffer pins. The only resource involved is memory, which will
+ * be cleaned up by freeing the query context.)
*/
tupTable = ExecCreateTupleTable(2);
/*
* Construct tupdescs, slots and projection nodes for left and
- * right sides. The lefthand expressions will be evaluated in
- * the parent plan node's exprcontext, which we don't have access
- * to here. Fortunately we can just pass NULL for now and fill it
- * in later (hack alert!). The righthand expressions will be
+ * right sides. The lefthand expressions will be evaluated in the
+ * parent plan node's exprcontext, which we don't have access to
+ * here. Fortunately we can just pass NULL for now and fill it in
+ * later (hack alert!). The righthand expressions will be
* evaluated in our own innerecontext.
*/
tupDesc = ExecTypeFromTL(leftptlist, false);
@@ -894,11 +899,11 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* ----------------------------------------------------------------
*/
void
-ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
+ExecSetParamPlan(SubPlanState * node, ExprContext *econtext)
{
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
PlanState *planstate = node->planstate;
- SubLinkType subLinkType = subplan->subLinkType;
+ SubLinkType subLinkType = subplan->subLinkType;
MemoryContext oldcontext;
TupleTableSlot *slot;
List *lst;
@@ -928,7 +933,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == EXISTS_SUBLINK)
{
/* There can be only one param... */
- int paramid = lfirsti(subplan->setParam);
+ int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -940,8 +945,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == ARRAY_SUBLINK)
{
- Datum dvalue;
- bool disnull;
+ Datum dvalue;
+ bool disnull;
found = true;
/* stash away current value */
@@ -963,8 +968,8 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
found = true;
/*
- * We need to copy the subplan's tuple into our own context,
- * in case any of the params are pass-by-ref type --- the pointers
+ * We need to copy the subplan's tuple into our own context, in
+ * case any of the params are pass-by-ref type --- the pointers
* stored in the param structs will point at this copied tuple!
* node->curTuple keeps track of the copied tuple for eventual
* freeing.
@@ -981,7 +986,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
*/
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -995,7 +1000,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
if (subLinkType == EXISTS_SUBLINK)
{
/* There can be only one param... */
- int paramid = lfirsti(subplan->setParam);
+ int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -1006,7 +1011,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
{
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
prm->execPlan = NULL;
@@ -1018,7 +1023,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
else if (subLinkType == ARRAY_SUBLINK)
{
/* There can be only one param... */
- int paramid = lfirsti(subplan->setParam);
+ int paramid = lfirsti(subplan->setParam);
ParamExecData *prm = &(econtext->ecxt_param_exec_vals[paramid]);
Assert(astate != NULL);
@@ -1036,7 +1041,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext)
* ----------------------------------------------------------------
*/
void
-ExecEndSubPlan(SubPlanState *node)
+ExecEndSubPlan(SubPlanState * node)
{
if (node->needShutdown)
{
@@ -1056,10 +1061,10 @@ ExecEndSubPlan(SubPlanState *node)
* Mark an initplan as needing recalculation
*/
void
-ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
+ExecReScanSetParamPlan(SubPlanState * node, PlanState * parent)
{
PlanState *planstate = node->planstate;
- SubPlan *subplan = (SubPlan *) node->xprstate.expr;
+ SubPlan *subplan = (SubPlan *) node->xprstate.expr;
EState *estate = parent->state;
List *lst;
@@ -1080,7 +1085,7 @@ ExecReScanSetParamPlan(SubPlanState *node, PlanState *parent)
*/
foreach(lst, subplan->setParam)
{
- int paramid = lfirsti(lst);
+ int paramid = lfirsti(lst);
ParamExecData *prm = &(estate->es_param_exec_vals[paramid]);
prm->execPlan = node;
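
The nodeSubplan comments above describe loading the subselect's output into a hash table once and then answering each "x IN (...)" probe by lookup rather than by rescanning the subplan. The sketch below shows only that basic idea with a toy open-addressed-by-bucket table over integers; the real code additionally keeps a separate table of partly-NULL rows so it can return UNKNOWN, which is omitted here. All names are hypothetical and this is not the patched code.

#include <stdio.h>
#include <string.h>

#define NBUCKETS 16
#define BUCKETSZ 8

static int  buckets[NBUCKETS][BUCKETSZ];
static int  nentries[NBUCKETS];

static void
hash_insert(int v)
{
    int         b = ((unsigned int) v) % NBUCKETS;

    buckets[b][nentries[b]++] = v;
}

static int
hash_probe(int v)
{
    int         b = ((unsigned int) v) % NBUCKETS;
    int         i;

    for (i = 0; i < nentries[b]; i++)
        if (buckets[b][i] == v)
            return 1;
    return 0;
}

int
main(void)
{
    int         subselect_rows[] = {10, 20, 30};    /* pretend subplan output */
    int         probes[] = {20, 25};
    int         i;

    memset(nentries, 0, sizeof(nentries));
    for (i = 0; i < 3; i++)         /* build the hash table once */
        hash_insert(subselect_rows[i]);

    for (i = 0; i < 2; i++)         /* one cheap lookup per outer tuple */
        printf("%d IN (...) = %s\n", probes[i],
               hash_probe(probes[i]) ? "true" : "false");
    return 0;
}
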
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index ba4804fcebb..deec07ae6bb 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.18 2003/02/09 00:30:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.19 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -160,10 +160,11 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate)
Assert(rte->rtekind == RTE_SUBQUERY);
/*
- * The subquery needs its own EState because it has its own rangetable.
- * It shares our Param ID space, however. XXX if rangetable access were
- * done differently, the subquery could share our EState, which would
- * eliminate some thrashing about in this module...
+ * The subquery needs its own EState because it has its own
+ * rangetable. It shares our Param ID space, however. XXX if
+ * rangetable access were done differently, the subquery could share
+ * our EState, which would eliminate some thrashing about in this
+ * module...
*/
sp_estate = CreateExecutorState();
subquerystate->sss_SubEState = sp_estate;
@@ -259,9 +260,9 @@ ExecSubqueryReScan(SubqueryScanState *node, ExprContext *exprCtxt)
/*
* ExecReScan doesn't know about my subplan, so I have to do
- * changed-parameter signaling myself. This is just as well,
- * because the subplan has its own memory context in which its
- * chgParam state lives.
+ * changed-parameter signaling myself. This is just as well, because
+ * the subplan has its own memory context in which its chgParam state
+ * lives.
*/
if (node->ss.ps.chgParam != NULL)
UpdateChangedParamSet(node->subplan, node->ss.ps.chgParam);
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index fb012a5cc40..18e172209d1 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.38 2003/02/02 19:08:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.39 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,9 +57,9 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
- * We return the first tuple from each group of duplicates (or the
- * last tuple of each group, when moving backwards). At either end
- * of the subplan, clear priorTuple so that we correctly return the
+ * We return the first tuple from each group of duplicates (or the last
+ * tuple of each group, when moving backwards). At either end of the
+ * subplan, clear priorTuple so that we correctly return the
* first/last tuple when reversing direction.
*/
for (;;)
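
The ExecUnique comment reflowed above relies on the input arriving in sorted order, so a duplicate can be detected simply by comparing against the previously returned value. A small sketch of that trick over a sorted integer array (hypothetical code, not part of the patch):

#include <stdio.h>

int
main(void)
{
    int         sorted[] = {1, 1, 2, 2, 2, 5, 7, 7};
    int         n = sizeof(sorted) / sizeof(sorted[0]);
    int         prior = 0;
    int         have_prior = 0;
    int         i;

    for (i = 0; i < n; i++)
    {
        if (have_prior && sorted[i] == prior)
            continue;               /* duplicate within the current group */
        printf("%d\n", sorted[i]);  /* first tuple of a new group */
        prior = sorted[i];
        have_prior = 1;
    }
    return 0;
}
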
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 4d554aef1ed..b25f0a79ffa 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.99 2003/07/21 17:05:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/spi.c,v 1.100 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,8 +95,8 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context,
- * but we may not be inside a portal (consider deferred-trigger
+ * XXX it would be better to use PortalContext as the parent context, but
+ * we may not be inside a portal (consider deferred-trigger
* execution).
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
@@ -799,7 +799,7 @@ SPI_cursor_open(const char *name, void *plan, Datum *Values, const char *Nulls)
*/
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
- "SELECT", /* cursor's query is always a SELECT */
+ "SELECT", /* cursor's query is always a SELECT */
makeList1(queryTree),
makeList1(planTree),
PortalGetHeapMemory(portal));
@@ -1007,9 +1007,9 @@ _SPI_execute(const char *src, int tcount, _SPI_plan *plan)
/*
* Do parse analysis and rule rewrite for each raw parsetree.
*
- * We save the querytrees from each raw parsetree as a separate
- * sublist. This allows _SPI_execute_plan() to know where the
- * boundaries between original queries fall.
+ * We save the querytrees from each raw parsetree as a separate sublist.
+ * This allows _SPI_execute_plan() to know where the boundaries
+ * between original queries fall.
*/
query_list_list = NIL;
plan_list = NIL;
@@ -1136,8 +1136,8 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
foreach(query_list_list_item, query_list_list)
{
- List *query_list = lfirst(query_list_list_item);
- List *query_list_item;
+ List *query_list = lfirst(query_list_list_item);
+ List *query_list_item;
/* Reset state for each original parsetree */
/* (at most one of its querytrees will be marked canSetTag) */
@@ -1148,7 +1148,7 @@ _SPI_execute_plan(_SPI_plan *plan, Datum *Values, const char *Nulls,
foreach(query_list_item, query_list)
{
- Query *queryTree = (Query *) lfirst(query_list_item);
+ Query *queryTree = (Query *) lfirst(query_list_item);
Plan *planTree;
QueryDesc *qdesc;
DestReceiver *dest;
@@ -1190,10 +1190,10 @@ _SPI_pquery(QueryDesc *queryDesc, bool runit, int tcount)
{
case CMD_SELECT:
res = SPI_OK_SELECT;
- if (queryDesc->parsetree->into != NULL) /* select into table */
+ if (queryDesc->parsetree->into != NULL) /* select into table */
{
res = SPI_OK_SELINTO;
- queryDesc->dest = None_Receiver; /* don't output results */
+ queryDesc->dest = None_Receiver; /* don't output results */
}
break;
case CMD_INSERT:
@@ -1351,7 +1351,7 @@ _SPI_checktuples(void)
SPITupleTable *tuptable = _SPI_current->tuptable;
bool failed = false;
- if (tuptable == NULL) /* spi_dest_startup was not called */
+ if (tuptable == NULL) /* spi_dest_startup was not called */
failed = true;
else if (processed != (tuptable->alloced - tuptable->free))
failed = true;
@@ -1372,7 +1372,8 @@ _SPI_copy_plan(_SPI_plan *plan, int location)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
- else /* (this case not currently used) */
+ else
+/* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*
diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c
index 3d8479faee2..0989eb3e270 100644
--- a/src/backend/executor/tstoreReceiver.c
+++ b/src/backend/executor/tstoreReceiver.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/tstoreReceiver.c,v 1.6 2003/05/08 18:16:36 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/tstoreReceiver.c,v 1.7 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,10 +21,10 @@
typedef struct
{
- DestReceiver pub;
- Tuplestorestate *tstore;
- MemoryContext cxt;
-} TStoreState;
+ DestReceiver pub;
+ Tuplestorestate *tstore;
+ MemoryContext cxt;
+} TStoreState;
/*
diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c
index 03251beed90..9424070e506 100644
--- a/src/backend/lib/stringinfo.c
+++ b/src/backend/lib/stringinfo.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: stringinfo.c,v 1.34 2003/04/24 21:16:43 tgl Exp $
+ * $Id: stringinfo.c,v 1.35 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ initStringInfo(StringInfo str)
* strcat.
*/
void
-appendStringInfo(StringInfo str, const char *fmt, ...)
+appendStringInfo(StringInfo str, const char *fmt,...)
{
for (;;)
{
@@ -86,7 +86,7 @@ appendStringInfo(StringInfo str, const char *fmt, ...)
* appendStringInfoVA
*
* Attempt to format text data under the control of fmt (an sprintf-style
- * format string) and append it to whatever is already in str. If successful
+ * format string) and append it to whatever is already in str. If successful
* return true; if not (because there's not enough space), return false
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
@@ -113,9 +113,9 @@ appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
return false;
/*
- * Assert check here is to catch buggy vsnprintf that overruns
- * the specified buffer length. Solaris 7 in 64-bit mode is
- * an example of a platform with such a bug.
+ * Assert check here is to catch buggy vsnprintf that overruns the
+ * specified buffer length. Solaris 7 in 64-bit mode is an example of
+ * a platform with such a bug.
*/
#ifdef USE_ASSERT_CHECKING
str->data[str->maxlen - 1] = '\0';
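
The stringinfo.c comment above describes the usual pattern for appendStringInfoVA: try to format into the space that is left, and if it does not fit, enlarge the buffer and retry. The following is a self-contained sketch of that enlarge-and-retry loop using a hypothetical StrBuf type and vsnprintf's C99 return value; it is not the backend's StringInfo implementation.

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

typedef struct
{
    char       *data;
    int         len;            /* current string length */
    int         maxlen;         /* allocated size */
} StrBuf;

static void
strbuf_init(StrBuf *s)
{
    s->maxlen = 16;
    s->len = 0;
    s->data = malloc(s->maxlen);
    s->data[0] = '\0';
}

static void
strbuf_appendf(StrBuf *s, const char *fmt,...)
{
    for (;;)
    {
        va_list     args;
        int         avail = s->maxlen - s->len;
        int         needed;

        va_start(args, fmt);
        needed = vsnprintf(s->data + s->len, avail, fmt, args);
        va_end(args);

        if (needed < 0)
            return;             /* formatting error; give up */
        if (needed < avail)
        {
            s->len += needed;   /* it fit; done */
            return;
        }
        s->maxlen *= 2;         /* not enough room: enlarge and retry */
        s->data = realloc(s->data, s->maxlen);
    }
}

int
main(void)
{
    StrBuf      s;

    strbuf_init(&s);
    strbuf_appendf(&s, "hello %s, ", "world");
    strbuf_appendf(&s, "%d + %d = %d", 20, 22, 42);
    printf("%s\n", s.data);
    free(s.data);
    return 0;
}
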
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 9c80651d5fb..9b9ffecbca8 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.108 2003/07/28 06:27:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.109 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -221,8 +221,8 @@ pg_krb5_init(void)
if (retval)
{
ereport(LOG,
- (errmsg("kerberos sname_to_principal(\"%s\") returned error %d",
- PG_KRB_SRVNAM, retval)));
+ (errmsg("kerberos sname_to_principal(\"%s\") returned error %d",
+ PG_KRB_SRVNAM, retval)));
com_err("postgres", retval,
"while getting server principal for service \"%s\"",
PG_KRB_SRVNAM);
@@ -432,7 +432,7 @@ ClientAuthentication(Port *port)
* out the less clueful good guys.
*/
{
- char hostinfo[NI_MAXHOST];
+ char hostinfo[NI_MAXHOST];
getnameinfo_all(&port->raddr.addr, port->raddr.salen,
hostinfo, sizeof(hostinfo),
@@ -441,15 +441,15 @@ ClientAuthentication(Port *port)
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
- hostinfo, port->user_name, port->database_name,
- port->ssl ? gettext("SSL on") : gettext("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
+ hostinfo, port->user_name, port->database_name,
+ port->ssl ? gettext("SSL on") : gettext("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
- hostinfo, port->user_name, port->database_name)));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
+ hostinfo, port->user_name, port->database_name)));
#endif
break;
}
@@ -460,7 +460,7 @@ ClientAuthentication(Port *port)
|| port->laddr.addr.ss_family != AF_INET)
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("kerberos 4 only supports IPv4 connections")));
+ errmsg("kerberos 4 only supports IPv4 connections")));
sendAuthRequest(port, AUTH_REQ_KRB4);
status = pg_krb4_recvauth(port);
break;
@@ -492,7 +492,7 @@ ClientAuthentication(Port *port)
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- errmsg("failed to enable credential receipt: %m")));
+ errmsg("failed to enable credential receipt: %m")));
}
#endif
if (port->raddr.addr.ss_family == AF_UNIX)
@@ -755,22 +755,22 @@ recv_password_packet(Port *port)
if (PG_PROTOCOL_MAJOR(port->proto) >= 3)
{
/* Expect 'p' message type */
- int mtype;
+ int mtype;
mtype = pq_getbyte();
if (mtype != 'p')
{
/*
* If the client just disconnects without offering a password,
- * don't make a log entry. This is legal per protocol spec and
- * in fact commonly done by psql, so complaining just clutters
- * the log.
+ * don't make a log entry. This is legal per protocol spec
+ * and in fact commonly done by psql, so complaining just
+ * clutters the log.
*/
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("expected password response, got msg type %d",
- mtype)));
+ errmsg("expected password response, got msg type %d",
+ mtype)));
return NULL; /* EOF or bad message type */
}
}
@@ -782,7 +782,7 @@ recv_password_packet(Port *port)
}
initStringInfo(&buf);
- if (pq_getmessage(&buf, 1000)) /* receive password */
+ if (pq_getmessage(&buf, 1000)) /* receive password */
{
/* EOF - pq_getmessage already logged a suitable message */
pfree(buf.data);
@@ -804,7 +804,7 @@ recv_password_packet(Port *port)
(errmsg("received password packet")));
/*
- * Return the received string. Note we do not attempt to do any
+ * Return the received string. Note we do not attempt to do any
* character-set conversion on it; since we don't yet know the
* client's encoding, there wouldn't be much point.
*/
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 00bd01b6aef..5a33712243c 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.66 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.67 2003/08/04 00:43:18 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -372,7 +372,7 @@ lo_import(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_import()"),
+ errmsg("must be superuser to use server-side lo_import()"),
errhint("Anyone can use the client-side lo_import() provided by libpq.")));
#endif
@@ -439,7 +439,7 @@ lo_export(PG_FUNCTION_ARGS)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to use server-side lo_export()"),
+ errmsg("must be superuser to use server-side lo_export()"),
errhint("Anyone can use the client-side lo_export() provided by libpq.")));
#endif
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index 0bd4f787414..7089b5077a9 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.37 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/be-secure.c,v 1.38 2003/08/04 00:43:18 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -187,7 +187,6 @@ OvOzKGtwcTqO/1wV5gKkzu1ZVswVUQd5Gg8lJicwqRWyyNRczDDoG9jVDxmogKTH\n\
AaqLulO7R8Ifa1SwF2DteSGVtgWEN8gDpN3RBmmPTDngyF2DHb5qmpnznwtFKdTL\n\
KWbuHn491xNO25CQWMtem80uKw+pTnisBRF/454n1Jnhub144YRBoN8CAQI=\n\
-----END DH PARAMETERS-----\n";
-
#endif
/* ------------------------------------------------------------ */
@@ -258,7 +257,7 @@ secure_read(Port *port, void *ptr, size_t len)
#ifdef USE_SSL
if (port->ssl)
{
- rloop:
+rloop:
n = SSL_read(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
@@ -328,7 +327,7 @@ secure_write(Port *port, void *ptr, size_t len)
if (port->ssl->state != SSL_ST_OK)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("SSL failed to send renegotiation request")));
+ errmsg("SSL failed to send renegotiation request")));
port->ssl->state |= SSL_ST_ACCEPT;
SSL_do_handshake(port->ssl);
if (port->ssl->state != SSL_ST_OK)
@@ -338,7 +337,7 @@ secure_write(Port *port, void *ptr, size_t len)
port->count = 0;
}
- wloop:
+wloop:
n = SSL_write(port->ssl, ptr, len);
switch (SSL_get_error(port->ssl, n))
{
@@ -436,7 +435,7 @@ load_dh_file(int keylength)
(codes & DH_CHECK_P_NOT_SAFE_PRIME))
{
elog(LOG,
- "DH error (%s): neither suitable generator or safe prime",
+ "DH error (%s): neither suitable generator or safe prime",
fnbuf);
return NULL;
}
@@ -620,21 +619,21 @@ initialize_SSL(void)
if (!SSL_CTX_use_certificate_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("could not load server certificate file \"%s\": %s",
- fnbuf, SSLerrmessage())));
+ errmsg("could not load server certificate file \"%s\": %s",
+ fnbuf, SSLerrmessage())));
snprintf(fnbuf, sizeof(fnbuf), "%s/server.key", DataDir);
if (stat(fnbuf, &buf) == -1)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not access private key file \"%s\": %m",
- fnbuf)));
+ errmsg("could not access private key file \"%s\": %m",
+ fnbuf)));
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
buf.st_uid != getuid())
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unsafe permissions on private key file \"%s\"",
- fnbuf),
+ errmsg("unsafe permissions on private key file \"%s\"",
+ fnbuf),
errdetail("File must be owned by the database user and must have no permissions for \"group\" or \"other\".")));
if (!SSL_CTX_use_PrivateKey_file(SSL_context, fnbuf, SSL_FILETYPE_PEM))
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index c1443e56744..9629a47aa9e 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.54 2003/07/22 19:00:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.55 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -119,7 +119,10 @@ md5_crypt_verify(const Port *port, const char *user, char *client_pass)
default:
if (isMD5(shadow_pass))
{
- /* Encrypt user-supplied password to match MD5 in pg_shadow */
+ /*
+ * Encrypt user-supplied password to match MD5 in
+ * pg_shadow
+ */
crypt_client_pass = palloc(MD5_PASSWD_LEN + 1);
if (!EncryptMD5(client_pass,
port->user_name,
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index f9f77cda574..1c42d1a7d43 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.109 2003/08/01 23:24:28 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/hba.c,v 1.110 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -392,7 +392,7 @@ get_group_line(const char *group)
/*
* Lookup a user name in the pg_shadow file
*/
-List **
+List **
get_user_line(const char *user)
{
return (List **) bsearch((void *) user,
@@ -416,7 +416,7 @@ check_group(char *group, char *user)
{
foreach(l, lnext(lnext(*line)))
if (strcmp(lfirst(l), user) == 0)
- return true;
+ return true;
}
return false;
@@ -547,13 +547,14 @@ static void
parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
{
int line_number;
- char *token;
- char *db;
- char *user;
- struct addrinfo *file_ip_addr = NULL, *file_ip_mask = NULL;
- struct addrinfo hints;
- struct sockaddr_storage *mask;
- char *cidr_slash;
+ char *token;
+ char *db;
+ char *user;
+ struct addrinfo *file_ip_addr = NULL,
+ *file_ip_mask = NULL;
+ struct addrinfo hints;
+ struct sockaddr_storage *mask;
+ char *cidr_slash;
int ret;
Assert(line != NIL);
@@ -595,11 +596,11 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
return;
}
else if (strcmp(token, "host") == 0
- || strcmp(token, "hostssl") == 0
- || strcmp(token, "hostnossl") == 0)
+ || strcmp(token, "hostssl") == 0
+ || strcmp(token, "hostnossl") == 0)
{
- if (token[4] == 's') /* "hostssl" */
+ if (token[4] == 's') /* "hostssl" */
{
#ifdef USE_SSL
/* Record does not match if we are not on an SSL connection */
@@ -616,7 +617,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
#endif
}
#ifdef USE_SSL
- else if (token[4] == 'n') /* "hostnossl" */
+ else if (token[4] == 'n') /* "hostnossl" */
{
/* Record does not match if we are on an SSL connection */
if (port->ssl)
@@ -643,7 +644,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
token = lfirst(line);
/* Check if it has a CIDR suffix and if so isolate it */
- cidr_slash = strchr(token,'/');
+ cidr_slash = strchr(token, '/');
if (cidr_slash)
*cidr_slash = '\0';
@@ -698,7 +699,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
if (ret || !file_ip_mask)
goto hba_syntax;
- mask = (struct sockaddr_storage *)file_ip_mask->ai_addr;
+ mask = (struct sockaddr_storage *) file_ip_mask->ai_addr;
if (file_ip_addr->ai_family != mask->ss_family)
goto hba_syntax;
@@ -714,7 +715,7 @@ parse_hba(List *line, hbaPort *port, bool *found_p, bool *error_p)
/* Must meet network restrictions */
if (!rangeSockAddr(&port->raddr.addr,
- (struct sockaddr_storage *)file_ip_addr->ai_addr,
+ (struct sockaddr_storage *) file_ip_addr->ai_addr,
mask))
goto hba_freeaddr;
@@ -743,8 +744,8 @@ hba_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing field in pg_hba.conf file at end of line %d",
- line_number)));
+ errmsg("missing field in pg_hba.conf file at end of line %d",
+ line_number)));
*error_p = true;
@@ -1012,8 +1013,8 @@ ident_syntax:
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing entry in pg_ident.conf file at end of line %d",
- line_number)));
+ errmsg("missing entry in pg_ident.conf file at end of line %d",
+ line_number)));
*error_p = true;
}
@@ -1044,7 +1045,7 @@ check_ident_usermap(const char *usermap_name,
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("cannot use IDENT authentication without usermap field")));
+ errmsg("cannot use IDENT authentication without usermap field")));
found_entry = false;
}
else if (strcmp(usermap_name, "sameuser") == 0)
@@ -1215,11 +1216,13 @@ ident_inet(const SockAddr remote_addr,
char ident_port[NI_MAXSERV];
char ident_query[80];
char ident_response[80 + IDENT_USERNAME_MAX];
- struct addrinfo *ident_serv = NULL, *la = NULL, hints;
+ struct addrinfo *ident_serv = NULL,
+ *la = NULL,
+ hints;
/*
- * Might look a little weird to first convert it to text and
- * then back to sockaddr, but it's protocol independent.
+ * Might look a little weird to first convert it to text and then back
+ * to sockaddr, but it's protocol independent.
*/
getnameinfo_all(&remote_addr.addr, remote_addr.salen,
remote_addr_s, sizeof(remote_addr_s),
@@ -1254,22 +1257,23 @@ ident_inet(const SockAddr remote_addr,
rc = getaddrinfo_all(local_addr_s, NULL, &hints, &la);
if (rc || !la)
return false; /* we don't expect this to happen */
-
+
sock_fd = socket(ident_serv->ai_family, ident_serv->ai_socktype,
ident_serv->ai_protocol);
if (sock_fd < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for IDENT connection: %m")));
+ errmsg("could not create socket for IDENT connection: %m")));
ident_return = false;
goto ident_inet_done;
}
+
/*
* Bind to the address which the client originally contacted,
* otherwise the ident server won't be able to match up the right
- * connection. This is necessary if the PostgreSQL server is
- * running on an IP alias.
+ * connection. This is necessary if the PostgreSQL server is running
+ * on an IP alias.
*/
rc = bind(sock_fd, la->ai_addr, la->ai_addrlen);
if (rc != 0)
@@ -1282,7 +1286,7 @@ ident_inet(const SockAddr remote_addr,
goto ident_inet_done;
}
- rc = connect(sock_fd, ident_serv->ai_addr,
+ rc = connect(sock_fd, ident_serv->ai_addr,
ident_serv->ai_addrlen);
if (rc != 0)
{
@@ -1354,12 +1358,12 @@ ident_unix(int sock, char *ident_user)
{
#if defined(HAVE_GETPEEREID)
/* OpenBSD style: */
- uid_t uid;
- gid_t gid;
+ uid_t uid;
+ gid_t gid;
struct passwd *pass;
errno = 0;
- if (getpeereid(sock,&uid,&gid) != 0)
+ if (getpeereid(sock, &uid, &gid) != 0)
{
/* We didn't get a valid credentials struct. */
ereport(LOG,
@@ -1491,8 +1495,7 @@ ident_unix(int sock, char *ident_user)
return false;
#endif
}
-
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
/*
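As a standalone illustration of the CIDR handling in the parse_hba() hunks above (isolating the optional "/bits" suffix with strchr() before resolving the address), here is a minimal sketch; split_cidr_token() is a hypothetical helper, not part of the PostgreSQL sources:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: isolate an optional "/bits" CIDR suffix in place.
 * Returns 1 if a valid suffix was found, 0 if none, -1 if malformed. */
static int
split_cidr_token(char *token, long *bits)
{
	char	   *cidr_slash = strchr(token, '/');
	char	   *endptr;

	if (cidr_slash == NULL)
		return 0;
	*cidr_slash = '\0';			/* terminate the address part */
	*bits = strtol(cidr_slash + 1, &endptr, 10);
	if (cidr_slash[1] == '\0' || *endptr != '\0')
		return -1;
	return 1;
}

int
main(void)
{
	char		token[] = "192.168.12.0/24";
	long		bits;

	if (split_cidr_token(token, &bits) == 1)
		printf("address=%s prefix=%ld\n", token, bits);
	return 0;
}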
diff --git a/src/backend/libpq/ip.c b/src/backend/libpq/ip.c
index 8fd3941266c..a17c817c65c 100644
--- a/src/backend/libpq/ip.c
+++ b/src/backend/libpq/ip.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/ip.c,v 1.17 2003/08/01 17:53:41 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/ip.c,v 1.18 2003/08/04 00:43:18 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@@ -34,30 +34,30 @@
#endif
#include <arpa/inet.h>
#include <sys/file.h>
-
#endif
#include "libpq/ip.h"
-static int rangeSockAddrAF_INET(const struct sockaddr_in *addr,
- const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask);
+static int rangeSockAddrAF_INET(const struct sockaddr_in * addr,
+ const struct sockaddr_in * netaddr,
+ const struct sockaddr_in * netmask);
+
#ifdef HAVE_IPV6
-static int rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask);
+static int rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,
+ const struct sockaddr_in6 * netaddr,
+ const struct sockaddr_in6 * netmask);
#endif
#ifdef HAVE_UNIX_SOCKETS
-static int getaddrinfo_unix(const char *path,
- const struct addrinfo *hintsp,
- struct addrinfo **result);
-
-static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
- char *node, int nodelen,
- char *service, int servicelen,
- int flags);
+static int getaddrinfo_unix(const char *path,
+ const struct addrinfo * hintsp,
+ struct addrinfo ** result);
+
+static int getnameinfo_unix(const struct sockaddr_un * sa, int salen,
+ char *node, int nodelen,
+ char *service, int servicelen,
+ int flags);
#endif
@@ -66,7 +66,7 @@ static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
*/
int
getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp, struct addrinfo **result)
+ const struct addrinfo * hintp, struct addrinfo ** result)
{
#ifdef HAVE_UNIX_SOCKETS
if (hintp != NULL && hintp->ai_family == AF_UNIX)
@@ -89,7 +89,7 @@ getaddrinfo_all(const char *hostname, const char *servname,
* not safe to look at ai_family in the addrinfo itself.
*/
void
-freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
+freeaddrinfo_all(int hint_ai_family, struct addrinfo * ai)
{
#ifdef HAVE_UNIX_SOCKETS
if (hint_ai_family == AF_UNIX)
@@ -123,12 +123,12 @@ freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
* guaranteed to be filled with something even on failure return.
*/
int
-getnameinfo_all(const struct sockaddr_storage *addr, int salen,
+getnameinfo_all(const struct sockaddr_storage * addr, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
{
- int rc;
+ int rc;
#ifdef HAVE_UNIX_SOCKETS
if (addr && addr->ss_family == AF_UNIX)
@@ -166,8 +166,8 @@ getnameinfo_all(const struct sockaddr_storage *addr, int salen,
* -------
*/
static int
-getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
- struct addrinfo **result)
+getaddrinfo_unix(const char *path, const struct addrinfo * hintsp,
+ struct addrinfo ** result)
{
struct addrinfo hints;
struct addrinfo *aip;
@@ -178,9 +178,7 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
MemSet(&hints, 0, sizeof(hints));
if (strlen(path) >= sizeof(unp->sun_path))
- {
return EAI_FAIL;
- }
if (hintsp == NULL)
{
@@ -234,139 +232,123 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
* Convert an address to a hostname.
*/
static int
-getnameinfo_unix(const struct sockaddr_un *sa, int salen,
+getnameinfo_unix(const struct sockaddr_un * sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
{
- int ret = -1;
+ int ret = -1;
/* Invalid arguments. */
if (sa == NULL || sa->sun_family != AF_UNIX ||
(node == NULL && service == NULL))
- {
return EAI_FAIL;
- }
/* We don't support those. */
if ((node && !(flags & NI_NUMERICHOST))
|| (service && !(flags & NI_NUMERICSERV)))
- {
return EAI_FAIL;
- }
if (node)
{
ret = snprintf(node, nodelen, "%s", "localhost");
if (ret == -1 || ret > nodelen)
- {
return EAI_MEMORY;
- }
}
if (service)
{
ret = snprintf(service, servicelen, "%s", sa->sun_path);
if (ret == -1 || ret > servicelen)
- {
return EAI_MEMORY;
- }
}
return 0;
}
-
#endif /* HAVE_UNIX_SOCKETS */
int
-rangeSockAddr(const struct sockaddr_storage *addr,
- const struct sockaddr_storage *netaddr,
- const struct sockaddr_storage *netmask)
+rangeSockAddr(const struct sockaddr_storage * addr,
+ const struct sockaddr_storage * netaddr,
+ const struct sockaddr_storage * netmask)
{
if (addr->ss_family == AF_INET)
- return rangeSockAddrAF_INET((struct sockaddr_in *)addr,
- (struct sockaddr_in *)netaddr,
- (struct sockaddr_in *)netmask);
+ return rangeSockAddrAF_INET((struct sockaddr_in *) addr,
+ (struct sockaddr_in *) netaddr,
+ (struct sockaddr_in *) netmask);
#ifdef HAVE_IPV6
else if (addr->ss_family == AF_INET6)
- return rangeSockAddrAF_INET6((struct sockaddr_in6 *)addr,
- (struct sockaddr_in6 *)netaddr,
- (struct sockaddr_in6 *)netmask);
+ return rangeSockAddrAF_INET6((struct sockaddr_in6 *) addr,
+ (struct sockaddr_in6 *) netaddr,
+ (struct sockaddr_in6 *) netmask);
#endif
else
return 0;
}
/*
- * SockAddr_cidr_mask - make a network mask of the appropriate family
- * and required number of significant bits
+ * SockAddr_cidr_mask - make a network mask of the appropriate family
+ * and required number of significant bits
*
* Note: Returns a static pointer for the mask, so it's not thread safe,
- * and a second call will overwrite the data.
+ * and a second call will overwrite the data.
*/
int
-SockAddr_cidr_mask(struct sockaddr_storage **mask, char *numbits, int family)
+SockAddr_cidr_mask(struct sockaddr_storage ** mask, char *numbits, int family)
{
- long bits;
- char *endptr;
-static struct sockaddr_storage sock;
- struct sockaddr_in mask4;
+ long bits;
+ char *endptr;
+ static struct sockaddr_storage sock;
+ struct sockaddr_in mask4;
+
#ifdef HAVE_IPV6
- struct sockaddr_in6 mask6;
+ struct sockaddr_in6 mask6;
#endif
bits = strtol(numbits, &endptr, 10);
if (*numbits == '\0' || *endptr != '\0')
- {
return -1;
- }
if ((bits < 0) || (family == AF_INET && bits > 32)
#ifdef HAVE_IPV6
|| (family == AF_INET6 && bits > 128)
#endif
)
- {
return -1;
- }
*mask = &sock;
switch (family)
{
case AF_INET:
- mask4.sin_addr.s_addr =
+ mask4.sin_addr.s_addr =
htonl((0xffffffffUL << (32 - bits))
- & 0xffffffffUL);
- memcpy(&sock, &mask4, sizeof(mask4));
+ & 0xffffffffUL);
+ memcpy(&sock, &mask4, sizeof(mask4));
break;
#ifdef HAVE_IPV6
case AF_INET6:
- {
- int i;
-
- for (i = 0; i < 16; i++)
{
- if (bits <= 0)
- {
- mask6.sin6_addr.s6_addr[i] = 0;
- }
- else if (bits >= 8)
- {
- mask6.sin6_addr.s6_addr[i] = 0xff;
- }
- else
+ int i;
+
+ for (i = 0; i < 16; i++)
{
- mask6.sin6_addr.s6_addr[i] =
- (0xff << (8 - bits)) & 0xff;
+ if (bits <= 0)
+ mask6.sin6_addr.s6_addr[i] = 0;
+ else if (bits >= 8)
+ mask6.sin6_addr.s6_addr[i] = 0xff;
+ else
+ {
+ mask6.sin6_addr.s6_addr[i] =
+ (0xff << (8 - bits)) & 0xff;
+ }
+ bits -= 8;
}
- bits -= 8;
+ memcpy(&sock, &mask6, sizeof(mask6));
+ break;
}
- memcpy(&sock, &mask6, sizeof(mask6));
- break;
- }
#endif
default:
return -1;
@@ -377,8 +359,8 @@ static struct sockaddr_storage sock;
}
static int
-rangeSockAddrAF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask)
+rangeSockAddrAF_INET(const struct sockaddr_in * addr, const struct sockaddr_in * netaddr,
+ const struct sockaddr_in * netmask)
{
if (((addr->sin_addr.s_addr ^ netaddr->sin_addr.s_addr) &
netmask->sin_addr.s_addr) == 0)
@@ -390,9 +372,9 @@ rangeSockAddrAF_INET(const struct sockaddr_in *addr, const struct sockaddr_in *n
#ifdef HAVE_IPV6
static int
-rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask)
+rangeSockAddrAF_INET6(const struct sockaddr_in6 * addr,
+ const struct sockaddr_in6 * netaddr,
+ const struct sockaddr_in6 * netmask)
{
int i;
@@ -405,6 +387,5 @@ rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
return 1;
}
-#endif
-
+#endif
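The SockAddr_cidr_mask() and rangeSockAddrAF_INET() hunks above boil down to two pieces of arithmetic: build a netmask from a prefix length, and test membership with ((addr ^ netaddr) & netmask) == 0. A minimal standalone sketch of that arithmetic, using host-byte-order values and illustrative helper names:

#include <stdio.h>
#include <stdint.h>

static uint32_t
mask_from_bits(int bits)			/* expects 0 <= bits <= 32 */
{
	if (bits <= 0)
		return 0;					/* avoid an undefined 32-bit shift */
	return (uint32_t) ((0xffffffffUL << (32 - bits)) & 0xffffffffUL);
}

static int
in_range(uint32_t addr, uint32_t netaddr, uint32_t netmask)
{
	return ((addr ^ netaddr) & netmask) == 0;
}

int
main(void)
{
	uint32_t	addr = 0x0A010203; /* 10.1.2.3 */
	uint32_t	net = 0x0A010000;  /* 10.1.0.0 */

	printf("%d\n", in_range(addr, net, mask_from_bits(16)));	/* 1 */
	printf("%d\n", in_range(addr, net, mask_from_bits(24)));	/* 0 */
	return 0;
}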
diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c
index dbf639fc74f..05adff56a60 100644
--- a/src/backend/libpq/md5.c
+++ b/src/backend/libpq/md5.c
@@ -14,7 +14,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.19 2002/10/03 17:09:41 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/md5.c,v 1.20 2003/08/04 00:43:18 momjian Exp $
*/
@@ -35,8 +35,8 @@
#include "postgres_fe.h"
#ifndef WIN32
#include "libpq/crypt.h"
-#endif /* WIN32 */
-#endif /* FRONTEND */
+#endif /* WIN32 */
+#endif /* FRONTEND */
#ifdef MD5_ODBC
#include "md5.h"
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 2f6d0245bf5..5ae7e1ae2c9 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -30,7 +30,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.161 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.162 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -151,6 +151,7 @@ pq_close(void)
{
/* Cleanly shut down SSL layer */
secure_close(MyProcPort);
+
/*
* Formerly we did an explicit close() here, but it seems better
* to leave the socket open until the process dies. This allows
@@ -208,10 +209,11 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
int maxconn;
int one = 1;
int ret;
- char portNumberStr[64];
- char *service;
- struct addrinfo *addrs = NULL, *addr;
- struct addrinfo hint;
+ char portNumberStr[64];
+ char *service;
+ struct addrinfo *addrs = NULL,
+ *addr;
+ struct addrinfo hint;
int listen_index = 0;
int added = 0;
@@ -245,8 +247,8 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
hostName, service, gai_strerror(ret))));
else
ereport(LOG,
- (errmsg("could not translate service \"%s\" to address: %s",
- service, gai_strerror(ret))));
+ (errmsg("could not translate service \"%s\" to address: %s",
+ service, gai_strerror(ret))));
freeaddrinfo_all(hint.ai_family, addrs);
return STATUS_ERROR;
}
@@ -255,9 +257,9 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
{
if (!IS_AF_UNIX(family) && IS_AF_UNIX(addr->ai_family))
{
- /* Only set up a unix domain socket when
- * they really asked for it. The service/port
- * is different in that case.
+ /*
+ * Only set up a unix domain socket when they really asked for
+ * it. The service/port is different in that case.
*/
continue;
}
@@ -285,7 +287,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (!IS_AF_UNIX(addr->ai_family))
{
if ((setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
- (char *) &one, sizeof(one))) == -1)
+ (char *) &one, sizeof(one))) == -1)
{
ereport(LOG,
(errcode_for_socket_access(),
@@ -299,7 +301,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
if (addr->ai_family == AF_INET6)
{
if (setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY,
- (char *)&one, sizeof(one)) == -1)
+ (char *) &one, sizeof(one)) == -1)
{
ereport(LOG,
(errcode_for_socket_access(),
@@ -311,10 +313,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif
/*
- * Note: This might fail on some OS's, like Linux
- * older than 2.4.21-pre3, that don't have the IPV6_V6ONLY
- * socket option, and map ipv4 addresses to ipv6. It will
- * show ::ffff:ipv4 for all ipv4 connections.
+ * Note: This might fail on some OS's, like Linux older than
+ * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and
+ * map ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all
+ * ipv4 connections.
*/
err = bind(fd, addr->ai_addr, addr->ai_addrlen);
if (err < 0)
@@ -323,12 +325,12 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
(errcode_for_socket_access(),
errmsg("failed to bind server socket: %m"),
(IS_AF_UNIX(addr->ai_family)) ?
- errhint("Is another postmaster already running on port %d?"
- " If not, remove socket node \"%s\" and retry.",
- (int) portNumber, sock_path) :
- errhint("Is another postmaster already running on port %d?"
- " If not, wait a few seconds and retry.",
- (int) portNumber)));
+ errhint("Is another postmaster already running on port %d?"
+ " If not, remove socket node \"%s\" and retry.",
+ (int) portNumber, sock_path) :
+ errhint("Is another postmaster already running on port %d?"
+ " If not, wait a few seconds and retry.",
+ (int) portNumber)));
closesocket(fd);
continue;
}
@@ -345,10 +347,10 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
#endif
/*
- * Select appropriate accept-queue length limit. PG_SOMAXCONN
- * is only intended to provide a clamp on the request on
- * platforms where an overly large request provokes a kernel
- * error (are there any?).
+ * Select appropriate accept-queue length limit. PG_SOMAXCONN is
+ * only intended to provide a clamp on the request on platforms
+ * where an overly large request provokes a kernel error (are
+ * there any?).
*/
maxconn = MaxBackends * 2;
if (maxconn > PG_SOMAXCONN)
@@ -465,7 +467,6 @@ Setup_AF_UNIX(void)
}
return STATUS_OK;
}
-
#endif /* HAVE_UNIX_SOCKETS */
@@ -485,8 +486,8 @@ StreamConnection(int server_fd, Port *port)
/* accept connection and fill in the client (remote) address */
port->raddr.salen = sizeof(port->raddr.addr);
if ((port->sock = accept(server_fd,
- (struct sockaddr *) &port->raddr.addr,
- &port->raddr.salen)) < 0)
+ (struct sockaddr *) & port->raddr.addr,
+ &port->raddr.salen)) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
@@ -495,6 +496,7 @@ StreamConnection(int server_fd, Port *port)
}
#ifdef SCO_ACCEPT_BUG
+
/*
* UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
* shouldn't hurt to catch it for all versions of those platforms.
@@ -571,19 +573,19 @@ TouchSocketFile(void)
if (sock_path[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative.
- * If we have neither, there's no way to affect the mod or access
+ * utime() is POSIX standard, utimes() is a common alternative. If
+ * we have neither, there's no way to affect the mod or access
* time of the socket :-(
*
* In either path, we ignore errors; there's no point in complaining.
*/
#ifdef HAVE_UTIME
utime(sock_path, NULL);
-#else /* !HAVE_UTIME */
+#else /* !HAVE_UTIME */
#ifdef HAVE_UTIMES
utimes(sock_path, NULL);
-#endif /* HAVE_UTIMES */
-#endif /* HAVE_UTIME */
+#endif /* HAVE_UTIMES */
+#endif /* HAVE_UTIME */
}
}
@@ -634,9 +636,10 @@ pq_recvbuf(void)
continue; /* Ok if interrupted */
/*
- * Careful: an ereport() that tries to write to the client would
- * cause recursion to here, leading to stack overflow and core
- * dump! This message must go *only* to the postmaster log.
+ * Careful: an ereport() that tries to write to the client
+ * would cause recursion to here, leading to stack overflow
+ * and core dump! This message must go *only* to the
+ * postmaster log.
*/
ereport(COMMERROR,
(errcode_for_socket_access(),
@@ -646,8 +649,8 @@ pq_recvbuf(void)
if (r == 0)
{
/*
- * EOF detected. We used to write a log message here, but it's
- * better to expect the ultimate caller to do that.
+ * EOF detected. We used to write a log message here, but
+ * it's better to expect the ultimate caller to do that.
*/
return EOF;
}
@@ -894,9 +897,10 @@ pq_flush(void)
continue; /* Ok if we were interrupted */
/*
- * Careful: an ereport() that tries to write to the client would
- * cause recursion to here, leading to stack overflow and core
- * dump! This message must go *only* to the postmaster log.
+ * Careful: an ereport() that tries to write to the client
+ * would cause recursion to here, leading to stack overflow
+ * and core dump! This message must go *only* to the
+ * postmaster log.
*
* If a client disconnects while we're in the midst of output, we
* might write quite a bit of data before we get to a safe
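The StreamServerPort() hunks above end by sizing the accept queue: request roughly twice the expected number of backends, but clamp it to a platform limit before calling listen(). A short sketch of that clamp; MAX_BACKENDS_GUESS and SOMAXCONN_CLAMP are illustrative stand-ins, not PostgreSQL symbols:

#include <stdio.h>
#include <sys/socket.h>

#define MAX_BACKENDS_GUESS	100		/* stand-in for MaxBackends */
#define SOMAXCONN_CLAMP		10000	/* stand-in for PG_SOMAXCONN */

int
main(void)
{
	int			fd = socket(AF_INET, SOCK_STREAM, 0);
	int			maxconn = MAX_BACKENDS_GUESS * 2;

	if (maxconn > SOMAXCONN_CLAMP)
		maxconn = SOMAXCONN_CLAMP;	/* clamp overly large requests */
	if (fd < 0 || listen(fd, maxconn) < 0)
		perror("listen");
	return 0;
}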
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 8b04dbe7055..d5b1baded29 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -24,7 +24,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/pqformat.c,v 1.32 2003/07/22 19:00:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/pqformat.c,v 1.33 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,12 +58,12 @@
* pq_getmsgbyte - get a raw byte from a message buffer
* pq_getmsgint - get a binary integer from a message buffer
* pq_getmsgint64 - get a binary 8-byte int from a message buffer
- * pq_getmsgfloat4 - get a float4 from a message buffer
- * pq_getmsgfloat8 - get a float8 from a message buffer
+ * pq_getmsgfloat4 - get a float4 from a message buffer
+ * pq_getmsgfloat8 - get a float8 from a message buffer
* pq_getmsgbytes - get raw data from a message buffer
- * pq_copymsgbytes - copy raw data from a message buffer
+ * pq_copymsgbytes - copy raw data from a message buffer
* pq_getmsgtext - get a counted text string (with conversion)
- * pq_getmsgstring - get a null-terminated text string (with conversion)
+ * pq_getmsgstring - get a null-terminated text string (with conversion)
* pq_getmsgend - verify message fully consumed
*/
@@ -90,10 +90,12 @@ void
pq_beginmessage(StringInfo buf, char msgtype)
{
initStringInfo(buf);
+
/*
* We stash the message type into the buffer's cursor field, expecting
- * that the pq_sendXXX routines won't touch it. We could alternatively
- * make it the first byte of the buffer contents, but this seems easier.
+ * that the pq_sendXXX routines won't touch it. We could
+ * alternatively make it the first byte of the buffer contents, but
+ * this seems easier.
*/
buf->cursor = msgtype;
}
@@ -122,7 +124,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen)
* pq_sendcountedtext - append a counted text string (with character set conversion)
*
* The data sent to the frontend by this routine is a 4-byte count field
- * followed by the string. The count includes itself or not, as per the
+ * followed by the string. The count includes itself or not, as per the
* countincludesself flag (pre-3.0 protocol requires it to include itself).
* The passed text string need not be null-terminated, and the data sent
* to the frontend isn't either.
@@ -173,9 +175,7 @@ pq_sendtext(StringInfo buf, const char *str, int slen)
pfree(p);
}
else
- {
appendBinaryStringInfo(buf, str, slen);
- }
}
/* --------------------------------
@@ -200,9 +200,7 @@ pq_sendstring(StringInfo buf, const char *str)
pfree(p);
}
else
- {
appendBinaryStringInfo(buf, str, slen + 1);
- }
}
/* --------------------------------
@@ -281,9 +279,9 @@ pq_sendfloat4(StringInfo buf, float4 f)
{
union
{
- float4 f;
- uint32 i;
- } swap;
+ float4 f;
+ uint32 i;
+ } swap;
swap.f = f;
swap.i = htonl(swap.i);
@@ -308,9 +306,9 @@ pq_sendfloat8(StringInfo buf, float8 f)
#ifdef INT64_IS_BUSTED
union
{
- float8 f;
- uint32 h[2];
- } swap;
+ float8 f;
+ uint32 h[2];
+ } swap;
swap.f = f;
swap.h[0] = htonl(swap.h[0]);
@@ -332,9 +330,9 @@ pq_sendfloat8(StringInfo buf, float8 f)
#else
union
{
- float8 f;
- int64 i;
- } swap;
+ float8 f;
+ int64 i;
+ } swap;
swap.f = f;
pq_sendint64(buf, swap.i);
@@ -515,7 +513,7 @@ pq_getmsgint64(StringInfo msg)
}
/* --------------------------------
- * pq_getmsgfloat4 - get a float4 from a message buffer
+ * pq_getmsgfloat4 - get a float4 from a message buffer
*
* See notes for pq_sendfloat4.
* --------------------------------
@@ -525,16 +523,16 @@ pq_getmsgfloat4(StringInfo msg)
{
union
{
- float4 f;
- uint32 i;
- } swap;
+ float4 f;
+ uint32 i;
+ } swap;
swap.i = pq_getmsgint(msg, 4);
return swap.f;
}
/* --------------------------------
- * pq_getmsgfloat8 - get a float8 from a message buffer
+ * pq_getmsgfloat8 - get a float8 from a message buffer
*
* See notes for pq_sendfloat8.
* --------------------------------
@@ -545,9 +543,9 @@ pq_getmsgfloat8(StringInfo msg)
#ifdef INT64_IS_BUSTED
union
{
- float8 f;
- uint32 h[2];
- } swap;
+ float8 f;
+ uint32 h[2];
+ } swap;
/* Have to figure out endianness by testing... */
if (((uint32) 1) == htonl((uint32) 1))
@@ -566,9 +564,9 @@ pq_getmsgfloat8(StringInfo msg)
#else
union
{
- float8 f;
- int64 i;
- } swap;
+ float8 f;
+ int64 i;
+ } swap;
swap.i = pq_getmsgint64(msg);
return swap.f;
@@ -597,7 +595,7 @@ pq_getmsgbytes(StringInfo msg, int datalen)
}
/* --------------------------------
- * pq_copymsgbytes - copy raw data from a message buffer
+ * pq_copymsgbytes - copy raw data from a message buffer
*
* Same as above, except data is copied to caller's buffer.
* --------------------------------
@@ -623,8 +621,8 @@ pq_copymsgbytes(StringInfo msg, char *buf, int datalen)
char *
pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
{
- char *str;
- char *p;
+ char *str;
+ char *p;
if (rawbytes < 0 || rawbytes > (msg->len - msg->cursor))
ereport(ERROR,
@@ -635,9 +633,7 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
p = (char *) pg_client_to_server((unsigned char *) str, rawbytes);
if (p != str) /* actual conversion has been done? */
- {
*nbytes = strlen(p);
- }
else
{
p = (char *) palloc(rawbytes + 1);
@@ -649,7 +645,7 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
}
/* --------------------------------
- * pq_getmsgstring - get a null-terminated text string (with conversion)
+ * pq_getmsgstring - get a null-terminated text string (with conversion)
*
* May return a pointer directly into the message buffer, or a pointer
* to a palloc'd conversion result.
@@ -658,14 +654,15 @@ pq_getmsgtext(StringInfo msg, int rawbytes, int *nbytes)
const char *
pq_getmsgstring(StringInfo msg)
{
- char *str;
- int slen;
+ char *str;
+ int slen;
str = &msg->data[msg->cursor];
+
/*
* It's safe to use strlen() here because a StringInfo is guaranteed
- * to have a trailing null byte. But check we found a null inside
- * the message.
+ * to have a trailing null byte. But check we found a null inside the
+ * message.
*/
slen = strlen(str);
if (msg->cursor + slen >= msg->len)
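The pq_sendfloat4()/pq_getmsgfloat4() hunks above reindent a union-based byte swap: the float is type-punned to a 32-bit integer and transmitted in network byte order. A self-contained sketch of that round trip (not libpq itself):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t
float4_to_wire(float f)
{
	union
	{
		float		f;
		uint32_t	i;
	}			swap;

	swap.f = f;
	return htonl(swap.i);			/* 32-bit image, network byte order */
}

static float
float4_from_wire(uint32_t wire)
{
	union
	{
		float		f;
		uint32_t	i;
	}			swap;

	swap.i = ntohl(wire);
	return swap.f;
}

int
main(void)
{
	printf("%g\n", float4_from_wire(float4_to_wire(3.25f)));	/* 3.25 */
	return 0;
}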
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 862194b17d5..fcc6ea5a7e5 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.59 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.60 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -163,6 +163,7 @@ main(int argc, char *argv[])
{
#ifndef WIN32
#ifndef __BEOS__
+
/*
* Make sure we are not running as root.
*
@@ -175,8 +176,8 @@ main(int argc, char *argv[])
gettext("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user id to prevent\n"
"possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n"
- ));
+ "more information on how to properly start the server.\n"
+ ));
exit(1);
}
#endif /* !__BEOS__ */
@@ -193,16 +194,16 @@ main(int argc, char *argv[])
if (getuid() != geteuid())
{
fprintf(stderr,
- gettext("%s: real and effective user ids must match\n"),
+ gettext("%s: real and effective user ids must match\n"),
argv[0]);
exit(1);
}
-#endif /* !WIN32 */
+#endif /* !WIN32 */
}
/*
- * Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain, or
- * BootstrapMain depending on the program name (and possibly first
+ * Now dispatch to one of PostmasterMain, PostgresMain, GucInfoMain,
+ * or BootstrapMain depending on the program name (and possibly first
* argument) we were called with. The lack of consistency here is
* historical.
*/
@@ -223,8 +224,8 @@ main(int argc, char *argv[])
/*
* If the first argument is "--help-config", then invoke runtime
- * configuration option display mode.
- * We remove "--help-config" from the arguments passed on to GucInfoMain.
+ * configuration option display mode. We remove "--help-config" from
+ * the arguments passed on to GucInfoMain.
*/
if (argc > 1 && strcmp(new_argv[1], "--help-config") == 0)
exit(GucInfoMain(argc - 1, new_argv + 1));
@@ -246,7 +247,7 @@ main(int argc, char *argv[])
pw_name_persist = strdup(pw->pw_name);
#else
{
- long namesize = 256 /* UNLEN */ + 1;
+ long namesize = 256 /* UNLEN */ + 1;
pw_name_persist = malloc(namesize);
if (!GetUserName(pw_name_persist, &namesize))
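The main() hunks above reindent two startup identity checks: refuse to run with root privileges, and insist that the real and effective user ids match so a setuid copy of the binary is rejected. A minimal sketch of those checks, illustrative only and not the server's startup code:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
check_startup_identity(const char *progname)
{
	if (geteuid() == 0)
	{
		fprintf(stderr, "%s: \"root\" execution is not permitted\n",
				progname);
		exit(1);
	}
	if (getuid() != geteuid())
	{
		fprintf(stderr, "%s: real and effective user ids must match\n",
				progname);
		exit(1);
	}
}

int
main(int argc, char *argv[])
{
	(void) argc;
	check_startup_identity(argv[0]);
	printf("identity checks passed\n");
	return 0;
}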
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index c4576cf3b3d..e444f449e19 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -14,7 +14,7 @@
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/bitmapset.c,v 1.3 2003/07/22 23:30:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/bitmapset.c,v 1.4 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -38,7 +38,7 @@
* where x's are unspecified bits. The two's complement negative is formed
* by inverting all the bits and adding one. Inversion gives
* yyyyyy01111
- * where each y is the inverse of the corresponding x. Incrementing gives
+ * where each y is the inverse of the corresponding x. Incrementing gives
* yyyyyy10000
* and then ANDing with the original value gives
* 00000010000
@@ -65,41 +65,41 @@
*/
static const uint8 rightmost_one_pos[256] = {
- 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
+ 0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
static const uint8 number_of_ones[256] = {
- 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
- 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8
};
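The file-header comment above explains the trick behind isolating the rightmost one bit: w & (-w) (equivalently w & (~w + 1)) keeps only the lowest set bit, and the reindented rightmost_one_pos[] table then maps a byte to that bit's position. A small self-contained sketch of the same idea; the 256-entry table here is rebuilt at runtime just for the demo:

#include <stdio.h>
#include <stdint.h>

static uint8_t low_bit_pos[256];	/* stand-in for rightmost_one_pos[] */

static void
build_table(void)
{
	int			b,
				p;

	for (b = 1; b < 256; b++)
	{
		for (p = 0; ((b >> p) & 1) == 0; p++)
			;
		low_bit_pos[b] = (uint8_t) p;
	}
}

static int
rightmost_one(uint32_t w)			/* returns -1 if w == 0 */
{
	int			shift = 0;

	if (w == 0)
		return -1;
	w &= ~w + 1;					/* keep only the rightmost one bit */
	while ((w & 0xff) == 0)
	{
		w >>= 8;
		shift += 8;
	}
	return shift + low_bit_pos[w & 0xff];
}

int
main(void)
{
	build_table();
	printf("%d\n", rightmost_one(0x50));		/* prints 4 */
	printf("%d\n", rightmost_one(1u << 20));	/* prints 20 */
	return 0;
}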
@@ -107,7 +107,7 @@ static const uint8 number_of_ones[256] = {
* bms_copy - make a palloc'd copy of a bitmapset
*/
Bitmapset *
-bms_copy(const Bitmapset *a)
+bms_copy(const Bitmapset * a)
{
Bitmapset *result;
size_t size;
@@ -127,7 +127,7 @@ bms_copy(const Bitmapset *a)
* be reported as equal to a palloc'd value containing no members.
*/
bool
-bms_equal(const Bitmapset *a, const Bitmapset *b)
+bms_equal(const Bitmapset * a, const Bitmapset * b)
{
const Bitmapset *shorter;
const Bitmapset *longer;
@@ -143,9 +143,7 @@ bms_equal(const Bitmapset *a, const Bitmapset *b)
return bms_is_empty(b);
}
else if (b == NULL)
- {
return bms_is_empty(a);
- }
/* Identify shorter and longer input */
if (a->nwords <= b->nwords)
{
@@ -199,7 +197,7 @@ bms_make_singleton(int x)
* Same as pfree except for allowing NULL input
*/
void
-bms_free(Bitmapset *a)
+bms_free(Bitmapset * a)
{
if (a)
pfree(a);
@@ -216,7 +214,7 @@ bms_free(Bitmapset *a)
* bms_union - set union
*/
Bitmapset *
-bms_union(const Bitmapset *a, const Bitmapset *b)
+bms_union(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@@ -242,9 +240,7 @@ bms_union(const Bitmapset *a, const Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
- {
result->words[i] |= other->words[i];
- }
return result;
}
@@ -252,7 +248,7 @@ bms_union(const Bitmapset *a, const Bitmapset *b)
* bms_intersect - set intersection
*/
Bitmapset *
-bms_intersect(const Bitmapset *a, const Bitmapset *b)
+bms_intersect(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@@ -276,9 +272,7 @@ bms_intersect(const Bitmapset *a, const Bitmapset *b)
/* And intersect the longer input with the result */
resultlen = result->nwords;
for (i = 0; i < resultlen; i++)
- {
result->words[i] &= other->words[i];
- }
return result;
}
@@ -286,7 +280,7 @@ bms_intersect(const Bitmapset *a, const Bitmapset *b)
* bms_difference - set difference (ie, A without members of B)
*/
Bitmapset *
-bms_difference(const Bitmapset *a, const Bitmapset *b)
+bms_difference(const Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
int shortlen;
@@ -302,9 +296,7 @@ bms_difference(const Bitmapset *a, const Bitmapset *b)
/* And remove b's bits from result */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
- {
- result->words[i] &= ~ b->words[i];
- }
+ result->words[i] &= ~b->words[i];
return result;
}
@@ -312,7 +304,7 @@ bms_difference(const Bitmapset *a, const Bitmapset *b)
* bms_is_subset - is A a subset of B?
*/
bool
-bms_is_subset(const Bitmapset *a, const Bitmapset *b)
+bms_is_subset(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int longlen;
@@ -327,7 +319,7 @@ bms_is_subset(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
- if ((a->words[i] & ~ b->words[i]) != 0)
+ if ((a->words[i] & ~b->words[i]) != 0)
return false;
}
/* Check extra words */
@@ -347,7 +339,7 @@ bms_is_subset(const Bitmapset *a, const Bitmapset *b)
* bms_is_member - is X a member of A?
*/
bool
-bms_is_member(int x, const Bitmapset *a)
+bms_is_member(int x, const Bitmapset * a)
{
int wordnum,
bitnum;
@@ -370,7 +362,7 @@ bms_is_member(int x, const Bitmapset *a)
* bms_overlap - do sets overlap (ie, have a nonempty intersection)?
*/
bool
-bms_overlap(const Bitmapset *a, const Bitmapset *b)
+bms_overlap(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -392,7 +384,7 @@ bms_overlap(const Bitmapset *a, const Bitmapset *b)
* bms_nonempty_difference - do sets have a nonempty difference?
*/
bool
-bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
+bms_nonempty_difference(const Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -406,7 +398,7 @@ bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
- if ((a->words[i] & ~ b->words[i]) != 0)
+ if ((a->words[i] & ~b->words[i]) != 0)
return true;
}
/* Check extra words in a */
@@ -424,11 +416,11 @@ bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b)
* Raises error if |a| is not 1.
*/
int
-bms_singleton_member(const Bitmapset *a)
+bms_singleton_member(const Bitmapset * a)
{
- int result = -1;
- int nwords;
- int wordnum;
+ int result = -1;
+ int nwords;
+ int wordnum;
if (a == NULL)
elog(ERROR, "bitmapset is empty");
@@ -459,11 +451,11 @@ bms_singleton_member(const Bitmapset *a)
* bms_num_members - count members of set
*/
int
-bms_num_members(const Bitmapset *a)
+bms_num_members(const Bitmapset * a)
{
- int result = 0;
- int nwords;
- int wordnum;
+ int result = 0;
+ int nwords;
+ int wordnum;
if (a == NULL)
return 0;
@@ -488,11 +480,11 @@ bms_num_members(const Bitmapset *a)
* This is faster than making an exact count with bms_num_members().
*/
BMS_Membership
-bms_membership(const Bitmapset *a)
+bms_membership(const Bitmapset * a)
{
BMS_Membership result = BMS_EMPTY_SET;
- int nwords;
- int wordnum;
+ int nwords;
+ int wordnum;
if (a == NULL)
return BMS_EMPTY_SET;
@@ -517,10 +509,10 @@ bms_membership(const Bitmapset *a)
* This is even faster than bms_membership().
*/
bool
-bms_is_empty(const Bitmapset *a)
+bms_is_empty(const Bitmapset * a)
{
- int nwords;
- int wordnum;
+ int nwords;
+ int wordnum;
if (a == NULL)
return true;
@@ -552,7 +544,7 @@ bms_is_empty(const Bitmapset *a)
* Input set is modified or recycled!
*/
Bitmapset *
-bms_add_member(Bitmapset *a, int x)
+bms_add_member(Bitmapset * a, int x)
{
int wordnum,
bitnum;
@@ -573,9 +565,7 @@ bms_add_member(Bitmapset *a, int x)
result = bms_make_singleton(x);
nwords = a->nwords;
for (i = 0; i < nwords; i++)
- {
result->words[i] |= a->words[i];
- }
pfree(a);
return result;
}
@@ -592,7 +582,7 @@ bms_add_member(Bitmapset *a, int x)
* Input set is modified in-place!
*/
Bitmapset *
-bms_del_member(Bitmapset *a, int x)
+bms_del_member(Bitmapset * a, int x)
{
int wordnum,
bitnum;
@@ -604,9 +594,7 @@ bms_del_member(Bitmapset *a, int x)
wordnum = WORDNUM(x);
bitnum = BITNUM(x);
if (wordnum < a->nwords)
- {
- a->words[wordnum] &= ~ ((bitmapword) 1 << bitnum);
- }
+ a->words[wordnum] &= ~((bitmapword) 1 << bitnum);
return a;
}
@@ -614,7 +602,7 @@ bms_del_member(Bitmapset *a, int x)
* bms_add_members - like bms_union, but left input is recycled
*/
Bitmapset *
-bms_add_members(Bitmapset *a, const Bitmapset *b)
+bms_add_members(Bitmapset * a, const Bitmapset * b)
{
Bitmapset *result;
const Bitmapset *other;
@@ -640,9 +628,7 @@ bms_add_members(Bitmapset *a, const Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
- {
result->words[i] |= other->words[i];
- }
if (result != a)
pfree(a);
return result;
@@ -652,7 +638,7 @@ bms_add_members(Bitmapset *a, const Bitmapset *b)
* bms_int_members - like bms_intersect, but left input is recycled
*/
Bitmapset *
-bms_int_members(Bitmapset *a, const Bitmapset *b)
+bms_int_members(Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -668,13 +654,9 @@ bms_int_members(Bitmapset *a, const Bitmapset *b)
/* Intersect b into a; we need never copy */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
- {
a->words[i] &= b->words[i];
- }
for (; i < a->nwords; i++)
- {
a->words[i] = 0;
- }
return a;
}
@@ -682,7 +664,7 @@ bms_int_members(Bitmapset *a, const Bitmapset *b)
* bms_del_members - like bms_difference, but left input is recycled
*/
Bitmapset *
-bms_del_members(Bitmapset *a, const Bitmapset *b)
+bms_del_members(Bitmapset * a, const Bitmapset * b)
{
int shortlen;
int i;
@@ -695,9 +677,7 @@ bms_del_members(Bitmapset *a, const Bitmapset *b)
/* Remove b's bits from a; we need never copy */
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
- {
- a->words[i] &= ~ b->words[i];
- }
+ a->words[i] &= ~b->words[i];
return a;
}
@@ -705,7 +685,7 @@ bms_del_members(Bitmapset *a, const Bitmapset *b)
* bms_join - like bms_union, but *both* inputs are recycled
*/
Bitmapset *
-bms_join(Bitmapset *a, Bitmapset *b)
+bms_join(Bitmapset * a, Bitmapset * b)
{
Bitmapset *result;
Bitmapset *other;
@@ -731,9 +711,7 @@ bms_join(Bitmapset *a, Bitmapset *b)
/* And union the shorter input into the result */
otherlen = other->nwords;
for (i = 0; i < otherlen; i++)
- {
result->words[i] |= other->words[i];
- }
if (other != result) /* pure paranoia */
pfree(other);
return result;
@@ -742,24 +720,22 @@ bms_join(Bitmapset *a, Bitmapset *b)
/*----------
* bms_first_member - find and remove first member of a set
*
- * Returns -1 if set is empty. NB: set is destructively modified!
+ * Returns -1 if set is empty. NB: set is destructively modified!
*
* This is intended as support for iterating through the members of a set.
* The typical pattern is
*
* tmpset = bms_copy(inputset);
* while ((x = bms_first_member(tmpset)) >= 0)
- * {
* process member x;
- * }
* bms_free(tmpset);
*----------
*/
int
-bms_first_member(Bitmapset *a)
+bms_first_member(Bitmapset * a)
{
- int nwords;
- int wordnum;
+ int nwords;
+ int wordnum;
if (a == NULL)
return -1;
@@ -770,10 +746,10 @@ bms_first_member(Bitmapset *a)
if (w != 0)
{
- int result;
+ int result;
w = RIGHTMOST_ONE(w);
- a->words[wordnum] &= ~ w;
+ a->words[wordnum] &= ~w;
result = wordnum * BITS_PER_BITMAPWORD;
while ((w & 255) == 0)
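The bms_first_member() comment above documents the intended iteration idiom: copy the set, then repeatedly pull out and clear the lowest member until the set is empty. The same pattern on a plain machine word, as a standalone sketch rather than the Bitmapset API itself:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t	tmpset = (1u << 3) | (1u << 7) | (1u << 20);

	while (tmpset != 0)
	{
		uint32_t	low = tmpset & (~tmpset + 1);	/* lowest set bit */
		int			x = 0;

		while ((low >> x) != 1)
			x++;
		tmpset &= ~low;				/* destructively remove member x */
		printf("member %d\n", x);	/* prints 3, 7, 20 */
	}
	return 0;
}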
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 6945e98d5d7..03349efdc74 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -4,7 +4,7 @@
* Copy functions for Postgres tree nodes.
*
* NOTE: we currently support copying all node types found in parse and
- * plan trees. We do not support copying executor state trees; there
+ * plan trees. We do not support copying executor state trees; there
* is no need for that, and no point in maintaining all the code that
* would be needed. We also do not support copying Path trees, mainly
* because the circular linkages between RelOptInfo and Path nodes can't
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.260 2003/07/22 23:30:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.261 2003/08/04 00:43:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,7 @@
/*
* Macros to simplify copying of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in a Copy routine are
* named 'newnode' and 'from'.
*/
@@ -639,7 +639,7 @@ _copyRangeVar(RangeVar *from)
/*
* We don't need a _copyExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out copying the common fields...
*/
@@ -755,9 +755,9 @@ _copyArrayRef(ArrayRef *from)
* _copyFuncExpr
*/
static FuncExpr *
-_copyFuncExpr(FuncExpr *from)
+_copyFuncExpr(FuncExpr * from)
{
- FuncExpr *newnode = makeNode(FuncExpr);
+ FuncExpr *newnode = makeNode(FuncExpr);
COPY_SCALAR_FIELD(funcid);
COPY_SCALAR_FIELD(funcresulttype);
@@ -772,7 +772,7 @@ _copyFuncExpr(FuncExpr *from)
* _copyOpExpr
*/
static OpExpr *
-_copyOpExpr(OpExpr *from)
+_copyOpExpr(OpExpr * from)
{
OpExpr *newnode = makeNode(OpExpr);
@@ -789,9 +789,9 @@ _copyOpExpr(OpExpr *from)
* _copyDistinctExpr (same as OpExpr)
*/
static DistinctExpr *
-_copyDistinctExpr(DistinctExpr *from)
+_copyDistinctExpr(DistinctExpr * from)
{
- DistinctExpr *newnode = makeNode(DistinctExpr);
+ DistinctExpr *newnode = makeNode(DistinctExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@@ -806,9 +806,9 @@ _copyDistinctExpr(DistinctExpr *from)
* _copyScalarArrayOpExpr
*/
static ScalarArrayOpExpr *
-_copyScalarArrayOpExpr(ScalarArrayOpExpr *from)
+_copyScalarArrayOpExpr(ScalarArrayOpExpr * from)
{
- ScalarArrayOpExpr *newnode = makeNode(ScalarArrayOpExpr);
+ ScalarArrayOpExpr *newnode = makeNode(ScalarArrayOpExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@@ -822,9 +822,9 @@ _copyScalarArrayOpExpr(ScalarArrayOpExpr *from)
* _copyBoolExpr
*/
static BoolExpr *
-_copyBoolExpr(BoolExpr *from)
+_copyBoolExpr(BoolExpr * from)
{
- BoolExpr *newnode = makeNode(BoolExpr);
+ BoolExpr *newnode = makeNode(BoolExpr);
COPY_SCALAR_FIELD(boolop);
COPY_NODE_FIELD(args);
@@ -940,9 +940,9 @@ _copyCaseWhen(CaseWhen *from)
* _copyArrayExpr
*/
static ArrayExpr *
-_copyArrayExpr(ArrayExpr *from)
+_copyArrayExpr(ArrayExpr * from)
{
- ArrayExpr *newnode = makeNode(ArrayExpr);
+ ArrayExpr *newnode = makeNode(ArrayExpr);
COPY_SCALAR_FIELD(array_typeid);
COPY_SCALAR_FIELD(element_typeid);
@@ -956,7 +956,7 @@ _copyArrayExpr(ArrayExpr *from)
* _copyCoalesceExpr
*/
static CoalesceExpr *
-_copyCoalesceExpr(CoalesceExpr *from)
+_copyCoalesceExpr(CoalesceExpr * from)
{
CoalesceExpr *newnode = makeNode(CoalesceExpr);
@@ -970,9 +970,9 @@ _copyCoalesceExpr(CoalesceExpr *from)
* _copyNullIfExpr (same as OpExpr)
*/
static NullIfExpr *
-_copyNullIfExpr(NullIfExpr *from)
+_copyNullIfExpr(NullIfExpr * from)
{
- NullIfExpr *newnode = makeNode(NullIfExpr);
+ NullIfExpr *newnode = makeNode(NullIfExpr);
COPY_SCALAR_FIELD(opno);
COPY_SCALAR_FIELD(opfuncid);
@@ -1015,7 +1015,7 @@ _copyBooleanTest(BooleanTest *from)
* _copyCoerceToDomain
*/
static CoerceToDomain *
-_copyCoerceToDomain(CoerceToDomain *from)
+_copyCoerceToDomain(CoerceToDomain * from)
{
CoerceToDomain *newnode = makeNode(CoerceToDomain);
@@ -1031,7 +1031,7 @@ _copyCoerceToDomain(CoerceToDomain *from)
* _copyCoerceToDomainValue
*/
static CoerceToDomainValue *
-_copyCoerceToDomainValue(CoerceToDomainValue *from)
+_copyCoerceToDomainValue(CoerceToDomainValue * from)
{
CoerceToDomainValue *newnode = makeNode(CoerceToDomainValue);
@@ -1045,7 +1045,7 @@ _copyCoerceToDomainValue(CoerceToDomainValue *from)
* _copySetToDefault
*/
static SetToDefault *
-_copySetToDefault(SetToDefault *from)
+_copySetToDefault(SetToDefault * from)
{
SetToDefault *newnode = makeNode(SetToDefault);
@@ -1148,7 +1148,7 @@ _copyRestrictInfo(RestrictInfo *from)
COPY_NODE_FIELD(clause);
COPY_SCALAR_FIELD(ispusheddown);
- COPY_NODE_FIELD(subclauseindices); /* XXX probably bad */
+ COPY_NODE_FIELD(subclauseindices); /* XXX probably bad */
COPY_SCALAR_FIELD(eval_cost);
COPY_SCALAR_FIELD(this_selec);
COPY_BITMAPSET_FIELD(left_relids);
@@ -1191,7 +1191,7 @@ _copyJoinInfo(JoinInfo *from)
* _copyInClauseInfo
*/
static InClauseInfo *
-_copyInClauseInfo(InClauseInfo *from)
+_copyInClauseInfo(InClauseInfo * from)
{
InClauseInfo *newnode = makeNode(InClauseInfo);
@@ -1532,9 +1532,9 @@ _copyQuery(Query *from)
/*
* We do not copy the other planner internal fields: base_rel_list,
- * other_rel_list, join_rel_list, equi_key_list, query_pathkeys.
- * That would get us into copying RelOptInfo/Path trees, which we don't
- * want to do. It is necessary to copy in_info_list and hasJoinRTEs
+ * other_rel_list, join_rel_list, equi_key_list, query_pathkeys. That
+ * would get us into copying RelOptInfo/Path trees, which we don't
+ * want to do. It is necessary to copy in_info_list and hasJoinRTEs
* for the benefit of inheritance_planner(), which may try to copy a
* Query in which these are already set.
*/
@@ -1633,7 +1633,7 @@ _copyAlterTableStmt(AlterTableStmt *from)
}
static AlterDomainStmt *
-_copyAlterDomainStmt(AlterDomainStmt *from)
+_copyAlterDomainStmt(AlterDomainStmt * from)
{
AlterDomainStmt *newnode = makeNode(AlterDomainStmt);
@@ -1644,7 +1644,7 @@ _copyAlterDomainStmt(AlterDomainStmt *from)
COPY_SCALAR_FIELD(behavior);
return newnode;
-}
+}
static GrantStmt *
_copyGrantStmt(GrantStmt *from)
@@ -1685,7 +1685,7 @@ _copyFuncWithArgs(FuncWithArgs *from)
}
static DeclareCursorStmt *
-_copyDeclareCursorStmt(DeclareCursorStmt *from)
+_copyDeclareCursorStmt(DeclareCursorStmt * from)
{
DeclareCursorStmt *newnode = makeNode(DeclareCursorStmt);
@@ -1747,7 +1747,7 @@ _copyCreateStmt(CreateStmt *from)
}
static InhRelation *
-_copyInhRelation(InhRelation *from)
+_copyInhRelation(InhRelation * from)
{
InhRelation *newnode = makeNode(InhRelation);
@@ -2118,7 +2118,7 @@ _copyCreateSeqStmt(CreateSeqStmt *from)
}
static AlterSeqStmt *
-_copyAlterSeqStmt(AlterSeqStmt *from)
+_copyAlterSeqStmt(AlterSeqStmt * from)
{
AlterSeqStmt *newnode = makeNode(AlterSeqStmt);
@@ -2171,7 +2171,7 @@ _copyCreateTrigStmt(CreateTrigStmt *from)
COPY_NODE_FIELD(args);
COPY_SCALAR_FIELD(before);
COPY_SCALAR_FIELD(row);
- strcpy(newnode->actions, from->actions); /* in-line string field */
+ strcpy(newnode->actions, from->actions); /* in-line string field */
COPY_SCALAR_FIELD(isconstraint);
COPY_SCALAR_FIELD(deferrable);
COPY_SCALAR_FIELD(initdeferred);
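The copyfuncs.c header comment above notes that the COPY_*_FIELD macros hard-wire the convention that the local variables in a copy routine are named 'newnode' and 'from'. The macro definitions themselves are not part of this hunk; the following is only a plausible sketch of that convention, with a made-up node type and macro bodies:

#include <stdlib.h>
#include <string.h>

#define COPY_SCALAR_FIELD(fldname) \
	(newnode->fldname = from->fldname)

#define COPY_STRING_FIELD(fldname) \
	(newnode->fldname = from->fldname ? strdup(from->fldname) : NULL)

typedef struct FakeNode
{
	int			tag;
	char	   *name;
} FakeNode;

static FakeNode *
copy_fake_node(const FakeNode *from)
{
	FakeNode   *newnode = malloc(sizeof(FakeNode));

	if (newnode == NULL)
		return NULL;
	COPY_SCALAR_FIELD(tag);
	COPY_STRING_FIELD(name);
	return newnode;
}

int
main(void)
{
	FakeNode	orig = {7, "example"};
	FakeNode   *copy = copy_fake_node(&orig);

	if (copy)
	{
		free(copy->name);
		free(copy);
	}
	return 0;
}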
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 513c17b048c..924793d07ec 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -11,14 +11,14 @@
* be handled easily in a simple depth-first traversal.
*
* Currently, in fact, equal() doesn't know how to compare Plan trees
- * either. This might need to be fixed someday.
+ * either. This might need to be fixed someday.
*
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.204 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.205 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,8 +32,8 @@
/*
- * Macros to simplify comparison of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify comparison of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire the convention that the local variables in an Equal routine are
* named 'a' and 'b'.
*/
@@ -135,7 +135,7 @@ _equalRangeVar(RangeVar *a, RangeVar *b)
/*
* We don't need an _equalExpr because Expr is an abstract supertype which
- * should never actually get instantiated. Also, since it has no common
+ * should never actually get instantiated. Also, since it has no common
* fields except NodeTag, there's no need for a helper routine to factor
* out comparing the common fields...
*/
@@ -224,11 +224,12 @@ _equalArrayRef(ArrayRef *a, ArrayRef *b)
}
static bool
-_equalFuncExpr(FuncExpr *a, FuncExpr *b)
+_equalFuncExpr(FuncExpr * a, FuncExpr * b)
{
COMPARE_SCALAR_FIELD(funcid);
COMPARE_SCALAR_FIELD(funcresulttype);
COMPARE_SCALAR_FIELD(funcretset);
+
/*
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
* nodes that are equal() to both explicit and implicit coercions.
@@ -244,14 +245,15 @@ _equalFuncExpr(FuncExpr *a, FuncExpr *b)
}
static bool
-_equalOpExpr(OpExpr *a, OpExpr *b)
+_equalOpExpr(OpExpr * a, OpExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -266,14 +268,15 @@ _equalOpExpr(OpExpr *a, OpExpr *b)
}
static bool
-_equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
+_equalDistinctExpr(DistinctExpr * a, DistinctExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -288,14 +291,15 @@ _equalDistinctExpr(DistinctExpr *a, DistinctExpr *b)
}
static bool
-_equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
+_equalScalarArrayOpExpr(ScalarArrayOpExpr * a, ScalarArrayOpExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -309,7 +313,7 @@ _equalScalarArrayOpExpr(ScalarArrayOpExpr *a, ScalarArrayOpExpr *b)
}
static bool
-_equalBoolExpr(BoolExpr *a, BoolExpr *b)
+_equalBoolExpr(BoolExpr * a, BoolExpr * b)
{
COMPARE_SCALAR_FIELD(boolop);
COMPARE_NODE_FIELD(args);
@@ -366,6 +370,7 @@ _equalRelabelType(RelabelType *a, RelabelType *b)
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(resulttype);
COMPARE_SCALAR_FIELD(resulttypmod);
+
/*
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
* nodes that are equal() to both explicit and implicit coercions.
@@ -399,7 +404,7 @@ _equalCaseWhen(CaseWhen *a, CaseWhen *b)
}
static bool
-_equalArrayExpr(ArrayExpr *a, ArrayExpr *b)
+_equalArrayExpr(ArrayExpr * a, ArrayExpr * b)
{
COMPARE_SCALAR_FIELD(array_typeid);
COMPARE_SCALAR_FIELD(element_typeid);
@@ -410,7 +415,7 @@ _equalArrayExpr(ArrayExpr *a, ArrayExpr *b)
}
static bool
-_equalCoalesceExpr(CoalesceExpr *a, CoalesceExpr *b)
+_equalCoalesceExpr(CoalesceExpr * a, CoalesceExpr * b)
{
COMPARE_SCALAR_FIELD(coalescetype);
COMPARE_NODE_FIELD(args);
@@ -419,14 +424,15 @@ _equalCoalesceExpr(CoalesceExpr *a, CoalesceExpr *b)
}
static bool
-_equalNullIfExpr(NullIfExpr *a, NullIfExpr *b)
+_equalNullIfExpr(NullIfExpr * a, NullIfExpr * b)
{
COMPARE_SCALAR_FIELD(opno);
+
/*
- * Special-case opfuncid: it is allowable for it to differ if one
- * node contains zero and the other doesn't. This just means that the
- * one node isn't as far along in the parse/plan pipeline and hasn't
- * had the opfuncid cache filled yet.
+ * Special-case opfuncid: it is allowable for it to differ if one node
+ * contains zero and the other doesn't. This just means that the one
+ * node isn't as far along in the parse/plan pipeline and hasn't had
+ * the opfuncid cache filled yet.
*/
if (a->opfuncid != b->opfuncid &&
a->opfuncid != 0 &&
@@ -459,11 +465,12 @@ _equalBooleanTest(BooleanTest *a, BooleanTest *b)
}
static bool
-_equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
+_equalCoerceToDomain(CoerceToDomain * a, CoerceToDomain * b)
{
COMPARE_NODE_FIELD(arg);
COMPARE_SCALAR_FIELD(resulttype);
COMPARE_SCALAR_FIELD(resulttypmod);
+
/*
* Special-case COERCE_DONTCARE, so that pathkeys can build coercion
* nodes that are equal() to both explicit and implicit coercions.
@@ -477,7 +484,7 @@ _equalCoerceToDomain(CoerceToDomain *a, CoerceToDomain *b)
}
static bool
-_equalCoerceToDomainValue(CoerceToDomainValue *a, CoerceToDomainValue *b)
+_equalCoerceToDomainValue(CoerceToDomainValue * a, CoerceToDomainValue * b)
{
COMPARE_SCALAR_FIELD(typeId);
COMPARE_SCALAR_FIELD(typeMod);
@@ -486,7 +493,7 @@ _equalCoerceToDomainValue(CoerceToDomainValue *a, CoerceToDomainValue *b)
}
static bool
-_equalSetToDefault(SetToDefault *a, SetToDefault *b)
+_equalSetToDefault(SetToDefault * a, SetToDefault * b)
{
COMPARE_SCALAR_FIELD(typeId);
COMPARE_SCALAR_FIELD(typeMod);
@@ -554,11 +561,13 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
{
COMPARE_NODE_FIELD(clause);
COMPARE_SCALAR_FIELD(ispusheddown);
+
/*
- * We ignore subclauseindices, eval_cost, this_selec, left/right_relids,
- * left/right_pathkey, and left/right_bucketsize, since they may not be
- * set yet, and should be derivable from the clause anyway. Probably it's
- * not really necessary to compare any of these remaining fields ...
+ * We ignore subclauseindices, eval_cost, this_selec,
+ * left/right_relids, left/right_pathkey, and left/right_bucketsize,
+ * since they may not be set yet, and should be derivable from the
+ * clause anyway. Probably it's not really necessary to compare any
+ * of these remaining fields ...
*/
COMPARE_SCALAR_FIELD(mergejoinoperator);
COMPARE_SCALAR_FIELD(left_sortop);
@@ -578,7 +587,7 @@ _equalJoinInfo(JoinInfo *a, JoinInfo *b)
}
static bool
-_equalInClauseInfo(InClauseInfo *a, InClauseInfo *b)
+_equalInClauseInfo(InClauseInfo * a, InClauseInfo * b)
{
COMPARE_BITMAPSET_FIELD(lefthand);
COMPARE_BITMAPSET_FIELD(righthand);
@@ -620,9 +629,9 @@ _equalQuery(Query *a, Query *b)
/*
* We do not check the other planner internal fields: base_rel_list,
- * other_rel_list, join_rel_list, equi_key_list, query_pathkeys.
- * They might not be set yet, and in any case they should be derivable
- * from the other fields.
+ * other_rel_list, join_rel_list, equi_key_list, query_pathkeys. They
+ * might not be set yet, and in any case they should be derivable from
+ * the other fields.
*/
return true;
}
@@ -706,7 +715,7 @@ _equalAlterTableStmt(AlterTableStmt *a, AlterTableStmt *b)
}
static bool
-_equalAlterDomainStmt(AlterDomainStmt *a, AlterDomainStmt *b)
+_equalAlterDomainStmt(AlterDomainStmt * a, AlterDomainStmt * b)
{
COMPARE_SCALAR_FIELD(subtype);
COMPARE_NODE_FIELD(typename);
@@ -750,7 +759,7 @@ _equalFuncWithArgs(FuncWithArgs *a, FuncWithArgs *b)
}
static bool
-_equalDeclareCursorStmt(DeclareCursorStmt *a, DeclareCursorStmt *b)
+_equalDeclareCursorStmt(DeclareCursorStmt * a, DeclareCursorStmt * b)
{
COMPARE_STRING_FIELD(portalname);
COMPARE_SCALAR_FIELD(options);
@@ -802,7 +811,7 @@ _equalCreateStmt(CreateStmt *a, CreateStmt *b)
}
static bool
-_equalInhRelation(InhRelation *a, InhRelation *b)
+_equalInhRelation(InhRelation * a, InhRelation * b)
{
COMPARE_NODE_FIELD(relation);
COMPARE_SCALAR_FIELD(including_defaults);
@@ -1113,7 +1122,7 @@ _equalCreateSeqStmt(CreateSeqStmt *a, CreateSeqStmt *b)
}
static bool
-_equalAlterSeqStmt(AlterSeqStmt *a, AlterSeqStmt *b)
+_equalAlterSeqStmt(AlterSeqStmt * a, AlterSeqStmt * b)
{
COMPARE_NODE_FIELD(sequence);
COMPARE_NODE_FIELD(options);
@@ -1156,7 +1165,7 @@ _equalCreateTrigStmt(CreateTrigStmt *a, CreateTrigStmt *b)
COMPARE_NODE_FIELD(args);
COMPARE_SCALAR_FIELD(before);
COMPARE_SCALAR_FIELD(row);
- if (strcmp(a->actions, b->actions) != 0) /* in-line string field */
+ if (strcmp(a->actions, b->actions) != 0) /* in-line string field */
return false;
COMPARE_SCALAR_FIELD(isconstraint);
COMPARE_SCALAR_FIELD(deferrable);
@@ -1400,7 +1409,7 @@ _equalParamRef(ParamRef *a, ParamRef *b)
static bool
_equalAConst(A_Const *a, A_Const *b)
{
- if (!equal(&a->val, &b->val)) /* hack for in-line Value field */
+ if (!equal(&a->val, &b->val)) /* hack for in-line Value field */
return false;
COMPARE_NODE_FIELD(typename);
@@ -1649,9 +1658,9 @@ equal(void *a, void *b)
switch (nodeTag(a))
{
- /*
- * PRIMITIVE NODES
- */
+ /*
+ * PRIMITIVE NODES
+ */
case T_Resdom:
retval = _equalResdom(a, b);
break;
@@ -1841,7 +1850,7 @@ equal(void *a, void *b)
retval = _equalCreateStmt(a, b);
break;
case T_InhRelation:
- retval = _equalInhRelation(a,b);
+ retval = _equalInhRelation(a, b);
break;
case T_DefineStmt:
retval = _equalDefineStmt(a, b);
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index b0f6821b8c4..354134caeaf 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.51 2003/07/22 23:30:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.52 2003/08/04 00:43:19 momjian Exp $
*
* NOTES
* XXX a few of the following functions are duplicated to handle
@@ -202,7 +202,7 @@ nconc(List *l1, List *l2)
* since we avoid having to chase down the list again each time.
*/
void
-FastAppend(FastList *fl, void *datum)
+FastAppend(FastList * fl, void *datum)
{
List *cell = makeList1(datum);
@@ -223,7 +223,7 @@ FastAppend(FastList *fl, void *datum)
* FastAppendi - same for integers
*/
void
-FastAppendi(FastList *fl, int datum)
+FastAppendi(FastList * fl, int datum)
{
List *cell = makeListi1(datum);
@@ -244,7 +244,7 @@ FastAppendi(FastList *fl, int datum)
* FastAppendo - same for Oids
*/
void
-FastAppendo(FastList *fl, Oid datum)
+FastAppendo(FastList * fl, Oid datum)
{
List *cell = makeListo1(datum);
@@ -267,14 +267,12 @@ FastAppendo(FastList *fl, Oid datum)
* Note that the cells of the second argument are absorbed into the FastList.
*/
void
-FastConc(FastList *fl, List *cells)
+FastConc(FastList * fl, List *cells)
{
if (cells == NIL)
return; /* nothing to do */
if (fl->tail)
- {
lnext(fl->tail) = cells;
- }
else
{
/* First cell of list */
@@ -292,14 +290,12 @@ FastConc(FastList *fl, List *cells)
* Note that the cells of the second argument are absorbed into the first.
*/
void
-FastConcFast(FastList *fl, FastList *fl2)
+FastConcFast(FastList * fl, FastList * fl2)
{
if (fl2->head == NIL)
return; /* nothing to do */
if (fl->tail)
- {
lnext(fl->tail) = fl2->head;
- }
else
{
/* First cell of list */
@@ -319,9 +315,7 @@ nth(int n, List *l)
{
/* XXX assume list is long enough */
while (n-- > 0)
- {
l = lnext(l);
- }
return lfirst(l);
}
@@ -781,4 +775,5 @@ lreverse(List *l)
result = lcons(lfirst(i), result);
return result;
}
+
#endif
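
The list.c hunks above reindent the FastList helpers, whose point is to keep a tail pointer so repeated appends never re-walk the list. Below is a minimal, self-contained sketch of that O(1)-append technique; the struct and function names are illustrative stand-ins, not the real PostgreSQL FastList definitions.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical cell and list-with-tail, mimicking the FastList idea:
 * remember the last cell so each append is constant time. */
typedef struct Cell
{
	void	   *datum;
	struct Cell *next;
} Cell;

typedef struct FastListSketch
{
	Cell	   *head;
	Cell	   *tail;
} FastListSketch;

static void
fast_append(FastListSketch *fl, void *datum)
{
	Cell	   *cell = malloc(sizeof(Cell));

	cell->datum = datum;
	cell->next = NULL;
	if (fl->tail)
		fl->tail->next = cell;	/* link after the current tail */
	else
		fl->head = cell;		/* first cell of the list */
	fl->tail = cell;			/* tail always points at the newest cell */
}

int
main(void)
{
	FastListSketch fl = {NULL, NULL};
	int			vals[] = {1, 2, 3};

	for (int i = 0; i < 3; i++)
		fast_append(&fl, &vals[i]);
	for (Cell *c = fl.head; c; c = c->next)
		printf("%d\n", *(int *) c->datum);
	return 0;
}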
diff --git a/src/backend/nodes/nodes.c b/src/backend/nodes/nodes.c
index f71bd020ce9..4a4e0c98f57 100644
--- a/src/backend/nodes/nodes.c
+++ b/src/backend/nodes/nodes.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.19 2002/12/16 16:22:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.20 2003/08/04 00:43:19 momjian Exp $
*
* HISTORY
* Andrew Yu Oct 20, 1994 file creation
@@ -24,4 +24,4 @@
* Support for newNode() macro
*/
-Node *newNodeMacroHolder;
+Node *newNodeMacroHolder;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index fba59553793..9247bb00d2a 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -8,12 +8,12 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.214 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.215 2003/08/04 00:43:19 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
* have an output function defined here (as well as an input function
- * in readfuncs.c). For use in debugging, we also provide output
+ * in readfuncs.c). For use in debugging, we also provide output
* functions for nodes that appear in raw parsetrees, path, and plan trees.
* These nodes however need not have input functions.
*
@@ -31,8 +31,8 @@
/*
- * Macros to simplify output of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * Macros to simplify output of different kinds of fields. Use these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in an Out
* routine.
*/
@@ -184,7 +184,7 @@ _outOidList(StringInfo str, List *list)
* an integer List would be.
*/
static void
-_outBitmapset(StringInfo str, Bitmapset *bms)
+_outBitmapset(StringInfo str, Bitmapset * bms)
{
Bitmapset *tmpset;
int x;
@@ -192,9 +192,7 @@ _outBitmapset(StringInfo str, Bitmapset *bms)
appendStringInfoChar(str, '(');
tmpset = bms_copy(bms);
while ((x = bms_first_member(tmpset)) >= 0)
- {
appendStringInfo(str, " %d", x);
- }
bms_free(tmpset);
appendStringInfoChar(str, ')');
}
@@ -633,7 +631,7 @@ _outArrayRef(StringInfo str, ArrayRef *node)
}
static void
-_outFuncExpr(StringInfo str, FuncExpr *node)
+_outFuncExpr(StringInfo str, FuncExpr * node)
{
WRITE_NODE_TYPE("FUNCEXPR");
@@ -645,7 +643,7 @@ _outFuncExpr(StringInfo str, FuncExpr *node)
}
static void
-_outOpExpr(StringInfo str, OpExpr *node)
+_outOpExpr(StringInfo str, OpExpr * node)
{
WRITE_NODE_TYPE("OPEXPR");
@@ -657,7 +655,7 @@ _outOpExpr(StringInfo str, OpExpr *node)
}
static void
-_outDistinctExpr(StringInfo str, DistinctExpr *node)
+_outDistinctExpr(StringInfo str, DistinctExpr * node)
{
WRITE_NODE_TYPE("DISTINCTEXPR");
@@ -669,7 +667,7 @@ _outDistinctExpr(StringInfo str, DistinctExpr *node)
}
static void
-_outScalarArrayOpExpr(StringInfo str, ScalarArrayOpExpr *node)
+_outScalarArrayOpExpr(StringInfo str, ScalarArrayOpExpr * node)
{
WRITE_NODE_TYPE("SCALARARRAYOPEXPR");
@@ -680,7 +678,7 @@ _outScalarArrayOpExpr(StringInfo str, ScalarArrayOpExpr *node)
}
static void
-_outBoolExpr(StringInfo str, BoolExpr *node)
+_outBoolExpr(StringInfo str, BoolExpr * node)
{
char *opstr = NULL;
@@ -780,7 +778,7 @@ _outCaseWhen(StringInfo str, CaseWhen *node)
}
static void
-_outArrayExpr(StringInfo str, ArrayExpr *node)
+_outArrayExpr(StringInfo str, ArrayExpr * node)
{
WRITE_NODE_TYPE("ARRAY");
@@ -791,7 +789,7 @@ _outArrayExpr(StringInfo str, ArrayExpr *node)
}
static void
-_outCoalesceExpr(StringInfo str, CoalesceExpr *node)
+_outCoalesceExpr(StringInfo str, CoalesceExpr * node)
{
WRITE_NODE_TYPE("COALESCE");
@@ -800,7 +798,7 @@ _outCoalesceExpr(StringInfo str, CoalesceExpr *node)
}
static void
-_outNullIfExpr(StringInfo str, NullIfExpr *node)
+_outNullIfExpr(StringInfo str, NullIfExpr * node)
{
WRITE_NODE_TYPE("NULLIFEXPR");
@@ -830,7 +828,7 @@ _outBooleanTest(StringInfo str, BooleanTest *node)
}
static void
-_outCoerceToDomain(StringInfo str, CoerceToDomain *node)
+_outCoerceToDomain(StringInfo str, CoerceToDomain * node)
{
WRITE_NODE_TYPE("COERCETODOMAIN");
@@ -841,7 +839,7 @@ _outCoerceToDomain(StringInfo str, CoerceToDomain *node)
}
static void
-_outCoerceToDomainValue(StringInfo str, CoerceToDomainValue *node)
+_outCoerceToDomainValue(StringInfo str, CoerceToDomainValue * node)
{
WRITE_NODE_TYPE("COERCETODOMAINVALUE");
@@ -850,7 +848,7 @@ _outCoerceToDomainValue(StringInfo str, CoerceToDomainValue *node)
}
static void
-_outSetToDefault(StringInfo str, SetToDefault *node)
+_outSetToDefault(StringInfo str, SetToDefault * node)
{
WRITE_NODE_TYPE("SETTODEFAULT");
@@ -979,7 +977,7 @@ _outAppendPath(StringInfo str, AppendPath *node)
}
static void
-_outResultPath(StringInfo str, ResultPath *node)
+_outResultPath(StringInfo str, ResultPath * node)
{
WRITE_NODE_TYPE("RESULTPATH");
@@ -990,7 +988,7 @@ _outResultPath(StringInfo str, ResultPath *node)
}
static void
-_outMaterialPath(StringInfo str, MaterialPath *node)
+_outMaterialPath(StringInfo str, MaterialPath * node)
{
WRITE_NODE_TYPE("MATERIALPATH");
@@ -1000,7 +998,7 @@ _outMaterialPath(StringInfo str, MaterialPath *node)
}
static void
-_outUniquePath(StringInfo str, UniquePath *node)
+_outUniquePath(StringInfo str, UniquePath * node)
{
WRITE_NODE_TYPE("UNIQUEPATH");
@@ -1079,7 +1077,7 @@ _outJoinInfo(StringInfo str, JoinInfo *node)
}
static void
-_outInClauseInfo(StringInfo str, InClauseInfo *node)
+_outInClauseInfo(StringInfo str, InClauseInfo * node)
{
WRITE_NODE_TYPE("INCLAUSEINFO");
@@ -1132,7 +1130,7 @@ _outNotifyStmt(StringInfo str, NotifyStmt *node)
}
static void
-_outDeclareCursorStmt(StringInfo str, DeclareCursorStmt *node)
+_outDeclareCursorStmt(StringInfo str, DeclareCursorStmt * node)
{
WRITE_NODE_TYPE("DECLARECURSOR");
@@ -1820,9 +1818,11 @@ _outNode(StringInfo str, void *obj)
break;
default:
+
/*
* This should be an ERROR, but it's too useful to be able
- * to dump structures that _outNode only understands part of.
+ * to dump structures that _outNode only understands part
+ * of.
*/
elog(WARNING, "could not dump unrecognized node type: %d",
(int) nodeTag(obj));
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index abc3a1b0105..32af1d92923 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -8,11 +8,11 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.159 2003/07/22 23:30:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.160 2003/08/04 00:43:19 momjian Exp $
*
* NOTES
* Path and Plan nodes do not have any readfuncs support, because we
- * never have occasion to read them in. (There was once code here that
+ * never have occasion to read them in. (There was once code here that
* claimed to read them, but it was broken as well as unused.) We
* never read executor state trees, either.
*
@@ -28,7 +28,7 @@
/*
* Macros to simplify reading of different kinds of fields. Use these
- * wherever possible to reduce the chance for silly typos. Note that these
+ * wherever possible to reduce the chance for silly typos. Note that these
* hard-wire conventions about the names of the local variables in a Read
* routine.
*/
@@ -466,13 +466,14 @@ _readOpExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -493,13 +494,14 @@ _readDistinctExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -520,13 +522,14 @@ _readScalarArrayOpExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -685,13 +688,14 @@ _readNullIfExpr(void)
READ_OID_FIELD(opno);
READ_OID_FIELD(opfuncid);
+
/*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
+ * The opfuncid is stored in the textual format primarily for
+ * debugging and documentation reasons. We want to always read it as
+ * zero to force it to be re-looked-up in the pg_operator entry. This
+ * ensures that stored rules don't have hidden dependencies on
+ * operators' functions. (We don't currently support an ALTER OPERATOR
+ * command, but might someday.)
*/
local_node->opfuncid = InvalidOid;
@@ -955,6 +959,7 @@ Node *
parseNodeString(void)
{
void *return_value;
+
READ_TEMP_LOCALS();
token = pg_strtok(&length);
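
The readfuncs.c hunks repeat one comment: when an operator node is read back from a stored rule, its opfuncid is forced to zero so it gets re-looked-up from pg_operator instead of being trusted from the stored text. A small sketch of that "discard the cached derived field on read, re-derive lazily" pattern follows; the names and the lookup function are made up for illustration and are not the real node or syscache APIs.

#include <stdio.h>

/* Hypothetical node: opno is authoritative, opfuncid is a derivable cache. */
typedef struct OpExprSketch
{
	unsigned int opno;			/* operator OID, always stored */
	unsigned int opfuncid;		/* function OID cache; 0 means "unknown" */
} OpExprSketch;

/* Stand-in for looking up the operator's implementation function. */
static unsigned int
lookup_oprcode(unsigned int opno)
{
	return opno + 1000;			/* pretend mapping, illustration only */
}

/* Reader: always zero the cache so stored rules carry no hidden
 * dependency on the operator's current implementation function. */
static OpExprSketch
read_op_expr(unsigned int stored_opno, unsigned int stored_opfuncid)
{
	OpExprSketch node;

	(void) stored_opfuncid;		/* deliberately ignored */
	node.opno = stored_opno;
	node.opfuncid = 0;			/* InvalidOid in the real code */
	return node;
}

/* Consumer: fill the cache on first use. */
static unsigned int
get_opfuncid(OpExprSketch *node)
{
	if (node->opfuncid == 0)
		node->opfuncid = lookup_oprcode(node->opno);
	return node->opfuncid;
}

int
main(void)
{
	OpExprSketch node = read_op_expr(96, 65);	/* arbitrary example OIDs */

	printf("opfuncid = %u\n", get_opfuncid(&node));
	return 0;
}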
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index ee06560ca23..b1ff994f061 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.63 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.64 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,9 +48,9 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
/*
* Because gimme_tree considers both left- and right-sided trees,
* there is no difference between a tour (a,b,c,d,...) and a tour
- * (b,a,c,d,...) --- the same join orders will be considered.
- * To avoid redundant cost calculations, we simply reject tours where
- * tour[0] > tour[1], assigning them an artificially bad fitness.
+ * (b,a,c,d,...) --- the same join orders will be considered. To avoid
+ * redundant cost calculations, we simply reject tours where tour[0] >
+ * tour[1], assigning them an artificially bad fitness.
*
* (It would be better to tweak the GEQO logic to not generate such tours
* in the first place, but I'm not sure of all the implications in the
@@ -65,8 +65,8 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
*
* Since geqo_eval() will be called many times, we can't afford to let
* all that memory go unreclaimed until end of statement. Note we
- * make the temp context a child of the planner's normal context, so that
- * it will be freed even if we abort via ereport(ERROR).
+ * make the temp context a child of the planner's normal context, so
+ * that it will be freed even if we abort via ereport(ERROR).
*/
mycontext = AllocSetContextCreate(CurrentMemoryContext,
"GEQO",
@@ -76,9 +76,9 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
oldcxt = MemoryContextSwitchTo(mycontext);
/*
- * preserve root->join_rel_list, which gimme_tree changes; without this,
- * it'll be pointing at recycled storage after the MemoryContextDelete
- * below.
+ * preserve root->join_rel_list, which gimme_tree changes; without
+ * this, it'll be pointing at recycled storage after the
+ * MemoryContextDelete below.
*/
savelist = root->join_rel_list;
@@ -151,9 +151,10 @@ gimme_tree(Query *root, List *initial_rels,
/*
* Construct a RelOptInfo representing the previous joinrel joined
- * to inner_rel. These are always inner joins. Note that we expect
- * the joinrel not to exist in root->join_rel_list yet, and so the
- * paths constructed for it will only include the ones we want.
+ * to inner_rel. These are always inner joins. Note that we
+ * expect the joinrel not to exist in root->join_rel_list yet, and
+ * so the paths constructed for it will only include the ones we
+ * want.
*/
new_rel = make_join_rel(root, joinrel, inner_rel, JOIN_INNER);
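
The geqo_eval.c comment reflowed above explains that, because gimme_tree considers both left- and right-sided trees, tours (a,b,c,d,...) and (b,a,c,d,...) produce the same join orders, so tours with tour[0] > tour[1] are simply given an artificially bad fitness. The sketch below illustrates that duplicate-rejection idea in isolation; the evaluator body and the "cost" it returns are placeholders, not the real GEQO code.

#include <stdio.h>

typedef int Gene;				/* stand-in for the GEQO Gene type */

/* Hypothetical fitness evaluator: reject mirrored tours up front rather
 * than paying to cost the same join order twice. */
static double
eval_tour(const Gene *tour, int num_gene)
{
	if (num_gene >= 2 && tour[0] > tour[1])
		return 1e30;			/* artificially bad fitness: skip duplicate */

	/* the real code would build the join tree and return its cost */
	double		cost = 0.0;

	for (int i = 0; i < num_gene; i++)
		cost += tour[i];		/* placeholder "cost" for illustration */
	return cost;
}

int
main(void)
{
	Gene		a[] = {1, 2, 3, 4};
	Gene		b[] = {2, 1, 3, 4};	/* same join orders as a[], per the comment */

	printf("%g %g\n", eval_tour(a, 4), eval_tour(b, 4));
	return 0;
}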
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index f60fd7d4667..c91f16fd0e4 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.36 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.37 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -229,8 +229,8 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
/*
- * got the cheapest query tree processed by geqo;
- * first element of the population indicates the best query tree
+ * got the cheapest query tree processed by geqo; first element of the
+ * population indicates the best query tree
*/
best_tour = (Gene *) pool->data[0].string;
diff --git a/src/backend/optimizer/geqo/geqo_misc.c b/src/backend/optimizer/geqo/geqo_misc.c
index acc9285d005..329e19c4016 100644
--- a/src/backend/optimizer/geqo/geqo_misc.c
+++ b/src/backend/optimizer/geqo/geqo_misc.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.37 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/geqo/geqo_misc.c,v 1.38 2003/08/04 00:43:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,7 +42,7 @@ avg_pool(Pool *pool)
/*
* Since the pool may contain multiple occurrences of DBL_MAX, divide
- * by pool->size before summing, not after, to avoid overflow. This
+ * by pool->size before summing, not after, to avoid overflow. This
* loses a little in speed and accuracy, but this routine is only used
* for debug printouts, so we don't care that much.
*/
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 486dede0fb9..494f624d4cd 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.104 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.105 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,13 +50,13 @@ static void set_function_pathlist(Query *root, RelOptInfo *rel,
static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
List *initial_rels);
static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
static bool recurse_pushdown_safe(Node *setOp, Query *topquery,
- bool *differentTypes);
+ bool *differentTypes);
static void compare_tlist_datatypes(List *tlist, List *colTypes,
- bool *differentTypes);
+ bool *differentTypes);
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
- bool *differentTypes);
+ bool *differentTypes);
static void subquery_push_qual(Query *subquery, Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
Index rti, Node *qual);
@@ -290,14 +290,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
rel->rows += childrel->rows;
if (childrel->width > rel->width)
rel->width = childrel->width;
-
+
childvars = FastListValue(&childrel->reltargetlist);
foreach(parentvars, FastListValue(&rel->reltargetlist))
{
- Var *parentvar = (Var *) lfirst(parentvars);
- Var *childvar = (Var *) lfirst(childvars);
- int parentndx = parentvar->varattno - rel->min_attr;
- int childndx = childvar->varattno - childrel->min_attr;
+ Var *parentvar = (Var *) lfirst(parentvars);
+ Var *childvar = (Var *) lfirst(childvars);
+ int parentndx = parentvar->varattno - rel->min_attr;
+ int childndx = childvar->varattno - childrel->min_attr;
if (childrel->attr_widths[childndx] > rel->attr_widths[parentndx])
rel->attr_widths[parentndx] = childrel->attr_widths[childndx];
@@ -343,8 +343,8 @@ set_subquery_pathlist(Query *root, RelOptInfo *rel,
*
* There are several cases where we cannot push down clauses.
* Restrictions involving the subquery are checked by
- * subquery_is_pushdown_safe(). Restrictions on individual clauses are
- * checked by qual_is_pushdown_safe().
+ * subquery_is_pushdown_safe(). Restrictions on individual clauses
+ * are checked by qual_is_pushdown_safe().
*
* Non-pushed-down clauses will get evaluated as qpquals of the
* SubqueryScan node.
@@ -725,15 +725,16 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
vars = pull_var_clause(qual, false);
foreach(vl, vars)
{
- Var *var = (Var *) lfirst(vl);
+ Var *var = (Var *) lfirst(vl);
List *tl;
TargetEntry *tle = NULL;
Assert(var->varno == rti);
+
/*
* We use a bitmapset to avoid testing the same attno more than
- * once. (NB: this only works because subquery outputs can't
- * have negative attnos.)
+ * once. (NB: this only works because subquery outputs can't have
+ * negative attnos.)
*/
if (bms_is_member(var->varattno, tested))
continue;
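
The qual_is_pushdown_safe hunk above notes that a bitmapset records which attnos have already been tested, which only works because subquery output columns cannot have negative attnos. Here is a tiny sketch of that "seen-set" pattern using a plain bitmask in place of the Bitmapset API (and so limited to small attnos); it shows the shape of the check, not the real planner code.

#include <stdio.h>

int
main(void)
{
	/* Column numbers referenced by a qual; duplicates on purpose. */
	int			attnos[] = {2, 5, 2, 7, 5};
	unsigned long tested = 0;	/* stand-in for a Bitmapset of tested attnos */

	for (int i = 0; i < 5; i++)
	{
		int			attno = attnos[i];

		if (tested & (1UL << attno))
			continue;			/* already checked this column once */
		tested |= 1UL << attno;
		printf("checking attno %d\n", attno);	/* expensive check goes here */
	}
	return 0;
}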
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e1754a7a694..1a0e2da82fd 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.111 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.112 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,10 +102,10 @@ bool enable_hashjoin = true;
static Selectivity estimate_hash_bucketsize(Query *root, Var *var,
- int nbuckets);
-static bool cost_qual_eval_walker(Node *node, QualCost *total);
+ int nbuckets);
+static bool cost_qual_eval_walker(Node *node, QualCost * total);
static Selectivity approx_selectivity(Query *root, List *quals,
- JoinType jointype);
+ JoinType jointype);
static void set_rel_width(Query *root, RelOptInfo *rel);
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
@@ -358,13 +358,13 @@ cost_index(Path *path, Query *root,
* Normally the indexquals will be removed from the list of restriction
* clauses that we have to evaluate as qpquals, so we should subtract
* their costs from baserestrictcost. But if we are doing a join then
- * some of the indexquals are join clauses and shouldn't be subtracted.
- * Rather than work out exactly how much to subtract, we don't subtract
- * anything.
+ * some of the indexquals are join clauses and shouldn't be
+ * subtracted. Rather than work out exactly how much to subtract, we
+ * don't subtract anything.
*
* XXX For a lossy index, not all the quals will be removed and so we
- * really shouldn't subtract their costs; but detecting that seems more
- * expensive than it's worth.
+ * really shouldn't subtract their costs; but detecting that seems
+ * more expensive than it's worth.
*/
startup_cost += baserel->baserestrictcost.startup;
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost.per_tuple;
@@ -433,8 +433,8 @@ cost_subqueryscan(Path *path, RelOptInfo *baserel)
/*
* Cost of path is cost of evaluating the subplan, plus cost of
* evaluating any restriction clauses that will be attached to the
- * SubqueryScan node, plus cpu_tuple_cost to account for selection
- * and projection overhead.
+ * SubqueryScan node, plus cpu_tuple_cost to account for selection and
+ * projection overhead.
*/
path->startup_cost = baserel->subplan->startup_cost;
path->total_cost = baserel->subplan->total_cost;
@@ -597,8 +597,9 @@ cost_material(Path *path,
}
/*
- * Also charge a small amount per extracted tuple. We use cpu_tuple_cost
- * so that it doesn't appear worthwhile to materialize a bare seqscan.
+ * Also charge a small amount per extracted tuple. We use
+ * cpu_tuple_cost so that it doesn't appear worthwhile to materialize
+ * a bare seqscan.
*/
run_cost += cpu_tuple_cost * tuples;
@@ -631,17 +632,17 @@ cost_agg(Path *path, Query *root,
* additional cpu_operator_cost per grouping column per input tuple
* for grouping comparisons.
*
- * We will produce a single output tuple if not grouping,
- * and a tuple per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple per
+ * group otherwise.
*
* Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
- * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
* input path is already sorted appropriately, AGG_SORTED should be
- * preferred (since it has no risk of memory overflow). This will happen
- * as long as the computed total costs are indeed exactly equal --- but
- * if there's roundoff error we might do the wrong thing. So be sure
- * that the computations below form the same intermediate values in the
- * same order.
+ * preferred (since it has no risk of memory overflow). This will
+ * happen as long as the computed total costs are indeed exactly equal
+ * --- but if there's roundoff error we might do the wrong thing. So
+ * be sure that the computations below form the same intermediate
+ * values in the same order.
*/
if (aggstrategy == AGG_PLAIN)
{
@@ -724,26 +725,26 @@ cost_nestloop(NestPath *path, Query *root)
double outer_path_rows = PATH_ROWS(outer_path);
double inner_path_rows = PATH_ROWS(inner_path);
double ntuples;
- Selectivity joininfactor;
+ Selectivity joininfactor;
if (!enable_nestloop)
startup_cost += disable_cost;
/*
- * If we're doing JOIN_IN then we will stop scanning inner tuples for an
- * outer tuple as soon as we have one match. Account for the effects of
- * this by scaling down the cost estimates in proportion to the expected
- * output size. (This assumes that all the quals attached to the join are
- * IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop scanning inner tuples for
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
*/
if (path->jointype == JOIN_IN)
{
- Selectivity qual_selec = approx_selectivity(root, restrictlist,
+ Selectivity qual_selec = approx_selectivity(root, restrictlist,
path->jointype);
- double qptuples;
+ double qptuples;
qptuples = ceil(qual_selec * outer_path_rows * inner_path_rows);
if (qptuples > path->path.parent->rows)
@@ -761,8 +762,8 @@ cost_nestloop(NestPath *path, Query *root)
* before we can start returning tuples, so the join's startup cost is
* their sum. What's not so clear is whether the inner path's
* startup_cost must be paid again on each rescan of the inner path.
- * This is not true if the inner path is materialized or is a hashjoin,
- * but probably is true otherwise.
+ * This is not true if the inner path is materialized or is a
+ * hashjoin, but probably is true otherwise.
*/
startup_cost += outer_path->startup_cost + inner_path->startup_cost;
run_cost += outer_path->total_cost - outer_path->startup_cost;
@@ -783,14 +784,15 @@ cost_nestloop(NestPath *path, Query *root)
(inner_path->total_cost - inner_path->startup_cost) * joininfactor;
/*
- * Compute number of tuples processed (not number emitted!).
- * If inner path is an indexscan, be sure to use its estimated output row
- * count, which may be lower than the restriction-clause-only row count of
- * its parent. (We don't include this case in the PATH_ROWS macro because
- * it applies *only* to a nestloop's inner relation.) Note: it is correct
- * to use the unadjusted inner_path_rows in the above calculation for
- * joininfactor, since otherwise we'd be double-counting the selectivity
- * of the join clause being used for the index.
+ * Compute number of tuples processed (not number emitted!). If inner
+ * path is an indexscan, be sure to use its estimated output row
+ * count, which may be lower than the restriction-clause-only row
+ * count of its parent. (We don't include this case in the PATH_ROWS
+ * macro because it applies *only* to a nestloop's inner relation.)
+ * Note: it is correct to use the unadjusted inner_path_rows in the
+ * above calculation for joininfactor, since otherwise we'd be
+ * double-counting the selectivity of the join clause being used for
+ * the index.
*/
if (IsA(inner_path, IndexPath))
inner_path_rows = ((IndexPath *) inner_path)->rows;
@@ -831,8 +833,8 @@ cost_mergejoin(MergePath *path, Query *root)
Cost startup_cost = 0;
Cost run_cost = 0;
Cost cpu_per_tuple;
- Selectivity merge_selec;
- Selectivity qp_selec;
+ Selectivity merge_selec;
+ Selectivity qp_selec;
QualCost merge_qual_cost;
QualCost qp_qual_cost;
RestrictInfo *firstclause;
@@ -847,7 +849,7 @@ cost_mergejoin(MergePath *path, Query *root)
double rescanratio;
Selectivity outerscansel,
innerscansel;
- Selectivity joininfactor;
+ Selectivity joininfactor;
Path sort_path; /* dummy for result of cost_sort */
if (!enable_mergejoin)
@@ -856,7 +858,8 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* Compute cost and selectivity of the mergequals and qpquals (other
* restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
@@ -876,29 +879,30 @@ cost_mergejoin(MergePath *path, Query *root)
qptuples = ceil(mergejointuples * qp_selec);
/*
- * When there are equal merge keys in the outer relation, the mergejoin
- * must rescan any matching tuples in the inner relation. This means
- * re-fetching inner tuples. Our cost model for this is that a re-fetch
- * costs the same as an original fetch, which is probably an overestimate;
- * but on the other hand we ignore the bookkeeping costs of mark/restore.
- * Not clear if it's worth developing a more refined model.
+ * When there are equal merge keys in the outer relation, the
+ * mergejoin must rescan any matching tuples in the inner relation.
+ * This means re-fetching inner tuples. Our cost model for this is
+ * that a re-fetch costs the same as an original fetch, which is
+ * probably an overestimate; but on the other hand we ignore the
+ * bookkeeping costs of mark/restore. Not clear if it's worth
+ * developing a more refined model.
*
* The number of re-fetches can be estimated approximately as size of
- * merge join output minus size of inner relation. Assume that the
- * distinct key values are 1, 2, ..., and denote the number of values of
- * each key in the outer relation as m1, m2, ...; in the inner relation,
- * n1, n2, ... Then we have
+ * merge join output minus size of inner relation. Assume that the
+ * distinct key values are 1, 2, ..., and denote the number of values
+ * of each key in the outer relation as m1, m2, ...; in the inner
+ * relation, n1, n2, ... Then we have
*
- * size of join = m1 * n1 + m2 * n2 + ...
+ * size of join = m1 * n1 + m2 * n2 + ...
*
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ...
- * = m1 * n1 + m2 * n2 + ... - (n1 + n2 + ...)
- * = size of join - size of inner relation
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
+ * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * relation
*
* This equation works correctly for outer tuples having no inner match
* (nk = 0), but not for inner tuples having no outer match (mk = 0);
* we are effectively subtracting those from the number of rescanned
- * tuples, when we should not. Can we do better without expensive
+ * tuples, when we should not. Can we do better without expensive
* selectivity computations?
*/
if (IsA(outer_path, UniquePath))
@@ -953,8 +957,9 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* Readjust scan selectivities to account for above rounding. This is
- * normally an insignificant effect, but when there are only a few rows
- * in the inputs, failing to do this makes for a large percentage error.
+ * normally an insignificant effect, but when there are only a few
+ * rows in the inputs, failing to do this makes for a large percentage
+ * error.
*/
outerscansel = outer_rows / outer_path_rows;
innerscansel = inner_rows / inner_path_rows;
@@ -1002,11 +1007,11 @@ cost_mergejoin(MergePath *path, Query *root)
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop outputting inner
- * tuples for an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop outputting inner tuples
+ * for an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
*/
if (path->jpath.jointype == JOIN_IN &&
qptuples > path->jpath.path.parent->rows)
@@ -1017,9 +1022,9 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* The number of tuple comparisons needed is approximately number of
* outer rows plus number of inner rows plus number of rescanned
- * tuples (can we refine this?). At each one, we need to evaluate
- * the mergejoin quals. NOTE: JOIN_IN mode does not save any work
- * here, so do NOT include joininfactor.
+ * tuples (can we refine this?). At each one, we need to evaluate the
+ * mergejoin quals. NOTE: JOIN_IN mode does not save any work here,
+ * so do NOT include joininfactor.
*/
startup_cost += merge_qual_cost.startup;
run_cost += merge_qual_cost.per_tuple *
@@ -1028,7 +1033,7 @@ cost_mergejoin(MergePath *path, Query *root)
/*
* For each tuple that gets through the mergejoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
* since not all of the quals may get evaluated at each tuple.) This
* work is skipped in JOIN_IN mode, so apply the factor.
*/
@@ -1059,8 +1064,8 @@ cost_hashjoin(HashPath *path, Query *root)
Cost startup_cost = 0;
Cost run_cost = 0;
Cost cpu_per_tuple;
- Selectivity hash_selec;
- Selectivity qp_selec;
+ Selectivity hash_selec;
+ Selectivity qp_selec;
QualCost hash_qual_cost;
QualCost qp_qual_cost;
double hashjointuples;
@@ -1076,7 +1081,7 @@ cost_hashjoin(HashPath *path, Query *root)
int physicalbuckets;
int numbatches;
Selectivity innerbucketsize;
- Selectivity joininfactor;
+ Selectivity joininfactor;
List *hcl;
List *qpquals;
@@ -1086,7 +1091,8 @@ cost_hashjoin(HashPath *path, Query *root)
/*
* Compute cost and selectivity of the hashquals and qpquals (other
* restriction clauses) separately. We use approx_selectivity here
- * for speed --- in most cases, any errors won't affect the result much.
+ * for speed --- in most cases, any errors won't affect the result
+ * much.
*
* Note: it's probably bogus to use the normal selectivity calculation
* here when either the outer or inner path is a UniquePath.
@@ -1114,9 +1120,9 @@ cost_hashjoin(HashPath *path, Query *root)
* Cost of computing hash function: must do it once per input tuple.
* We charge one cpu_operator_cost for each column's hash function.
*
- * XXX when a hashclause is more complex than a single operator,
- * we really should charge the extra eval costs of the left or right
- * side, as appropriate, here. This seems more work than it's worth
+ * XXX when a hashclause is more complex than a single operator, we
+ * really should charge the extra eval costs of the left or right
+ * side, as appropriate, here. This seems more work than it's worth
* at the moment.
*/
startup_cost += cpu_operator_cost * num_hashclauses * inner_path_rows;
@@ -1131,13 +1137,13 @@ cost_hashjoin(HashPath *path, Query *root)
/*
* Determine bucketsize fraction for inner relation. We use the
- * smallest bucketsize estimated for any individual hashclause;
- * this is undoubtedly conservative.
+ * smallest bucketsize estimated for any individual hashclause; this
+ * is undoubtedly conservative.
*
- * BUT: if inner relation has been unique-ified, we can assume it's
- * good for hashing. This is important both because it's the right
- * answer, and because we avoid contaminating the cache with a value
- * that's wrong for non-unique-ified paths.
+ * BUT: if inner relation has been unique-ified, we can assume it's good
+ * for hashing. This is important both because it's the right answer,
+ * and because we avoid contaminating the cache with a value that's
+ * wrong for non-unique-ified paths.
*/
if (IsA(inner_path, UniquePath))
innerbucketsize = 1.0 / virtualbuckets;
@@ -1152,12 +1158,13 @@ cost_hashjoin(HashPath *path, Query *root)
Assert(IsA(restrictinfo, RestrictInfo));
/*
- * First we have to figure out which side of the hashjoin clause
- * is the inner side.
+ * First we have to figure out which side of the hashjoin
+ * clause is the inner side.
*
* Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the bucketsize estimate in the
- * RestrictInfo node to avoid repeated lookups of statistics.
+ * planning a large query, we cache the bucketsize estimate in
+ * the RestrictInfo node to avoid repeated lookups of
+ * statistics.
*/
if (bms_is_subset(restrictinfo->right_relids,
inner_path->parent->relids))
@@ -1169,7 +1176,7 @@ cost_hashjoin(HashPath *path, Query *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- (Var *) get_rightop(restrictinfo->clause),
+ (Var *) get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
@@ -1185,7 +1192,7 @@ cost_hashjoin(HashPath *path, Query *root)
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- (Var *) get_leftop(restrictinfo->clause),
+ (Var *) get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
@@ -1217,11 +1224,11 @@ cost_hashjoin(HashPath *path, Query *root)
/* CPU costs */
/*
- * If we're doing JOIN_IN then we will stop comparing inner
- * tuples to an outer tuple as soon as we have one match. Account for
- * the effects of this by scaling down the cost estimates in proportion
- * to the expected output size. (This assumes that all the quals attached
- * to the join are IN quals, which should be true.)
+ * If we're doing JOIN_IN then we will stop comparing inner tuples to
+ * an outer tuple as soon as we have one match. Account for the
+ * effects of this by scaling down the cost estimates in proportion to
+ * the expected output size. (This assumes that all the quals
+ * attached to the join are IN quals, which should be true.)
*/
if (path->jpath.jointype == JOIN_IN &&
qptuples > path->jpath.path.parent->rows)
@@ -1243,7 +1250,7 @@ cost_hashjoin(HashPath *path, Query *root)
/*
* For each tuple that gets through the hashjoin proper, we charge
* cpu_tuple_cost plus the cost of evaluating additional restriction
- * clauses that are to be applied at the join. (This is pessimistic
+ * clauses that are to be applied at the join. (This is pessimistic
* since not all of the quals may get evaluated at each tuple.)
*/
startup_cost += qp_qual_cost.startup;
@@ -1254,14 +1261,14 @@ cost_hashjoin(HashPath *path, Query *root)
* Bias against putting larger relation on inside. We don't want an
* absolute prohibition, though, since larger relation might have
* better bucketsize --- and we can't trust the size estimates
- * unreservedly, anyway. Instead, inflate the run cost by the
- * square root of the size ratio. (Why square root? No real good
- * reason, but it seems reasonable...)
+ * unreservedly, anyway. Instead, inflate the run cost by the square
+ * root of the size ratio. (Why square root? No real good reason,
+ * but it seems reasonable...)
*
- * Note: before 7.4 we implemented this by inflating startup cost;
- * but if there's a disable_cost component in the input paths'
- * startup cost, that unfairly penalizes the hash. Probably it'd
- * be better to keep track of disable penalty separately from cost.
+ * Note: before 7.4 we implemented this by inflating startup cost; but if
+ * there's a disable_cost component in the input paths' startup cost,
+ * that unfairly penalizes the hash. Probably it'd be better to keep
+ * track of disable penalty separately from cost.
*/
if (innerbytes > outerbytes && outerbytes > 0)
run_cost *= sqrt(innerbytes / outerbytes);
@@ -1442,7 +1449,7 @@ estimate_hash_bucketsize(Query *root, Var *var, int nbuckets)
* and a per-evaluation component.
*/
void
-cost_qual_eval(QualCost *cost, List *quals)
+cost_qual_eval(QualCost * cost, List *quals)
{
List *l;
@@ -1484,7 +1491,7 @@ cost_qual_eval(QualCost *cost, List *quals)
}
static bool
-cost_qual_eval_walker(Node *node, QualCost *total)
+cost_qual_eval_walker(Node *node, QualCost * total)
{
if (node == NULL)
return false;
@@ -1502,9 +1509,7 @@ cost_qual_eval_walker(Node *node, QualCost *total)
IsA(node, OpExpr) ||
IsA(node, DistinctExpr) ||
IsA(node, NullIfExpr))
- {
total->per_tuple += cpu_operator_cost;
- }
else if (IsA(node, ScalarArrayOpExpr))
{
/* should charge more than 1 op cost, but how many? */
@@ -1519,47 +1524,48 @@ cost_qual_eval_walker(Node *node, QualCost *total)
{
/*
* A subplan node in an expression typically indicates that the
- * subplan will be executed on each evaluation, so charge accordingly.
- * (Sub-selects that can be executed as InitPlans have already been
- * removed from the expression.)
+ * subplan will be executed on each evaluation, so charge
+ * accordingly. (Sub-selects that can be executed as InitPlans
+ * have already been removed from the expression.)
*
* An exception occurs when we have decided we can implement the
* subplan by hashing.
*
*/
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
Plan *plan = subplan->plan;
if (subplan->useHashTable)
{
/*
* If we are using a hash table for the subquery outputs, then
- * the cost of evaluating the query is a one-time cost.
- * We charge one cpu_operator_cost per tuple for the work of
+ * the cost of evaluating the query is a one-time cost. We
+ * charge one cpu_operator_cost per tuple for the work of
* loading the hashtable, too.
*/
total->startup += plan->total_cost +
cpu_operator_cost * plan->plan_rows;
+
/*
* The per-tuple costs include the cost of evaluating the
- * lefthand expressions, plus the cost of probing the hashtable.
- * Recursion into the exprs list will handle the lefthand
- * expressions properly, and will count one cpu_operator_cost
- * for each comparison operator. That is probably too low for
- * the probing cost, but it's hard to make a better estimate,
- * so live with it for now.
+ * lefthand expressions, plus the cost of probing the
+ * hashtable. Recursion into the exprs list will handle the
+ * lefthand expressions properly, and will count one
+ * cpu_operator_cost for each comparison operator. That is
+ * probably too low for the probing cost, but it's hard to
+ * make a better estimate, so live with it for now.
*/
}
else
{
/*
* Otherwise we will be rescanning the subplan output on each
- * evaluation. We need to estimate how much of the output
- * we will actually need to scan. NOTE: this logic should
- * agree with the estimates used by make_subplan() in
+ * evaluation. We need to estimate how much of the output we
+ * will actually need to scan. NOTE: this logic should agree
+ * with the estimates used by make_subplan() in
* plan/subselect.c.
*/
- Cost plan_run_cost = plan->total_cost - plan->startup_cost;
+ Cost plan_run_cost = plan->total_cost - plan->startup_cost;
if (subplan->subLinkType == EXISTS_SUBLINK)
{
@@ -1579,23 +1585,20 @@ cost_qual_eval_walker(Node *node, QualCost *total)
/* assume we need all tuples */
total->per_tuple += plan_run_cost;
}
+
/*
- * Also account for subplan's startup cost.
- * If the subplan is uncorrelated or undirect correlated,
- * AND its topmost node is a Sort or Material node, assume
- * that we'll only need to pay its startup cost once;
- * otherwise assume we pay the startup cost every time.
+ * Also account for subplan's startup cost. If the subplan is
+ * uncorrelated or undirect correlated, AND its topmost node
+ * is a Sort or Material node, assume that we'll only need to
+ * pay its startup cost once; otherwise assume we pay the
+ * startup cost every time.
*/
if (subplan->parParam == NIL &&
(IsA(plan, Sort) ||
IsA(plan, Material)))
- {
total->startup += plan->startup_cost;
- }
else
- {
total->per_tuple += plan->startup_cost;
- }
}
}
@@ -1745,7 +1748,7 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
UniquePath *upath;
/*
- * Compute joinclause selectivity. Note that we are only considering
+ * Compute joinclause selectivity. Note that we are only considering
* clauses that become restriction clauses at this join level; we are
* not double-counting them because they were not considered in
* estimating the sizes of the component rels.
@@ -1758,8 +1761,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
/*
* Basically, we multiply size of Cartesian product by selectivity.
*
- * If we are doing an outer join, take that into account: the output
- * must be at least as large as the non-nullable input. (Is there any
+ * If we are doing an outer join, take that into account: the output must
+ * be at least as large as the non-nullable input. (Is there any
* chance of being even smarter?)
*
* For JOIN_IN and variants, the Cartesian product is figured with
@@ -1823,8 +1826,8 @@ set_joinrel_size_estimates(Query *root, RelOptInfo *rel,
rel->rows = temp;
/*
- * We need not compute the output width here, because build_joinrel_tlist
- * already did.
+ * We need not compute the output width here, because
+ * build_joinrel_tlist already did.
*/
}
@@ -1911,11 +1914,14 @@ set_rel_width(Query *root, RelOptInfo *rel)
Assert(IsA(var, Var));
- /* The width probably hasn't been cached yet, but may as well check */
+ /*
+ * The width probably hasn't been cached yet, but may as well
+ * check
+ */
if (rel->attr_widths[ndx] > 0)
{
- tuple_width += rel->attr_widths[ndx];
- continue;
+ tuple_width += rel->attr_widths[ndx];
+ continue;
}
relid = getrelid(var->varno, root->rtable);
@@ -1931,8 +1937,8 @@ set_rel_width(Query *root, RelOptInfo *rel)
}
/*
- * Not a plain relation, or can't find statistics for it.
- * Estimate using just the type info.
+ * Not a plain relation, or can't find statistics for it. Estimate
+ * using just the type info.
*/
item_width = get_typavgwidth(var->vartype, var->vartypmod);
Assert(item_width > 0);
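
The cost_mergejoin comment reflowed above carries a small derivation: with per-key outer multiplicities m1, m2, ... and inner multiplicities n1, n2, ..., the join size is m1*n1 + m2*n2 + ... and the number of re-fetched inner tuples is that sum minus (n1 + n2 + ...), i.e. size of join minus size of inner relation. The snippet below just checks that identity numerically on arbitrarily chosen counts; it is a worked example, not planner code.

#include <stdio.h>

int
main(void)
{
	/* Arbitrary per-key multiplicities for two distinct merge key values. */
	int			m[] = {3, 2};	/* outer-side counts m1, m2 */
	int			n[] = {4, 5};	/* inner-side counts n1, n2 */
	int			join_size = 0,
				inner_size = 0,
				rescanned = 0;

	for (int k = 0; k < 2; k++)
	{
		join_size += m[k] * n[k];			/* size of join */
		inner_size += n[k];					/* size of inner relation */
		rescanned += (m[k] - 1) * n[k];		/* inner tuples re-fetched for key k */
	}

	/* The comment's identity: rescanned = join_size - inner_size. */
	printf("join=%d inner=%d rescanned=%d check=%d\n",
		   join_size, inner_size, rescanned, join_size - inner_size);
	return 0;
}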
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index fa19abe4717..67238b5361c 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.145 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.146 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,9 +64,9 @@ static List *group_clauses_by_indexkey_for_join(Query *root,
Relids outer_relids,
JoinType jointype, bool isouterjoin);
static bool match_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
- int indexcol, Oid opclass, Expr *clause);
-static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
int indexcol, Oid opclass, Expr *clause);
+static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
+ int indexcol, Oid opclass, Expr *clause);
static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static bool pred_test(List *predicate_list, List *restrictinfo_list,
@@ -77,8 +77,8 @@ static bool pred_test_recurse_pred(Expr *predicate, Node *clause);
static bool pred_test_simple_clause(Expr *predicate, Node *clause);
static Relids indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index);
static Path *make_innerjoin_index_path(Query *root,
- RelOptInfo *rel, IndexOptInfo *index,
- List *clausegroups);
+ RelOptInfo *rel, IndexOptInfo *index,
+ List *clausegroups);
static bool match_index_to_operand(Node *operand, int indexcol,
RelOptInfo *rel, IndexOptInfo *index);
static bool match_special_index_operator(Expr *clause, Oid opclass,
@@ -87,7 +87,7 @@ static List *expand_indexqual_condition(Expr *clause, Oid opclass);
static List *prefix_quals(Node *leftop, Oid opclass,
Const *prefix, Pattern_Prefix_Status pstatus);
static List *network_prefix_quals(Node *leftop, Oid expr_op, Oid opclass,
- Datum rightop);
+ Datum rightop);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
@@ -114,7 +114,7 @@ static Const *string_to_const(const char *str, Oid datatype);
* scan this routine deems potentially interesting for the current query.
*
* We also determine the set of other relids that participate in join
- * clauses that could be used with each index. The actually best innerjoin
+ * clauses that could be used with each index. The actually best innerjoin
* path will be generated for each outer relation later on, but knowing the
* set of potential otherrels allows us to identify equivalent outer relations
* and avoid repeated computation.
@@ -219,10 +219,11 @@ create_index_paths(Query *root, RelOptInfo *rel)
/*
* 6. Examine join clauses to see which ones are potentially
- * usable with this index, and generate the set of all other relids
- * that participate in such join clauses. We'll use this set later
- * to recognize outer rels that are equivalent for joining purposes.
- * We compute both per-index and overall-for-relation sets.
+ * usable with this index, and generate the set of all other
+ * relids that participate in such join clauses. We'll use this
+ * set later to recognize outer rels that are equivalent for
+ * joining purposes. We compute both per-index and
+ * overall-for-relation sets.
*/
join_outerrelids = indexable_outerrelids(rel, index);
index->outer_relids = join_outerrelids;
@@ -274,7 +275,7 @@ match_index_orclauses(RelOptInfo *rel,
*/
restrictinfo->subclauseindices =
match_index_orclause(rel, index,
- ((BoolExpr *) restrictinfo->clause)->args,
+ ((BoolExpr *) restrictinfo->clause)->args,
restrictinfo->subclauseindices);
}
}
@@ -422,6 +423,7 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
Oid *classes = index->classlist;
FastListInit(&quals);
+
/*
* Extract relevant indexclauses in indexkey order. This is
* essentially just like group_clauses_by_indexkey() except that the
@@ -576,7 +578,7 @@ group_clauses_by_indexkey(RelOptInfo *rel, IndexOptInfo *index)
*
* This is much like group_clauses_by_indexkey(), but we consider both
* join and restriction clauses. Any joinclause that uses only otherrels
- * in the specified outer_relids is fair game. But there must be at least
+ * in the specified outer_relids is fair game. But there must be at least
* one such joinclause in the final list, otherwise we return NIL indicating
* that this index isn't interesting as an inner indexscan. (A scan using
* only restriction clauses shouldn't be created here, because a regular Path
@@ -641,10 +643,10 @@ group_clauses_by_indexkey_for_join(Query *root,
*/
if (FastListValue(&clausegroup) != NIL)
{
- List *nl;
+ List *nl;
nl = remove_redundant_join_clauses(root,
- FastListValue(&clausegroup),
+ FastListValue(&clausegroup),
jointype);
FastListFromList(&clausegroup, nl);
}
@@ -736,9 +738,9 @@ match_clause_to_indexcol(RelOptInfo *rel,
return false;
/*
- * Check for clauses of the form:
- * (indexkey operator constant) or (constant operator indexkey).
- * Anything that is a "pseudo constant" expression will do.
+ * Check for clauses of the form: (indexkey operator constant) or
+ * (constant operator indexkey). Anything that is a "pseudo constant"
+ * expression will do.
*/
if (match_index_to_operand(leftop, indexcol, rel, index) &&
is_pseudo_constant_clause(rightop))
@@ -747,8 +749,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
return true;
/*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, true))
return true;
@@ -762,8 +764,8 @@ match_clause_to_indexcol(RelOptInfo *rel,
return true;
/*
- * If we didn't find a member of the index's opclass, see
- * whether it is a "special" indexable operator.
+ * If we didn't find a member of the index's opclass, see whether
+ * it is a "special" indexable operator.
*/
if (match_special_index_operator(clause, opclass, false))
return true;
@@ -824,10 +826,10 @@ match_join_clause_to_indexcol(RelOptInfo *rel,
return false;
/*
- * Check for an indexqual that could be handled by a nestloop
- * join. We need the index key to be compared against an
- * expression that uses none of the indexed relation's vars and
- * contains no volatile functions.
+ * Check for an indexqual that could be handled by a nestloop join. We
+ * need the index key to be compared against an expression that uses
+ * none of the indexed relation's vars and contains no volatile
+ * functions.
*/
if (match_index_to_operand(leftop, indexcol, rel, index))
{
@@ -1174,10 +1176,11 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
* 1. Find "btree" strategy numbers for the pred_op and clause_op.
*
* We must find a btree opclass that contains both operators, else the
- * implication can't be determined. If there are multiple such opclasses,
- * assume we can use any one to determine the logical relationship of the
- * two operators and the correct corresponding test operator. This should
- * work for any logically consistent opclasses.
+ * implication can't be determined. If there are multiple such
+ * opclasses, assume we can use any one to determine the logical
+ * relationship of the two operators and the correct corresponding
+ * test operator. This should work for any logically consistent
+ * opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
@@ -1269,7 +1272,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull, NULL);
/* Get back to outer memory context */
@@ -1295,7 +1298,7 @@ pred_test_simple_clause(Expr *predicate, Node *clause)
/*
* indexable_outerrelids
* Finds all other relids that participate in any indexable join clause
- * for the specified index. Returns a set of relids.
+ * for the specified index. Returns a set of relids.
*
* 'rel' is the relation for which 'index' is defined
*/
@@ -1314,16 +1317,16 @@ indexable_outerrelids(RelOptInfo *rel, IndexOptInfo *index)
/*
* Examine each joinclause in the JoinInfo node's list to see if
* it matches any key of the index. If so, add the JoinInfo's
- * otherrels to the result. We can skip examining other joinclauses
- * in the same list as soon as we find a match (since by definition
- * they all have the same otherrels).
+ * otherrels to the result. We can skip examining other
+ * joinclauses in the same list as soon as we find a match (since
+ * by definition they all have the same otherrels).
*/
foreach(j, joininfo->jinfo_restrictinfo)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(j);
- Expr *clause = rinfo->clause;
- int indexcol = 0;
- Oid *classes = index->classlist;
+ Expr *clause = rinfo->clause;
+ int indexcol = 0;
+ Oid *classes = index->classlist;
do
{
@@ -1398,11 +1401,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
default:
return NULL;
}
+
/*
* If there are no indexable joinclauses for this rel, exit quickly.
*/
if (bms_is_empty(rel->index_outer_relids))
return NULL;
+
/*
* Otherwise, we have to do path selection in the memory context of
* the given rel, so that any created path can be safely attached to
@@ -1410,10 +1415,11 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
* issue for normal planning, but it is an issue for GEQO planning.)
*/
oldcontext = MemoryContextSwitchTo(GetMemoryChunkContext(rel));
+
/*
- * Intersect the given outer_relids with index_outer_relids
- * to find the set of outer relids actually relevant for this index.
- * If there are none, again we can fail immediately.
+ * Intersect the given outer_relids with index_outer_relids to find
+ * the set of outer relids actually relevant for this index. If there
+ * are none, again we can fail immediately.
*/
outer_relids = bms_intersect(rel->index_outer_relids, outer_relids);
if (bms_is_empty(outer_relids))
@@ -1422,11 +1428,13 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
MemoryContextSwitchTo(oldcontext);
return NULL;
}
+
/*
* Look to see if we already computed the result for this set of
- * relevant outerrels. (We include the isouterjoin status in the
+ * relevant outerrels. (We include the isouterjoin status in the
* cache lookup key for safety. In practice I suspect this is not
- * necessary because it should always be the same for a given innerrel.)
+ * necessary because it should always be the same for a given
+ * innerrel.)
*/
foreach(jlist, rel->index_inner_paths)
{
@@ -1441,15 +1449,15 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
}
/*
- * For each index of the rel, find the best path; then choose the
- * best overall. We cache the per-index results as well as the overall
- * result. (This is useful because different indexes may have different
- * relevant outerrel sets, so different overall outerrel sets might still
- * map to the same computation for a given index.)
+ * For each index of the rel, find the best path; then choose the best
+ * overall. We cache the per-index results as well as the overall
+ * result. (This is useful because different indexes may have
+ * different relevant outerrel sets, so different overall outerrel
+ * sets might still map to the same computation for a given index.)
*/
foreach(ilist, rel->indexlist)
{
- IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
+ IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
Relids index_outer_relids;
Path *path = NULL;
@@ -1461,6 +1469,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
bms_free(index_outer_relids);
continue;
}
+
/*
* Look to see if we already computed the result for this index.
*/
@@ -1471,7 +1480,7 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
info->isouterjoin == isouterjoin)
{
path = info->best_innerpath;
- bms_free(index_outer_relids); /* not needed anymore */
+ bms_free(index_outer_relids); /* not needed anymore */
break;
}
}
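
The surrounding hunks describe two pruning steps in best_inner_indexscan(): intersect the caller's outer_relids with the relids actually usable by the index, bail out if the result is empty, and otherwise look the result up in a per-index cache keyed by (relevant outer relids, isouterjoin). A standalone sketch of that flow, with relid sets squeezed into a plain 64-bit mask instead of the planner's Bitmapset, and a hypothetical IndexInnerCache struct in place of the real cached-path bookkeeping:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RelidMask;		/* illustrative stand-in for a Bitmapset */

/* Hypothetical cache entry: one remembered result per (relid set, join flag). */
typedef struct IndexInnerCache
{
	RelidMask	other_relids;	/* outer relids the cached path was built for */
	int			isouterjoin;	/* included in the key for safety */
	const char *best_innerpath; /* stand-in for the cached Path pointer */
} IndexInnerCache;

static const char *
lookup_or_compute(IndexInnerCache *cache, int ncached,
				  RelidMask index_outer_relids, RelidMask outer_relids,
				  int isouterjoin)
{
	RelidMask	relevant = index_outer_relids & outer_relids;	/* "bms_intersect" */
	int			i;

	if (relevant == 0)
		return NULL;			/* nothing relevant for this index: fail immediately */

	for (i = 0; i < ncached; i++)
	{
		if (cache[i].other_relids == relevant &&
			cache[i].isouterjoin == isouterjoin)
			return cache[i].best_innerpath;		/* cache hit */
	}
	return "compute new inner path here";		/* cache miss */
}

int
main(void)
{
	IndexInnerCache cache[] = {{0x3, 0, "cached path for rels {1,2}"}};

	printf("%s\n", lookup_or_compute(cache, 1, 0x7, 0x3, 0));	/* hit */
	printf("%s\n", lookup_or_compute(cache, 1, 0x7, 0x4, 0));	/* miss */
	return 0;
}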
@@ -1484,9 +1493,9 @@ best_inner_indexscan(Query *root, RelOptInfo *rel,
clausegroups = group_clauses_by_indexkey_for_join(root,
rel,
index,
- index_outer_relids,
+ index_outer_relids,
jointype,
- isouterjoin);
+ isouterjoin);
if (clausegroups)
{
/* make the path */
@@ -1548,9 +1557,9 @@ make_innerjoin_index_path(Query *root,
pathnode->path.parent = rel;
/*
- * There's no point in marking the path with any pathkeys, since
- * it will only ever be used as the inner path of a nestloop, and
- * so its ordering does not matter.
+ * There's no point in marking the path with any pathkeys, since it
+ * will only ever be used as the inner path of a nestloop, and so its
+ * ordering does not matter.
*/
pathnode->path.pathkeys = NIL;
@@ -1582,19 +1591,19 @@ make_innerjoin_index_path(Query *root,
/*
* We must compute the estimated number of output rows for the
- * indexscan. This is less than rel->rows because of the
- * additional selectivity of the join clauses. Since clausegroups
- * may contain both restriction and join clauses, we have to do a
- * set union to get the full set of clauses that must be
- * considered to compute the correct selectivity. (Without the union
- * operation, we might have some restriction clauses appearing twice,
- * which'd mislead restrictlist_selectivity into double-counting their
- * selectivity. However, since RestrictInfo nodes aren't copied when
- * linking them into different lists, it should be sufficient to use
- * pointer comparison to remove duplicates.)
+ * indexscan. This is less than rel->rows because of the additional
+ * selectivity of the join clauses. Since clausegroups may contain
+ * both restriction and join clauses, we have to do a set union to get
+ * the full set of clauses that must be considered to compute the
+ * correct selectivity. (Without the union operation, we might have
+ * some restriction clauses appearing twice, which'd mislead
+ * restrictlist_selectivity into double-counting their selectivity.
+ * However, since RestrictInfo nodes aren't copied when linking them
+ * into different lists, it should be sufficient to use pointer
+ * comparison to remove duplicates.)
*
- * Always assume the join type is JOIN_INNER; even if some of the
- * join clauses come from other contexts, that's not our problem.
+ * Always assume the join type is JOIN_INNER; even if some of the join
+ * clauses come from other contexts, that's not our problem.
*/
allclauses = set_ptrUnion(rel->baserestrictinfo, allclauses);
pathnode->rows = rel->tuples *
@@ -1656,9 +1665,9 @@ match_index_to_operand(Node *operand,
else
{
/*
- * Index expression; find the correct expression. (This search could
- * be avoided, at the cost of complicating all the callers of this
- * routine; doesn't seem worth it.)
+ * Index expression; find the correct expression. (This search
+ * could be avoided, at the cost of complicating all the callers
+ * of this routine; doesn't seem worth it.)
*/
List *indexprs;
int i;
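
The make_innerjoin_index_path() comment a few hunks above notes that because RestrictInfo nodes are linked, not copied, into different lists, a union that drops duplicates can compare pointers rather than clause contents. A standalone sketch of that union-by-pointer-identity idea, with a hypothetical Cell list type rather than the planner's List/set_ptrUnion():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical singly linked cell, standing in for a List of RestrictInfos. */
typedef struct Cell
{
	void	   *ptr;
	struct Cell *next;
} Cell;

static Cell *
cons(void *ptr, Cell *tail)
{
	Cell	   *c = malloc(sizeof(Cell));

	c->ptr = ptr;
	c->next = tail;
	return c;
}

static bool
ptr_member(const void *ptr, const Cell *list)
{
	for (; list; list = list->next)
		if (list->ptr == ptr)	/* identity, not structural equality */
			return true;
	return false;
}

/*
 * Union two lists by pointer identity: the same node object linked into both
 * lists is recognized by its address and kept only once.
 */
static Cell *
ptr_union(Cell *a, Cell *b)
{
	Cell	   *result = a;

	for (; b; b = b->next)
		if (!ptr_member(b->ptr, result))
			result = cons(b->ptr, result);
	return result;
}

int
main(void)
{
	int			x, y, z;
	Cell	   *restrict_clauses = cons(&x, cons(&y, NULL));
	Cell	   *join_clauses = cons(&y, cons(&z, NULL));	/* &y is shared */
	Cell	   *all = ptr_union(restrict_clauses, join_clauses);
	int			n = 0;

	for (Cell *c = all; c; c = c->next)
		n++;
	printf("%d\n", n);			/* 3, not 4: the shared node is counted once */
	return 0;
}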
@@ -1677,6 +1686,7 @@ match_index_to_operand(Node *operand,
if (indexprs == NIL)
elog(ERROR, "wrong number of index expressions");
indexkey = (Node *) lfirst(indexprs);
+
/*
* Does it match the operand? Again, strip any relabeling.
*/
@@ -1776,12 +1786,12 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_LIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_BYTEA_LIKE_OP:
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICLIKE_OP:
@@ -1789,7 +1799,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICLIKE_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_REGEXEQ_OP:
@@ -1797,7 +1807,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_REGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_TEXT_ICREGEXEQ_OP:
@@ -1805,7 +1815,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
case OID_NAME_ICREGEXEQ_OP:
/* the right-hand const is type text for all of these */
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
break;
case OID_INET_SUB_OP:
@@ -1831,9 +1841,9 @@ match_special_index_operator(Expr *clause, Oid opclass,
* want to apply. (A hash index, for example, will not support ">=".)
* Currently, only btree supports the operators we need.
*
- * We insist on the opclass being the specific one we expect,
- * else we'd do the wrong thing if someone were to make a reverse-sort
- * opclass with the same operators.
+ * We insist on the opclass being the specific one we expect, else we'd
+ * do the wrong thing if someone were to make a reverse-sort opclass
+ * with the same operators.
*/
switch (expr_op)
{
@@ -1896,7 +1906,7 @@ match_special_index_operator(Expr *clause, Oid opclass,
* The input list is ordered by index key, and so the output list is too.
* (The latter is not depended on by any part of the planner, so far as I can
* tell; but some parts of the executor do assume that the indxqual list
- * ultimately delivered to the executor is so ordered. One such place is
+ * ultimately delivered to the executor is so ordered. One such place is
* _bt_orderkeys() in the btree support. Perhaps that ought to be fixed
* someday --- tgl 7/00)
*/
@@ -1930,7 +1940,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
} while (clausegroups != NIL && !DoneMatchingIndexKeys(classes));
- Assert(clausegroups == NIL); /* else more groups than indexkeys... */
+ Assert(clausegroups == NIL); /* else more groups than indexkeys... */
return FastListValue(&resultquals);
}
@@ -1953,11 +1963,12 @@ expand_indexqual_condition(Expr *clause, Oid opclass)
switch (expr_op)
{
- /*
- * LIKE and regex operators are not members of any index
- * opclass, so if we find one in an indexqual list we can
- * assume that it was accepted by match_special_index_operator().
- */
+ /*
+ * LIKE and regex operators are not members of any index
+ * opclass, so if we find one in an indexqual list we can
+ * assume that it was accepted by
+ * match_special_index_operator().
+ */
case OID_TEXT_LIKE_OP:
case OID_BPCHAR_LIKE_OP:
case OID_NAME_LIKE_OP:
@@ -2061,22 +2072,22 @@ prefix_quals(Node *leftop, Oid opclass,
}
/*
- * If necessary, coerce the prefix constant to the right type.
- * The given prefix constant is either text or bytea type.
+ * If necessary, coerce the prefix constant to the right type. The
+ * given prefix constant is either text or bytea type.
*/
if (prefix_const->consttype != datatype)
{
- char *prefix;
+ char *prefix;
switch (prefix_const->consttype)
{
case TEXTOID:
prefix = DatumGetCString(DirectFunctionCall1(textout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
case BYTEAOID:
prefix = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix_const->constvalue));
+ prefix_const->constvalue));
break;
default:
elog(ERROR, "unexpected const type: %u",
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index cf7c4ee4331..695b8c98411 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.79 2003/07/25 00:01:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.80 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -300,7 +300,7 @@ sort_inner_and_outer(Query *root,
* We always generate a nestloop path for each available outer path.
* In fact we may generate as many as four: one on the cheapest-total-cost
* inner path, one on the same with materialization, one on the
- * cheapest-startup-cost inner path (if different),
+ * cheapest-startup-cost inner path (if different),
* and one on the best inner-indexscan path (if any).
*
* We also consider mergejoins if mergejoin clauses are available. We have
@@ -342,10 +342,10 @@ match_unsorted_outer(Query *root,
/*
* Nestloop only supports inner, left, and IN joins. Also, if we are
- * doing a right or full join, we must use *all* the mergeclauses as join
- * clauses, else we will not have a valid plan. (Although these two
- * flags are currently inverses, keep them separate for clarity and
- * possible future changes.)
+ * doing a right or full join, we must use *all* the mergeclauses as
+ * join clauses, else we will not have a valid plan. (Although these
+ * two flags are currently inverses, keep them separate for clarity
+ * and possible future changes.)
*/
switch (jointype)
{
@@ -371,8 +371,8 @@ match_unsorted_outer(Query *root,
}
/*
- * If we need to unique-ify the inner path, we will consider only
- * the cheapest inner.
+ * If we need to unique-ify the inner path, we will consider only the
+ * cheapest inner.
*/
if (jointype == JOIN_UNIQUE_INNER)
{
@@ -384,9 +384,10 @@ match_unsorted_outer(Query *root,
else if (nestjoinOK)
{
/*
- * If the cheapest inner path is a join or seqscan, we should consider
- * materializing it. (This is a heuristic: we could consider it
- * always, but for inner indexscans it's probably a waste of time.)
+ * If the cheapest inner path is a join or seqscan, we should
+ * consider materializing it. (This is a heuristic: we could
+ * consider it always, but for inner indexscans it's probably a
+ * waste of time.)
*/
if (!(IsA(inner_cheapest_total, IndexPath) ||
IsA(inner_cheapest_total, TidPath)))
@@ -394,8 +395,8 @@ match_unsorted_outer(Query *root,
create_material_path(innerrel, inner_cheapest_total);
/*
- * Get the best innerjoin indexpath (if any) for this outer rel. It's
- * the same for all outer paths.
+ * Get the best innerjoin indexpath (if any) for this outer rel.
+ * It's the same for all outer paths.
*/
bestinnerjoin = best_inner_indexscan(root, innerrel,
outerrel->relids, jointype);
@@ -414,8 +415,8 @@ match_unsorted_outer(Query *root,
int sortkeycnt;
/*
- * If we need to unique-ify the outer path, it's pointless to consider
- * any but the cheapest outer.
+ * If we need to unique-ify the outer path, it's pointless to
+ * consider any but the cheapest outer.
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -709,7 +710,7 @@ hash_inner_and_outer(Query *root,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
@@ -727,9 +728,9 @@ hash_inner_and_outer(Query *root,
* cheapest-startup-cost outer paths. There's no need to consider
* any but the cheapest-total-cost inner path, however.
*/
- Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
- Path *cheapest_total_outer = outerrel->cheapest_total_path;
- Path *cheapest_total_inner = innerrel->cheapest_total_path;
+ Path *cheapest_startup_outer = outerrel->cheapest_startup_path;
+ Path *cheapest_total_outer = outerrel->cheapest_total_path;
+ Path *cheapest_total_inner = innerrel->cheapest_total_path;
/* Unique-ify if need be */
if (jointype == JOIN_UNIQUE_OUTER)
@@ -840,7 +841,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/* righthand side is inner */
}
else if (bms_is_subset(restrictinfo->left_relids, innerrel->relids) &&
- bms_is_subset(restrictinfo->right_relids, outerrel->relids))
+ bms_is_subset(restrictinfo->right_relids, outerrel->relids))
{
/* lefthand side is inner */
}
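
The two hunks in this file that test bms_is_subset() on left_relids/right_relids decide which side of a candidate merge or hash clause belongs to the inner input. A standalone sketch of just that classification, ignoring the operator checks the real code also performs, with relid sets as 64-bit masks:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RelidMask;		/* stand-in for the planner's Bitmapsets */

static int
is_subset(RelidMask a, RelidMask b)
{
	return (a & ~b) == 0;		/* every member of a is also in b */
}

/*
 * A binary join clause is usable only if each of its sides refers
 * exclusively to one of the two inputs.
 */
static const char *
classify_join_clause(RelidMask left_relids, RelidMask right_relids,
					 RelidMask outerrel, RelidMask innerrel)
{
	if (is_subset(left_relids, outerrel) && is_subset(right_relids, innerrel))
		return "right-hand side is inner";
	if (is_subset(left_relids, innerrel) && is_subset(right_relids, outerrel))
		return "left-hand side is inner (clause gets commuted later)";
	return "clause straddles the two inputs; not usable here";
}

int
main(void)
{
	/* outer = {1,2}, inner = {3}; clause has rel 2 on the left, rel 3 on the right */
	printf("%s\n", classify_join_clause(0x2, 0x4, 0x3, 0x4));
	/* clause references rels 1 and 3 on its left side: straddles both inputs */
	printf("%s\n", classify_join_clause(0x5, 0x4, 0x3, 0x4));
	return 0;
}

The "commuted later" note corresponds to the get_switched_clauses() step in createplan.c, which rearranges clauses so the outer variable ends up on the left.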
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 023bc397840..81e5080e4b7 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.61 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.62 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,11 +19,11 @@
static List *make_rels_by_clause_joins(Query *root,
- RelOptInfo *old_rel,
- List *other_rels);
+ RelOptInfo *old_rel,
+ List *other_rels);
static List *make_rels_by_clauseless_joins(Query *root,
- RelOptInfo *old_rel,
- List *other_rels);
+ RelOptInfo *old_rel,
+ List *other_rels);
/*
@@ -417,8 +417,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
/*
* If we are implementing IN clauses as joins, there are some joins
- * that are illegal. Check to see if the proposed join is trouble.
- * We can skip the work if looking at an outer join, however, because
+ * that are illegal. Check to see if the proposed join is trouble. We
+ * can skip the work if looking at an outer join, however, because
* only top-level joins might be affected.
*/
if (jointype == JOIN_INNER)
@@ -430,8 +430,8 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);
/*
- * Cannot join if proposed join contains part, but only
- * part, of the RHS, *and* it contains rels not in the RHS.
+ * Cannot join if proposed join contains part, but only part,
+ * of the RHS, *and* it contains rels not in the RHS.
*/
if (bms_overlap(ininfo->righthand, joinrelids) &&
!bms_is_subset(ininfo->righthand, joinrelids) &&
@@ -442,16 +442,17 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
/*
- * No issue unless we are looking at a join of the IN's RHS
- * to other stuff.
+ * No issue unless we are looking at a join of the IN's RHS to
+ * other stuff.
*/
- if (! (bms_is_subset(ininfo->righthand, joinrelids) &&
- !bms_equal(ininfo->righthand, joinrelids)))
+ if (!(bms_is_subset(ininfo->righthand, joinrelids) &&
+ !bms_equal(ininfo->righthand, joinrelids)))
continue;
+
/*
- * If we already joined IN's RHS to any part of its LHS in either
- * input path, then this join is not constrained (the necessary
- * work was done at a lower level).
+ * If we already joined IN's RHS to any part of its LHS in
+ * either input path, then this join is not constrained (the
+ * necessary work was done at a lower level).
*/
if (bms_overlap(ininfo->lefthand, rel1->relids) &&
bms_is_subset(ininfo->righthand, rel1->relids))
@@ -459,6 +460,7 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
if (bms_overlap(ininfo->lefthand, rel2->relids) &&
bms_is_subset(ininfo->righthand, rel2->relids))
continue;
+
/*
* JOIN_IN technique will work if outerrel includes LHS and
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
@@ -478,22 +480,14 @@ make_join_rel(Query *root, RelOptInfo *rel1, RelOptInfo *rel2,
}
if (bms_is_subset(ininfo->lefthand, rel1->relids) &&
bms_equal(ininfo->righthand, rel2->relids))
- {
jointype = JOIN_IN;
- }
else if (bms_is_subset(ininfo->lefthand, rel2->relids) &&
bms_equal(ininfo->righthand, rel1->relids))
- {
jointype = JOIN_REVERSE_IN;
- }
else if (bms_equal(ininfo->righthand, rel1->relids))
- {
jointype = JOIN_UNIQUE_OUTER;
- }
else if (bms_equal(ininfo->righthand, rel2->relids))
- {
jointype = JOIN_UNIQUE_INNER;
- }
else
{
/* invalid join path */
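
The if/else chain above picks a join technique for an IN clause from how the proposed inputs relate to the IN's LHS and RHS relid sets. A standalone sketch of that selection, ignoring the earlier overlap tests that let the loop continue, with relid sets as 64-bit masks and strings in place of the JoinType enum:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t RelidMask;		/* stand-in for a Bitmapset of relids */

static int
is_subset(RelidMask a, RelidMask b)
{
	return (a & ~b) == 0;
}

/*
 * Mirror of the jointype selection shown above, for an IN clause with the
 * given lefthand/righthand relid sets when joining rel1 to rel2.
 */
static const char *
choose_in_jointype(RelidMask lefthand, RelidMask righthand,
				   RelidMask rel1, RelidMask rel2)
{
	if (is_subset(lefthand, rel1) && righthand == rel2)
		return "JOIN_IN";
	if (is_subset(lefthand, rel2) && righthand == rel1)
		return "JOIN_REVERSE_IN";
	if (righthand == rel1)
		return "JOIN_UNIQUE_OUTER";
	if (righthand == rel2)
		return "JOIN_UNIQUE_INNER";
	return "invalid join path";
}

int
main(void)
{
	/* IN clause: LHS uses rel 1, RHS is exactly rel 4 */
	printf("%s\n", choose_in_jointype(0x1, 0x8, 0x3, 0x8));	/* JOIN_IN */
	printf("%s\n", choose_in_jointype(0x1, 0x8, 0x8, 0x3));	/* JOIN_REVERSE_IN */
	return 0;
}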
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index a078b3f5a93..40d2de41417 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.51 2003/06/15 22:51:45 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,7 +99,7 @@ create_or_index_paths(Query *root, RelOptInfo *rel)
best_or_subclause_indices(root,
rel,
- ((BoolExpr *) restrictinfo->clause)->args,
+ ((BoolExpr *) restrictinfo->clause)->args,
restrictinfo->subclauseindices,
pathnode);
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 9fec73e2603..beb51a69966 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.51 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,8 +198,8 @@ generate_implied_equalities(Query *root)
/*
* Collect info about relids mentioned in each item. For this
* routine we only really care whether there are any at all in
- * each item, but process_implied_equality() needs the exact
- * sets, so we may as well pull them here.
+ * each item, but process_implied_equality() needs the exact sets,
+ * so we may as well pull them here.
*/
relids = (Relids *) palloc(nitems * sizeof(Relids));
have_consts = false;
@@ -233,8 +233,8 @@ generate_implied_equalities(Query *root)
/*
* If it's "const = const" then just ignore it altogether.
- * There is no place in the restrictinfo structure to store
- * it. (If the two consts are in fact unequal, then
+ * There is no place in the restrictinfo structure to
+ * store it. (If the two consts are in fact unequal, then
* propagating the comparison to Vars will cause us to
* produce zero rows out, as expected.)
*/
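
The comment above, together with the delete_it assignment in the next hunk, amounts to a small decision table for each generated equality: ignore "const = const", delete "var = var" when the equivalence set also contains constants, otherwise add the clause. A standalone sketch of just that decision, leaving out the clause construction that process_implied_equality() does:

#include <stdbool.h>
#include <stdio.h>

typedef enum { EQ_ADD, EQ_DELETE, EQ_IGNORE } EqAction;

static EqAction
implied_equality_action(bool item1_is_var, bool item2_is_var, bool have_consts)
{
	if (!item1_is_var && !item2_is_var)
		return EQ_IGNORE;		/* const = const: nowhere to store it */
	if (have_consts && item1_is_var && item2_is_var)
		return EQ_DELETE;		/* var = var is redundant once var = const exists */
	return EQ_ADD;
}

int
main(void)
{
	printf("%d %d %d\n",
		   implied_equality_action(true, true, true),	/* EQ_DELETE (1) */
		   implied_equality_action(true, false, true),	/* EQ_ADD (0) */
		   implied_equality_action(false, false, false));	/* EQ_IGNORE (2) */
	return 0;
}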
@@ -242,12 +242,12 @@ generate_implied_equalities(Query *root)
{
/*
* Tell process_implied_equality to delete the clause,
- * not add it, if it's "var = var" and we have constants
- * present in the list.
+ * not add it, if it's "var = var" and we have
+ * constants present in the list.
*/
- bool delete_it = (have_consts &&
- i1_is_variable &&
- i2_is_variable);
+ bool delete_it = (have_consts &&
+ i1_is_variable &&
+ i2_is_variable);
process_implied_equality(root,
item1->key, item2->key,
@@ -751,20 +751,21 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
* element might match none, one, or more of the output columns
* that are visible to the outer query. This means we may have
* multiple possible representations of the sub_pathkey in the
- * context of the outer query. Ideally we would generate them all
- * and put them all into a pathkey list of the outer query, thereby
- * propagating equality knowledge up to the outer query. Right now
- * we cannot do so, because the outer query's canonical pathkey
- * sets are already frozen when this is called. Instead we prefer
- * the one that has the highest "score" (number of canonical pathkey
- * peers, plus one if it matches the outer query_pathkeys).
- * This is the most likely to be useful in the outer query.
+ * context of the outer query. Ideally we would generate them all
+ * and put them all into a pathkey list of the outer query,
+ * thereby propagating equality knowledge up to the outer query.
+ * Right now we cannot do so, because the outer query's canonical
+ * pathkey sets are already frozen when this is called. Instead
+ * we prefer the one that has the highest "score" (number of
+ * canonical pathkey peers, plus one if it matches the outer
+ * query_pathkeys). This is the most likely to be useful in the
+ * outer query.
*/
foreach(j, sub_pathkey)
{
PathKeyItem *sub_item = (PathKeyItem *) lfirst(j);
- Node *sub_key = sub_item->key;
- List *k;
+ Node *sub_key = sub_item->key;
+ List *k;
foreach(k, subquery->targetList)
{
@@ -774,9 +775,9 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
equal(tle->expr, sub_key))
{
/* Found a representation for this sub_key */
- Var *outer_var;
+ Var *outer_var;
PathKeyItem *outer_item;
- int score;
+ int score;
outer_var = makeVar(rel->relid,
tle->resdom->resno,
@@ -802,8 +803,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
}
/*
- * If we couldn't find a representation of this sub_pathkey,
- * we're done (we can't use the ones to its right, either).
+ * If we couldn't find a representation of this sub_pathkey, we're
+ * done (we can't use the ones to its right, either).
*/
if (!best_item)
break;
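
The comment in the hunks above describes the heuristic for choosing among multiple outer-query representations of one sub_pathkey element: score = number of canonical pathkey peers, plus one if it matches the outer query_pathkeys, and if no representation exists the loop stops. A standalone sketch with a hypothetical Candidate struct in place of the targetlist/PathKeyItem machinery:

#include <stdio.h>

typedef struct Candidate
{
	const char *name;
	int			npeers;					/* canonical pathkey peers */
	int			matches_query_pathkeys;	/* 0 or 1 */
} Candidate;

static const Candidate *
pick_best_representation(const Candidate *cands, int ncands)
{
	const Candidate *best = NULL;
	int			best_score = -1;
	int			i;

	for (i = 0; i < ncands; i++)
	{
		int			score = cands[i].npeers +
							(cands[i].matches_query_pathkeys ? 1 : 0);

		if (score > best_score)
		{
			best = &cands[i];
			best_score = score;
		}
	}
	return best;				/* NULL when ncands == 0: caller stops here */
}

int
main(void)
{
	Candidate	cands[] = {
		{"t.a", 1, 0},
		{"t.b", 1, 1},			/* same peer count, but matches query_pathkeys */
	};

	printf("%s\n", pick_best_representation(cands, 2)->name);	/* t.b */
	return 0;
}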
@@ -812,8 +813,8 @@ build_subquery_pathkeys(Query *root, RelOptInfo *rel, Query *subquery)
cpathkey = make_canonical_pathkey(root, best_item);
/*
- * Eliminate redundant ordering info; could happen if outer
- * query equijoins subquery keys...
+ * Eliminate redundant ordering info; could happen if outer query
+ * equijoins subquery keys...
*/
if (!ptrMember(cpathkey, retval))
{
@@ -920,7 +921,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
* many times when dealing with a many-relation query.
*
* We have to be careful that the cached values are palloc'd in the same
- * context the RestrictInfo node itself is in. This is not currently a
+ * context the RestrictInfo node itself is in. This is not currently a
* problem for normal planning, but it is an issue for GEQO planning.
*/
void
@@ -1090,7 +1091,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
elog(ERROR, "could not identify which side of mergeclause to use");
- pathkey = NIL; /* keep compiler quiet */
+ pathkey = NIL; /* keep compiler quiet */
}
/*
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 761f03b967c..60093ec5e3d 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.14 2003/02/08 20:20:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/tidpath.c,v 1.15 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -27,7 +27,7 @@
static List *TidqualFromRestrictinfo(Relids relids, List *restrictinfo);
static bool isEvaluable(int varno, Node *node);
-static Node *TidequalClause(int varno, OpExpr *node);
+static Node *TidequalClause(int varno, OpExpr * node);
static List *TidqualFromExpr(int varno, Expr *expr);
static bool
@@ -66,7 +66,7 @@ isEvaluable(int varno, Node *node)
* or the left node if the opclause is ....=CTID
*/
static Node *
-TidequalClause(int varno, OpExpr *node)
+TidequalClause(int varno, OpExpr * node)
{
Node *rnode = NULL,
*arg1,
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 9ac746d34a9..e4e7490d82a 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.149 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.150 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,9 +40,9 @@ static bool use_physical_tlist(RelOptInfo *rel);
static void disuse_physical_tlist(Plan *plan, Path *path);
static Join *create_join_plan(Query *root, JoinPath *best_path);
static Append *create_append_plan(Query *root, AppendPath *best_path);
-static Result *create_result_plan(Query *root, ResultPath *best_path);
-static Material *create_material_plan(Query *root, MaterialPath *best_path);
-static Plan *create_unique_plan(Query *root, UniquePath *best_path);
+static Result *create_result_plan(Query *root, ResultPath * best_path);
+static Material *create_material_plan(Query *root, MaterialPath * best_path);
+static Plan *create_unique_plan(Query *root, UniquePath * best_path);
static SeqScan *create_seqscan_plan(Path *best_path, List *tlist,
List *scan_clauses);
static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
@@ -63,9 +63,9 @@ static void fix_indxqual_references(List *indexquals, IndexPath *index_path,
List **fixed_indexquals,
List **recheck_indexquals);
static void fix_indxqual_sublist(List *indexqual,
- Relids baserelids, int baserelid,
- IndexOptInfo *index,
- List **fixed_quals, List **recheck_quals);
+ Relids baserelids, int baserelid,
+ IndexOptInfo *index,
+ List **fixed_quals, List **recheck_quals);
static Node *fix_indxqual_operand(Node *node, int baserelid,
IndexOptInfo *index,
Oid *opclass);
@@ -98,9 +98,9 @@ static MergeJoin *make_mergejoin(List *tlist,
Plan *lefttree, Plan *righttree,
JoinType jointype);
static Sort *make_sort(Query *root, List *tlist, Plan *lefttree, int numCols,
- AttrNumber *sortColIdx, Oid *sortOperators);
+ AttrNumber *sortColIdx, Oid *sortOperators);
static Sort *make_sort_from_pathkeys(Query *root, Plan *lefttree,
- Relids relids, List *pathkeys);
+ Relids relids, List *pathkeys);
/*
@@ -148,7 +148,7 @@ create_plan(Query *root, Path *best_path)
break;
case T_Material:
plan = (Plan *) create_material_plan(root,
- (MaterialPath *) best_path);
+ (MaterialPath *) best_path);
break;
case T_Unique:
plan = (Plan *) create_unique_plan(root,
@@ -192,12 +192,12 @@ create_scan_plan(Query *root, Path *best_path)
Scan *plan;
/*
- * For table scans, rather than using the relation targetlist (which is
- * only those Vars actually needed by the query), we prefer to generate a
- * tlist containing all Vars in order. This will allow the executor to
- * optimize away projection of the table tuples, if possible. (Note that
- * planner.c may replace the tlist we generate here, forcing projection to
- * occur.)
+ * For table scans, rather than using the relation targetlist (which
+ * is only those Vars actually needed by the query), we prefer to
+ * generate a tlist containing all Vars in order. This will allow the
+ * executor to optimize away projection of the table tuples, if
+ * possible. (Note that planner.c may replace the tlist we generate
+ * here, forcing projection to occur.)
*/
if (use_physical_tlist(rel))
{
@@ -274,8 +274,8 @@ build_relation_tlist(RelOptInfo *rel)
FastListInit(&tlist);
foreach(v, FastListValue(&rel->reltargetlist))
{
- /* Do we really need to copy here? Not sure */
- Var *var = (Var *) copyObject(lfirst(v));
+ /* Do we really need to copy here? Not sure */
+ Var *var = (Var *) copyObject(lfirst(v));
FastAppend(&tlist, create_tl_element(var, resdomno));
resdomno++;
@@ -294,22 +294,24 @@ use_physical_tlist(RelOptInfo *rel)
int i;
/*
- * Currently, can't do this for subquery or function scans. (This
- * is mainly because we don't have an equivalent of build_physical_tlist
+ * Currently, can't do this for subquery or function scans. (This is
+ * mainly because we don't have an equivalent of build_physical_tlist
* for them; worth adding?)
*/
if (rel->rtekind != RTE_RELATION)
return false;
+
/*
* Can't do it with inheritance cases either (mainly because Append
* doesn't project).
*/
if (rel->reloptkind != RELOPT_BASEREL)
return false;
+
/*
- * Can't do it if any system columns are requested, either. (This could
- * possibly be fixed but would take some fragile assumptions in setrefs.c,
- * I think.)
+ * Can't do it if any system columns are requested, either. (This
+ * could possibly be fixed but would take some fragile assumptions in
+ * setrefs.c, I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
{
@@ -325,7 +327,7 @@ use_physical_tlist(RelOptInfo *rel)
*
* If the plan node immediately above a scan would prefer to get only
* needed Vars and not a physical tlist, it must call this routine to
- * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
+ * undo the decision made by use_physical_tlist(). Currently, Hash, Sort,
* and Material nodes want this, so they don't have to store useless columns.
*/
static void
@@ -441,7 +443,7 @@ create_append_plan(Query *root, AppendPath *best_path)
* Returns a Plan node.
*/
static Result *
-create_result_plan(Query *root, ResultPath *best_path)
+create_result_plan(Query *root, ResultPath * best_path)
{
Result *plan;
List *tlist;
@@ -473,7 +475,7 @@ create_result_plan(Query *root, ResultPath *best_path)
* Returns a Plan node.
*/
static Material *
-create_material_plan(Query *root, MaterialPath *best_path)
+create_material_plan(Query *root, MaterialPath * best_path)
{
Material *plan;
Plan *subplan;
@@ -498,7 +500,7 @@ create_material_plan(Query *root, MaterialPath *best_path)
* Returns a Plan node.
*/
static Plan *
-create_unique_plan(Query *root, UniquePath *best_path)
+create_unique_plan(Query *root, UniquePath * best_path)
{
Plan *plan;
Plan *subplan;
@@ -509,9 +511,9 @@ create_unique_plan(Query *root, UniquePath *best_path)
subplan = create_plan(root, best_path->subpath);
/*
- * If the subplan came from an IN subselect (currently always the case),
- * we need to instantiate the correct output targetlist for the subselect,
- * rather than using the flattened tlist.
+ * If the subplan came from an IN subselect (currently always the
+ * case), we need to instantiate the correct output targetlist for the
+ * subselect, rather than using the flattened tlist.
*/
sub_targetlist = NIL;
foreach(l, root->in_info_list)
@@ -530,8 +532,8 @@ create_unique_plan(Query *root, UniquePath *best_path)
/*
* Transform list of plain Vars into targetlist
*/
- List *newtlist = NIL;
- int resno = 1;
+ List *newtlist = NIL;
+ int resno = 1;
foreach(l, sub_targetlist)
{
@@ -547,12 +549,13 @@ create_unique_plan(Query *root, UniquePath *best_path)
newtlist = lappend(newtlist, tle);
resno++;
}
+
/*
* If the top plan node can't do projections, we need to add a
* Result node to help it along.
*
- * Currently, the only non-projection-capable plan type
- * we can see here is Append.
+ * Currently, the only non-projection-capable plan type we can see
+ * here is Append.
*/
if (IsA(subplan, Append))
subplan = (Plan *) make_result(newtlist, NULL, subplan);
@@ -564,16 +567,16 @@ create_unique_plan(Query *root, UniquePath *best_path)
if (best_path->use_hash)
{
- int numGroupCols = length(my_tlist);
- long numGroups;
+ int numGroupCols = length(my_tlist);
+ long numGroups;
AttrNumber *groupColIdx;
- int i;
+ int i;
numGroups = (long) Min(best_path->rows, (double) LONG_MAX);
groupColIdx = (AttrNumber *) palloc(numGroupCols * sizeof(AttrNumber));
for (i = 0; i < numGroupCols; i++)
- groupColIdx[i] = i+1;
+ groupColIdx[i] = i + 1;
plan = (Plan *) make_agg(root,
my_tlist,
@@ -700,9 +703,7 @@ create_indexscan_plan(Query *root,
FastListInit(&orclauses);
foreach(orclause, indxqual)
- {
FastAppend(&orclauses, make_ands_explicit(lfirst(orclause)));
- }
indxqual_or_expr = make_orclause(FastListValue(&orclauses));
qpqual = set_difference(scan_clauses, makeList1(indxqual_or_expr));
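
The hunk above rebuilds an explicit OR expression from the per-arm indexqual clause groups so it can be subtracted from scan_clauses. A toy string-based sketch of that OR-of-ANDs reconstruction, standing in for make_ands_explicit()/make_orclause() without any real expression trees:

#include <stdio.h>
#include <string.h>

/* Each group is a NULL-terminated array of clause strings; groups are OR arms. */
static void
make_or_of_ands(const char *groups[][3], int ngroups, char *out, size_t outsize)
{
	int			g;

	out[0] = '\0';
	for (g = 0; g < ngroups; g++)
	{
		int			c;

		if (g > 0)
			strncat(out, " OR ", outsize - strlen(out) - 1);
		strncat(out, "(", outsize - strlen(out) - 1);
		for (c = 0; groups[g][c] != NULL; c++)
		{
			if (c > 0)
				strncat(out, " AND ", outsize - strlen(out) - 1);
			strncat(out, groups[g][c], outsize - strlen(out) - 1);
		}
		strncat(out, ")", outsize - strlen(out) - 1);
	}
}

int
main(void)
{
	const char *groups[][3] = {
		{"a = 1", "b > 2", NULL},	/* first OR arm: two ANDed clauses */
		{"a = 5", NULL, NULL},		/* second OR arm: a single clause */
	};
	char		buf[128];

	make_or_of_ands(groups, 2, buf, sizeof(buf));
	printf("%s\n", buf);		/* (a = 1 AND b > 2) OR (a = 5) */
	return 0;
}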
@@ -861,9 +862,9 @@ create_nestloop_plan(Query *root,
/*
* An index is being used to reduce the number of tuples scanned
* in the inner relation. If there are join clauses being used
- * with the index, we may remove those join clauses from the list of
- * clauses that have to be checked as qpquals at the join node ---
- * but only if there's just one indexscan in the inner path
+ * with the index, we may remove those join clauses from the list
+ * of clauses that have to be checked as qpquals at the join node
+ * --- but only if there's just one indexscan in the inner path
* (otherwise, several different sets of clauses are being ORed
* together).
*
@@ -873,13 +874,14 @@ create_nestloop_plan(Query *root,
* been put in the same joininfo list.
*
* This would be a waste of time if the indexpath was an ordinary
- * indexpath and not a special innerjoin path. We will skip it in
- * that case since indexjoinclauses is NIL in an ordinary indexpath.
+ * indexpath and not a special innerjoin path. We will skip it in
+ * that case since indexjoinclauses is NIL in an ordinary
+ * indexpath.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
List *indexjoinclauses = innerpath->indexjoinclauses;
- if (length(indexjoinclauses) == 1) /* single indexscan? */
+ if (length(indexjoinclauses) == 1) /* single indexscan? */
{
joinrestrictclauses =
select_nonredundant_join_clauses(root,
@@ -947,11 +949,11 @@ create_mergejoin_plan(Query *root,
joinclauses = set_difference(joinclauses, mergeclauses);
/*
- * Rearrange mergeclauses, if needed, so that the outer variable
- * is always on the left.
+ * Rearrange mergeclauses, if needed, so that the outer variable is
+ * always on the left.
*/
mergeclauses = get_switched_clauses(best_path->path_mergeclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/*
* Create explicit sort nodes for the outer and inner join paths if
@@ -964,7 +966,7 @@ create_mergejoin_plan(Query *root,
outer_plan = (Plan *)
make_sort_from_pathkeys(root,
outer_plan,
- best_path->jpath.outerjoinpath->parent->relids,
+ best_path->jpath.outerjoinpath->parent->relids,
best_path->outersortkeys);
}
@@ -974,7 +976,7 @@ create_mergejoin_plan(Query *root,
inner_plan = (Plan *)
make_sort_from_pathkeys(root,
inner_plan,
- best_path->jpath.innerjoinpath->parent->relids,
+ best_path->jpath.innerjoinpath->parent->relids,
best_path->innersortkeys);
}
@@ -1030,21 +1032,19 @@ create_hashjoin_plan(Query *root,
joinclauses = set_difference(joinclauses, hashclauses);
/*
- * Rearrange hashclauses, if needed, so that the outer variable
- * is always on the left.
+ * Rearrange hashclauses, if needed, so that the outer variable is
+ * always on the left.
*/
hashclauses = get_switched_clauses(best_path->path_hashclauses,
- best_path->jpath.outerjoinpath->parent->relids);
+ best_path->jpath.outerjoinpath->parent->relids);
/*
- * Extract the inner hash keys (right-hand operands of the hashclauses)
- * to put in the Hash node.
+ * Extract the inner hash keys (right-hand operands of the
+ * hashclauses) to put in the Hash node.
*/
innerhashkeys = NIL;
foreach(hcl, hashclauses)
- {
innerhashkeys = lappend(innerhashkeys, get_rightop(lfirst(hcl)));
- }
/* We don't want any excess columns in the hashed tuples */
disuse_physical_tlist(inner_plan, best_path->jpath.innerjoinpath);
@@ -1362,7 +1362,7 @@ order_qual_clauses(Query *root, List *clauses)
FastListInit(&withsubplans);
foreach(l, clauses)
{
- Node *clause = lfirst(l);
+ Node *clause = lfirst(l);
if (contain_subplans(clause))
FastAppend(&withsubplans, clause);
@@ -1507,8 +1507,8 @@ make_subqueryscan(List *qptlist,
/*
* Cost is figured here for the convenience of prepunion.c. Note this
- * is only correct for the case where qpqual is empty; otherwise caller
- * should overwrite cost with a better estimate.
+ * is only correct for the case where qpqual is empty; otherwise
+ * caller should overwrite cost with a better estimate.
*/
copy_plan_costsize(plan, subplan);
plan->total_cost += cpu_tuple_cost * subplan->plan_rows;
@@ -1709,7 +1709,7 @@ make_sort(Query *root, List *tlist, Plan *lefttree, int numCols,
* once as a sort key column; if so, the extra mentions are redundant.
*
* Caller is assumed to have allocated the arrays large enough for the
- * max possible number of columns. Return value is the new column count.
+ * max possible number of columns. Return value is the new column count.
*/
static int
add_sort_column(AttrNumber colIdx, Oid sortOp,
@@ -1777,8 +1777,8 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
/*
* We can sort by any one of the sort key items listed in this
* sublist. For now, we take the first one that corresponds to an
- * available Var in the tlist. If there isn't any, use the
- * first one that is an expression in the input's vars.
+ * available Var in the tlist. If there isn't any, use the first
+ * one that is an expression in the input's vars.
*
* XXX if we have a choice, is there any way of figuring out which
* might be cheapest to execute? (For example, int4lt is likely
@@ -1805,17 +1805,19 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
}
if (!j)
elog(ERROR, "could not find pathkey item to sort");
+
/*
* Do we need to insert a Result node?
*
- * Currently, the only non-projection-capable plan type
- * we can see here is Append.
+ * Currently, the only non-projection-capable plan type we can
+ * see here is Append.
*/
if (IsA(lefttree, Append))
{
tlist = copyObject(tlist);
lefttree = (Plan *) make_result(tlist, NULL, lefttree);
}
+
/*
* Add resjunk entry to input's tlist
*/
@@ -1827,8 +1829,9 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
tlist = lappend(tlist,
makeTargetEntry(resdom,
(Expr *) pathkey->key));
- lefttree->targetlist = tlist; /* just in case NIL before */
+ lefttree->targetlist = tlist; /* just in case NIL before */
}
+
/*
* The column might already be selected as a sort key, if the
* pathkeys contain duplicate entries. (This can happen in
@@ -1836,7 +1839,7 @@ make_sort_from_pathkeys(Query *root, Plan *lefttree,
* var, for example.) So enter it only once in the sort arrays.
*/
numsortkeys = add_sort_column(resdom->resno, pathkey->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
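
The add_sort_column() contract described in the hunks above is: append the (column, sort operator) pair only if the column is not already a sort key, and return the possibly-updated key count, with the caller having sized the arrays for the maximum. A standalone sketch of that behavior, using plain int/unsigned in place of AttrNumber and the operator OID:

#include <stdio.h>

static int
add_sort_column_sketch(int colIdx, unsigned sortOp,
					   int numCols, int *sortColIdx, unsigned *sortOperators)
{
	int			i;

	for (i = 0; i < numCols; i++)
	{
		if (sortColIdx[i] == colIdx)
			return numCols;		/* duplicate mention: redundant, keep count */
	}
	sortColIdx[numCols] = colIdx;	/* caller sized the arrays for the max */
	sortOperators[numCols] = sortOp;
	return numCols + 1;
}

int
main(void)
{
	int			cols[4];
	unsigned	ops[4];
	int			n = 0;

	n = add_sort_column_sketch(2, 97, n, cols, ops);
	n = add_sort_column_sketch(5, 97, n, cols, ops);
	n = add_sort_column_sketch(2, 97, n, cols, ops);	/* duplicate, ignored */
	printf("%d\n", n);			/* 2 */
	return 0;
}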
@@ -1881,10 +1884,11 @@ make_sort_from_sortclauses(Query *root, List *tlist,
/*
* Check for the possibility of duplicate order-by clauses --- the
- * parser should have removed 'em, but no point in sorting redundantly.
+ * parser should have removed 'em, but no point in sorting
+ * redundantly.
*/
numsortkeys = add_sort_column(resdom->resno, sortcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
}
Assert(numsortkeys > 0);
@@ -1938,10 +1942,11 @@ make_sort_from_groupcols(Query *root,
/*
* Check for the possibility of duplicate group-by clauses --- the
- * parser should have removed 'em, but no point in sorting redundantly.
+ * parser should have removed 'em, but no point in sorting
+ * redundantly.
*/
numsortkeys = add_sort_column(resdom->resno, grpcl->sortop,
- numsortkeys, sortColIdx, sortOperators);
+ numsortkeys, sortColIdx, sortOperators);
grpno++;
}
@@ -1973,7 +1978,7 @@ make_material(List *tlist, Plan *lefttree)
* materialize_finished_plan: stick a Material node atop a completed plan
*
* There are a couple of places where we want to attach a Material node
- * after completion of subquery_planner(). This currently requires hackery.
+ * after completion of subquery_planner(). This currently requires hackery.
* Since subquery_planner has already run SS_finalize_plan on the subplan
* tree, we have to kluge up parameter lists for the Material node.
* Possibly this could be fixed by postponing SS_finalize_plan processing
@@ -2032,8 +2037,8 @@ make_agg(Query *root, List *tlist, List *qual,
plan->total_cost = agg_path.total_cost;
/*
- * We will produce a single output tuple if not grouping,
- * and a tuple per group otherwise.
+ * We will produce a single output tuple if not grouping, and a tuple
+ * per group otherwise.
*/
if (aggstrategy == AGG_PLAIN)
plan->plan_rows = 1;
@@ -2041,10 +2046,10 @@ make_agg(Query *root, List *tlist, List *qual,
plan->plan_rows = numGroups;
/*
- * We also need to account for the cost of evaluation of the qual
- * (ie, the HAVING clause) and the tlist. Note that cost_qual_eval
- * doesn't charge anything for Aggref nodes; this is okay since
- * they are really comparable to Vars.
+ * We also need to account for the cost of evaluation of the qual (ie,
+ * the HAVING clause) and the tlist. Note that cost_qual_eval doesn't
+ * charge anything for Aggref nodes; this is okay since they are
+ * really comparable to Vars.
*
* See notes in grouping_planner about why this routine and make_group
* are the only ones in this file that worry about tlist eval cost.
@@ -2100,13 +2105,13 @@ make_group(Query *root,
/*
* We also need to account for the cost of evaluation of the tlist.
*
- * XXX this double-counts the cost of evaluation of any expressions
- * used for grouping, since in reality those will have been evaluated
- * at a lower plan level and will only be copied by the Group node.
- * Worth fixing?
+ * XXX this double-counts the cost of evaluation of any expressions used
+ * for grouping, since in reality those will have been evaluated at a
+ * lower plan level and will only be copied by the Group node. Worth
+ * fixing?
*
- * See notes in grouping_planner about why this routine and make_agg
- * are the only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_agg are
+ * the only ones in this file that worry about tlist eval cost.
*/
cost_qual_eval(&qual_cost, tlist);
plan->startup_cost += qual_cost.startup;
@@ -2139,15 +2144,15 @@ make_unique(List *tlist, Plan *lefttree, List *distinctList)
/*
* Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX probably
- * this is an overestimate.)
+ * assume all columns get compared at most of the tuples. (XXX
+ * probably this is an overestimate.)
*/
plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols;
/*
* plan->plan_rows is left as a copy of the input subplan's plan_rows;
- * ie, we assume the filter removes nothing. The caller must alter this
- * if he has a better idea.
+ * ie, we assume the filter removes nothing. The caller must alter
+ * this if he has a better idea.
*/
plan->targetlist = tlist;
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index afcb4528326..61edf1c22d4 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.88 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.89 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,12 +36,12 @@
static void mark_baserels_for_outer_join(Query *root, Relids rels,
Relids outerrels);
static void distribute_qual_to_rels(Query *root, Node *clause,
- bool ispusheddown,
- bool isdeduced,
- Relids outerjoin_nonnullable,
- Relids qualscope);
+ bool ispusheddown,
+ bool isdeduced,
+ Relids outerjoin_nonnullable,
+ Relids qualscope);
static void add_vars_to_targetlist(Query *root, List *vars,
- Relids where_needed);
+ Relids where_needed);
static bool qual_is_redundant(Query *root, RestrictInfo *restrictinfo,
List *restrictlist);
static void check_mergejoinable(RestrictInfo *restrictinfo);
@@ -83,9 +83,7 @@ add_base_rels_to_query(Query *root, Node *jtnode)
List *l;
foreach(l, f->fromlist)
- {
add_base_rels_to_query(root, lfirst(l));
- }
}
else if (IsA(jtnode, JoinExpr))
{
@@ -93,13 +91,14 @@ add_base_rels_to_query(Query *root, Node *jtnode)
add_base_rels_to_query(root, j->larg);
add_base_rels_to_query(root, j->rarg);
+
/*
* Safety check: join RTEs should not be SELECT FOR UPDATE targets
*/
if (intMember(j->rtindex, root->rowMarks))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE cannot be applied to a join")));
+ errmsg("SELECT FOR UPDATE cannot be applied to a join")));
}
else
elog(ERROR, "unrecognized node type: %d",
@@ -247,14 +246,14 @@ distribute_quals_to_rels(Query *root, Node *jtnode)
* Order of operations here is subtle and critical. First we
* recurse to handle sub-JOINs. Their join quals will be placed
* without regard for whether this level is an outer join, which
- * is correct. Then we place our own join quals, which are restricted
- * by lower outer joins in any case, and are forced to this level if
- * this is an outer join and they mention the outer side. Finally, if
- * this is an outer join, we mark baserels contained within the inner
- * side(s) with our own rel set; this will prevent quals above us in
- * the join tree that use those rels from being pushed down below this
- * level. (It's okay for upper quals to be pushed down to the outer
- * side, however.)
+ * is correct. Then we place our own join quals, which are
+ * restricted by lower outer joins in any case, and are forced to
+ * this level if this is an outer join and they mention the outer
+ * side. Finally, if this is an outer join, we mark baserels
+ * contained within the inner side(s) with our own rel set; this
+ * will prevent quals above us in the join tree that use those
+ * rels from being pushed down below this level. (It's okay for
+ * upper quals to be pushed down to the outer side, however.)
*/
leftids = distribute_quals_to_rels(root, j->larg);
rightids = distribute_quals_to_rels(root, j->rarg);
@@ -390,9 +389,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
restrictinfo->clause = (Expr *) clause;
restrictinfo->subclauseindices = NIL;
- restrictinfo->eval_cost.startup = -1; /* not computed until needed */
+ restrictinfo->eval_cost.startup = -1; /* not computed until
+ * needed */
restrictinfo->this_selec = -1; /* not computed until needed */
- restrictinfo->left_relids = NULL; /* set below, if join clause */
+ restrictinfo->left_relids = NULL; /* set below, if join clause */
restrictinfo->right_relids = NULL;
restrictinfo->mergejoinoperator = InvalidOid;
restrictinfo->left_sortop = InvalidOid;
@@ -435,10 +435,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
if (isdeduced)
{
/*
- * If the qual came from implied-equality deduction, we can evaluate
- * the qual at its natural semantic level. It is not affected by
- * any outer-join rules (else we'd not have decided the vars were
- * equal).
+ * If the qual came from implied-equality deduction, we can
+ * evaluate the qual at its natural semantic level. It is not
+ * affected by any outer-join rules (else we'd not have decided
+ * the vars were equal).
*/
Assert(bms_equal(relids, qualscope));
can_be_equijoin = true;
@@ -446,12 +446,13 @@ distribute_qual_to_rels(Query *root, Node *clause,
else if (bms_overlap(relids, outerjoin_nonnullable))
{
/*
- * The qual is attached to an outer join and mentions (some of the)
- * rels on the nonnullable side. Force the qual to be evaluated
- * exactly at the level of joining corresponding to the outer join.
- * We cannot let it get pushed down into the nonnullable side, since
- * then we'd produce no output rows, rather than the intended single
- * null-extended row, for any nonnullable-side rows failing the qual.
+ * The qual is attached to an outer join and mentions (some of
+ * the) rels on the nonnullable side. Force the qual to be
+ * evaluated exactly at the level of joining corresponding to the
+ * outer join. We cannot let it get pushed down into the
+ * nonnullable side, since then we'd produce no output rows,
+ * rather than the intended single null-extended row, for any
+ * nonnullable-side rows failing the qual.
*
* Note: an outer-join qual that mentions only nullable-side rels can
* be pushed down into the nullable side without changing the join
@@ -464,13 +465,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
{
/*
* For a non-outer-join qual, we can evaluate the qual as soon as
- * (1) we have all the rels it mentions, and (2) we are at or above
- * any outer joins that can null any of these rels and are below the
- * syntactic location of the given qual. To enforce the latter, scan
- * the base rels listed in relids, and merge their outer-join sets
- * into the clause's own reference list. At the time we are called,
- * the outerjoinset of each baserel will show exactly those outer
- * joins that are below the qual in the join tree.
+ * (1) we have all the rels it mentions, and (2) we are at or
+ * above any outer joins that can null any of these rels and are
+ * below the syntactic location of the given qual. To enforce the
+ * latter, scan the base rels listed in relids, and merge their
+ * outer-join sets into the clause's own reference list. At the
+ * time we are called, the outerjoinset of each baserel will show
+ * exactly those outer joins that are below the qual in the join
+ * tree.
*/
Relids addrelids = NULL;
Relids tmprelids;
@@ -496,9 +498,10 @@ distribute_qual_to_rels(Query *root, Node *clause,
relids = bms_union(relids, addrelids);
/* Should still be a subset of current scope ... */
Assert(bms_is_subset(relids, qualscope));
+
/*
- * Because application of the qual will be delayed by outer join,
- * we mustn't assume its vars are equal everywhere.
+ * Because application of the qual will be delayed by outer
+ * join, we mustn't assume its vars are equal everywhere.
*/
can_be_equijoin = false;
}
@@ -518,6 +521,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
switch (bms_membership(relids))
{
case BMS_SINGLETON:
+
/*
* There is only one relation participating in 'clause', so
* 'clause' is a restriction clause for that relation.
@@ -525,28 +529,29 @@ distribute_qual_to_rels(Query *root, Node *clause,
rel = find_base_rel(root, bms_singleton_member(relids));
/*
- * Check for a "mergejoinable" clause even though it's not a join
- * clause. This is so that we can recognize that "a.x = a.y"
- * makes x and y eligible to be considered equal, even when they
- * belong to the same rel. Without this, we would not recognize
- * that "a.x = a.y AND a.x = b.z AND a.y = c.q" allows us to
- * consider z and q equal after their rels are joined.
+ * Check for a "mergejoinable" clause even though it's not a
+ * join clause. This is so that we can recognize that "a.x =
+ * a.y" makes x and y eligible to be considered equal, even
+ * when they belong to the same rel. Without this, we would
+ * not recognize that "a.x = a.y AND a.x = b.z AND a.y = c.q"
+ * allows us to consider z and q equal after their rels are
+ * joined.
*/
if (can_be_equijoin)
check_mergejoinable(restrictinfo);
/*
- * If the clause was deduced from implied equality, check to see
- * whether it is redundant with restriction clauses we already
- * have for this rel. Note we cannot apply this check to
- * user-written clauses, since we haven't found the canonical
- * pathkey sets yet while processing user clauses. (NB: no
- * comparable check is done in the join-clause case; redundancy
- * will be detected when the join clause is moved into a join
- * rel's restriction list.)
+ * If the clause was deduced from implied equality, check to
+ * see whether it is redundant with restriction clauses we
+ * already have for this rel. Note we cannot apply this check
+ * to user-written clauses, since we haven't found the
+ * canonical pathkey sets yet while processing user clauses.
+ * (NB: no comparable check is done in the join-clause case;
+ * redundancy will be detected when the join clause is moved
+ * into a join rel's restriction list.)
*/
if (!isdeduced ||
- !qual_is_redundant(root, restrictinfo, rel->baserestrictinfo))
+ !qual_is_redundant(root, restrictinfo, rel->baserestrictinfo))
{
/* Add clause to rel's restriction list */
rel->baserestrictinfo = lappend(rel->baserestrictinfo,
@@ -554,13 +559,14 @@ distribute_qual_to_rels(Query *root, Node *clause,
}
break;
case BMS_MULTIPLE:
+
/*
- * 'clause' is a join clause, since there is more than one rel in
- * the relid set. Set additional RestrictInfo fields for
- * joining. First, does it look like a normal join clause, i.e.,
- * a binary operator relating expressions that come from distinct
- * relations? If so we might be able to use it in a join
- * algorithm.
+ * 'clause' is a join clause, since there is more than one rel
+ * in the relid set. Set additional RestrictInfo fields for
+ * joining. First, does it look like a normal join clause,
+ * i.e., a binary operator relating expressions that come from
+ * distinct relations? If so we might be able to use it in a
+ * join algorithm.
*/
if (is_opclause(clause) && length(((OpExpr *) clause)->args) == 2)
{
@@ -582,9 +588,9 @@ distribute_qual_to_rels(Query *root, Node *clause,
* Now check for hash or mergejoinable operators.
*
* We don't bother setting the hashjoin info if we're not going
- * to need it. We do want to know about mergejoinable ops in all
- * cases, however, because we use mergejoinable ops for other
- * purposes such as detecting redundant clauses.
+ * to need it. We do want to know about mergejoinable ops in
+ * all cases, however, because we use mergejoinable ops for
+ * other purposes such as detecting redundant clauses.
*/
check_mergejoinable(restrictinfo);
if (enable_hashjoin)
@@ -597,16 +603,18 @@ distribute_qual_to_rels(Query *root, Node *clause,
/*
* Add vars used in the join clause to targetlists of their
- * relations, so that they will be emitted by the plan nodes that
- * scan those relations (else they won't be available at the join
- * node!).
+ * relations, so that they will be emitted by the plan nodes
+ * that scan those relations (else they won't be available at
+ * the join node!).
*/
add_vars_to_targetlist(root, vars, relids);
break;
default:
+
/*
- * 'clause' references no rels, and therefore we have no place to
- * attach it. Shouldn't get here if callers are working properly.
+ * 'clause' references no rels, and therefore we have no place
+ * to attach it. Shouldn't get here if callers are working
+ * properly.
*/
elog(ERROR, "cannot cope with variable-free clause");
break;
@@ -634,7 +642,7 @@ distribute_qual_to_rels(Query *root, Node *clause,
*
* This processing is a consequence of transitivity of mergejoin equality:
* if we have mergejoinable clauses A = B and B = C, we can deduce A = C
- * (where = is an appropriate mergejoinable operator). See path/pathkeys.c
+ * (where = is an appropriate mergejoinable operator). See path/pathkeys.c
* for more details.
*/
void
@@ -695,8 +703,8 @@ process_implied_equality(Query *root,
}
/*
- * Scan to see if equality is already known. If so, we're done in
- * the add case, and done after removing it in the delete case.
+ * Scan to see if equality is already known. If so, we're done in the
+ * add case, and done after removing it in the delete case.
*/
foreach(itm, restrictlist)
{
@@ -719,7 +727,7 @@ process_implied_equality(Query *root,
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = lremove(restrictinfo,
- rel1->baserestrictinfo);
+ rel1->baserestrictinfo);
}
else
{
@@ -768,9 +776,9 @@ process_implied_equality(Query *root,
errmsg("equality operator for types %s and %s should be mergejoinable, but isn't",
format_type_be(ltype), format_type_be(rtype))));
- clause = make_opclause(oprid(eq_operator), /* opno */
- BOOLOID, /* opresulttype */
- false, /* opretset */
+ clause = make_opclause(oprid(eq_operator), /* opno */
+ BOOLOID, /* opresulttype */
+ false, /* opretset */
(Expr *) item1,
(Expr *) item2);
@@ -797,9 +805,9 @@ process_implied_equality(Query *root,
* too-small selectivity, not to mention wasting time at execution.
*
* Note: quals of the form "var = const" are never considered redundant,
- * only those of the form "var = var". This is needed because when we
+ * only those of the form "var = var". This is needed because when we
* have constants in an implied-equality set, we use a different strategy
- * that suppresses all "var = var" deductions. We must therefore keep
+ * that suppresses all "var = var" deductions. We must therefore keep
* all the "var = const" quals.
*/
static bool
@@ -858,7 +866,8 @@ qual_is_redundant(Query *root,
* left side of the new qual. We traverse the old-quals list
* repeatedly to transitively expand the exprs list. If at any point
* we find we can reach the right-side expr of the new qual, we are
- * done. We give up when we can't expand the equalexprs list any more.
+ * done. We give up when we can't expand the equalexprs list any
+ * more.
*/
equalexprs = makeList1(newleft);
do
@@ -945,7 +954,7 @@ check_mergejoinable(RestrictInfo *restrictinfo)
* info fields in the restrictinfo.
*
* Currently, we support hashjoin for binary opclauses where
- * the operator is a hashjoinable operator. The arguments can be
+ * the operator is a hashjoinable operator. The arguments can be
* anything --- as long as there are no volatile functions in them.
*/
static void
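
The initsplan.c comments reflowed above describe why a single-rel clause such as "a.x = a.y" is still checked for mergejoinability: it feeds the implied-equality machinery. As an illustrative aside (not planner code; all names and the union-find representation are invented for the example), the following sketch shows how recognizing that clause lets "a.x = b.z" and "a.y = c.q" put b.z and c.q into the same equivalence class:

/*
 * Illustrative sketch only: a tiny union-find over expression labels,
 * showing why recognizing the single-rel clause "a.x = a.y" lets later
 * clauses make b.z and c.q comparable as equals.
 */
#include <stdio.h>

#define NEXPR 4					/* a.x, a.y, b.z, c.q */

static int	parent[NEXPR];

static int
find(int i)
{
	while (parent[i] != i)
		i = parent[i] = parent[parent[i]];	/* path halving */
	return i;
}

static void
merge(int i, int j)
{
	parent[find(i)] = find(j);
}

int
main(void)
{
	const char *name[NEXPR] = {"a.x", "a.y", "b.z", "c.q"};
	int			i;

	for (i = 0; i < NEXPR; i++)
		parent[i] = i;

	merge(0, 1);				/* a.x = a.y  (single-rel clause) */
	merge(0, 2);				/* a.x = b.z */
	merge(1, 3);				/* a.y = c.q */

	printf("%s and %s %s equal\n", name[2], name[3],
		   find(2) == find(3) ? "are considered" : "are NOT considered");
	return 0;
}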
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index 8883310f66d..4f0ede34520 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.76 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.77 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
* which may involve joins but not any fancier features.
*
* Since query_planner does not handle the toplevel processing (grouping,
- * sorting, etc) it cannot select the best path by itself. It selects
+ * sorting, etc) it cannot select the best path by itself. It selects
* two paths: the cheapest path that produces all the required tuples,
* independent of any ordering considerations, and the cheapest path that
* produces the expected fraction of the required tuples in the required
@@ -84,7 +84,7 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
if (root->jointree->fromlist == NIL)
{
*cheapest_path = (Path *) create_result_path(NULL, NULL,
- (List *) root->jointree->quals);
+ (List *) root->jointree->quals);
*sorted_path = NULL;
return;
}
@@ -125,9 +125,9 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
* relations. We also build lists of equijoined keys for pathkey
* construction.
*
- * Note: all subplan nodes will have "flat" (var-only) tlists.
- * This implies that all expression evaluations are done at the root of
- * the plan tree. Once upon a time there was code to try to push
+ * Note: all subplan nodes will have "flat" (var-only) tlists. This
+ * implies that all expression evaluations are done at the root of the
+ * plan tree. Once upon a time there was code to try to push
* expensive function calls down to lower plan nodes, but that's dead
* code and has been for a long time...
*/
@@ -223,7 +223,8 @@ query_planner(Query *root, List *tlist, double tuple_fraction,
}
/*
- * If we have constant quals, add a toplevel Result step to process them.
+ * If we have constant quals, add a toplevel Result step to process
+ * them.
*/
if (constant_quals)
{
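
The planmain.c comment above notes that query_planner() returns two candidate paths: the cheapest path overall and the cheapest path that delivers a fraction of the tuples already in the required order. A minimal sketch of why both are kept follows; the linear interpolation formula and all cost numbers are assumptions made for the example, not values taken from the planner.

/*
 * Illustrative sketch: with a tuple fraction f, approximate a path's
 * effective cost as startup + f * (total - startup); which candidate
 * wins depends on f, so query_planner() keeps both.
 */
#include <stdio.h>

typedef struct
{
	const char *label;
	double		startup_cost;
	double		total_cost;
} ToyPath;

static double
fractional_cost(const ToyPath *p, double fraction)
{
	return p->startup_cost + fraction * (p->total_cost - p->startup_cost);
}

int
main(void)
{
	ToyPath		cheapest = {"cheapest-total path", 500.0, 1000.0};
	ToyPath		presorted = {"presorted path", 5.0, 2500.0};
	double		fractions[] = {0.10, 1.00};
	int			i;

	for (i = 0; i < 2; i++)
	{
		double		f = fractions[i];
		double		c1 = fractional_cost(&cheapest, f);
		double		c2 = fractional_cost(&presorted, f);

		printf("fraction %.2f: %s wins (%.1f vs %.1f)\n", f,
			   c1 <= c2 ? cheapest.label : presorted.label, c1, c2);
	}
	return 0;
}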
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 1896982f02e..c2aec37470a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.157 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.158 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,10 +45,10 @@
/* Expression kind codes for preprocess_expression */
#define EXPRKIND_QUAL 0
-#define EXPRKIND_TARGET 1
-#define EXPRKIND_RTFUNC 2
+#define EXPRKIND_TARGET 1
+#define EXPRKIND_RTFUNC 2
#define EXPRKIND_LIMIT 3
-#define EXPRKIND_ININFO 4
+#define EXPRKIND_ININFO 4
static Node *preprocess_expression(Query *parse, Node *expr, int kind);
@@ -59,9 +59,9 @@ static bool hash_safe_grouping(Query *parse);
static List *make_subplanTargetList(Query *parse, List *tlist,
AttrNumber **groupColIdx, bool *need_tlist_eval);
static void locate_grouping_columns(Query *parse,
- List *tlist,
- List *sub_tlist,
- AttrNumber *groupColIdx);
+ List *tlist,
+ List *sub_tlist,
+ AttrNumber *groupColIdx);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
@@ -103,9 +103,9 @@ planner(Query *parse, bool isCursor, int cursorOptions)
{
/*
* We have no real idea how many tuples the user will ultimately
- * FETCH from a cursor, but it seems a good bet that he
- * doesn't want 'em all. Optimize for 10% retrieval (you
- * gotta better number? Should this be a SETtable parameter?)
+ * FETCH from a cursor, but it seems a good bet that he doesn't
+ * want 'em all. Optimize for 10% retrieval (you gotta better
+ * number? Should this be a SETtable parameter?)
*/
tuple_fraction = 0.10;
}
@@ -121,8 +121,8 @@ planner(Query *parse, bool isCursor, int cursorOptions)
Assert(PlannerQueryLevel == 0);
/*
- * If creating a plan for a scrollable cursor, make sure it can
- * run backwards on demand. Add a Material node at the top at need.
+ * If creating a plan for a scrollable cursor, make sure it can run
+ * backwards on demand. Add a Material node at the top at need.
*/
if (isCursor && (cursorOptions & CURSOR_OPT_SCROLL))
{
@@ -181,14 +181,14 @@ subquery_planner(Query *parse, double tuple_fraction)
/*
* Look for IN clauses at the top level of WHERE, and transform them
- * into joins. Note that this step only handles IN clauses originally
- * at top level of WHERE; if we pull up any subqueries in the next step,
- * their INs are processed just before pulling them up.
+ * into joins. Note that this step only handles IN clauses originally
+ * at top level of WHERE; if we pull up any subqueries in the next
+ * step, their INs are processed just before pulling them up.
*/
parse->in_info_list = NIL;
if (parse->hasSubLinks)
parse->jointree->quals = pull_up_IN_clauses(parse,
- parse->jointree->quals);
+ parse->jointree->quals);
/*
* Check to see if any subqueries in the rangetable can be merged into
@@ -198,10 +198,11 @@ subquery_planner(Query *parse, double tuple_fraction)
pull_up_subqueries(parse, (Node *) parse->jointree, false);
/*
- * Detect whether any rangetable entries are RTE_JOIN kind; if not,
- * we can avoid the expense of doing flatten_join_alias_vars(). Also
- * check for outer joins --- if none, we can skip reduce_outer_joins().
- * This must be done after we have done pull_up_subqueries, of course.
+ * Detect whether any rangetable entries are RTE_JOIN kind; if not, we
+ * can avoid the expense of doing flatten_join_alias_vars(). Also
+ * check for outer joins --- if none, we can skip
+ * reduce_outer_joins(). This must be done after we have done
+ * pull_up_subqueries, of course.
*/
parse->hasJoinRTEs = false;
hasOuterJoins = false;
@@ -283,19 +284,20 @@ subquery_planner(Query *parse, double tuple_fraction)
parse->havingQual = (Node *) newHaving;
/*
- * If we have any outer joins, try to reduce them to plain inner joins.
- * This step is most easily done after we've done expression preprocessing.
+ * If we have any outer joins, try to reduce them to plain inner
+ * joins. This step is most easily done after we've done expression
+ * preprocessing.
*/
if (hasOuterJoins)
reduce_outer_joins(parse);
/*
- * See if we can simplify the jointree; opportunities for this may come
- * from having pulled up subqueries, or from flattening explicit JOIN
- * syntax. We must do this after flattening JOIN alias variables, since
- * eliminating explicit JOIN nodes from the jointree will cause
- * get_relids_for_join() to fail. But it should happen after
- * reduce_outer_joins, anyway.
+ * See if we can simplify the jointree; opportunities for this may
+ * come from having pulled up subqueries, or from flattening explicit
+ * JOIN syntax. We must do this after flattening JOIN alias
+ * variables, since eliminating explicit JOIN nodes from the jointree
+ * will cause get_relids_for_join() to fail. But it should happen
+ * after reduce_outer_joins, anyway.
*/
parse->jointree = (FromExpr *)
simplify_jointree(parse, (Node *) parse->jointree);
@@ -318,26 +320,26 @@ subquery_planner(Query *parse, double tuple_fraction)
*/
if (PlannerPlanId != saved_planid || PlannerQueryLevel > 1)
{
- Cost initplan_cost = 0;
+ Cost initplan_cost = 0;
/* Prepare extParam/allParam sets for all nodes in tree */
SS_finalize_plan(plan, parse->rtable);
/*
- * SS_finalize_plan doesn't handle initPlans, so we have to manually
- * attach them to the topmost plan node, and add their extParams to
- * the topmost node's, too.
+ * SS_finalize_plan doesn't handle initPlans, so we have to
+ * manually attach them to the topmost plan node, and add their
+ * extParams to the topmost node's, too.
*
- * We also add the total_cost of each initPlan to the startup cost
- * of the top node. This is a conservative overestimate, since in
- * fact each initPlan might be executed later than plan startup, or
- * even not at all.
+ * We also add the total_cost of each initPlan to the startup cost of
+ * the top node. This is a conservative overestimate, since in
+ * fact each initPlan might be executed later than plan startup,
+ * or even not at all.
*/
plan->initPlan = PlannerInitPlan;
foreach(lst, plan->initPlan)
{
- SubPlan *initplan = (SubPlan *) lfirst(lst);
+ SubPlan *initplan = (SubPlan *) lfirst(lst);
plan->extParam = bms_add_members(plan->extParam,
initplan->plan->extParam);
@@ -368,7 +370,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
- * else sublinks expanded out from join aliases wouldn't get processed.
+ * else sublinks expanded out from join aliases wouldn't get
+ * processed.
*/
if (parse->hasJoinRTEs)
expr = flatten_join_alias_vars(parse, expr);
@@ -403,8 +406,8 @@ preprocess_expression(Query *parse, Node *expr, int kind)
expr = SS_process_sublinks(expr, (kind == EXPRKIND_QUAL));
/*
- * XXX do not insert anything here unless you have grokked the comments
- * in SS_replace_correlation_vars ...
+ * XXX do not insert anything here unless you have grokked the
+ * comments in SS_replace_correlation_vars ...
*/
/* Replace uplevel vars with Param nodes */
@@ -498,20 +501,21 @@ inheritance_planner(Query *parse, List *inheritlist)
/* Generate plan */
subplan = grouping_planner(subquery, 0.0 /* retrieve all tuples */ );
subplans = lappend(subplans, subplan);
+
/*
* It's possible that additional RTEs got added to the rangetable
* due to expansion of inherited source tables (see allpaths.c).
* If so, we must copy 'em back to the main parse tree's rtable.
*
- * XXX my goodness this is ugly. Really need to think about ways
- * to rein in planner's habit of scribbling on its input.
+ * XXX my goodness this is ugly. Really need to think about ways to
+ * rein in planner's habit of scribbling on its input.
*/
subrtlength = length(subquery->rtable);
if (subrtlength > mainrtlength)
{
- List *subrt = subquery->rtable;
+ List *subrt = subquery->rtable;
- while (mainrtlength-- > 0) /* wish we had nthcdr() */
+ while (mainrtlength-- > 0) /* wish we had nthcdr() */
subrt = lnext(subrt);
parse->rtable = nconc(parse->rtable, subrt);
mainrtlength = subrtlength;
@@ -684,7 +688,7 @@ grouping_planner(Query *parse, double tuple_fraction)
* from tlist if grouping or aggregation is needed.
*/
sub_tlist = make_subplanTargetList(parse, tlist,
- &groupColIdx, &need_tlist_eval);
+ &groupColIdx, &need_tlist_eval);
/*
* Calculate pathkeys that represent grouping/ordering
@@ -700,8 +704,8 @@ grouping_planner(Query *parse, double tuple_fraction)
* Also, it's possible that optimization has eliminated all
* aggregates, and we may as well check for that here.
*
- * Note: we do not attempt to detect duplicate aggregates here;
- * a somewhat-overestimated count is okay for our present purposes.
+ * Note: we do not attempt to detect duplicate aggregates here; a
+ * somewhat-overestimated count is okay for our present purposes.
*/
if (parse->hasAggs)
{
@@ -892,8 +896,8 @@ grouping_planner(Query *parse, double tuple_fraction)
&cheapest_path, &sorted_path);
/*
- * We couldn't canonicalize group_pathkeys and sort_pathkeys before
- * running query_planner(), so do it now.
+ * We couldn't canonicalize group_pathkeys and sort_pathkeys
+ * before running query_planner(), so do it now.
*/
group_pathkeys = canonicalize_pathkeys(parse, group_pathkeys);
sort_pathkeys = canonicalize_pathkeys(parse, sort_pathkeys);
@@ -903,9 +907,9 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
if (parse->groupClause)
{
- List *groupExprs;
- double cheapest_path_rows;
- int cheapest_path_width;
+ List *groupExprs;
+ double cheapest_path_rows;
+ int cheapest_path_width;
/*
* Beware in this section of the possibility that
@@ -919,13 +923,13 @@ grouping_planner(Query *parse, double tuple_fraction)
}
else
{
- cheapest_path_rows = 1; /* assume non-set result */
- cheapest_path_width = 100; /* arbitrary */
+ cheapest_path_rows = 1; /* assume non-set result */
+ cheapest_path_width = 100; /* arbitrary */
}
/*
- * Always estimate the number of groups. We can't do this until
- * after running query_planner(), either.
+ * Always estimate the number of groups. We can't do this
+ * until after running query_planner(), either.
*/
groupExprs = get_sortgrouplist_exprs(parse->groupClause,
parse->targetList);
@@ -936,12 +940,13 @@ grouping_planner(Query *parse, double tuple_fraction)
numGroups = (long) Min(dNumGroups, (double) LONG_MAX);
/*
- * Check can't-do-it conditions, including whether the grouping
- * operators are hashjoinable.
+ * Check can't-do-it conditions, including whether the
+ * grouping operators are hashjoinable.
*
* Executor doesn't support hashed aggregation with DISTINCT
- * aggregates. (Doing so would imply storing *all* the input
- * values in the hash table, which seems like a certain loser.)
+ * aggregates. (Doing so would imply storing *all* the input
+ * values in the hash table, which seems like a certain
+ * loser.)
*/
if (!enable_hashagg || !hash_safe_grouping(parse))
use_hashed_grouping = false;
@@ -953,32 +958,30 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Use hashed grouping if (a) we think we can fit the
- * hashtable into SortMem, *and* (b) the estimated cost
- * is no more than doing it the other way. While avoiding
+ * hashtable into SortMem, *and* (b) the estimated cost is
+ * no more than doing it the other way. While avoiding
* the need for sorted input is usually a win, the fact
* that the output won't be sorted may be a loss; so we
* need to do an actual cost comparison.
*
* In most cases we have no good way to estimate the size of
- * the transition value needed by an aggregate; arbitrarily
- * assume it is 100 bytes. Also set the overhead per hashtable
- * entry at 64 bytes.
+ * the transition value needed by an aggregate;
+ * arbitrarily assume it is 100 bytes. Also set the
+ * overhead per hashtable entry at 64 bytes.
*/
- int hashentrysize = cheapest_path_width + 64 + numAggs * 100;
+ int hashentrysize = cheapest_path_width + 64 + numAggs * 100;
if (hashentrysize * dNumGroups <= SortMem * 1024L)
{
/*
* Okay, do the cost comparison. We need to consider
- * cheapest_path + hashagg [+ final sort]
- * versus either
- * cheapest_path [+ sort] + group or agg [+ final sort]
- * or
- * presorted_path + group or agg [+ final sort]
- * where brackets indicate a step that may not be needed.
- * We assume query_planner() will have returned a
- * presorted path only if it's a winner compared to
- * cheapest_path for this purpose.
+ * cheapest_path + hashagg [+ final sort] versus
+ * either cheapest_path [+ sort] + group or agg [+
+ * final sort] or presorted_path + group or agg [+
+ * final sort] where brackets indicate a step that may
+ * not be needed. We assume query_planner() will have
+ * returned a presorted path only if it's a winner
+ * compared to cheapest_path for this purpose.
*
* These path variables are dummies that just hold cost
* fields; we don't make actual Paths for these steps.
@@ -1065,9 +1068,9 @@ grouping_planner(Query *parse, double tuple_fraction)
/*
* Select the best path and create a plan to execute it.
*
- * If we are doing hashed grouping, we will always read all the
- * input tuples, so use the cheapest-total path. Otherwise,
- * trust query_planner's decision about which to use.
+ * If we are doing hashed grouping, we will always read all the input
+ * tuples, so use the cheapest-total path. Otherwise, trust
+ * query_planner's decision about which to use.
*/
if (sorted_path && !use_hashed_grouping)
{
@@ -1081,19 +1084,19 @@ grouping_planner(Query *parse, double tuple_fraction)
}
/*
- * create_plan() returns a plan with just a "flat" tlist of required
- * Vars. Usually we need to insert the sub_tlist as the tlist of the
- * top plan node. However, we can skip that if we determined that
- * whatever query_planner chose to return will be good enough.
+ * create_plan() returns a plan with just a "flat" tlist of
+ * required Vars. Usually we need to insert the sub_tlist as the
+ * tlist of the top plan node. However, we can skip that if we
+ * determined that whatever query_planner chose to return will be
+ * good enough.
*/
if (need_tlist_eval)
{
/*
* If the top-level plan node is one that cannot do expression
- * evaluation, we must insert a Result node to project the desired
- * tlist.
- * Currently, the only plan node we might see here that falls into
- * that category is Append.
+ * evaluation, we must insert a Result node to project the
+ * desired tlist. Currently, the only plan node we might see
+ * here that falls into that category is Append.
*/
if (IsA(result_plan, Append))
{
@@ -1108,23 +1111,25 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
result_plan->targetlist = sub_tlist;
}
+
/*
* Also, account for the cost of evaluation of the sub_tlist.
*
* Up to now, we have only been dealing with "flat" tlists,
* containing just Vars. So their evaluation cost is zero
* according to the model used by cost_qual_eval() (or if you
- * prefer, the cost is factored into cpu_tuple_cost). Thus we can
- * avoid accounting for tlist cost throughout query_planner() and
- * subroutines. But now we've inserted a tlist that might contain
- * actual operators, sub-selects, etc --- so we'd better account
- * for its cost.
+ * prefer, the cost is factored into cpu_tuple_cost). Thus we
+ * can avoid accounting for tlist cost throughout
+ * query_planner() and subroutines. But now we've inserted a
+ * tlist that might contain actual operators, sub-selects, etc
+ * --- so we'd better account for its cost.
*
- * Below this point, any tlist eval cost for added-on nodes should
- * be accounted for as we create those nodes. Presently, of the
- * node types we can add on, only Agg and Group project new tlists
- * (the rest just copy their input tuples) --- so make_agg() and
- * make_group() are responsible for computing the added cost.
+ * Below this point, any tlist eval cost for added-on nodes
+ * should be accounted for as we create those nodes.
+ * Presently, of the node types we can add on, only Agg and
+ * Group project new tlists (the rest just copy their input
+ * tuples) --- so make_agg() and make_group() are responsible
+ * for computing the added cost.
*/
cost_qual_eval(&tlist_cost, sub_tlist);
result_plan->startup_cost += tlist_cost.startup;
@@ -1135,8 +1140,8 @@ grouping_planner(Query *parse, double tuple_fraction)
{
/*
* Since we're using query_planner's tlist and not the one
- * make_subplanTargetList calculated, we have to refigure
- * any grouping-column indexes make_subplanTargetList computed.
+ * make_subplanTargetList calculated, we have to refigure any
+ * grouping-column indexes make_subplanTargetList computed.
*/
locate_grouping_columns(parse, tlist, result_plan->targetlist,
groupColIdx);
@@ -1180,6 +1185,7 @@ grouping_planner(Query *parse, double tuple_fraction)
current_pathkeys = group_pathkeys;
}
aggstrategy = AGG_SORTED;
+
/*
* The AGG node will not change the sort ordering of its
* groups, so current_pathkeys describes the result too.
@@ -1205,7 +1211,8 @@ grouping_planner(Query *parse, double tuple_fraction)
else
{
/*
- * If there are no Aggs, we shouldn't have any HAVING qual anymore
+ * If there are no Aggs, we shouldn't have any HAVING qual
+ * anymore
*/
Assert(parse->havingQual == NULL);
@@ -1216,8 +1223,8 @@ grouping_planner(Query *parse, double tuple_fraction)
if (parse->groupClause)
{
/*
- * Add an explicit sort if we couldn't make the path come out
- * the way the GROUP node needs it.
+ * Add an explicit sort if we couldn't make the path come
+ * out the way the GROUP node needs it.
*/
if (!pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
@@ -1238,7 +1245,7 @@ grouping_planner(Query *parse, double tuple_fraction)
/* The Group node won't change sort ordering */
}
}
- } /* end of if (setOperations) */
+ } /* end of if (setOperations) */
/*
* If we were not able to make the plan come out in the right order,
@@ -1264,6 +1271,7 @@ grouping_planner(Query *parse, double tuple_fraction)
{
result_plan = (Plan *) make_unique(tlist, result_plan,
parse->distinctClause);
+
/*
* If there was grouping or aggregation, leave plan_rows as-is
* (ie, assume the result was already mostly unique). If not,
@@ -1272,13 +1280,13 @@ grouping_planner(Query *parse, double tuple_fraction)
*/
if (!parse->groupClause && !parse->hasAggs)
{
- List *distinctExprs;
+ List *distinctExprs;
distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
parse->targetList);
result_plan->plan_rows = estimate_num_groups(parse,
distinctExprs,
- result_plan->plan_rows);
+ result_plan->plan_rows);
}
}
@@ -1443,7 +1451,7 @@ make_subplanTargetList(Query *parse,
false),
(Expr *) groupexpr);
sub_tlist = lappend(sub_tlist, te);
- *need_tlist_eval = true; /* it's not flat anymore */
+ *need_tlist_eval = true; /* it's not flat anymore */
}
/* and save its resno */
@@ -1459,7 +1467,7 @@ make_subplanTargetList(Query *parse,
* Locate grouping columns in the tlist chosen by query_planner.
*
* This is only needed if we don't use the sub_tlist chosen by
- * make_subplanTargetList. We have to forget the column indexes found
+ * make_subplanTargetList. We have to forget the column indexes found
* by that routine and re-locate the grouping vars in the real sub_tlist.
*/
static void
@@ -1528,7 +1536,7 @@ postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
Assert(orig_tlist != NIL);
orig_tle = (TargetEntry *) lfirst(orig_tlist);
orig_tlist = lnext(orig_tlist);
- if (orig_tle->resdom->resjunk) /* should not happen */
+ if (orig_tle->resdom->resjunk) /* should not happen */
elog(ERROR, "resjunk output columns are not implemented");
Assert(new_tle->resdom->resno == orig_tle->resdom->resno);
Assert(new_tle->resdom->restype == orig_tle->resdom->restype);
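
The grouping_planner() hunks above reflow the comments around the hashed-grouping decision: the per-entry size estimate (path width plus 64 bytes of hashtable overhead plus 100 bytes per aggregate transition value) must let all groups fit in SortMem before hashed aggregation is even costed. A standalone sketch of just that check follows; the input figures are made up for the example.

/*
 * Illustrative sketch of the hashed-grouping memory test seen in the
 * planner.c hunk above.
 */
#include <stdio.h>

int
main(void)
{
	int			SortMem = 1024; /* sort_mem GUC, in kilobytes */
	int			cheapest_path_width = 48;	/* bytes per input row */
	int			numAggs = 2;
	double		dNumGroups = 5000.0;

	int			hashentrysize = cheapest_path_width + 64 + numAggs * 100;
	int			fits = hashentrysize * dNumGroups <= SortMem * 1024L;

	printf("hash entry size: %d bytes, est. table size: %.0f bytes\n",
		   hashentrysize, hashentrysize * dNumGroups);
	printf("hashed grouping %s considered\n",
		   fits ? "can be" : "will not be");
	return 0;
}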
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 18395be022b..91396575b3f 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.94 2003/07/25 00:01:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.95 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,11 +46,11 @@ static void set_join_references(Join *join, List *rtable);
static void set_uppernode_references(Plan *plan, Index subvarno);
static bool targetlist_has_non_vars(List *tlist);
static List *join_references(List *clauses,
- List *rtable,
- List *outer_tlist,
- List *inner_tlist,
- Index acceptable_rel,
- bool tlists_have_non_vars);
+ List *rtable,
+ List *outer_tlist,
+ List *inner_tlist,
+ Index acceptable_rel,
+ bool tlists_have_non_vars);
static Node *join_references_mutator(Node *node,
join_references_context *context);
static Node *replace_vars_with_subplan_refs(Node *node,
@@ -60,7 +60,7 @@ static Node *replace_vars_with_subplan_refs(Node *node,
static Node *replace_vars_with_subplan_refs_mutator(Node *node,
replace_vars_with_subplan_refs_context *context);
static bool fix_opfuncids_walker(Node *node, void *context);
-static void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
+static void set_sa_opfuncid(ScalarArrayOpExpr * opexpr);
/*****************************************************************************
@@ -167,12 +167,13 @@ set_plan_references(Plan *plan, List *rtable)
(Node *) ((HashJoin *) plan)->hashclauses);
break;
case T_Hash:
+
/*
* Hash does not evaluate its targetlist or quals, so don't
- * touch those (see comments below). But we do need to fix its
- * hashkeys. The hashkeys are a little bizarre because they
- * need to match the hashclauses of the parent HashJoin node,
- * so we use join_references to fix them.
+ * touch those (see comments below). But we do need to fix
+ * its hashkeys. The hashkeys are a little bizarre because
+ * they need to match the hashclauses of the parent HashJoin
+ * node, so we use join_references to fix them.
*/
((Hash *) plan)->hashkeys =
join_references(((Hash *) plan)->hashkeys,
@@ -180,7 +181,7 @@ set_plan_references(Plan *plan, List *rtable)
NIL,
plan->lefttree->targetlist,
(Index) 0,
- targetlist_has_non_vars(plan->lefttree->targetlist));
+ targetlist_has_non_vars(plan->lefttree->targetlist));
fix_expr_references(plan,
(Node *) ((Hash *) plan)->hashkeys);
break;
@@ -196,9 +197,9 @@ set_plan_references(Plan *plan, List *rtable)
* unmodified input tuples). The optimizer is lazy about
* creating really valid targetlists for them. Best to just
* leave the targetlist alone. In particular, we do not want
- * to process subplans for them, since we will likely end
- * up reprocessing subplans that also appear in lower levels
- * of the plan tree!
+ * to process subplans for them, since we will likely end up
+ * reprocessing subplans that also appear in lower levels of
+ * the plan tree!
*/
break;
case T_Agg:
@@ -253,7 +254,7 @@ set_plan_references(Plan *plan, List *rtable)
foreach(pl, plan->initPlan)
{
- SubPlan *sp = (SubPlan *) lfirst(pl);
+ SubPlan *sp = (SubPlan *) lfirst(pl);
Assert(IsA(sp, SubPlan));
set_plan_references(sp->plan, sp->rtable);
@@ -284,14 +285,14 @@ fix_expr_references_walker(Node *node, void *context)
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
else if (IsA(node, DistinctExpr))
- set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, ScalarArrayOpExpr))
set_sa_opfuncid((ScalarArrayOpExpr *) node);
else if (IsA(node, NullIfExpr))
- set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, SubPlan))
{
- SubPlan *sp = (SubPlan *) node;
+ SubPlan *sp = (SubPlan *) node;
set_plan_references(sp->plan, sp->rtable);
}
@@ -350,10 +351,10 @@ set_join_references(Join *join, List *rtable)
if (IsA(inner_plan, IndexScan))
{
/*
- * An index is being used to reduce the number of tuples scanned
- * in the inner relation. If there are join clauses being used
- * with the index, we must update their outer-rel var nodes to
- * refer to the outer side of the join.
+ * An index is being used to reduce the number of tuples
+ * scanned in the inner relation. If there are join clauses
+ * being used with the index, we must update their outer-rel
+ * var nodes to refer to the outer side of the join.
*/
IndexScan *innerscan = (IndexScan *) inner_plan;
List *indxqualorig = innerscan->indxqualorig;
@@ -369,17 +370,18 @@ set_join_references(Join *join, List *rtable)
outer_tlist,
NIL,
innerrel,
- tlists_have_non_vars);
+ tlists_have_non_vars);
innerscan->indxqual = join_references(innerscan->indxqual,
rtable,
outer_tlist,
NIL,
innerrel,
- tlists_have_non_vars);
+ tlists_have_non_vars);
+
/*
- * We must fix the inner qpqual too, if it has join clauses
- * (this could happen if the index is lossy: some indxquals
- * may get rechecked as qpquals).
+ * We must fix the inner qpqual too, if it has join
+ * clauses (this could happen if the index is lossy: some
+ * indxquals may get rechecked as qpquals).
*/
if (NumRelids((Node *) inner_plan->qual) > 1)
inner_plan->qual = join_references(inner_plan->qual,
@@ -387,7 +389,7 @@ set_join_references(Join *join, List *rtable)
outer_tlist,
NIL,
innerrel,
- tlists_have_non_vars);
+ tlists_have_non_vars);
}
}
else if (IsA(inner_plan, TidScan))
@@ -470,8 +472,8 @@ set_uppernode_references(Plan *plan, Index subvarno)
subplan_targetlist,
tlist_has_non_vars);
output_targetlist = lappend(output_targetlist,
- makeTargetEntry(tle->resdom,
- (Expr *) newexpr));
+ makeTargetEntry(tle->resdom,
+ (Expr *) newexpr));
}
plan->targetlist = output_targetlist;
@@ -491,7 +493,7 @@ set_uppernode_references(Plan *plan, Index subvarno)
static bool
targetlist_has_non_vars(List *tlist)
{
- List *l;
+ List *l;
foreach(l, tlist)
{
@@ -740,11 +742,11 @@ fix_opfuncids_walker(Node *node, void *context)
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
else if (IsA(node, DistinctExpr))
- set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, ScalarArrayOpExpr))
set_sa_opfuncid((ScalarArrayOpExpr *) node);
else if (IsA(node, NullIfExpr))
- set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
+ set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
return expression_tree_walker(node, fix_opfuncids_walker, context);
}
@@ -757,7 +759,7 @@ fix_opfuncids_walker(Node *node, void *context)
* DistinctExpr and NullIfExpr nodes.
*/
void
-set_opfuncid(OpExpr *opexpr)
+set_opfuncid(OpExpr * opexpr)
{
if (opexpr->opfuncid == InvalidOid)
opexpr->opfuncid = get_opcode(opexpr->opno);
@@ -768,7 +770,7 @@ set_opfuncid(OpExpr *opexpr)
* As above, for ScalarArrayOpExpr nodes.
*/
static void
-set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
+set_sa_opfuncid(ScalarArrayOpExpr * opexpr)
{
if (opexpr->opfuncid == InvalidOid)
opexpr->opfuncid = get_opcode(opexpr->opno);
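
The set_opfuncid()/set_sa_opfuncid() hunks above show a lazy-fill pattern: the implementing function's OID is looked up from the operator OID only the first time it is needed. A minimal sketch of that pattern follows; the lookup function, ids, and struct here are invented stand-ins, not the backend's catalog access.

/*
 * Illustrative sketch (not backend code) of lazily filling an operator
 * expression's function id, as set_opfuncid() does above.
 */
#include <stdio.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

typedef struct
{
	Oid			opno;			/* operator id */
	Oid			opfuncid;		/* implementing function, filled lazily */
} ToyOpExpr;

static Oid
toy_get_opcode(Oid opno)
{
	/* stand-in for a catalog lookup */
	printf("catalog lookup for operator %u\n", opno);
	return opno + 1000;
}

static void
toy_set_opfuncid(ToyOpExpr *opexpr)
{
	if (opexpr->opfuncid == InvalidOid)
		opexpr->opfuncid = toy_get_opcode(opexpr->opno);
}

int
main(void)
{
	ToyOpExpr	expr = {96, InvalidOid};	/* arbitrary operator id */

	toy_set_opfuncid(&expr);	/* performs the lookup */
	toy_set_opfuncid(&expr);	/* second call is a no-op */
	printf("opno %u -> opfuncid %u\n", expr.opno, expr.opfuncid);
	return 0;
}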
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index 780bed6c2bf..154804d3d0f 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.79 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/subselect.c,v 1.80 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,26 +71,26 @@ typedef struct PlannerParamItem
{
Node *item; /* the Var, Aggref, or Param */
Index abslevel; /* its absolute query level */
-} PlannerParamItem;
+} PlannerParamItem;
typedef struct finalize_primnode_context
{
- Bitmapset *paramids; /* Set of PARAM_EXEC paramids found */
- Bitmapset *outer_params; /* Set of accessible outer paramids */
-} finalize_primnode_context;
+ Bitmapset *paramids; /* Set of PARAM_EXEC paramids found */
+ Bitmapset *outer_params; /* Set of accessible outer paramids */
+} finalize_primnode_context;
static List *convert_sublink_opers(List *lefthand, List *operOids,
- List *targetlist, int rtindex,
- List **righthandIds);
+ List *targetlist, int rtindex,
+ List **righthandIds);
static bool subplan_is_hashable(SubLink *slink, SubPlan *node);
static Node *replace_correlation_vars_mutator(Node *node, void *context);
static Node *process_sublinks_mutator(Node *node, bool *isTopQual);
static Bitmapset *finalize_plan(Plan *plan, List *rtable,
- Bitmapset *outer_params,
- Bitmapset *valid_params);
-static bool finalize_primnode(Node *node, finalize_primnode_context *context);
+ Bitmapset * outer_params,
+ Bitmapset * valid_params);
+static bool finalize_primnode(Node *node, finalize_primnode_context * context);
/*
@@ -125,7 +125,7 @@ replace_outer_var(Var *var)
pitem = (PlannerParamItem *) lfirst(ppl);
if (pitem->abslevel == abslevel && IsA(pitem->item, Var))
{
- Var *pvar = (Var *) pitem->item;
+ Var *pvar = (Var *) pitem->item;
if (pvar->varno == var->varno &&
pvar->varattno == var->varattno &&
@@ -177,7 +177,7 @@ replace_outer_agg(Aggref *agg)
* Just make a new slot every time.
*/
agg = (Aggref *) copyObject(agg);
- IncrementVarSublevelsUp((Node *) agg, - ((int) agg->agglevelsup), 0);
+ IncrementVarSublevelsUp((Node *) agg, -((int) agg->agglevelsup), 0);
Assert(agg->agglevelsup == 0);
pitem = (PlannerParamItem *) palloc(sizeof(PlannerParamItem));
@@ -238,7 +238,7 @@ generate_new_param(Oid paramtype, int32 paramtypmod)
static Node *
make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
{
- SubPlan *node = makeNode(SubPlan);
+ SubPlan *node = makeNode(SubPlan);
Query *subquery = (Query *) (slink->subselect);
double tuple_fraction;
Plan *plan;
@@ -268,8 +268,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
* in path/costsize.c.
*
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or
- * materialize its result below. In that case it would've been better to
- * specify full retrieval. At present, however, we can only detect
+ * materialize its result below. In that case it would've been better
+ * to specify full retrieval. At present, however, we can only detect
* correlation or lack of it after we've made the subplan :-(. Perhaps
* detection of correlation should be done as a separate step.
* Meanwhile, we don't want to be too optimistic about the percentage
@@ -323,12 +323,13 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
bms_free(tmpset);
/*
- * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY, or
- * MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or ARRAY,
- * we just produce a Param referring to the result of evaluating the
- * initPlan. For MULTIEXPR, we must build an AND or OR-clause of the
- * individual comparison operators, using the appropriate lefthand
- * side expressions and Params for the initPlan's target items.
+ * Un-correlated or undirect correlated plans of EXISTS, EXPR, ARRAY,
+ * or MULTIEXPR types can be used as initPlans. For EXISTS, EXPR, or
+ * ARRAY, we just produce a Param referring to the result of
+ * evaluating the initPlan. For MULTIEXPR, we must build an AND or
+ * OR-clause of the individual comparison operators, using the
+ * appropriate lefthand side expressions and Params for the initPlan's
+ * target items.
*/
if (node->parParam == NIL && slink->subLinkType == EXISTS_SUBLINK)
{
@@ -368,7 +369,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
}
else if (node->parParam == NIL && slink->subLinkType == MULTIEXPR_SUBLINK)
{
- List *exprs;
+ List *exprs;
/* Convert the lefthand exprs and oper OIDs into executable exprs */
exprs = convert_sublink_opers(lefthand,
@@ -378,6 +379,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
&node->paramIds);
node->setParam = listCopy(node->paramIds);
PlannerInitPlan = lappend(PlannerInitPlan, node);
+
/*
* The executable expressions are returned to become part of the
* outer plan's expression tree; they are not kept in the initplan
@@ -402,15 +404,16 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
*/
if (subplan_is_hashable(slink, node))
node->useHashTable = true;
+
/*
- * Otherwise, we have the option to tack a MATERIAL node onto the top
- * of the subplan, to reduce the cost of reading it repeatedly. This
- * is pointless for a direct-correlated subplan, since we'd have to
- * recompute its results each time anyway. For uncorrelated/undirect
- * correlated subplans, we add MATERIAL if the subplan's top plan node
- * is anything more complicated than a plain sequential scan, and we
- * do it even for seqscan if the qual appears selective enough to
- * eliminate many tuples.
+ * Otherwise, we have the option to tack a MATERIAL node onto the
+ * top of the subplan, to reduce the cost of reading it
+ * repeatedly. This is pointless for a direct-correlated subplan,
+ * since we'd have to recompute its results each time anyway. For
+ * uncorrelated/undirect correlated subplans, we add MATERIAL if
+ * the subplan's top plan node is anything more complicated than a
+ * plain sequential scan, and we do it even for seqscan if the
+ * qual appears selective enough to eliminate many tuples.
*/
else if (node->parParam == NIL)
{
@@ -448,9 +451,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
break;
}
if (use_material)
- {
node->plan = plan = materialize_finished_plan(plan);
- }
}
/* Convert the lefthand exprs and oper OIDs into executable exprs */
@@ -470,7 +471,7 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
/*
* The Var or Aggref has already been adjusted to have the
- * correct varlevelsup or agglevelsup. We probably don't even
+ * correct varlevelsup or agglevelsup. We probably don't even
* need to copy it again, but be safe.
*/
args = lappend(args, copyObject(pitem->item));
@@ -485,14 +486,14 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
/*
* convert_sublink_opers: given a lefthand-expressions list and a list of
- * operator OIDs, build a list of actually executable expressions. The
+ * operator OIDs, build a list of actually executable expressions. The
* righthand sides of the expressions are Params or Vars representing the
* results of the sub-select.
*
* If rtindex is 0, we build Params to represent the sub-select outputs.
* The paramids of the Params created are returned in the *righthandIds list.
*
- * If rtindex is not 0, we build Vars using that rtindex as varno. The
+ * If rtindex is not 0, we build Vars using that rtindex as varno. The
* Vars themselves are returned in *righthandIds (this is a bit of a type
* cheat, but we can get away with it).
*/
@@ -549,10 +550,10 @@ convert_sublink_opers(List *lefthand, List *operOids,
/*
* Make the expression node.
*
- * Note: we use make_op_expr in case runtime type conversion
- * function calls must be inserted for this operator! (But we
- * are not expecting to have to resolve unknown Params, so
- * it's okay to pass a null pstate.)
+ * Note: we use make_op_expr in case runtime type conversion function
+ * calls must be inserted for this operator! (But we are not
+ * expecting to have to resolve unknown Params, so it's okay to
+ * pass a null pstate.)
*/
result = lappend(result,
make_op_expr(NULL,
@@ -584,9 +585,9 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
List *opids;
/*
- * The sublink type must be "= ANY" --- that is, an IN operator.
- * (We require the operator name to be unqualified, which may be
- * overly paranoid, or may not be.) XXX since we also check that the
+ * The sublink type must be "= ANY" --- that is, an IN operator. (We
+ * require the operator name to be unqualified, which may be overly
+ * paranoid, or may not be.) XXX since we also check that the
* operators are hashable, the test on operator name may be redundant?
*/
if (slink->subLinkType != ANY_SUBLINK)
@@ -594,33 +595,37 @@ subplan_is_hashable(SubLink *slink, SubPlan *node)
if (length(slink->operName) != 1 ||
strcmp(strVal(lfirst(slink->operName)), "=") != 0)
return false;
+
/*
* The subplan must not have any direct correlation vars --- else we'd
- * have to recompute its output each time, so that the hashtable wouldn't
- * gain anything.
+ * have to recompute its output each time, so that the hashtable
+ * wouldn't gain anything.
*/
if (node->parParam != NIL)
return false;
+
/*
- * The estimated size of the subquery result must fit in SortMem.
- * (XXX what about hashtable overhead?)
+ * The estimated size of the subquery result must fit in SortMem. (XXX
+ * what about hashtable overhead?)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
if (subquery_size > SortMem * 1024L)
return false;
+
/*
- * The combining operators must be hashable, strict, and self-commutative.
- * The need for hashability is obvious, since we want to use hashing.
- * Without strictness, behavior in the presence of nulls is too
- * unpredictable. (We actually must assume even more than plain
- * strictness, see nodeSubplan.c for details.) And commutativity ensures
- * that the left and right datatypes are the same; this allows us to
- * assume that the combining operators are equality for the righthand
- * datatype, so that they can be used to compare righthand tuples as
- * well as comparing lefthand to righthand tuples. (This last restriction
- * could be relaxed by using two different sets of operators with the
- * hash table, but there is no obvious usefulness to that at present.)
+ * The combining operators must be hashable, strict, and
+ * self-commutative. The need for hashability is obvious, since we
+ * want to use hashing. Without strictness, behavior in the presence
+ * of nulls is too unpredictable. (We actually must assume even more
+ * than plain strictness, see nodeSubplan.c for details.) And
+ * commutativity ensures that the left and right datatypes are the
+ * same; this allows us to assume that the combining operators are
+ * equality for the righthand datatype, so that they can be used to
+ * compare righthand tuples as well as comparing lefthand to righthand
+ * tuples. (This last restriction could be relaxed by using two
+ * different sets of operators with the hash table, but there is no
+ * obvious usefulness to that at present.)
*/
foreach(opids, slink->operOids)
{
@@ -665,25 +670,27 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
int rtindex;
RangeTblEntry *rte;
RangeTblRef *rtr;
- InClauseInfo *ininfo;
+ InClauseInfo *ininfo;
List *exprs;
/*
- * The sublink type must be "= ANY" --- that is, an IN operator.
- * (We require the operator name to be unqualified, which may be
- * overly paranoid, or may not be.)
+ * The sublink type must be "= ANY" --- that is, an IN operator. (We
+ * require the operator name to be unqualified, which may be overly
+ * paranoid, or may not be.)
*/
if (sublink->subLinkType != ANY_SUBLINK)
return NULL;
if (length(sublink->operName) != 1 ||
strcmp(strVal(lfirst(sublink->operName)), "=") != 0)
return NULL;
+
/*
* The sub-select must not refer to any Vars of the parent query.
* (Vars of higher levels should be okay, though.)
*/
if (contain_vars_of_level((Node *) subselect, 1))
return NULL;
+
/*
* The left-hand expressions must contain some Vars of the current
* query, else it's not gonna be a join.
@@ -691,6 +698,7 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
left_varnos = pull_varnos((Node *) sublink->lefthand);
if (bms_is_empty(left_varnos))
return NULL;
+
/*
* The left-hand expressions mustn't be volatile. (Perhaps we should
* test the combining operators, too? We'd only need to point the
@@ -698,13 +706,14 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
*/
if (contain_volatile_functions((Node *) sublink->lefthand))
return NULL;
+
/*
* Okay, pull up the sub-select into top range table and jointree.
*
* We rely here on the assumption that the outer query has no references
* to the inner (necessarily true, other than the Vars that we build
- * below). Therefore this is a lot easier than what pull_up_subqueries
- * has to go through.
+ * below). Therefore this is a lot easier than what
+ * pull_up_subqueries has to go through.
*/
rte = addRangeTableEntryForSubquery(NULL,
subselect,
@@ -715,6 +724,7 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
rtr = makeNode(RangeTblRef);
rtr->rtindex = rtindex;
parse->jointree->fromlist = lappend(parse->jointree->fromlist, rtr);
+
/*
* Now build the InClauseInfo node.
*/
@@ -722,6 +732,7 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
ininfo->lefthand = left_varnos;
ininfo->righthand = bms_make_singleton(rtindex);
parse->in_info_list = lcons(ininfo, parse->in_info_list);
+
/*
* Build the result qual expressions. As a side effect,
* ininfo->sub_targetlist is filled with a list of the Vars
@@ -744,9 +755,9 @@ convert_IN_to_join(Query *parse, SubLink *sublink)
* Since we do not recurse into the arguments of uplevel aggregates, they will
* get copied to the appropriate subplan args list in the parent query with
* uplevel vars not replaced by Params, but only adjusted in level (see
- * replace_outer_agg). That's exactly what we want for the vars of the parent
+ * replace_outer_agg). That's exactly what we want for the vars of the parent
* level --- but if an aggregate's argument contains any further-up variables,
- * they have to be replaced with Params in their turn. That will happen when
+ * they have to be replaced with Params in their turn. That will happen when
* the parent level runs SS_replace_correlation_vars. Therefore it must do
* so after expanding its sublinks to subplans. And we don't want any steps
* in between, else those steps would never get applied to the aggregate
@@ -796,7 +807,7 @@ SS_process_sublinks(Node *expr, bool isQual)
static Node *
process_sublinks_mutator(Node *node, bool *isTopQual)
{
- bool locTopQual;
+ bool locTopQual;
if (node == NULL)
return NULL;
@@ -806,11 +817,13 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
List *lefthand;
/*
- * First, recursively process the lefthand-side expressions, if any.
+ * First, recursively process the lefthand-side expressions, if
+ * any.
*/
locTopQual = false;
lefthand = (List *)
process_sublinks_mutator((Node *) sublink->lefthand, &locTopQual);
+
/*
* Now build the SubPlan node and make the expr to return.
*/
@@ -818,9 +831,9 @@ process_sublinks_mutator(Node *node, bool *isTopQual)
}
/*
- * We should never see a SubPlan expression in the input (since this is
- * the very routine that creates 'em to begin with). We shouldn't find
- * ourselves invoked directly on a Query, either.
+ * We should never see a SubPlan expression in the input (since this
+ * is the very routine that creates 'em to begin with). We shouldn't
+ * find ourselves invoked directly on a Query, either.
*/
Assert(!is_subplan(node));
Assert(!IsA(node, Query));
@@ -854,9 +867,9 @@ SS_finalize_plan(Plan *plan, List *rtable)
List *lst;
/*
- * First, scan the param list to discover the sets of params that
- * are available from outer query levels and my own query level.
- * We do this once to save time in the per-plan recursion steps.
+ * First, scan the param list to discover the sets of params that are
+ * available from outer query levels and my own query level. We do
+ * this once to save time in the per-plan recursion steps.
*/
paramid = 0;
foreach(lst, PlannerParamList)
@@ -896,7 +909,7 @@ SS_finalize_plan(Plan *plan, List *rtable)
*/
static Bitmapset *
finalize_plan(Plan *plan, List *rtable,
- Bitmapset *outer_params, Bitmapset *valid_params)
+ Bitmapset * outer_params, Bitmapset * valid_params)
{
finalize_primnode_context context;
List *lst;
@@ -1038,8 +1051,8 @@ finalize_plan(Plan *plan, List *rtable,
plan->allParam = context.paramids;
/*
- * For speed at execution time, make sure extParam/allParam are actually
- * NULL if they are empty sets.
+ * For speed at execution time, make sure extParam/allParam are
+ * actually NULL if they are empty sets.
*/
if (bms_is_empty(plan->extParam))
{
@@ -1060,7 +1073,7 @@ finalize_plan(Plan *plan, List *rtable,
* expression tree to the result set.
*/
static bool
-finalize_primnode(Node *node, finalize_primnode_context *context)
+finalize_primnode(Node *node, finalize_primnode_context * context)
{
if (node == NULL)
return false;
@@ -1076,12 +1089,12 @@ finalize_primnode(Node *node, finalize_primnode_context *context)
}
if (is_subplan(node))
{
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
/* Add outer-level params needed by the subplan to paramids */
context->paramids = bms_join(context->paramids,
- bms_intersect(subplan->plan->extParam,
- context->outer_params));
+ bms_intersect(subplan->plan->extParam,
+ context->outer_params));
/* fall through to recurse into subplan args */
}
return expression_tree_walker(node, finalize_primnode,
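
The subplan_is_hashable() hunks above include a size gate: the estimated subquery result (rows times maxaligned width plus a per-tuple header) must fit in SortMem before a hashed subplan is chosen. A sketch of that arithmetic follows; MAXALIGN is assumed to round up to 8 bytes and the tuple-header size is assumed to be 32 bytes, both stand-ins for the real backend definitions, and the row estimates are invented.

/*
 * Illustrative sketch of the size test in subplan_is_hashable().
 */
#include <stdio.h>

#define TOY_MAXALIGN(len) (((len) + 7) & ~((long) 7))
#define TOY_HEAPTUPLEDATA_SIZE 32	/* assumed sizeof(HeapTupleData) */

int
main(void)
{
	long		SortMem = 1024; /* kilobytes */
	double		plan_rows = 20000.0;
	int			plan_width = 21;	/* bytes, maxaligned to 24 below */

	double		subquery_size = plan_rows *
	(TOY_MAXALIGN(plan_width) + TOY_MAXALIGN(TOY_HEAPTUPLEDATA_SIZE));

	printf("estimated hashed subplan size: %.0f bytes (limit %ld)\n",
		   subquery_size, SortMem * 1024L);
	printf("hashing is %s\n",
		   subquery_size > SortMem * 1024L ? "rejected" : "allowed");
	return 0;
}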
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 6bddf04206f..25617206d4f 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.8 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.9 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,28 +32,28 @@
/* These parameters are set by GUC */
-int from_collapse_limit;
-int join_collapse_limit;
+int from_collapse_limit;
+int join_collapse_limit;
typedef struct reduce_outer_joins_state
{
Relids relids; /* base relids within this subtree */
- bool contains_outer; /* does subtree contain outer join(s)? */
+ bool contains_outer; /* does subtree contain outer join(s)? */
List *sub_states; /* List of states for subtree components */
-} reduce_outer_joins_state;
+} reduce_outer_joins_state;
static bool is_simple_subquery(Query *subquery);
static bool has_nullable_targetlist(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno, List *subtlist);
static reduce_outer_joins_state *reduce_outer_joins_pass1(Node *jtnode);
static void reduce_outer_joins_pass2(Node *jtnode,
- reduce_outer_joins_state *state,
- Query *parse,
- Relids nonnullable_rels);
+ reduce_outer_joins_state * state,
+ Query *parse,
+ Relids nonnullable_rels);
static Relids find_nonnullable_rels(Node *node, bool top_level);
static void fix_in_clause_relids(List *in_info_list, int varno,
- Relids subrelids);
+ Relids subrelids);
static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
@@ -84,7 +84,7 @@ pull_up_IN_clauses(Query *parse, Node *node)
return NULL;
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
Node *subst;
/* Is it a convertible IN clause? If not, return it as-is */
@@ -95,12 +95,12 @@ pull_up_IN_clauses(Query *parse, Node *node)
}
if (and_clause(node))
{
- List *newclauses = NIL;
- List *oldclauses;
+ List *newclauses = NIL;
+ List *oldclauses;
foreach(oldclauses, ((BoolExpr *) node)->args)
{
- Node *oldclause = lfirst(oldclauses);
+ Node *oldclause = lfirst(oldclauses);
newclauses = lappend(newclauses,
pull_up_IN_clauses(parse,
@@ -172,22 +172,22 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
*/
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subquery,
- subquery->jointree->quals);
+ subquery->jointree->quals);
/*
- * Now, recursively pull up the subquery's subqueries, so
- * that this routine's processing is complete for its jointree
- * and rangetable. NB: if the same subquery is referenced
- * from multiple jointree items (which can't happen normally,
- * but might after rule rewriting), then we will invoke this
+ * Now, recursively pull up the subquery's subqueries, so that
+ * this routine's processing is complete for its jointree and
+ * rangetable. NB: if the same subquery is referenced from
+ * multiple jointree items (which can't happen normally, but
+ * might after rule rewriting), then we will invoke this
* processing multiple times on that subquery. OK because
* nothing will happen after the first time. We do have to be
* careful to copy everything we pull up, however, or risk
* having chunks of structure multiply linked.
*
* Note: 'false' is correct here even if we are within an outer
- * join in the upper query; the lower query starts with a clean
- * slate for outer-join semantics.
+ * join in the upper query; the lower query starts with a
+ * clean slate for outer-join semantics.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subquery, (Node *) subquery->jointree,
@@ -207,8 +207,8 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
OffsetVarNodes((Node *) subquery, rtoffset, 0);
/*
- * Upper-level vars in subquery are now one level closer to their
- * parent than before.
+ * Upper-level vars in subquery are now one level closer to
+ * their parent than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
@@ -257,13 +257,14 @@ pull_up_subqueries(Query *parse, Node *jtnode, bool below_outer_join)
parse->rowMarks = nconc(parse->rowMarks, subquery->rowMarks);
/*
- * We also have to fix the relid sets of any parent InClauseInfo
- * nodes. (This could perhaps be done by ResolveNew, but it
- * would clutter that routine's API unreasonably.)
+ * We also have to fix the relid sets of any parent
+ * InClauseInfo nodes. (This could perhaps be done by
+ * ResolveNew, but it would clutter that routine's API
+ * unreasonably.)
*/
if (parse->in_info_list)
{
- Relids subrelids;
+ Relids subrelids;
subrelids = get_relids_in_jointree((Node *) subquery->jointree);
fix_in_clause_relids(parse->in_info_list, varno, subrelids);
@@ -513,14 +514,14 @@ reduce_outer_joins(Query *parse)
reduce_outer_joins_state *state;
/*
- * To avoid doing strictness checks on more quals than necessary,
- * we want to stop descending the jointree as soon as there are no
- * outer joins below our current point. This consideration forces
- * a two-pass process. The first pass gathers information about which
+ * To avoid doing strictness checks on more quals than necessary, we
+ * want to stop descending the jointree as soon as there are no outer
+ * joins below our current point. This consideration forces a
+ * two-pass process. The first pass gathers information about which
* base rels appear below each side of each join clause, and about
- * whether there are outer join(s) below each side of each join clause.
- * The second pass examines qual clauses and changes join types as
- * it descends the tree.
+ * whether there are outer join(s) below each side of each join
+ * clause. The second pass examines qual clauses and changes join
+ * types as it descends the tree.
*/
state = reduce_outer_joins_pass1((Node *) parse->jointree);
@@ -608,7 +609,7 @@ reduce_outer_joins_pass1(Node *jtnode)
*/
static void
reduce_outer_joins_pass2(Node *jtnode,
- reduce_outer_joins_state *state,
+ reduce_outer_joins_state * state,
Query *parse,
Relids nonnullable_rels)
{
@@ -619,9 +620,7 @@ reduce_outer_joins_pass2(Node *jtnode,
if (jtnode == NULL)
elog(ERROR, "reached empty jointree");
if (IsA(jtnode, RangeTblRef))
- {
elog(ERROR, "reached base rel");
- }
else if (IsA(jtnode, FromExpr))
{
FromExpr *f = (FromExpr *) jtnode;
@@ -701,10 +700,11 @@ reduce_outer_joins_pass2(Node *jtnode,
/*
* If this join is (now) inner, we can add any nonnullability
* constraints its quals provide to those we got from above.
- * But if it is outer, we can only pass down the local constraints
- * into the nullable side, because an outer join never eliminates
- * any rows from its non-nullable side. If it's a FULL join then
- * it doesn't eliminate anything from either side.
+ * But if it is outer, we can only pass down the local
+ * constraints into the nullable side, because an outer join
+ * never eliminates any rows from its non-nullable side. If
+ * it's a FULL join then it doesn't eliminate anything from
+ * either side.
*/
if (jointype != JOIN_FULL)
{
@@ -713,7 +713,8 @@ reduce_outer_joins_pass2(Node *jtnode,
nonnullable_rels);
}
else
- local_nonnullable = NULL; /* no use in calculating it */
+ local_nonnullable = NULL; /* no use in calculating
+ * it */
if (left_state->contains_outer)
{
@@ -747,7 +748,7 @@ reduce_outer_joins_pass2(Node *jtnode,
*
* We don't use expression_tree_walker here because we don't want to
* descend through very many kinds of nodes; only the ones we can be sure
- * are strict. We can descend through the top level of implicit AND'ing,
+ * are strict. We can descend through the top level of implicit AND'ing,
* but not through any explicit ANDs (or ORs) below that, since those are not
* strict constructs. The List case handles the top-level implicit AND list
* as well as lists of arguments to strict operators/functions.
@@ -785,7 +786,7 @@ find_nonnullable_rels(Node *node, bool top_level)
}
else if (IsA(node, OpExpr))
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
if (op_strict(expr->opno))
result = find_nonnullable_rels((Node *) expr->args, false);
@@ -800,7 +801,7 @@ find_nonnullable_rels(Node *node, bool top_level)
}
else if (IsA(node, RelabelType))
{
- RelabelType *expr = (RelabelType *) node;
+ RelabelType *expr = (RelabelType *) node;
result = find_nonnullable_rels((Node *) expr->arg, top_level);
}
@@ -817,7 +818,7 @@ find_nonnullable_rels(Node *node, bool top_level)
}
else if (IsA(node, BooleanTest))
{
- BooleanTest *expr = (BooleanTest *) node;
+ BooleanTest *expr = (BooleanTest *) node;
/*
* Appropriate boolean tests are strict at top level.
@@ -894,10 +895,11 @@ simplify_jointree(Query *parse, Node *jtnode)
(childlen + myothers) <= from_collapse_limit)
{
newlist = nconc(newlist, subf->fromlist);
+
/*
- * By now, the quals have been converted to implicit-AND
- * lists, so we just need to join the lists. NOTE: we
- * put the pulled-up quals first.
+ * By now, the quals have been converted to
+ * implicit-AND lists, so we just need to join the
+ * lists. NOTE: we put the pulled-up quals first.
*/
f->quals = (Node *) nconc((List *) subf->quals,
(List *) f->quals);
@@ -917,16 +919,17 @@ simplify_jointree(Query *parse, Node *jtnode)
/* Recursively simplify the children... */
j->larg = simplify_jointree(parse, j->larg);
j->rarg = simplify_jointree(parse, j->rarg);
+
/*
- * If it is an outer join, we must not flatten it. An inner join
+ * If it is an outer join, we must not flatten it. An inner join
* is semantically equivalent to a FromExpr; we convert it to one,
* allowing it to be flattened into its parent, if the resulting
* FromExpr would have no more than join_collapse_limit members.
*/
if (j->jointype == JOIN_INNER && join_collapse_limit > 1)
{
- int leftlen,
- rightlen;
+ int leftlen,
+ rightlen;
if (j->larg && IsA(j->larg, FromExpr))
leftlen = length(((FromExpr *) j->larg)->fromlist);
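The prepjointree.c hunks above are typical of what a pgindent pass produces: block comments are re-filled to the width available at their indentation depth, local declarations are re-aligned into the standard column layout, and redundant braces around single-statement bodies are dropped. A minimal sketch of the resulting layout, using a hypothetical helper (not code from the tree) and assuming the usual postgres.h / pg_list.h includes and the old cons-cell List API of this era:

	#include "postgres.h"
	#include "nodes/pg_list.h"

	/* Hypothetical example: counts list members in the indented style shown above. */
	static int
	example_count_members(List *lst)
	{
		List	   *item;			/* cons-cell walker, old-style list API */
		int			count = 0;

		/*
		 * Comments attached to code are re-filled so that every line stays
		 * within the overall line-width target at this indentation depth.
		 */
		foreach(item, lst)
			count++;

		return count;
	}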
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index 24ea1316e11..cbe822448e4 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.35 2003/05/28 22:32:49 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.36 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,12 +21,12 @@
#include "utils/lsyscache.h"
static Expr *flatten_andors(Expr *qual);
-static void flatten_andors_and_walker(FastList *out_list, List *andlist);
-static void flatten_andors_or_walker(FastList *out_list, List *orlist);
+static void flatten_andors_and_walker(FastList * out_list, List *andlist);
+static void flatten_andors_or_walker(FastList * out_list, List *orlist);
static List *pull_ands(List *andlist);
-static void pull_ands_walker(FastList *out_list, List *andlist);
+static void pull_ands_walker(FastList * out_list, List *andlist);
static List *pull_ors(List *orlist);
-static void pull_ors_walker(FastList *out_list, List *orlist);
+static void pull_ors_walker(FastList * out_list, List *orlist);
static Expr *find_nots(Expr *qual);
static Expr *push_nots(Expr *qual);
static Expr *find_ors(Expr *qual);
@@ -328,7 +328,7 @@ flatten_andors(Expr *qual)
}
static void
-flatten_andors_and_walker(FastList *out_list, List *andlist)
+flatten_andors_and_walker(FastList * out_list, List *andlist)
{
List *arg;
@@ -344,7 +344,7 @@ flatten_andors_and_walker(FastList *out_list, List *andlist)
}
static void
-flatten_andors_or_walker(FastList *out_list, List *orlist)
+flatten_andors_or_walker(FastList * out_list, List *orlist)
{
List *arg;
@@ -377,7 +377,7 @@ pull_ands(List *andlist)
}
static void
-pull_ands_walker(FastList *out_list, List *andlist)
+pull_ands_walker(FastList * out_list, List *andlist)
{
List *arg;
@@ -410,7 +410,7 @@ pull_ors(List *orlist)
}
static void
-pull_ors_walker(FastList *out_list, List *orlist)
+pull_ors_walker(FastList * out_list, List *orlist)
{
List *arg;
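One pgindent quirk is visible throughout the prepqual.c hunks: when a parameter's type is a typedef the tool apparently does not recognize, it emits a space between the asterisk and the parameter name (FastList * out_list), while recognized types keep the usual spacing. A hedged illustration with hypothetical prototypes (not from the tree); FastList and List are the PostgreSQL typedefs used in the hunks:

	/* Typedef known to the indenter: the '*' hugs the parameter name. */
	static void walker_known(List *out_list, List *andlist);

	/* Typedef missing from its typedef list: a stray space appears after '*'. */
	static void walker_unknown(FastList * out_list, List *andlist);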
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index 9c937f17602..d33ff7417d4 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.62 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.63 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -174,12 +174,12 @@ expand_targetlist(List *tlist, int command_type,
*
* When generating a NULL constant for a dropped column, we label
* it INT4 (any other guaranteed-to-exist datatype would do as
- * well). We can't label it with the dropped column's datatype
- * since that might not exist anymore. It does not really
- * matter what we claim the type is, since NULL is NULL --- its
- * representation is datatype-independent. This could perhaps
- * confuse code comparing the finished plan to the target
- * relation, however.
+ * well). We can't label it with the dropped column's
+ * datatype since that might not exist anymore. It does not
+ * really matter what we claim the type is, since NULL is NULL
+ * --- its representation is datatype-independent. This could
+ * perhaps confuse code comparing the finished plan to the
+ * target relation, however.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
@@ -193,7 +193,7 @@ expand_targetlist(List *tlist, int command_type,
new_expr = (Node *) makeConst(atttype,
att_tup->attlen,
(Datum) 0,
- true, /* isnull */
+ true, /* isnull */
att_tup->attbyval);
new_expr = coerce_to_domain(new_expr,
InvalidOid,
@@ -206,8 +206,8 @@ expand_targetlist(List *tlist, int command_type,
new_expr = (Node *) makeConst(INT4OID,
sizeof(int32),
(Datum) 0,
- true, /* isnull */
- true /* byval */);
+ true, /* isnull */
+ true /* byval */ );
/* label resdom with INT4, too */
atttype = INT4OID;
atttypmod = -1;
@@ -228,8 +228,8 @@ expand_targetlist(List *tlist, int command_type,
new_expr = (Node *) makeConst(INT4OID,
sizeof(int32),
(Datum) 0,
- true, /* isnull */
- true /* byval */);
+ true, /* isnull */
+ true /* byval */ );
/* label resdom with INT4, too */
atttype = INT4OID;
atttypmod = -1;
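The preptlist.c hunks mostly move trailing comments into the comment column and add a space before the closing parenthesis when the last argument ends in a comment. The resulting call shape, quoted from the hunk above rather than newly written:

	new_expr = (Node *) makeConst(INT4OID,
								  sizeof(int32),
								  (Datum) 0,
								  true,		/* isnull */
								  true /* byval */ );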
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 5422d591728..49ebc822087 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.101 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.102 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -427,7 +427,7 @@ generate_setop_tlist(List *colTypes, int flag,
}
else
{
- expr = coerce_to_common_type(NULL, /* no UNKNOWNs here */
+ expr = coerce_to_common_type(NULL, /* no UNKNOWNs here */
expr,
colType,
"UNION/INTERSECT/EXCEPT");
@@ -849,11 +849,11 @@ adjust_inherited_attrs_mutator(Node *node,
if (IsA(node, InClauseInfo))
{
/* Copy the InClauseInfo node with correct mutation of subnodes */
- InClauseInfo *ininfo;
+ InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
adjust_inherited_attrs_mutator,
- (void *) context);
+ (void *) context);
/* now fix InClauseInfo's relid sets */
ininfo->lefthand = adjust_relid_set(ininfo->lefthand,
context->old_rt_index,
@@ -880,9 +880,9 @@ adjust_inherited_attrs_mutator(Node *node,
adjust_inherited_attrs_mutator((Node *) oldinfo->clause, context);
/*
- * We do NOT want to copy the original subclauseindices list, since
- * the new rel will have different indices. The list will be rebuilt
- * when needed during later planning.
+ * We do NOT want to copy the original subclauseindices list,
+ * since the new rel will have different indices. The list will
+ * be rebuilt when needed during later planning.
*/
newinfo->subclauseindices = NIL;
@@ -896,7 +896,7 @@ adjust_inherited_attrs_mutator(Node *node,
context->old_rt_index,
context->new_rt_index);
- newinfo->eval_cost.startup = -1; /* reset these too */
+ newinfo->eval_cost.startup = -1; /* reset these too */
newinfo->this_selec = -1;
newinfo->left_pathkey = NIL; /* and these */
newinfo->right_pathkey = NIL;
@@ -925,7 +925,7 @@ adjust_inherited_attrs_mutator(Node *node,
*/
if (is_subplan(node))
{
- SubPlan *subplan;
+ SubPlan *subplan;
/* Copy the node and process subplan args */
node = expression_tree_mutator(node, adjust_inherited_attrs_mutator,
@@ -963,7 +963,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid)
*
* The expressions have already been fixed, but we have to make sure that
* the target resnos match the child table (they may not, in the case of
- * a column that was added after-the-fact by ALTER TABLE). In some cases
+ * a column that was added after-the-fact by ALTER TABLE). In some cases
* this can force us to re-order the tlist to preserve resno ordering.
* (We do all this work in special cases so that preptlist.c is fast for
* the typical case.)
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 3ebc1c650e8..4a419a35dc8 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.149 2003/08/03 23:46:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.150 2003/08/04 00:43:20 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -50,7 +50,7 @@ typedef struct
int nargs;
List *args;
int *usecounts;
-} substitute_actual_parameters_context;
+} substitute_actual_parameters_context;
static bool contain_agg_clause_walker(Node *node, void *context);
static bool contain_distinct_agg_clause_walker(Node *node, void *context);
@@ -62,15 +62,15 @@ static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static Node *eval_const_expressions_mutator(Node *node, List *active_fns);
static Expr *simplify_function(Oid funcid, Oid result_type, List *args,
- bool allow_inline, List *active_fns);
+ bool allow_inline, List *active_fns);
static Expr *evaluate_function(Oid funcid, Oid result_type, List *args,
- HeapTuple func_tuple);
+ HeapTuple func_tuple);
static Expr *inline_function(Oid funcid, Oid result_type, List *args,
- HeapTuple func_tuple, List *active_fns);
+ HeapTuple func_tuple, List *active_fns);
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
- int *usecounts);
+ int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
- substitute_actual_parameters_context *context);
+ substitute_actual_parameters_context * context);
static void sql_inline_error_callback(void *arg);
static Expr *evaluate_expr(Expr *expr, Oid result_type);
@@ -110,7 +110,7 @@ make_opclause(Oid opno, Oid opresulttype, bool opretset,
Node *
get_leftop(Expr *clause)
{
- OpExpr *expr = (OpExpr *) clause;
+ OpExpr *expr = (OpExpr *) clause;
if (expr->args != NIL)
return lfirst(expr->args);
@@ -127,7 +127,7 @@ get_leftop(Expr *clause)
Node *
get_rightop(Expr *clause)
{
- OpExpr *expr = (OpExpr *) clause;
+ OpExpr *expr = (OpExpr *) clause;
if (expr->args != NIL && lnext(expr->args) != NIL)
return lfirst(lnext(expr->args));
@@ -408,7 +408,7 @@ count_agg_clause_walker(Node *node, int *count)
if (contain_agg_clause((Node *) ((Aggref *) node)->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls may not be nested")));
+ errmsg("aggregate function calls may not be nested")));
/*
* Having checked that, we need not recurse into the argument.
@@ -454,7 +454,7 @@ expression_returns_set_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
if (expr->opretset)
return true;
@@ -556,7 +556,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@@ -564,7 +564,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, DistinctExpr))
{
- DistinctExpr *expr = (DistinctExpr *) node;
+ DistinctExpr *expr = (DistinctExpr *) node;
if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@@ -572,7 +572,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@@ -580,7 +580,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, NullIfExpr))
{
- NullIfExpr *expr = (NullIfExpr *) node;
+ NullIfExpr *expr = (NullIfExpr *) node;
if (op_volatile(expr->opno) != PROVOLATILE_IMMUTABLE)
return true;
@@ -588,7 +588,7 @@ contain_mutable_functions_walker(Node *node, void *context)
}
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
List *opid;
foreach(opid, sublink->operOids)
@@ -639,7 +639,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@@ -647,7 +647,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, DistinctExpr))
{
- DistinctExpr *expr = (DistinctExpr *) node;
+ DistinctExpr *expr = (DistinctExpr *) node;
if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@@ -655,7 +655,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@@ -663,7 +663,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, NullIfExpr))
{
- NullIfExpr *expr = (NullIfExpr *) node;
+ NullIfExpr *expr = (NullIfExpr *) node;
if (op_volatile(expr->opno) == PROVOLATILE_VOLATILE)
return true;
@@ -671,7 +671,7 @@ contain_volatile_functions_walker(Node *node, void *context)
}
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
List *opid;
foreach(opid, sublink->operOids)
@@ -698,7 +698,7 @@ contain_volatile_functions_walker(Node *node, void *context)
* could produce non-NULL output with a NULL input.
*
* XXX we do not examine sub-selects to see if they contain uses of
- * nonstrict functions. It's not real clear if that is correct or not...
+ * nonstrict functions. It's not real clear if that is correct or not...
* for the current usage it does not matter, since inline_function()
* rejects cases with sublinks.
*/
@@ -723,7 +723,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
}
if (IsA(node, OpExpr))
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
if (!op_strict(expr->opno))
return true;
@@ -766,7 +766,7 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true;
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
List *opid;
foreach(opid, sublink->operOids)
@@ -981,7 +981,7 @@ NumRelids(Node *clause)
* XXX the clause is destructively modified!
*/
void
-CommuteClause(OpExpr *clause)
+CommuteClause(OpExpr * clause)
{
Oid opoid;
Node *temp;
@@ -1062,18 +1062,20 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
(void *) active_fns);
+
/*
- * Code for op/func reduction is pretty bulky, so split it out
- * as a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as
+ * a separate function.
*/
simple = simplify_function(expr->funcid, expr->funcresulttype, args,
true, active_fns);
if (simple) /* successfully simplified it */
return (Node *) simple;
+
/*
* The expression cannot be simplified any further, so build and
- * return a replacement FuncExpr node using the possibly-simplified
- * arguments.
+ * return a replacement FuncExpr node using the
+ * possibly-simplified arguments.
*/
newexpr = makeNode(FuncExpr);
newexpr->funcid = expr->funcid;
@@ -1098,19 +1100,22 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
args = (List *) expression_tree_mutator((Node *) expr->args,
eval_const_expressions_mutator,
(void *) active_fns);
+
/*
- * Need to get OID of underlying function. Okay to scribble on
+ * Need to get OID of underlying function. Okay to scribble on
* input to this extent.
*/
set_opfuncid(expr);
+
/*
- * Code for op/func reduction is pretty bulky, so split it out
- * as a separate function.
+ * Code for op/func reduction is pretty bulky, so split it out as
+ * a separate function.
*/
simple = simplify_function(expr->opfuncid, expr->opresulttype, args,
true, active_fns);
if (simple) /* successfully simplified it */
return (Node *) simple;
+
/*
* The expression cannot be simplified any further, so build and
* return a replacement OpExpr node using the possibly-simplified
@@ -1136,8 +1141,8 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
DistinctExpr *newexpr;
/*
- * Reduce constants in the DistinctExpr's arguments. We know args is
- * either NIL or a List node, so we can call
+ * Reduce constants in the DistinctExpr's arguments. We know args
+ * is either NIL or a List node, so we can call
* expression_tree_mutator directly rather than recursing to self.
*/
args = (List *) expression_tree_mutator((Node *) expr->args,
@@ -1145,9 +1150,9 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
(void *) active_fns);
/*
- * We must do our own check for NULLs because
- * DistinctExpr has different results for NULL input
- * than the underlying operator does.
+ * We must do our own check for NULLs because DistinctExpr has
+ * different results for NULL input than the underlying operator
+ * does.
*/
foreach(arg, args)
{
@@ -1175,10 +1180,12 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
/* (NOT okay to try to inline it, though!) */
/*
- * Need to get OID of underlying function. Okay to scribble on
- * input to this extent.
+ * Need to get OID of underlying function. Okay to scribble
+ * on input to this extent.
*/
- set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
+ set_opfuncid((OpExpr *) expr); /* rely on struct
+ * equivalence */
+
/*
* Code for op/func reduction is pretty bulky, so split it out
* as a separate function.
@@ -1191,7 +1198,7 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
* Since the underlying operator is "=", must negate its
* result
*/
- Const *csimple = (Const *) simple;
+ Const *csimple = (Const *) simple;
Assert(IsA(csimple, Const));
csimple->constvalue =
@@ -1359,8 +1366,7 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
if (IsA(node, SubPlan))
{
/*
- * Return a SubPlan unchanged --- too late to do anything
- * with it.
+ * Return a SubPlan unchanged --- too late to do anything with it.
*
* XXX should we ereport() here instead? Probably this routine
* should never be invoked after SubPlan creation.
@@ -1487,16 +1493,16 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
}
if (IsA(node, ArrayExpr))
{
- ArrayExpr *arrayexpr = (ArrayExpr *) node;
- ArrayExpr *newarray;
- bool all_const = true;
+ ArrayExpr *arrayexpr = (ArrayExpr *) node;
+ ArrayExpr *newarray;
+ bool all_const = true;
FastList newelems;
- List *element;
+ List *element;
FastListInit(&newelems);
foreach(element, arrayexpr->elements)
{
- Node *e;
+ Node *e;
e = eval_const_expressions_mutator((Node *) lfirst(element),
active_fns);
@@ -1522,19 +1528,20 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
CoalesceExpr *coalesceexpr = (CoalesceExpr *) node;
CoalesceExpr *newcoalesce;
FastList newargs;
- List *arg;
+ List *arg;
FastListInit(&newargs);
foreach(arg, coalesceexpr->args)
{
- Node *e;
+ Node *e;
e = eval_const_expressions_mutator((Node *) lfirst(arg),
active_fns);
- /*
- * We can remove null constants from the list.
- * For a non-null constant, if it has not been preceded by any
- * other non-null-constant expressions then that is the result.
+
+ /*
+ * We can remove null constants from the list. For a non-null
+ * constant, if it has not been preceded by any other
+ * non-null-constant expressions then that is the result.
*/
if (IsA(e, Const))
{
@@ -1555,10 +1562,11 @@ eval_const_expressions_mutator(Node *node, List *active_fns)
{
/*
* We can optimize field selection from a whole-row Var into a
- * simple Var. (This case won't be generated directly by the
- * parser, because ParseComplexProjection short-circuits it.
- * But it can arise while simplifying functions.) If the argument
- * isn't a whole-row Var, just fall through to do generic processing.
+ * simple Var. (This case won't be generated directly by the
+ * parser, because ParseComplexProjection short-circuits it. But
+ * it can arise while simplifying functions.) If the argument
+ * isn't a whole-row Var, just fall through to do generic
+ * processing.
*/
FieldSelect *fselect = (FieldSelect *) node;
Var *argvar = (Var *) fselect->arg;
@@ -1604,12 +1612,12 @@ simplify_function(Oid funcid, Oid result_type, List *args,
Expr *newexpr;
/*
- * We have two strategies for simplification: either execute the function
- * to deliver a constant result, or expand in-line the body of the
- * function definition (which only works for simple SQL-language
- * functions, but that is a common case). In either case we need access
- * to the function's pg_proc tuple, so fetch it just once to use in both
- * attempts.
+ * We have two strategies for simplification: either execute the
+ * function to deliver a constant result, or expand in-line the body
+ * of the function definition (which only works for simple
+ * SQL-language functions, but that is a common case). In either case
+ * we need access to the function's pg_proc tuple, so fetch it just
+ * once to use in both attempts.
*/
func_tuple = SearchSysCache(PROCOID,
ObjectIdGetDatum(funcid),
@@ -1668,15 +1676,15 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
/*
* If the function is strict and has a constant-NULL input, it will
* never be called at all, so we can replace the call by a NULL
- * constant, even if there are other inputs that aren't constant,
- * and even if the function is not otherwise immutable.
+ * constant, even if there are other inputs that aren't constant, and
+ * even if the function is not otherwise immutable.
*/
if (funcform->proisstrict && has_null_input)
return (Expr *) makeNullConst(result_type);
/*
- * Otherwise, can simplify only if the function is immutable and
- * all inputs are constants. (For a non-strict function, constant NULL
+ * Otherwise, can simplify only if the function is immutable and all
+ * inputs are constants. (For a non-strict function, constant NULL
* inputs are treated the same as constant non-NULL inputs.)
*/
if (funcform->provolatile != PROVOLATILE_IMMUTABLE ||
@@ -1692,7 +1700,7 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
newexpr->funcid = funcid;
newexpr->funcresulttype = result_type;
newexpr->funcretset = false;
- newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
+ newexpr->funcformat = COERCE_EXPLICIT_CALL; /* doesn't matter */
newexpr->args = args;
return evaluate_expr((Expr *) newexpr, result_type);
@@ -1712,7 +1720,7 @@ evaluate_function(Oid funcid, Oid result_type, List *args,
* do not re-expand them. Also, if a parameter is used more than once
* in the SQL-function body, we require it not to contain any volatile
* functions (volatiles might deliver inconsistent answers) nor to be
- * unreasonably expensive to evaluate. The expensiveness check not only
+ * unreasonably expensive to evaluate. The expensiveness check not only
* prevents us from doing multiple evaluations of an expensive parameter
* at runtime, but is a safety value to limit growth of an expression due
* to repeated inlining.
@@ -1747,7 +1755,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* Forget it if the function is not SQL-language or has other
- * showstopper properties. (The nargs check is just paranoia.)
+ * showstopper properties. (The nargs check is just paranoia.)
*/
if (funcform->prolang != SQLlanguageId ||
funcform->prosecdef ||
@@ -1755,7 +1763,10 @@ inline_function(Oid funcid, Oid result_type, List *args,
funcform->pronargs != length(args))
return NULL;
- /* Forget it if declared return type is not base, domain, or polymorphic */
+ /*
+ * Forget it if declared return type is not base, domain, or
+ * polymorphic
+ */
result_typtype = get_typtype(funcform->prorettype);
if (result_typtype != 'b' &&
result_typtype != 'd')
@@ -1788,8 +1799,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}
/*
- * Setup error traceback support for ereport(). This is so that we can
- * finger the function that bad information came from.
+ * Setup error traceback support for ereport(). This is so that we
+ * can finger the function that bad information came from.
*/
sqlerrcontext.callback = sql_inline_error_callback;
sqlerrcontext.arg = funcform;
@@ -1818,9 +1829,9 @@ inline_function(Oid funcid, Oid result_type, List *args,
/*
* We just do parsing and parse analysis, not rewriting, because
- * rewriting will not affect table-free-SELECT-only queries, which is all
- * that we care about. Also, we can punt as soon as we detect more than
- * one command in the function body.
+ * rewriting will not affect table-free-SELECT-only queries, which is
+ * all that we care about. Also, we can punt as soon as we detect
+ * more than one command in the function body.
*/
raw_parsetree_list = pg_parse_query(src);
if (length(raw_parsetree_list) != 1)
@@ -1863,8 +1874,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
* then it wasn't type-checked at definition time; must do so now.
* (This will raise an error if wrong, but that's okay since the
* function would fail at runtime anyway. Note we do not try this
- * until we have verified that no rewriting was needed; that's probably
- * not important, but let's be careful.)
+ * until we have verified that no rewriting was needed; that's
+ * probably not important, but let's be careful.)
*/
if (polymorphic)
check_sql_fn_retval(result_type, get_typtype(result_type),
@@ -1875,9 +1886,9 @@ inline_function(Oid funcid, Oid result_type, List *args,
* set, and it mustn't be more volatile than the surrounding function
* (this is to avoid breaking hacks that involve pretending a function
* is immutable when it really ain't). If the surrounding function is
- * declared strict, then the expression must contain only strict constructs
- * and must use all of the function parameters (this is overkill, but
- * an exact analysis is hard).
+ * declared strict, then the expression must contain only strict
+ * constructs and must use all of the function parameters (this is
+ * overkill, but an exact analysis is hard).
*/
if (expression_returns_set(newexpr))
goto fail;
@@ -1886,7 +1897,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
contain_mutable_functions(newexpr))
goto fail;
else if (funcform->provolatile == PROVOLATILE_STABLE &&
- contain_volatile_functions(newexpr))
+ contain_volatile_functions(newexpr))
goto fail;
if (funcform->proisstrict &&
@@ -1907,7 +1918,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
i = 0;
foreach(arg, args)
{
- Node *param = lfirst(arg);
+ Node *param = lfirst(arg);
if (usecounts[i] == 0)
{
@@ -1932,6 +1943,7 @@ inline_function(Oid funcid, Oid result_type, List *args,
if (eval_cost.startup + eval_cost.per_tuple >
10 * cpu_operator_cost)
goto fail;
+
/*
* Check volatility last since this is more expensive than the
* above tests
@@ -1943,8 +1955,8 @@ inline_function(Oid funcid, Oid result_type, List *args,
}
/*
- * Whew --- we can make the substitution. Copy the modified expression
- * out of the temporary memory context, and clean up.
+ * Whew --- we can make the substitution. Copy the modified
+ * expression out of the temporary memory context, and clean up.
*/
MemoryContextSwitchTo(oldcxt);
@@ -1981,7 +1993,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,
{
substitute_actual_parameters_context context;
- context.nargs = nargs;
+ context.nargs = nargs;
context.args = args;
context.usecounts = usecounts;
@@ -1990,7 +2002,7 @@ substitute_actual_parameters(Node *expr, int nargs, List *args,
static Node *
substitute_actual_parameters_mutator(Node *node,
- substitute_actual_parameters_context *context)
+ substitute_actual_parameters_context * context)
{
if (node == NULL)
return NULL;
@@ -2059,10 +2071,10 @@ evaluate_expr(Expr *expr, Oid result_type)
/*
* And evaluate it.
*
- * It is OK to use a default econtext because none of the
- * ExecEvalExpr() code used in this situation will use econtext. That
- * might seem fortuitous, but it's not so unreasonable --- a constant
- * expression does not depend on context, by definition, n'est ce pas?
+ * It is OK to use a default econtext because none of the ExecEvalExpr()
+ * code used in this situation will use econtext. That might seem
+ * fortuitous, but it's not so unreasonable --- a constant expression
+ * does not depend on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
@@ -2177,7 +2189,7 @@ evaluate_expr(Expr *expr, Oid result_type)
*
* expression_tree_walker will handle SubPlan nodes by recursing normally
* into the "exprs" and "args" lists (which are expressions belonging to
- * the outer plan). It will not touch the completed subplan, however. Since
+ * the outer plan). It will not touch the completed subplan, however. Since
* there is no link to the original Query, it is not possible to recurse into
* subselects of an already-planned expression tree. This is OK for current
* uses, but may need to be revisited in future.
@@ -2283,6 +2295,7 @@ expression_tree_walker(Node *node,
if (expression_tree_walker((Node *) sublink->lefthand,
walker, context))
return true;
+
/*
* Also invoke the walker on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
@@ -2292,7 +2305,7 @@ expression_tree_walker(Node *node,
break;
case T_SubPlan:
{
- SubPlan *subplan = (SubPlan *) node;
+ SubPlan *subplan = (SubPlan *) node;
/* recurse into the exprs list, but not into the Plan */
if (expression_tree_walker((Node *) subplan->exprs,
@@ -2457,12 +2470,12 @@ query_tree_walker(Query *query,
/* nothing to do */
break;
case RTE_SUBQUERY:
- if (! (flags & QTW_IGNORE_RT_SUBQUERIES))
+ if (!(flags & QTW_IGNORE_RT_SUBQUERIES))
if (walker(rte->subquery, context))
return true;
break;
case RTE_JOIN:
- if (! (flags & QTW_IGNORE_JOINALIASES))
+ if (!(flags & QTW_IGNORE_JOINALIASES))
if (walker(rte->joinaliasvars, context))
return true;
break;
@@ -2622,8 +2635,8 @@ expression_tree_mutator(Node *node,
break;
case T_DistinctExpr:
{
- DistinctExpr *expr = (DistinctExpr *) node;
- DistinctExpr *newnode;
+ DistinctExpr *expr = (DistinctExpr *) node;
+ DistinctExpr *newnode;
FLATCOPY(newnode, expr, DistinctExpr);
MUTATE(newnode->args, expr->args, List *);
@@ -2632,8 +2645,8 @@ expression_tree_mutator(Node *node,
break;
case T_ScalarArrayOpExpr:
{
- ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
- ScalarArrayOpExpr *newnode;
+ ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *newnode;
FLATCOPY(newnode, expr, ScalarArrayOpExpr);
MUTATE(newnode->args, expr->args, List *);
@@ -2657,6 +2670,7 @@ expression_tree_mutator(Node *node,
FLATCOPY(newnode, sublink, SubLink);
MUTATE(newnode->lefthand, sublink->lefthand, List *);
+
/*
* Also invoke the mutator on the sublink's Query node, so
* it can recurse into the sub-query if it wants to.
@@ -2667,8 +2681,8 @@ expression_tree_mutator(Node *node,
break;
case T_SubPlan:
{
- SubPlan *subplan = (SubPlan *) node;
- SubPlan *newnode;
+ SubPlan *subplan = (SubPlan *) node;
+ SubPlan *newnode;
FLATCOPY(newnode, subplan, SubPlan);
/* transform exprs list */
@@ -2725,8 +2739,8 @@ expression_tree_mutator(Node *node,
break;
case T_ArrayExpr:
{
- ArrayExpr *arrayexpr = (ArrayExpr *) node;
- ArrayExpr *newnode;
+ ArrayExpr *arrayexpr = (ArrayExpr *) node;
+ ArrayExpr *newnode;
FLATCOPY(newnode, arrayexpr, ArrayExpr);
MUTATE(newnode->elements, arrayexpr->elements, List *);
@@ -2745,8 +2759,8 @@ expression_tree_mutator(Node *node,
break;
case T_NullIfExpr:
{
- NullIfExpr *expr = (NullIfExpr *) node;
- NullIfExpr *newnode;
+ NullIfExpr *expr = (NullIfExpr *) node;
+ NullIfExpr *newnode;
FLATCOPY(newnode, expr, NullIfExpr);
MUTATE(newnode->args, expr->args, List *);
@@ -2891,7 +2905,7 @@ expression_tree_mutator(Node *node,
* indicated items. (More flag bits may be added as needed.)
*
* Normally the Query node itself is copied, but some callers want it to be
- * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
+ * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All
* modified substructure is safely copied in any case.
*/
Query *
@@ -2905,9 +2919,9 @@ query_tree_mutator(Query *query,
Assert(query != NULL && IsA(query, Query));
- if (! (flags & QTW_DONT_COPY_QUERY))
+ if (!(flags & QTW_DONT_COPY_QUERY))
{
- Query *newquery;
+ Query *newquery;
FLATCOPY(newquery, query, Query);
query = newquery;
@@ -2933,7 +2947,7 @@ query_tree_mutator(Query *query,
/* nothing to do, don't bother to make a copy */
break;
case RTE_SUBQUERY:
- if (! (flags & QTW_IGNORE_RT_SUBQUERIES))
+ if (!(flags & QTW_IGNORE_RT_SUBQUERIES))
{
FLATCOPY(newrte, rte, RangeTblEntry);
CHECKFLATCOPY(newrte->subquery, rte->subquery, Query);
@@ -2942,7 +2956,7 @@ query_tree_mutator(Query *query,
}
break;
case RTE_JOIN:
- if (! (flags & QTW_IGNORE_JOINALIASES))
+ if (!(flags & QTW_IGNORE_JOINALIASES))
{
FLATCOPY(newrte, rte, RangeTblEntry);
MUTATE(newrte->joinaliasvars, rte->joinaliasvars, List *);
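Besides comment re-wrapping, the clauses.c hunks show two smaller normalizations: the space after a unary '!' is removed, so "! (flags & ...)" becomes "!(flags & ...)", and a blank line is inserted before a block comment that directly follows code. A short hedged sketch (the function is hypothetical; QTW_IGNORE_RT_SUBQUERIES is the flag used in the hunks):

	/* Hypothetical example of the post-pgindent spelling of a negated flag test. */
	static bool
	example_flag_test(int flags)
	{
		int			mask = QTW_IGNORE_RT_SUBQUERIES;

		/*
		 * The blank line above this comment is pgindent's doing, and the
		 * negated bit test below loses the space after the unary '!'.
		 */
		return !(flags & mask);
	}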
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
index 599dcf44d9b..add36b55d3b 100644
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.34 2003/02/08 20:20:55 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.35 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@ make_joininfo_node(RelOptInfo *this_rel, Relids join_relids)
* appropriate rel node if necessary).
*
* Note that the same copy of the restrictinfo node is linked to by all the
- * lists it is in. This allows us to exploit caching of information about
+ * lists it is in. This allows us to exploit caching of information about
* the restriction clause (but we must be careful that the information does
* not depend on context).
*
@@ -109,9 +109,10 @@ add_join_clause_to_rels(Query *root,
unjoined_relids);
joininfo->jinfo_restrictinfo = lappend(joininfo->jinfo_restrictinfo,
restrictinfo);
+
/*
* Can't bms_free(unjoined_relids) because new joininfo node may
- * link to it. We could avoid leaking memory by doing bms_copy()
+ * link to it. We could avoid leaking memory by doing bms_copy()
* in make_joininfo_node, but for now speed seems better.
*/
}
@@ -156,13 +157,14 @@ remove_join_clause_from_rels(Query *root,
joininfo = find_joininfo_node(find_base_rel(root, cur_relid),
unjoined_relids);
Assert(joininfo);
+
/*
- * Remove the restrictinfo from the list. Pointer comparison
- * is sufficient.
+ * Remove the restrictinfo from the list. Pointer comparison is
+ * sufficient.
*/
Assert(ptrMember(restrictinfo, joininfo->jinfo_restrictinfo));
joininfo->jinfo_restrictinfo = lremove(restrictinfo,
- joininfo->jinfo_restrictinfo);
+ joininfo->jinfo_restrictinfo);
bms_free(unjoined_relids);
}
bms_free(tmprelids);
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 61ab51747c1..3a14d6b871f 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.93 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.94 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -159,7 +159,7 @@ set_cheapest(RelOptInfo *parent_rel)
parent_rel->cheapest_startup_path = cheapest_startup_path;
parent_rel->cheapest_total_path = cheapest_total_path;
- parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
+ parent_rel->cheapest_unique_path = NULL; /* computed only if needed */
}
/*
@@ -452,7 +452,7 @@ create_result_path(RelOptInfo *rel, Path *subpath, List *constantqual)
ResultPath *pathnode = makeNode(ResultPath);
pathnode->path.pathtype = T_Result;
- pathnode->path.parent = rel; /* may be NULL */
+ pathnode->path.parent = rel; /* may be NULL */
if (subpath)
pathnode->path.pathkeys = subpath->pathkeys;
@@ -545,8 +545,8 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
pathnode->path.parent = rel;
/*
- * Treat the output as always unsorted, since we don't necessarily have
- * pathkeys to represent it.
+ * Treat the output as always unsorted, since we don't necessarily
+ * have pathkeys to represent it.
*/
pathnode->path.pathkeys = NIL;
@@ -591,26 +591,28 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
subpath->total_cost,
rel->rows,
rel->width);
+
/*
* Charge one cpu_operator_cost per comparison per input tuple. We
- * assume all columns get compared at most of the tuples. (XXX probably
- * this is an overestimate.) This should agree with make_unique.
+ * assume all columns get compared at most of the tuples. (XXX
+ * probably this is an overestimate.) This should agree with
+ * make_unique.
*/
sort_path.total_cost += cpu_operator_cost * rel->rows * numCols;
/*
* Is it safe to use a hashed implementation? If so, estimate and
- * compare costs. We only try this if we know the targetlist for
- * sure (else we can't be sure about the datatypes involved).
+ * compare costs. We only try this if we know the targetlist for sure
+ * (else we can't be sure about the datatypes involved).
*/
pathnode->use_hash = false;
if (enable_hashagg && sub_targetlist && hash_safe_tlist(sub_targetlist))
{
/*
- * Estimate the overhead per hashtable entry at 64 bytes (same
- * as in planner.c).
+ * Estimate the overhead per hashtable entry at 64 bytes (same as
+ * in planner.c).
*/
- int hashentrysize = rel->width + 64;
+ int hashentrysize = rel->width + 64;
if (hashentrysize * pathnode->rows <= SortMem * 1024L)
{
@@ -647,7 +649,7 @@ create_unique_path(Query *root, RelOptInfo *rel, Path *subpath)
* We assume hashed aggregation will work if the datatype's equality operator
* is marked hashjoinable.
*
- * XXX this probably should be somewhere else. See also hash_safe_grouping
+ * XXX this probably should be somewhere else. See also hash_safe_grouping
* in plan/planner.c.
*/
static bool
@@ -788,6 +790,7 @@ create_mergejoin_path(Query *root,
if (innersortkeys &&
pathkeys_contained_in(innersortkeys, inner_path->pathkeys))
innersortkeys = NIL;
+
/*
* If we are not sorting the inner path, we may need a materialize
* node to ensure it can be marked/restored. (Sort does support
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 6823f03267e..3ae98ed2536 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.85 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.86 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
rel->max_attr = RelationGetNumberOfAttributes(relation);
/*
- * Make list of indexes. Ignore indexes on system catalogs if told to.
+ * Make list of indexes. Ignore indexes on system catalogs if told
+ * to.
*/
if (IsIgnoringSystemIndexes() && IsSystemClass(relation->rd_rel))
hasindex = false;
@@ -99,8 +100,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->ncolumns = ncolumns = index->indnatts;
/*
- * Need to make classlist and ordering arrays large enough to put
- * a terminating 0 at the end of each one.
+ * Need to make classlist and ordering arrays large enough to
+ * put a terminating 0 at the end of each one.
*/
info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
info->classlist = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
@@ -118,7 +119,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
info->amcostestimate = index_cost_estimator(indexRelation);
/*
- * Fetch the ordering operators associated with the index, if any.
+ * Fetch the ordering operators associated with the index, if
+ * any.
*/
amorderstrategy = indexRelation->rd_am->amorderstrategy;
if (amorderstrategy != 0)
@@ -135,8 +137,8 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Fetch the index expressions and predicate, if any. We must
* modify the copies we obtain from the relcache to have the
- * correct varno for the parent relation, so that they match up
- * correctly against qual clauses.
+ * correct varno for the parent relation, so that they match
+ * up correctly against qual clauses.
*/
info->indexprs = RelationGetIndexExpressions(indexRelation);
info->indpred = RelationGetIndexPredicate(indexRelation);
@@ -177,7 +179,7 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
* Exception: if there are any dropped columns, we punt and return NIL.
* Ideally we would like to handle the dropped-column case too. However this
* creates problems for ExecTypeFromTL, which may be asked to build a tupdesc
- * for a tlist that includes vars of no-longer-existent types. In theory we
+ * for a tlist that includes vars of no-longer-existent types. In theory we
* could dig out the required info from the pg_attribute entries of the
* relation, but that data is not readily available to ExecTypeFromTL.
* For now, we don't apply the physical-tlist optimization when there are
@@ -389,11 +391,11 @@ has_unique_index(RelOptInfo *rel, AttrNumber attno)
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
/*
- * Note: ignore partial indexes, since they don't allow us to conclude
- * that all attr values are distinct. We don't take any interest in
- * expressional indexes either. Also, a multicolumn unique index
- * doesn't allow us to conclude that just the specified attr is
- * unique.
+ * Note: ignore partial indexes, since they don't allow us to
+ * conclude that all attr values are distinct. We don't take any
+ * interest in expressional indexes either. Also, a multicolumn
+ * unique index doesn't allow us to conclude that just the
+ * specified attr is unique.
*/
if (index->unique &&
index->ncolumns == 1 &&
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 9a4a2069765..cc8632899e8 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.50 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.51 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -322,7 +322,8 @@ build_join_rel(Query *root,
/*
* Create a new tlist containing just the vars that need to be output
- * from this join (ie, are needed for higher joinclauses or final output).
+ * from this join (ie, are needed for higher joinclauses or final
+ * output).
*/
build_joinrel_tlist(root, joinrel);
@@ -389,8 +390,8 @@ build_joinrel_tlist(Query *root, RelOptInfo *joinrel)
foreach(vars, FastListValue(&baserel->reltargetlist))
{
- Var *var = (Var *) lfirst(vars);
- int ndx = var->varattno - baserel->min_attr;
+ Var *var = (Var *) lfirst(vars);
+ int ndx = var->varattno - baserel->min_attr;
if (bms_nonempty_difference(baserel->attr_needed[ndx], relids))
{
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 334fc5784cf..673e76c8c31 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.17 2003/06/15 22:51:45 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.18 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,9 +21,9 @@
static bool join_clause_is_redundant(Query *root,
- RestrictInfo *rinfo,
- List *reference_list,
- JoinType jointype);
+ RestrictInfo *rinfo,
+ List *reference_list,
+ JoinType jointype);
/*
@@ -174,7 +174,7 @@ select_nonredundant_join_clauses(Query *root,
* left and right pathkeys, which uniquely identify the sets of equijoined
* variables in question. All the members of a pathkey set that are in the
* left relation have already been forced to be equal; likewise for those in
- * the right relation. So, we need to have only one clause that checks
+ * the right relation. So, we need to have only one clause that checks
* equality between any set member on the left and any member on the right;
* by transitivity, all the rest are then equal.
*
@@ -228,10 +228,9 @@ join_clause_is_redundant(Query *root,
if (redundant)
{
/*
- * It looks redundant, now check for "var = const" case.
- * If left_relids/right_relids are set, then there are
- * definitely vars on both sides; else we must check the
- * hard way.
+ * It looks redundant, now check for "var = const" case. If
+ * left_relids/right_relids are set, then there are definitely
+ * vars on both sides; else we must check the hard way.
*/
if (rinfo->left_relids)
return true; /* var = var, so redundant */
diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c
index 26d7d6fb245..10568371425 100644
--- a/src/backend/optimizer/util/tlist.c
+++ b/src/backend/optimizer/util/tlist.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.58 2003/07/25 00:01:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.59 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -211,8 +211,8 @@ get_sortgroupclause_expr(SortClause *sortClause, List *targetList)
List *
get_sortgrouplist_exprs(List *sortClauses, List *targetList)
{
- List *result = NIL;
- List *l;
+ List *result = NIL;
+ List *l;
foreach(l, sortClauses)
{
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index bdd5baf521a..edbb5f085cd 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.51 2003/06/06 15:04:02 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.52 2003/08/04 00:43:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ typedef struct
{
int min_varlevel;
int sublevels_up;
-} find_minimum_var_level_context;
+} find_minimum_var_level_context;
typedef struct
{
@@ -61,7 +61,7 @@ static bool contain_var_clause_walker(Node *node, void *context);
static bool contain_vars_of_level_walker(Node *node, int *sublevels_up);
static bool contain_vars_above_level_walker(Node *node, int *sublevels_up);
static bool find_minimum_var_level_walker(Node *node,
- find_minimum_var_level_context *context);
+ find_minimum_var_level_context * context);
static bool pull_var_clause_walker(Node *node,
pull_var_clause_context *context);
static Node *flatten_join_alias_vars_mutator(Node *node,
@@ -242,12 +242,12 @@ contain_var_clause_walker(Node *node, void *context)
*
* Returns true if any such Var found.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_of_level(Node *node, int levelsup)
{
- int sublevels_up = levelsup;
+ int sublevels_up = levelsup;
return query_or_expression_tree_walker(node,
contain_vars_of_level_walker,
@@ -286,20 +286,20 @@ contain_vars_of_level_walker(Node *node, int *sublevels_up)
/*
* contain_vars_above_level
* Recursively scan a clause to discover whether it contains any Var nodes
- * above the specified query level. (For example, pass zero to detect
+ * above the specified query level. (For example, pass zero to detect
* all nonlocal Vars.)
*
* Returns true if any such Var found.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
bool
contain_vars_above_level(Node *node, int levelsup)
{
- int sublevels_up = levelsup;
+ int sublevels_up = levelsup;
return query_or_expression_tree_walker(node,
- contain_vars_above_level_walker,
+ contain_vars_above_level_walker,
(void *) &sublevels_up,
0);
}
@@ -344,7 +344,7 @@ contain_vars_above_level_walker(Node *node, int *sublevels_up)
*
* -1 is returned if the clause has no variables at all.
*
- * Will recurse into sublinks. Also, may be invoked directly on a Query.
+ * Will recurse into sublinks. Also, may be invoked directly on a Query.
*/
int
find_minimum_var_level(Node *node)
@@ -364,13 +364,13 @@ find_minimum_var_level(Node *node)
static bool
find_minimum_var_level_walker(Node *node,
- find_minimum_var_level_context *context)
+ find_minimum_var_level_context * context)
{
if (node == NULL)
return false;
if (IsA(node, Var))
{
- int varlevelsup = ((Var *) node)->varlevelsup;
+ int varlevelsup = ((Var *) node)->varlevelsup;
/* convert levelsup to frame of reference of original query */
varlevelsup -= context->sublevels_up;
@@ -381,6 +381,7 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel > varlevelsup)
{
context->min_varlevel = varlevelsup;
+
/*
* As soon as we find a local variable, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
@@ -390,14 +391,16 @@ find_minimum_var_level_walker(Node *node,
}
}
}
+
/*
- * An Aggref must be treated like a Var of its level. Normally we'd get
- * the same result from looking at the Vars in the aggregate's argument,
- * but this fails in the case of a Var-less aggregate call (COUNT(*)).
+ * An Aggref must be treated like a Var of its level. Normally we'd
+ * get the same result from looking at the Vars in the aggregate's
+ * argument, but this fails in the case of a Var-less aggregate call
+ * (COUNT(*)).
*/
if (IsA(node, Aggref))
{
- int agglevelsup = ((Aggref *) node)->agglevelsup;
+ int agglevelsup = ((Aggref *) node)->agglevelsup;
/* convert levelsup to frame of reference of original query */
agglevelsup -= context->sublevels_up;
@@ -408,6 +411,7 @@ find_minimum_var_level_walker(Node *node,
context->min_varlevel > agglevelsup)
{
context->min_varlevel = agglevelsup;
+
/*
* As soon as we find a local aggregate, we can abort the
* tree traversal, since min_varlevel is then certainly 0.
@@ -519,6 +523,7 @@ flatten_join_alias_vars_mutator(Node *node,
Assert(var->varattno > 0);
/* Okay, must expand it */
newvar = (Node *) nth(var->varattno - 1, rte->joinaliasvars);
+
/*
* If we are expanding an alias carried down from an upper query,
* must adjust its varlevelsup fields.
@@ -534,11 +539,11 @@ flatten_join_alias_vars_mutator(Node *node,
if (IsA(node, InClauseInfo))
{
/* Copy the InClauseInfo node with correct mutation of subnodes */
- InClauseInfo *ininfo;
+ InClauseInfo *ininfo;
ininfo = (InClauseInfo *) expression_tree_mutator(node,
- flatten_join_alias_vars_mutator,
- (void *) context);
+ flatten_join_alias_vars_mutator,
+ (void *) context);
/* now fix InClauseInfo's relid sets */
if (context->sublevels_up == 0)
{
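The optimizer/util hunks (joininfo.c through var.c) repeat the same two mechanical changes: local declarations are re-aligned so the variable names start in a common column, and wrapped argument lists are re-indented under the opening parenthesis. A short hedged sketch of the declaration layout (the function is hypothetical; Var and lfirst are the usual node and list primitives of this era):

	#include "postgres.h"
	#include "nodes/pg_list.h"
	#include "nodes/primnodes.h"

	/* Hypothetical example of the declaration alignment pgindent enforces. */
	static int
	example_first_var_level(List *vars, int levelsup)
	{
		int			sublevels_up = levelsup;		/* name starts in the common column */
		Var		   *var = (Var *) lfirst(vars);		/* '*' stays attached to the name */

		return sublevels_up + (int) var->varlevelsup;
	}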
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index ef8fa510abd..3cbdcc6231d 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.283 2003/08/01 00:15:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.284 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,7 +89,7 @@ typedef struct
{
Oid *paramTypes;
int numParams;
-} check_parameter_resolution_context;
+} check_parameter_resolution_context;
static List *do_parse_analyze(Node *parseTree, ParseState *pstate);
@@ -106,7 +106,7 @@ static Query *transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt);
static Node *transformSetOperationTree(ParseState *pstate, SelectStmt *stmt);
static Query *transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt);
static Query *transformDeclareCursorStmt(ParseState *pstate,
- DeclareCursorStmt *stmt);
+ DeclareCursorStmt * stmt);
static Query *transformPrepareStmt(ParseState *pstate, PrepareStmt *stmt);
static Query *transformExecuteStmt(ParseState *pstate, ExecuteStmt *stmt);
static Query *transformCreateStmt(ParseState *pstate, CreateStmt *stmt,
@@ -114,18 +114,18 @@ static Query *transformCreateStmt(ParseState *pstate, CreateStmt *stmt,
static Query *transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
List **extras_before, List **extras_after);
static void transformColumnDefinition(ParseState *pstate,
- CreateStmtContext *cxt,
- ColumnDef *column);
+ CreateStmtContext *cxt,
+ ColumnDef *column);
static void transformTableConstraint(ParseState *pstate,
- CreateStmtContext *cxt,
- Constraint *constraint);
+ CreateStmtContext *cxt,
+ Constraint *constraint);
static void transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
- InhRelation *inhrelation);
+ InhRelation * inhrelation);
static void transformIndexConstraints(ParseState *pstate,
- CreateStmtContext *cxt);
+ CreateStmtContext *cxt);
static void transformFKConstraints(ParseState *pstate,
- CreateStmtContext *cxt,
- bool isAddConstraint);
+ CreateStmtContext *cxt,
+ bool isAddConstraint);
static void applyColumnNames(List *dst, List *src);
static List *getSetColTypes(ParseState *pstate, Node *node);
static void transformForUpdate(Query *qry, List *forUpdate);
@@ -135,7 +135,7 @@ static bool relationHasPrimaryKey(Oid relationOid);
static void release_pstate_resources(ParseState *pstate);
static FromExpr *makeFromExpr(List *fromlist, Node *quals);
static bool check_parameter_resolution_walker(Node *node,
- check_parameter_resolution_context *context);
+ check_parameter_resolution_context * context);
/*
@@ -229,6 +229,7 @@ static List *
do_parse_analyze(Node *parseTree, ParseState *pstate)
{
List *result = NIL;
+
/* Lists to return extra commands from transformation */
List *extras_before = NIL;
List *extras_after = NIL;
@@ -258,9 +259,10 @@ do_parse_analyze(Node *parseTree, ParseState *pstate)
/*
* Make sure that only the original query is marked original. We have
- * to do this explicitly since recursive calls of do_parse_analyze will
- * have marked some of the added-on queries as "original". Also mark
- * only the original query as allowed to set the command-result tag.
+ * to do this explicitly since recursive calls of do_parse_analyze
+ * will have marked some of the added-on queries as "original". Also
+ * mark only the original query as allowed to set the command-result
+ * tag.
*/
foreach(listscan, result)
{
@@ -419,7 +421,7 @@ transformStmt(ParseState *pstate, Node *parseTree,
case T_DeclareCursorStmt:
result = transformDeclareCursorStmt(pstate,
- (DeclareCursorStmt *) parseTree);
+ (DeclareCursorStmt *) parseTree);
break;
default:
@@ -593,10 +595,10 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
*
* HACK: unknown-type constants and params in the INSERT's targetlist
* are copied up as-is rather than being referenced as subquery
- * outputs. This is to ensure that when we try to coerce them
- * to the target column's datatype, the right things happen (see
- * special cases in coerce_type). Otherwise, this fails:
- * INSERT INTO foo SELECT 'bar', ... FROM baz
+ * outputs. This is to ensure that when we try to coerce them to
+ * the target column's datatype, the right things happen (see
+ * special cases in coerce_type). Otherwise, this fails: INSERT
+ * INTO foo SELECT 'bar', ... FROM baz
*/
qry->targetList = NIL;
foreach(tl, selectQuery->targetList)
@@ -608,7 +610,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
if (resnode->resjunk)
continue;
if (tle->expr &&
- (IsA(tle->expr, Const) || IsA(tle->expr, Param)) &&
+ (IsA(tle->expr, Const) ||IsA(tle->expr, Param)) &&
exprType((Node *) tle->expr) == UNKNOWNOID)
expr = tle->expr;
else
@@ -661,7 +663,7 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
if (icolumns == NIL || attnos == NIL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INSERT has more expressions than target columns")));
+ errmsg("INSERT has more expressions than target columns")));
col = (ResTarget *) lfirst(icolumns);
Assert(IsA(col, ResTarget));
@@ -675,15 +677,14 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
}
/*
- * Ensure that the targetlist has the same number of entries that
- * were present in the columns list. Don't do the check unless
- * an explicit columns list was given, though.
- * statements.
+ * Ensure that the targetlist has the same number of entries that were
+ * present in the columns list. Don't do the check unless an explicit
+	 * columns list was given, though.
*/
if (stmt->cols != NIL && (icolumns != NIL || attnos != NIL))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INSERT has more target columns than expressions")));
+ errmsg("INSERT has more target columns than expressions")));
/* done building the range table and jointree */
qry->rtable = pstate->p_rtable;
@@ -1054,7 +1055,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for \"%s.%s\"",
- cxt->relation->relname, column->colname)));
+ cxt->relation->relname, column->colname)));
column->is_not_null = FALSE;
saw_nullable = true;
break;
@@ -1064,7 +1065,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for \"%s.%s\"",
- cxt->relation->relname, column->colname)));
+ cxt->relation->relname, column->colname)));
column->is_not_null = TRUE;
saw_nullable = true;
break;
@@ -1074,7 +1075,7 @@ transformColumnDefinition(ParseState *pstate, CreateStmtContext *cxt,
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("multiple DEFAULT values specified for \"%s.%s\"",
- cxt->relation->relname, column->colname)));
+ cxt->relation->relname, column->colname)));
column->raw_default = constraint->raw_expr;
Assert(constraint->cooked_expr == NULL);
break;
@@ -1170,7 +1171,7 @@ transformTableConstraint(ParseState *pstate, CreateStmtContext *cxt,
*/
static void
transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
- InhRelation *inhRelation)
+ InhRelation * inhRelation)
{
AttrNumber parent_attno;
@@ -1188,7 +1189,7 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
inhRelation->relation->relname)));
/*
- * Check for SELECT privilages
+	 * Check for SELECT privileges
*/
aclresult = pg_class_aclcheck(RelationGetRelid(relation), GetUserId(),
ACL_SELECT);
@@ -1200,8 +1201,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
constr = tupleDesc->constr;
/*
- * Insert the inherited attributes into the cxt for the
- * new table definition.
+ * Insert the inherited attributes into the cxt for the new table
+ * definition.
*/
for (parent_attno = 1; parent_attno <= tupleDesc->natts;
parent_attno++)
@@ -1220,8 +1221,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
/*
* Create a new inherited column.
*
- * For constraints, ONLY the NOT NULL constraint is inherited
- * by the new column definition per SQL99.
+ * For constraints, ONLY the NOT NULL constraint is inherited by the
+ * new column definition per SQL99.
*/
def = makeNode(ColumnDef);
def->colname = pstrdup(attributeName);
@@ -1265,9 +1266,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
Assert(this_default != NULL);
/*
- * If default expr could contain any vars, we'd need to
- * fix 'em, but it can't; so default is ready to apply to
- * child.
+ * If default expr could contain any vars, we'd need to fix
+ * 'em, but it can't; so default is ready to apply to child.
*/
def->cooked_default = pstrdup(this_default);
@@ -1275,9 +1275,9 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
}
/*
- * Close the parent rel, but keep our AccessShareLock on it until
- * xact commit. That will prevent someone else from deleting or
- * ALTERing the parent before the child is committed.
+ * Close the parent rel, but keep our AccessShareLock on it until xact
+ * commit. That will prevent someone else from deleting or ALTERing
+ * the parent before the child is committed.
*/
heap_close(relation, NoLock);
}
@@ -1340,8 +1340,8 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
/*
* Make sure referenced keys exist. If we are making a PRIMARY
* KEY index, also make sure they are NOT NULL, if possible.
- * (Although we could leave it to DefineIndex to mark the columns NOT
- * NULL, it's more efficient to get it right the first time.)
+ * (Although we could leave it to DefineIndex to mark the columns
+ * NOT NULL, it's more efficient to get it right the first time.)
*/
foreach(keys, constraint->keys)
{
@@ -1390,8 +1390,8 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
if (rel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("inherited table \"%s\" is not a relation",
- inh->relname)));
+ errmsg("inherited table \"%s\" is not a relation",
+ inh->relname)));
for (count = 0; count < rel->rd_att->natts; count++)
{
Form_pg_attribute inhattr = rel->rd_att->attrs[count];
@@ -1402,11 +1402,13 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
if (strcmp(key, inhname) == 0)
{
found = true;
+
/*
* We currently have no easy way to force an
- * inherited column to be NOT NULL at creation, if
- * its parent wasn't so already. We leave it to
- * DefineIndex to fix things up in this case.
+ * inherited column to be NOT NULL at
+ * creation, if its parent wasn't so already.
+ * We leave it to DefineIndex to fix things up
+ * in this case.
*/
break;
}
@@ -1425,9 +1427,10 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
if (HeapTupleIsValid(atttuple))
{
found = true;
+
/*
- * If it's not already NOT NULL, leave it to DefineIndex
- * to fix later.
+ * If it's not already NOT NULL, leave it to
+ * DefineIndex to fix later.
*/
ReleaseSysCache(atttuple);
}
@@ -1436,8 +1439,8 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
if (!found)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key)));
+ errmsg("column \"%s\" named in key does not exist",
+ key)));
/* Check for PRIMARY KEY(foo, foo) */
foreach(columns, index->indexParams)
@@ -1446,10 +1449,10 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
if (iparam->name && strcmp(key, iparam->name) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- /* translator: second %s is PRIMARY KEY or UNIQUE */
- errmsg("column \"%s\" appears twice in %s constraint",
- key,
- index->primary ? "PRIMARY KEY" : "UNIQUE")));
+ /* translator: second %s is PRIMARY KEY or UNIQUE */
+ errmsg("column \"%s\" appears twice in %s constraint",
+ key,
+ index->primary ? "PRIMARY KEY" : "UNIQUE")));
}
/* OK, add it to the index definition */
@@ -1543,7 +1546,7 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
ereport(NOTICE,
(errmsg("%s / %s%s will create implicit index \"%s\" for table \"%s\"",
cxt->stmtType,
- (strcmp(cxt->stmtType, "ALTER TABLE") == 0) ? "ADD " : "",
+ (strcmp(cxt->stmtType, "ALTER TABLE") == 0) ? "ADD " : "",
(index->primary ? "PRIMARY KEY" : "UNIQUE"),
index->idxname, cxt->relation->relname)));
}
@@ -1557,13 +1560,13 @@ transformFKConstraints(ParseState *pstate, CreateStmtContext *cxt,
return;
ereport(NOTICE,
- (errmsg("%s will create implicit trigger(s) for FOREIGN KEY check(s)",
- cxt->stmtType)));
+ (errmsg("%s will create implicit trigger(s) for FOREIGN KEY check(s)",
+ cxt->stmtType)));
/*
* For ALTER TABLE ADD CONSTRAINT, nothing to do. For CREATE TABLE or
- * ALTER TABLE ADD COLUMN, gin up an ALTER TABLE ADD CONSTRAINT command
- * to execute after the basic command is complete.
+ * ALTER TABLE ADD COLUMN, gin up an ALTER TABLE ADD CONSTRAINT
+ * command to execute after the basic command is complete.
*
* Note: the ADD CONSTRAINT command must also execute after any index
* creation commands. Thus, this should run after
@@ -1575,7 +1578,7 @@ transformFKConstraints(ParseState *pstate, CreateStmtContext *cxt,
AlterTableStmt *alterstmt = makeNode(AlterTableStmt);
List *fkclist;
- alterstmt->subtype = 'c'; /* preprocessed add constraint */
+ alterstmt->subtype = 'c'; /* preprocessed add constraint */
alterstmt->relation = cxt->relation;
alterstmt->name = NULL;
alterstmt->def = (Node *) cxt->fkconstraints;
@@ -1628,7 +1631,7 @@ transformIndexStmt(ParseState *pstate, IndexStmt *stmt)
/* take care of any index expressions */
foreach(l, stmt->indexParams)
{
- IndexElem *ielem = (IndexElem *) lfirst(l);
+ IndexElem *ielem = (IndexElem *) lfirst(l);
if (ielem->expr)
{
@@ -1641,6 +1644,7 @@ transformIndexStmt(ParseState *pstate, IndexStmt *stmt)
addRTEtoQuery(pstate, rte, false, true);
}
ielem->expr = transformExpr(pstate, ielem->expr);
+
/*
* We check only that the result type is legitimate; this is
* for consistency with what transformWhereClause() checks for
@@ -1649,7 +1653,7 @@ transformIndexStmt(ParseState *pstate, IndexStmt *stmt)
if (expression_returns_set(ielem->expr))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("index expression may not return a set")));
+ errmsg("index expression may not return a set")));
}
}
@@ -1829,10 +1833,10 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
sub_qry = getInsertSelectQuery(top_subqry, NULL);
/*
- * If the sub_qry is a setop, we cannot attach any qualifications
- * to it, because the planner won't notice them. This could
- * perhaps be relaxed someday, but for now, we may as well reject
- * such a rule immediately.
+ * If the sub_qry is a setop, we cannot attach any
+ * qualifications to it, because the planner won't notice
+ * them. This could perhaps be relaxed someday, but for now,
+ * we may as well reject such a rule immediately.
*/
if (sub_qry->setOperations != NULL && stmt->whereClause != NULL)
ereport(ERROR,
@@ -1854,12 +1858,12 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
case CMD_SELECT:
if (has_old)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON SELECT rule may not use OLD")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON SELECT rule may not use OLD")));
if (has_new)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON SELECT rule may not use NEW")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON SELECT rule may not use NEW")));
break;
case CMD_UPDATE:
/* both are OK */
@@ -1867,14 +1871,14 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
case CMD_INSERT:
if (has_old)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON INSERT rule may not use OLD")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON INSERT rule may not use OLD")));
break;
case CMD_DELETE:
if (has_new)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("ON DELETE rule may not use NEW")));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("ON DELETE rule may not use NEW")));
break;
default:
elog(ERROR, "unrecognized event type: %d",
@@ -1902,9 +1906,10 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
if (has_old || (has_new && stmt->event == CMD_UPDATE))
{
/*
- * If sub_qry is a setop, manipulating its jointree will do
- * no good at all, because the jointree is dummy. (This
- * should be a can't-happen case because of prior tests.)
+ * If sub_qry is a setop, manipulating its jointree will
+ * do no good at all, because the jointree is dummy.
+ * (This should be a can't-happen case because of prior
+ * tests.)
*/
if (sub_qry->setOperations != NULL)
ereport(ERROR,
@@ -1978,7 +1983,7 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
qry->sortClause = transformSortClause(pstate,
stmt->sortClause,
qry->targetList,
- true /* fix unknowns */);
+ true /* fix unknowns */ );
qry->groupClause = transformGroupClause(pstate,
stmt->groupClause,
@@ -2107,10 +2112,10 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
* make lists of the dummy vars and their names for use in parsing
* ORDER BY.
*
- * Note: we use leftmostRTI as the varno of the dummy variables.
- * It shouldn't matter too much which RT index they have, as long
- * as they have one that corresponds to a real RT entry; else funny
- * things may happen when the tree is mashed by rule rewriting.
+ * Note: we use leftmostRTI as the varno of the dummy variables. It
+ * shouldn't matter too much which RT index they have, as long as they
+ * have one that corresponds to a real RT entry; else funny things may
+ * happen when the tree is mashed by rule rewriting.
*/
qry->targetList = NIL;
targetvars = NIL;
@@ -2144,8 +2149,8 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* Handle SELECT INTO/CREATE TABLE AS.
*
- * Any column names from CREATE TABLE AS need to be attached to both
- * the top level and the leftmost subquery. We do not do this earlier
+ * Any column names from CREATE TABLE AS need to be attached to both the
+ * top level and the leftmost subquery. We do not do this earlier
* because we do *not* want the targetnames list to be affected.
*/
qry->into = into;
@@ -2192,7 +2197,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
qry->sortClause = transformSortClause(pstate,
sortClause,
qry->targetList,
- false /* no unknowns expected */);
+ false /* no unknowns expected */ );
pstate->p_namespace = sv_namespace;
pstate->p_rtable = sv_rtable;
@@ -2290,9 +2295,9 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
/*
* Check for bogus references to Vars on the current query level
- * (but upper-level references are okay).
- * Normally this can't happen because the namespace will be empty,
- * but it could happen if we are inside a rule.
+ * (but upper-level references are okay). Normally this can't
+ * happen because the namespace will be empty, but it could happen
+ * if we are inside a rule.
*/
if (pstate->p_namespace)
{
@@ -2352,8 +2357,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
if (length(lcoltypes) != length(rcoltypes))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("each %s query must have the same number of columns",
- context)));
+ errmsg("each %s query must have the same number of columns",
+ context)));
op->colTypes = NIL;
while (lcoltypes != NIL)
{
@@ -2422,7 +2427,7 @@ applyColumnNames(List *dst, List *src)
if (length(src) > length(dst))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("CREATE TABLE AS specifies too many column names")));
+ errmsg("CREATE TABLE AS specifies too many column names")));
while (src != NIL && dst != NIL)
{
@@ -2538,8 +2543,8 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
* handling are 'A'dd column and Add 'C'onstraint. These largely
* re-use code from CREATE TABLE.
*
- * If we need to do any parse transformation, get exclusive lock on
- * the relation to make sure it won't change before we execute the
+ * If we need to do any parse transformation, get exclusive lock on the
+ * relation to make sure it won't change before we execute the
* command.
*/
switch (stmt->subtype)
@@ -2574,7 +2579,7 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
*extras_before = nconc(*extras_before, cxt.blist);
*extras_after = nconc(cxt.alist, *extras_after);
- heap_close(rel, NoLock); /* close rel, keep lock */
+ heap_close(rel, NoLock); /* close rel, keep lock */
break;
case 'C':
@@ -2614,7 +2619,7 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
*extras_before = nconc(*extras_before, cxt.blist);
*extras_after = nconc(cxt.alist, *extras_after);
- heap_close(rel, NoLock); /* close rel, keep lock */
+ heap_close(rel, NoLock); /* close rel, keep lock */
break;
case 'c':
@@ -2638,7 +2643,7 @@ transformAlterTableStmt(ParseState *pstate, AlterTableStmt *stmt,
}
static Query *
-transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt *stmt)
+transformDeclareCursorStmt(ParseState *pstate, DeclareCursorStmt * stmt)
{
Query *result = makeNode(Query);
List *extras_before = NIL,
@@ -2672,7 +2677,7 @@ transformPrepareStmt(ParseState *pstate, PrepareStmt *stmt)
{
Query *result = makeNode(Query);
List *argtype_oids = NIL; /* argtype OIDs in a list */
- Oid *argtoids = NULL; /* and as an array */
+ Oid *argtoids = NULL; /* and as an array */
int nargs;
List *queries;
@@ -2757,11 +2762,11 @@ transformExecuteStmt(ParseState *pstate, ExecuteStmt *stmt)
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in EXECUTE parameter")));
+ errmsg("cannot use sub-select in EXECUTE parameter")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate in EXECUTE parameter")));
+ errmsg("cannot use aggregate in EXECUTE parameter")));
given_type_id = exprType(expr);
expected_type_id = lfirsto(paramtypes);
@@ -2801,15 +2806,15 @@ CheckSelectForUpdate(Query *qry)
if (qry->distinctClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE is not allowed with DISTINCT clause")));
+ errmsg("SELECT FOR UPDATE is not allowed with DISTINCT clause")));
if (qry->groupClause != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE is not allowed with GROUP BY clause")));
+ errmsg("SELECT FOR UPDATE is not allowed with GROUP BY clause")));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE is not allowed with AGGREGATE")));
+ errmsg("SELECT FOR UPDATE is not allowed with AGGREGATE")));
}
static void
@@ -2915,7 +2920,7 @@ relationHasPrimaryKey(Oid relationOid)
indexTuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
0, 0, 0);
- if (!HeapTupleIsValid(indexTuple)) /* should not happen */
+ if (!HeapTupleIsValid(indexTuple)) /* should not happen */
elog(ERROR, "cache lookup failed for index %u", indexoid);
result = ((Form_pg_index) GETSTRUCT(indexTuple))->indisprimary;
ReleaseSysCache(indexTuple);
@@ -2981,7 +2986,7 @@ transformConstraintAttrs(List *constraintList)
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced NOT DEFERRABLE clause")));
+ errmsg("misplaced NOT DEFERRABLE clause")));
if (saw_deferrability)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -2999,7 +3004,7 @@ transformConstraintAttrs(List *constraintList)
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced INITIALLY DEFERRED clause")));
+ errmsg("misplaced INITIALLY DEFERRED clause")));
if (saw_initially)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -3023,7 +3028,7 @@ transformConstraintAttrs(List *constraintList)
!IsA(lastprimarynode, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("misplaced INITIALLY IMMEDIATE clause")));
+ errmsg("misplaced INITIALLY IMMEDIATE clause")));
if (saw_initially)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
@@ -3135,10 +3140,10 @@ analyzeCreateSchemaStmt(CreateSchemaStmt *stmt)
elp->relation->schemaname = cxt.schemaname;
else if (strcmp(cxt.schemaname, elp->relation->schemaname) != 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_SCHEMA_DEFINITION),
- errmsg("CREATE specifies a schema (%s)"
- " different from the one being created (%s)",
- elp->relation->schemaname, cxt.schemaname)));
+ (errcode(ERRCODE_INVALID_SCHEMA_DEFINITION),
+ errmsg("CREATE specifies a schema (%s)"
+ " different from the one being created (%s)",
+ elp->relation->schemaname, cxt.schemaname)));
/*
* XXX todo: deal with constraints
@@ -3156,10 +3161,10 @@ analyzeCreateSchemaStmt(CreateSchemaStmt *stmt)
elp->view->schemaname = cxt.schemaname;
else if (strcmp(cxt.schemaname, elp->view->schemaname) != 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_SCHEMA_DEFINITION),
- errmsg("CREATE specifies a schema (%s)"
- " different from the one being created (%s)",
- elp->view->schemaname, cxt.schemaname)));
+ (errcode(ERRCODE_INVALID_SCHEMA_DEFINITION),
+ errmsg("CREATE specifies a schema (%s)"
+ " different from the one being created (%s)",
+ elp->view->schemaname, cxt.schemaname)));
/*
* XXX todo: deal with references between views
@@ -3195,7 +3200,7 @@ analyzeCreateSchemaStmt(CreateSchemaStmt *stmt)
*/
static bool
check_parameter_resolution_walker(Node *node,
- check_parameter_resolution_context *context)
+ check_parameter_resolution_context * context)
{
if (node == NULL)
return false;
@@ -3207,17 +3212,17 @@ check_parameter_resolution_walker(Node *node,
{
int paramno = param->paramid;
- if (paramno <= 0 || /* shouldn't happen, but... */
+ if (paramno <= 0 || /* shouldn't happen, but... */
paramno > context->numParams)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PARAMETER),
errmsg("there is no parameter $%d", paramno)));
- if (param->paramtype != context->paramTypes[paramno-1])
+ if (param->paramtype != context->paramTypes[paramno - 1])
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("could not determine datatype of parameter $%d",
- paramno)));
+ errmsg("could not determine datatype of parameter $%d",
+ paramno)));
}
return false;
}
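
The last hunk above touches check_parameter_resolution_walker, which verifies that every $n parameter reference is in range and agrees with the type already recorded for it. A self-contained sketch of that check, with plain ints standing in for Oids and an invented ParamCheckContext mirroring check_parameter_resolution_context:

    #include <stdio.h>

    /* Each $n reference must be in range and must agree with the type
     * recorded for it.  Types are plain ints here rather than Oids. */
    typedef struct
    {
        const int *paramTypes;
        int        numParams;
    } ParamCheckContext;

    static int
    check_param(int paramno, int paramtype, const ParamCheckContext *cxt)
    {
        if (paramno <= 0 || paramno > cxt->numParams)
        {
            fprintf(stderr, "there is no parameter $%d\n", paramno);
            return -1;
        }
        if (paramtype != cxt->paramTypes[paramno - 1])
        {
            fprintf(stderr, "could not determine datatype of parameter $%d\n",
                    paramno);
            return -1;
        }
        return 0;
    }

    int
    main(void)
    {
        int types[] = {23, 25};          /* say, two resolved parameter types */
        ParamCheckContext cxt = {types, 2};

        check_param(1, 23, &cxt);        /* in range, type agrees */
        check_param(3, 23, &cxt);        /* out of range */
        check_param(2, 23, &cxt);        /* disagrees with recorded type */
        return 0;
    }
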
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 90eaf18bf5f..d9ef4ed74d8 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_agg.c,v 1.55 2003/07/19 20:20:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_agg.c,v 1.56 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,7 +44,7 @@ static bool check_ungrouped_columns_walker(Node *node,
* Finish initial transformation of an aggregate call
*
* parse_func.c has recognized the function as an aggregate, and has set
- * up all the fields of the Aggref except agglevelsup. Here we must
+ * up all the fields of the Aggref except agglevelsup. Here we must
* determine which query level the aggregate actually belongs to, set
* agglevelsup accordingly, and mark p_hasAggs true in the corresponding
* pstate level.
@@ -56,22 +56,22 @@ transformAggregateCall(ParseState *pstate, Aggref *agg)
/*
* The aggregate's level is the same as the level of the lowest-level
- * variable or aggregate in its argument; or if it contains no variables
- * at all, we presume it to be local.
+ * variable or aggregate in its argument; or if it contains no
+ * variables at all, we presume it to be local.
*/
min_varlevel = find_minimum_var_level((Node *) agg->target);
/*
* An aggregate can't directly contain another aggregate call of the
- * same level (though outer aggs are okay). We can skip this check
- * if we didn't find any local vars or aggs.
+ * same level (though outer aggs are okay). We can skip this check if
+ * we didn't find any local vars or aggs.
*/
if (min_varlevel == 0)
{
if (checkExprHasAggs((Node *) agg->target))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregate function calls may not be nested")));
+ errmsg("aggregate function calls may not be nested")));
}
if (min_varlevel < 0)
@@ -142,17 +142,17 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
if (checkExprHasAggs(expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("aggregates not allowed in GROUP BY clause")));
+ errmsg("aggregates not allowed in GROUP BY clause")));
groupClauses = lcons(expr, groupClauses);
if (!IsA(expr, Var))
have_non_var_grouping = true;
}
/*
- * If there are join alias vars involved, we have to flatten them
- * to the underlying vars, so that aliased and unaliased vars will be
- * correctly taken as equal. We can skip the expense of doing this
- * if no rangetable entries are RTE_JOIN kind.
+ * If there are join alias vars involved, we have to flatten them to
+ * the underlying vars, so that aliased and unaliased vars will be
+ * correctly taken as equal. We can skip the expense of doing this if
+ * no rangetable entries are RTE_JOIN kind.
*/
hasJoinRTEs = false;
foreach(lst, pstate->p_rtable)
@@ -168,7 +168,7 @@ parseCheckAggregates(ParseState *pstate, Query *qry)
if (hasJoinRTEs)
groupClauses = (List *) flatten_join_alias_vars(qry,
- (Node *) groupClauses);
+ (Node *) groupClauses);
/*
* Check the targetlist and HAVING clause for ungrouped variables.
@@ -235,22 +235,22 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we find an aggregate call of the original level, do not recurse
- * into its arguments; ungrouped vars in the arguments are not an error.
- * We can also skip looking at the arguments of aggregates of higher
- * levels, since they could not possibly contain Vars that are of concern
- * to us (see transformAggregateCall). We do need to look into the
- * arguments of aggregates of lower levels, however.
+ * into its arguments; ungrouped vars in the arguments are not an
+ * error. We can also skip looking at the arguments of aggregates of
+ * higher levels, since they could not possibly contain Vars that are
+ * of concern to us (see transformAggregateCall). We do need to look
+ * into the arguments of aggregates of lower levels, however.
*/
if (IsA(node, Aggref) &&
(int) ((Aggref *) node)->agglevelsup >= context->sublevels_up)
return false;
/*
- * If we have any GROUP BY items that are not simple Vars,
- * check to see if subexpression as a whole matches any GROUP BY item.
- * We need to do this at every recursion level so that we recognize
- * GROUPed-BY expressions before reaching variables within them.
- * But this only works at the outer query level, as noted above.
+ * If we have any GROUP BY items that are not simple Vars, check to
+ * see if subexpression as a whole matches any GROUP BY item. We need
+ * to do this at every recursion level so that we recognize GROUPed-BY
+ * expressions before reaching variables within them. But this only
+ * works at the outer query level, as noted above.
*/
if (context->have_non_var_grouping && context->sublevels_up == 0)
{
@@ -264,7 +264,7 @@ check_ungrouped_columns_walker(Node *node,
/*
* If we have an ungrouped Var of the original query level, we have a
* failure. Vars below the original query level are not a problem,
- * and neither are Vars from above it. (If such Vars are ungrouped as
+ * and neither are Vars from above it. (If such Vars are ungrouped as
* far as their own query level is concerned, that's someone else's
* problem...)
*/
@@ -276,6 +276,7 @@ check_ungrouped_columns_walker(Node *node,
if (var->varlevelsup != context->sublevels_up)
return false; /* it's not local to my query, ignore */
+
/*
* Check for a match, if we didn't do it above.
*/
@@ -283,13 +284,13 @@ check_ungrouped_columns_walker(Node *node,
{
foreach(gl, context->groupClauses)
{
- Var *gvar = (Var *) lfirst(gl);
+ Var *gvar = (Var *) lfirst(gl);
if (IsA(gvar, Var) &&
gvar->varno == var->varno &&
gvar->varattno == var->varattno &&
gvar->varlevelsup == 0)
- return false; /* acceptable, we're okay */
+ return false; /* acceptable, we're okay */
}
}
@@ -365,10 +366,10 @@ build_aggregate_fnexprs(Oid agg_input_type,
(void) get_func_signature(transfn_oid, transfn_arg_types, &transfn_nargs);
/*
- * Build arg list to use in the transfn FuncExpr node. We really
- * only care that transfn can discover the actual argument types
- * at runtime using get_fn_expr_argtype(), so it's okay to use
- * Param nodes that don't correspond to any real Param.
+ * Build arg list to use in the transfn FuncExpr node. We really only
+ * care that transfn can discover the actual argument types at runtime
+ * using get_fn_expr_argtype(), so it's okay to use Param nodes that
+ * don't correspond to any real Param.
*/
arg0 = makeNode(Param);
arg0->paramkind = PARAM_EXEC;
@@ -385,33 +386,31 @@ build_aggregate_fnexprs(Oid agg_input_type,
args = makeList2(arg0, arg1);
}
else
- {
args = makeList1(arg0);
+
+ *transfnexpr = (Expr *) makeFuncExpr(transfn_oid,
+ agg_state_type,
+ args,
+ COERCE_DONTCARE);
+
+ /* see if we have a final function */
+ if (!OidIsValid(finalfn_oid))
+ {
+ *finalfnexpr = NULL;
+ return;
}
- *transfnexpr = (Expr *) makeFuncExpr(transfn_oid,
- agg_state_type,
- args,
- COERCE_DONTCARE);
-
- /* see if we have a final function */
- if (!OidIsValid(finalfn_oid))
- {
- *finalfnexpr = NULL;
- return;
- }
-
- /*
- * Build expr tree for final function
- */
+ /*
+ * Build expr tree for final function
+ */
arg0 = makeNode(Param);
arg0->paramkind = PARAM_EXEC;
arg0->paramid = -1;
arg0->paramtype = agg_state_type;
args = makeList1(arg0);
- *finalfnexpr = (Expr *) makeFuncExpr(finalfn_oid,
- agg_result_type,
- args,
- COERCE_DONTCARE);
+ *finalfnexpr = (Expr *) makeFuncExpr(finalfn_oid,
+ agg_result_type,
+ args,
+ COERCE_DONTCARE);
}
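
Several of the parse_agg.c hunks reflow the comments in check_ungrouped_columns_walker, which, for a Var belonging to the original query level, scans the flattened GROUP BY expressions for one with the same varno and varattno. A toy version of that matching loop; ToyVar is an invented stand-in, and the real code also handles non-Var grouping expressions and varlevelsup:

    #include <stdio.h>
    #include <stdbool.h>

    /* Toy stand-in for a Var: range-table index plus attribute number,
     * with varlevelsup == 0 assumed throughout. */
    typedef struct
    {
        int varno;
        int varattno;
    } ToyVar;

    /* Return true if the variable matches some GROUP BY column. */
    static bool
    var_is_grouped(ToyVar var, const ToyVar *groupvars, int ngroup)
    {
        for (int i = 0; i < ngroup; i++)
        {
            if (groupvars[i].varno == var.varno &&
                groupvars[i].varattno == var.varattno)
                return true;        /* acceptable, we're okay */
        }
        return false;               /* ungrouped: the caller reports an error */
    }

    int
    main(void)
    {
        ToyVar group[] = {{1, 2}, {1, 3}};  /* grouped on attrs 2 and 3 of rel 1 */
        ToyVar ok = {1, 3};
        ToyVar bad = {1, 4};

        printf("grouped? %d\n", var_is_grouped(ok, group, 2));
        printf("grouped? %d\n", var_is_grouped(bad, group, 2));
        return 0;
    }
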
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 4b61d59b4a6..59cf2516999 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_clause.c,v 1.118 2003/07/19 20:20:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_clause.c,v 1.119 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,8 +60,8 @@ static Node *buildMergedJoinVar(ParseState *pstate, JoinType jointype,
static TargetEntry *findTargetlistEntry(ParseState *pstate, Node *node,
List *tlist, int clause);
static List *addTargetToSortList(ParseState *pstate, TargetEntry *tle,
- List *sortlist, List *targetlist,
- List *opname, bool resolveUnknown);
+ List *sortlist, List *targetlist,
+ List *opname, bool resolveUnknown);
/*
@@ -337,7 +337,7 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j,
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("JOIN/ON clause refers to \"%s\", which is not part of JOIN",
- rt_fetch(varno, pstate->p_rtable)->eref->aliasname)));
+ rt_fetch(varno, pstate->p_rtable)->eref->aliasname)));
}
}
bms_free(clause_varnos);
@@ -422,17 +422,19 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
errmsg("sub-select in FROM may not have SELECT INTO")));
/*
- * The subquery cannot make use of any variables from FROM items created
- * earlier in the current query. Per SQL92, the scope of a FROM item
- * does not include other FROM items. Formerly we hacked the namespace
- * so that the other variables weren't even visible, but it seems more
- * useful to leave them visible and give a specific error message.
+ * The subquery cannot make use of any variables from FROM items
+ * created earlier in the current query. Per SQL92, the scope of a
+ * FROM item does not include other FROM items. Formerly we hacked
+ * the namespace so that the other variables weren't even visible, but
+ * it seems more useful to leave them visible and give a specific
+ * error message.
*
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
*
* We can skip groveling through the subquery if there's not anything
- * visible in the current query. Also note that outer references are OK.
+ * visible in the current query. Also note that outer references are
+ * OK.
*/
if (pstate->p_namespace)
{
@@ -482,9 +484,9 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r)
/*
* The function parameters cannot make use of any variables from other
- * FROM items. (Compare to transformRangeSubselect(); the coding is
- * different though because we didn't parse as a sub-select with its own
- * level of namespace.)
+ * FROM items. (Compare to transformRangeSubselect(); the coding is
+ * different though because we didn't parse as a sub-select with its
+ * own level of namespace.)
*
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
@@ -1017,7 +1019,7 @@ transformLimitClause(ParseState *pstate, Node *clause,
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- /* translator: %s is name of a SQL construct, eg LIMIT */
+ /* translator: %s is name of a SQL construct, eg LIMIT */
errmsg("argument of %s must not contain variables",
constructName)));
}
@@ -1025,7 +1027,7 @@ transformLimitClause(ParseState *pstate, Node *clause,
{
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- /* translator: %s is name of a SQL construct, eg LIMIT */
+ /* translator: %s is name of a SQL construct, eg LIMIT */
errmsg("argument of %s must not contain aggregates",
constructName)));
}
@@ -1033,7 +1035,7 @@ transformLimitClause(ParseState *pstate, Node *clause,
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- /* translator: %s is name of a SQL construct, eg LIMIT */
+ /* translator: %s is name of a SQL construct, eg LIMIT */
errmsg("argument of %s must not contain sub-selects",
constructName)));
}
@@ -1135,7 +1137,11 @@ findTargetlistEntry(ParseState *pstate, Node *node, List *tlist, int clause)
if (!equal(target_result->expr, tle->expr))
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_COLUMN),
- /* translator: first %s is name of a SQL construct, eg ORDER BY */
+
+ /*
+ * translator: first %s is name of a SQL
+ * construct, eg ORDER BY
+ */
errmsg("%s \"%s\" is ambiguous",
clauseText[clause], name)));
}
@@ -1157,7 +1163,7 @@ findTargetlistEntry(ParseState *pstate, Node *node, List *tlist, int clause)
if (!IsA(val, Integer))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- /* translator: %s is name of a SQL construct, eg ORDER BY */
+ /* translator: %s is name of a SQL construct, eg ORDER BY */
errmsg("non-integer constant in %s",
clauseText[clause])));
target_pos = intVal(val);
@@ -1174,7 +1180,7 @@ findTargetlistEntry(ParseState *pstate, Node *node, List *tlist, int clause)
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- /* translator: %s is name of a SQL construct, eg ORDER BY */
+ /* translator: %s is name of a SQL construct, eg ORDER BY */
errmsg("%s position %d is not in target list",
clauseText[clause], target_pos)));
}
@@ -1250,10 +1256,10 @@ transformGroupClause(ParseState *pstate, List *grouplist,
/*
* If the GROUP BY clause matches the ORDER BY clause, we want to
* adopt the ordering operators from the latter rather than using
- * the default ops. This allows "GROUP BY foo ORDER BY foo DESC" to
- * be done with only one sort step. Note we are assuming that any
- * user-supplied ordering operator will bring equal values together,
- * which is all that GROUP BY needs.
+ * the default ops. This allows "GROUP BY foo ORDER BY foo DESC"
+ * to be done with only one sort step. Note we are assuming that
+ * any user-supplied ordering operator will bring equal values
+ * together, which is all that GROUP BY needs.
*/
if (sortClause &&
((SortClause *) lfirst(sortClause))->tleSortGroupRef ==
@@ -1422,7 +1428,7 @@ transformDistinctClause(ParseState *pstate, List *distinctlist,
break;
}
}
- if (slitem == NIL) /* should not happen */
+ if (slitem == NIL) /* should not happen */
elog(ERROR, "failed to add DISTINCT ON clause to target list");
}
}
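
Among the parse_clause.c hunks is findTargetlistEntry, whose reflowed messages cover an ORDER BY or GROUP BY item given as an integer position: the constant must be an integer and must land inside the target list. A minimal sketch of that by-position resolution, using counts instead of real target lists; resolve_by_position is an invented name:

    #include <stdio.h>

    /* Positions count from 1 and must land inside the target list.
     * Returns the 0-based index, or -1 after printing the error the
     * real function would raise. */
    static int
    resolve_by_position(int target_pos, int tlist_len, const char *clause_name)
    {
        if (target_pos < 1 || target_pos > tlist_len)
        {
            fprintf(stderr, "%s position %d is not in target list\n",
                    clause_name, target_pos);
            return -1;
        }
        return target_pos - 1;
    }

    int
    main(void)
    {
        /* target list of three columns: "ORDER BY 2" picks the second one */
        printf("index %d\n", resolve_by_position(2, 3, "ORDER BY"));
        resolve_by_position(5, 3, "ORDER BY");
        return 0;
    }
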
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 351bc00c70f..9995fcc832b 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_coerce.c,v 2.106 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_coerce.c,v 2.107 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,8 +30,8 @@
static Node *coerce_type_typmod(Node *node,
- Oid targetTypeId, int32 targetTypMod,
- CoercionForm cformat, bool isExplicit);
+ Oid targetTypeId, int32 targetTypMod,
+ CoercionForm cformat, bool isExplicit);
/*
@@ -39,12 +39,12 @@ static Node *coerce_type_typmod(Node *node,
* Convert an expression to a target type and typmod.
*
* This is the general-purpose entry point for arbitrary type coercion
- * operations. Direct use of the component operations can_coerce_type,
+ * operations. Direct use of the component operations can_coerce_type,
* coerce_type, and coerce_type_typmod should be restricted to special
* cases (eg, when the conversion is expected to succeed).
*
* Returns the possibly-transformed expression tree, or NULL if the type
- * conversion is not possible. (We do this, rather than ereport'ing directly,
+ * conversion is not possible. (We do this, rather than ereport'ing directly,
* so that callers can generate custom error messages indicating context.)
*
* pstate - parse state (can be NULL, see coerce_type)
@@ -66,14 +66,14 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
else if (ccontext >= COERCION_ASSIGNMENT)
{
/*
- * String hacks to get transparent conversions for char and varchar:
- * if a coercion to text is available, use it for forced coercions to
- * char(n) or varchar(n) or domains thereof.
+ * String hacks to get transparent conversions for char and
+ * varchar: if a coercion to text is available, use it for forced
+ * coercions to char(n) or varchar(n) or domains thereof.
*
* This is pretty grotty, but seems easier to maintain than providing
* entries in pg_cast that parallel all the ones for text.
*/
- Oid targetbasetype = getBaseType(targettype);
+ Oid targetbasetype = getBaseType(targettype);
if (targetbasetype == BPCHAROID || targetbasetype == VARCHAROID)
{
@@ -91,7 +91,10 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
}
else
{
- /* need a RelabelType if no typmod coercion will be performed */
+ /*
+ * need a RelabelType if no typmod coercion will be
+ * performed
+ */
if (targettypmod < 0)
expr = (Node *) makeRelabelType((Expr *) expr,
targettype, -1,
@@ -127,13 +130,13 @@ coerce_to_target_type(ParseState *pstate, Node *expr, Oid exprtype,
* The caller should already have determined that the coercion is possible;
* see can_coerce_type.
*
- * No coercion to a typmod (length) is performed here. The caller must
+ * No coercion to a typmod (length) is performed here. The caller must
* call coerce_type_typmod as well, if a typmod constraint is wanted.
* (But if the target type is a domain, it may internally contain a
* typmod constraint, which will be applied inside coerce_to_domain.)
*
* pstate is only used in the case that we are able to resolve the type of
- * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
+ * a previously UNKNOWN Param. It is okay to pass pstate = NULL if the
* caller does not want type information updated for Params.
*/
Node *
@@ -191,8 +194,9 @@ coerce_type(ParseState *pstate, Node *node,
* Any length constraint will be applied later by our caller.
*
* Note that we call stringTypeDatum using the domain's pg_type
- * row, if it's a domain. This works because the domain row has
- * the same typinput and typelem as the base type --- ugly...
+ * row, if it's a domain. This works because the domain row
+ * has the same typinput and typelem as the base type ---
+ * ugly...
*/
newcon->constvalue = stringTypeDatum(targetType, val, -1);
pfree(val);
@@ -232,12 +236,12 @@ coerce_type(ParseState *pstate, Node *node,
(errcode(ERRCODE_UNDEFINED_PARAMETER),
errmsg("there is no parameter $%d", paramno)));
- if (toppstate->p_paramtypes[paramno-1] == UNKNOWNOID)
+ if (toppstate->p_paramtypes[paramno - 1] == UNKNOWNOID)
{
/* We've successfully resolved the type */
- toppstate->p_paramtypes[paramno-1] = targetTypeId;
+ toppstate->p_paramtypes[paramno - 1] = targetTypeId;
}
- else if (toppstate->p_paramtypes[paramno-1] == targetTypeId)
+ else if (toppstate->p_paramtypes[paramno - 1] == targetTypeId)
{
/* We previously resolved the type, and it matches */
}
@@ -246,10 +250,10 @@ coerce_type(ParseState *pstate, Node *node,
/* Ooops */
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("inconsistent types deduced for parameter $%d",
- paramno),
+ errmsg("inconsistent types deduced for parameter $%d",
+ paramno),
errdetail("%s versus %s",
- format_type_be(toppstate->p_paramtypes[paramno-1]),
+ format_type_be(toppstate->p_paramtypes[paramno - 1]),
format_type_be(targetTypeId))));
}
@@ -298,7 +302,7 @@ coerce_type(ParseState *pstate, Node *node,
* higher-level code.
*
* Also, domains may have value restrictions beyond the base type
- * that must be accounted for. If the destination is a domain
+ * that must be accounted for. If the destination is a domain
* then we won't need a RelabelType node.
*/
result = coerce_to_domain(node, InvalidOid, targetTypeId,
@@ -306,10 +310,11 @@ coerce_type(ParseState *pstate, Node *node,
if (result == node)
{
/*
- * XXX could we label result with exprTypmod(node) instead of
- * default -1 typmod, to save a possible length-coercion
- * later? Would work if both types have same interpretation of
- * typmod, which is likely but not certain.
+ * XXX could we label result with exprTypmod(node) instead
+ * of default -1 typmod, to save a possible
+ * length-coercion later? Would work if both types have
+ * same interpretation of typmod, which is likely but not
+ * certain.
*/
result = (Node *) makeRelabelType((Expr *) result,
targetTypeId, -1,
@@ -386,7 +391,7 @@ can_coerce_type(int nargs, Oid *input_typeids, Oid *target_typeids,
if (targetTypeId == ANYARRAYOID ||
targetTypeId == ANYELEMENTOID)
{
- have_generics = true; /* do more checking later */
+ have_generics = true; /* do more checking later */
continue;
}
@@ -437,7 +442,7 @@ Node *
coerce_to_domain(Node *arg, Oid baseTypeId, Oid typeId, CoercionForm cformat)
{
CoerceToDomain *result;
- int32 typmod;
+ int32 typmod;
/* Get the base type if it hasn't been supplied */
if (baseTypeId == InvalidOid)
@@ -448,17 +453,18 @@ coerce_to_domain(Node *arg, Oid baseTypeId, Oid typeId, CoercionForm cformat)
return arg;
/*
- * If the domain applies a typmod to its base type, build the appropriate
- * coercion step. Mark it implicit for display purposes, because we don't
- * want it shown separately by ruleutils.c; but the isExplicit flag passed
- * to the conversion function depends on the manner in which the domain
- * coercion is invoked, so that the semantics of implicit and explicit
- * coercion differ. (Is that really the behavior we want?)
+ * If the domain applies a typmod to its base type, build the
+ * appropriate coercion step. Mark it implicit for display purposes,
+ * because we don't want it shown separately by ruleutils.c; but the
+ * isExplicit flag passed to the conversion function depends on the
+ * manner in which the domain coercion is invoked, so that the
+ * semantics of implicit and explicit coercion differ. (Is that
+ * really the behavior we want?)
*
* NOTE: because we apply this as part of the fixed expression structure,
- * ALTER DOMAIN cannot alter the typtypmod. But it's unclear that that
- * would be safe to do anyway, without lots of knowledge about what the
- * base type thinks the typmod means.
+ * ALTER DOMAIN cannot alter the typtypmod. But it's unclear that
+ * that would be safe to do anyway, without lots of knowledge about
+ * what the base type thinks the typmod means.
*/
typmod = get_typtypmod(typeId);
if (typmod >= 0)
@@ -467,9 +473,10 @@ coerce_to_domain(Node *arg, Oid baseTypeId, Oid typeId, CoercionForm cformat)
(cformat != COERCE_IMPLICIT_CAST));
/*
- * Now build the domain coercion node. This represents run-time checking
- * of any constraints currently attached to the domain. This also
- * ensures that the expression is properly labeled as to result type.
+ * Now build the domain coercion node. This represents run-time
+ * checking of any constraints currently attached to the domain. This
+ * also ensures that the expression is properly labeled as to result
+ * type.
*/
result = makeNode(CoerceToDomain);
result->arg = (Expr *) arg;
@@ -568,15 +575,15 @@ coerce_to_boolean(ParseState *pstate, Node *node,
if (node == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- /* translator: first %s is name of a SQL construct, eg WHERE */
- errmsg("argument of %s must be type boolean, not type %s",
- constructName, format_type_be(inputTypeId))));
+ /* translator: first %s is name of a SQL construct, eg WHERE */
+ errmsg("argument of %s must be type boolean, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- /* translator: %s is name of a SQL construct, eg WHERE */
+ /* translator: %s is name of a SQL construct, eg WHERE */
errmsg("argument of %s must not return a set",
constructName)));
@@ -607,15 +614,15 @@ coerce_to_integer(ParseState *pstate, Node *node,
if (node == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- /* translator: first %s is name of a SQL construct, eg LIMIT */
- errmsg("argument of %s must be type integer, not type %s",
- constructName, format_type_be(inputTypeId))));
+ /* translator: first %s is name of a SQL construct, eg LIMIT */
+ errmsg("argument of %s must be type integer, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- /* translator: %s is name of a SQL construct, eg LIMIT */
+ /* translator: %s is name of a SQL construct, eg LIMIT */
errmsg("argument of %s must not return a set",
constructName)));
@@ -664,19 +671,24 @@ select_common_type(List *typeids, const char *context)
*/
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- /* translator: first %s is name of a SQL construct, eg CASE */
+
+ /*
+ * translator: first %s is name of a SQL construct, eg
+ * CASE
+ */
errmsg("%s types %s and %s cannot be matched",
context,
format_type_be(ptype),
format_type_be(ntype))));
}
else if (!IsPreferredType(pcategory, ptype) &&
- can_coerce_type(1, &ptype, &ntype, COERCION_IMPLICIT) &&
- !can_coerce_type(1, &ntype, &ptype, COERCION_IMPLICIT))
+ can_coerce_type(1, &ptype, &ntype, COERCION_IMPLICIT) &&
+ !can_coerce_type(1, &ntype, &ptype, COERCION_IMPLICIT))
{
/*
- * take new type if can coerce to it implicitly but not the
- * other way; but if we have a preferred type, stay on it.
+ * take new type if can coerce to it implicitly but not
+ * the other way; but if we have a preferred type, stay on
+ * it.
*/
ptype = ntype;
pcategory = TypeCategory(ptype);
@@ -725,7 +737,7 @@ coerce_to_common_type(ParseState *pstate, Node *node,
else
ereport(ERROR,
(errcode(ERRCODE_CANNOT_COERCE),
- /* translator: first %s is name of a SQL construct, eg CASE */
+ /* translator: first %s is name of a SQL construct, eg CASE */
errmsg("%s could not convert type %s to %s",
context,
format_type_be(inputTypeId),
@@ -744,8 +756,8 @@ coerce_to_common_type(ParseState *pstate, Node *node,
* and must in fact be varlena arrays.
* 2) All arguments declared ANYELEMENT must have matching datatypes.
* 3) If there are arguments of both ANYELEMENT and ANYARRAY, make sure
- * the actual ANYELEMENT datatype is in fact the element type for
- * the actual ANYARRAY datatype.
+ * the actual ANYELEMENT datatype is in fact the element type for
+ * the actual ANYARRAY datatype.
*
* If we have UNKNOWN input (ie, an untyped literal) for any ANYELEMENT
* or ANYARRAY argument, assume it is okay.
@@ -763,13 +775,13 @@ check_generic_type_consistency(Oid *actual_arg_types,
Oid array_typelem;
/*
- * Loop through the arguments to see if we have any that are
- * ANYARRAY or ANYELEMENT. If so, require the actual types to be
+ * Loop through the arguments to see if we have any that are ANYARRAY
+ * or ANYELEMENT. If so, require the actual types to be
* self-consistent
*/
for (j = 0; j < nargs; j++)
{
- Oid actual_type = actual_arg_types[j];
+ Oid actual_type = actual_arg_types[j];
if (declared_arg_types[j] == ANYELEMENTOID)
{
@@ -798,7 +810,10 @@ check_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(elem_typeid))
{
- /* if we don't have an element type yet, use the one we just got */
+ /*
+ * if we don't have an element type yet, use the one we just
+ * got
+ */
elem_typeid = array_typelem;
}
else if (array_typelem != elem_typeid)
@@ -831,23 +846,23 @@ check_generic_type_consistency(Oid *actual_arg_types,
* if it is declared ANYARRAY or ANYELEMENT:
*
* 1) If return type is ANYARRAY, and any argument is ANYARRAY, use the
- * argument's actual type as the function's return type.
+ * argument's actual type as the function's return type.
* 2) If return type is ANYARRAY, no argument is ANYARRAY, but any argument
- * is ANYELEMENT, use the actual type of the argument to determine
- * the function's return type, i.e. the element type's corresponding
- * array type.
+ * is ANYELEMENT, use the actual type of the argument to determine
+ * the function's return type, i.e. the element type's corresponding
+ * array type.
* 3) If return type is ANYARRAY, no argument is ANYARRAY or ANYELEMENT,
- * generate an ERROR. This condition is prevented by CREATE FUNCTION
- * and is therefore not expected here.
+ * generate an ERROR. This condition is prevented by CREATE FUNCTION
+ * and is therefore not expected here.
* 4) If return type is ANYELEMENT, and any argument is ANYELEMENT, use the
- * argument's actual type as the function's return type.
+ * argument's actual type as the function's return type.
* 5) If return type is ANYELEMENT, no argument is ANYELEMENT, but any
- * argument is ANYARRAY, use the actual type of the argument to determine
- * the function's return type, i.e. the array type's corresponding
- * element type.
+ * argument is ANYARRAY, use the actual type of the argument to determine
+ * the function's return type, i.e. the array type's corresponding
+ * element type.
* 6) If return type is ANYELEMENT, no argument is ANYARRAY or ANYELEMENT,
- * generate an ERROR. This condition is prevented by CREATE FUNCTION
- * and is therefore not expected here.
+ * generate an ERROR. This condition is prevented by CREATE FUNCTION
+ * and is therefore not expected here.
*/
Oid
enforce_generic_type_consistency(Oid *actual_arg_types,
@@ -863,13 +878,13 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
Oid array_typelem = InvalidOid;
/*
- * Loop through the arguments to see if we have any that are
- * ANYARRAY or ANYELEMENT. If so, require the actual types to be
+ * Loop through the arguments to see if we have any that are ANYARRAY
+ * or ANYELEMENT. If so, require the actual types to be
* self-consistent
*/
for (j = 0; j < nargs; j++)
{
- Oid actual_type = actual_arg_types[j];
+ Oid actual_type = actual_arg_types[j];
if (declared_arg_types[j] == ANYELEMENTOID)
{
@@ -882,7 +897,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (OidIsValid(elem_typeid) && actual_type != elem_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared ANYELEMENT are not all alike"),
+ errmsg("arguments declared ANYELEMENT are not all alike"),
errdetail("%s versus %s",
format_type_be(elem_typeid),
format_type_be(actual_type))));
@@ -899,7 +914,7 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (OidIsValid(array_typeid) && actual_type != array_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared ANYARRAY are not all alike"),
+ errmsg("arguments declared ANYARRAY are not all alike"),
errdetail("%s versus %s",
format_type_be(array_typeid),
format_type_be(actual_type))));
@@ -921,12 +936,15 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(array_typelem))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("argument declared ANYARRAY is not an array but %s",
- format_type_be(array_typeid))));
+ errmsg("argument declared ANYARRAY is not an array but %s",
+ format_type_be(array_typeid))));
if (!OidIsValid(elem_typeid))
{
- /* if we don't have an element type yet, use the one we just got */
+ /*
+ * if we don't have an element type yet, use the one we just
+ * got
+ */
elem_typeid = array_typelem;
}
else if (array_typelem != elem_typeid)
@@ -955,15 +973,13 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
{
for (j = 0; j < nargs; j++)
{
- Oid actual_type = actual_arg_types[j];
+ Oid actual_type = actual_arg_types[j];
if (actual_type != UNKNOWNOID)
continue;
if (declared_arg_types[j] == ANYELEMENTOID)
- {
declared_arg_types[j] = elem_typeid;
- }
else if (declared_arg_types[j] == ANYARRAYOID)
{
if (!OidIsValid(array_typeid))
@@ -989,8 +1005,8 @@ enforce_generic_type_consistency(Oid *actual_arg_types,
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for datatype %s",
- format_type_be(elem_typeid))));
+ errmsg("could not find array type for datatype %s",
+ format_type_be(elem_typeid))));
}
return array_typeid;
}
@@ -1026,7 +1042,7 @@ resolve_generic_type(Oid declared_type,
if (context_declared_type == ANYARRAYOID)
{
/* Use actual type, but it must be an array */
- Oid array_typelem = get_element_type(context_actual_type);
+ Oid array_typelem = get_element_type(context_actual_type);
if (!OidIsValid(array_typelem))
ereport(ERROR,
@@ -1038,13 +1054,13 @@ resolve_generic_type(Oid declared_type,
else if (context_declared_type == ANYELEMENTOID)
{
/* Use the array type corresponding to actual type */
- Oid array_typeid = get_array_type(context_actual_type);
+ Oid array_typeid = get_array_type(context_actual_type);
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for datatype %s",
- format_type_be(context_actual_type))));
+ errmsg("could not find array type for datatype %s",
+ format_type_be(context_actual_type))));
return array_typeid;
}
}
@@ -1053,7 +1069,7 @@ resolve_generic_type(Oid declared_type,
if (context_declared_type == ANYARRAYOID)
{
/* Use the element type corresponding to actual type */
- Oid array_typelem = get_element_type(context_actual_type);
+ Oid array_typelem = get_element_type(context_actual_type);
if (!OidIsValid(array_typelem))
ereport(ERROR,
@@ -1207,8 +1223,8 @@ IsPreferredType(CATEGORY category, Oid type)
return false;
/*
- * This switch should agree with TypeCategory(), above. Note that
- * at this point, category certainly matches the type.
+ * This switch should agree with TypeCategory(), above. Note that at
+ * this point, category certainly matches the type.
*/
switch (category)
{
@@ -1279,8 +1295,8 @@ IsPreferredType(CATEGORY category, Oid type)
* Check if srctype is binary-coercible to targettype.
*
* This notion allows us to cheat and directly exchange values without
- * going through the trouble of calling a conversion function. Note that
- * in general, this should only be an implementation shortcut. Before 7.4,
+ * going through the trouble of calling a conversion function. Note that
+ * in general, this should only be an implementation shortcut. Before 7.4,
* this was also used as a heuristic for resolving overloaded functions and
* operators, but that's basically a bad idea.
*
@@ -1293,7 +1309,7 @@ IsPreferredType(CATEGORY category, Oid type)
* ANYARRAY type.
*
* This function replaces IsBinaryCompatible(), which was an inherently
- * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
+ * symmetric test. Since the pg_cast entries aren't necessarily symmetric,
* the order of the operands is now significant.
*/
bool
@@ -1415,16 +1431,16 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
else
{
/*
- * If there's no pg_cast entry, perhaps we are dealing with a
- * pair of array types. If so, and if the element types have
- * a suitable cast, use array_type_coerce().
+ * If there's no pg_cast entry, perhaps we are dealing with a pair
+ * of array types. If so, and if the element types have a
+ * suitable cast, use array_type_coerce().
*/
Oid targetElemType;
Oid sourceElemType;
Oid elemfuncid;
if ((targetElemType = get_element_type(targetTypeId)) != InvalidOid &&
- (sourceElemType = get_element_type(sourceTypeId)) != InvalidOid)
+ (sourceElemType = get_element_type(sourceTypeId)) != InvalidOid)
{
if (find_coercion_pathway(targetElemType, sourceElemType,
ccontext, &elemfuncid))
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index eb24c1b615e..b5aac4f9fd6 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_expr.c,v 1.158 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_expr.c,v 1.159 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,7 +41,7 @@ static int expr_depth_counter = 0;
bool Transform_null_equals = false;
static Node *typecast_expression(ParseState *pstate, Node *expr,
- TypeName *typename);
+ TypeName *typename);
static Node *transformColumnRef(ParseState *pstate, ColumnRef *cref);
static Node *transformIndirection(ParseState *pstate, Node *basenode,
List *indirection);
@@ -130,10 +130,10 @@ transformExpr(ParseState *pstate, Node *expr)
toppstate = toppstate->parentParseState;
/* Check parameter number is in range */
- if (paramno <= 0) /* probably can't happen? */
+ if (paramno <= 0) /* probably can't happen? */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PARAMETER),
- errmsg("there is no parameter $%d", paramno)));
+ errmsg("there is no parameter $%d", paramno)));
if (paramno > toppstate->p_numparams)
{
if (!toppstate->p_variableparams)
@@ -152,20 +152,20 @@ transformExpr(ParseState *pstate, Node *expr)
/* Zero out the previously-unreferenced slots */
MemSet(toppstate->p_paramtypes + toppstate->p_numparams,
0,
- (paramno - toppstate->p_numparams) * sizeof(Oid));
+ (paramno - toppstate->p_numparams) * sizeof(Oid));
toppstate->p_numparams = paramno;
}
if (toppstate->p_variableparams)
{
/* If not seen before, initialize to UNKNOWN type */
- if (toppstate->p_paramtypes[paramno-1] == InvalidOid)
- toppstate->p_paramtypes[paramno-1] = UNKNOWNOID;
+ if (toppstate->p_paramtypes[paramno - 1] == InvalidOid)
+ toppstate->p_paramtypes[paramno - 1] = UNKNOWNOID;
}
param = makeNode(Param);
param->paramkind = PARAM_NUM;
param->paramid = (AttrNumber) paramno;
- param->paramtype = toppstate->p_paramtypes[paramno-1];
+ param->paramtype = toppstate->p_paramtypes[paramno - 1];
result = (Node *) param;
/* handle qualification, if any */
@@ -254,9 +254,9 @@ transformExpr(ParseState *pstate, Node *expr)
else
{
Node *lexpr = transformExpr(pstate,
- a->lexpr);
+ a->lexpr);
Node *rexpr = transformExpr(pstate,
- a->rexpr);
+ a->rexpr);
result = (Node *) make_op(pstate,
a->name,
@@ -276,8 +276,8 @@ transformExpr(ParseState *pstate, Node *expr)
rexpr = coerce_to_boolean(pstate, rexpr, "AND");
result = (Node *) makeBoolExpr(AND_EXPR,
- makeList2(lexpr,
- rexpr));
+ makeList2(lexpr,
+ rexpr));
}
break;
case AEXPR_OR:
@@ -291,8 +291,8 @@ transformExpr(ParseState *pstate, Node *expr)
rexpr = coerce_to_boolean(pstate, rexpr, "OR");
result = (Node *) makeBoolExpr(OR_EXPR,
- makeList2(lexpr,
- rexpr));
+ makeList2(lexpr,
+ rexpr));
}
break;
case AEXPR_NOT:
@@ -303,7 +303,7 @@ transformExpr(ParseState *pstate, Node *expr)
rexpr = coerce_to_boolean(pstate, rexpr, "NOT");
result = (Node *) makeBoolExpr(NOT_EXPR,
- makeList1(rexpr));
+ makeList1(rexpr));
}
break;
case AEXPR_OP_ANY:
@@ -314,7 +314,7 @@ transformExpr(ParseState *pstate, Node *expr)
a->rexpr);
result = (Node *) make_scalar_array_op(pstate,
- a->name,
+ a->name,
true,
lexpr,
rexpr);
@@ -328,7 +328,7 @@ transformExpr(ParseState *pstate, Node *expr)
a->rexpr);
result = (Node *) make_scalar_array_op(pstate,
- a->name,
+ a->name,
false,
lexpr,
rexpr);
@@ -347,10 +347,12 @@ transformExpr(ParseState *pstate, Node *expr)
rexpr);
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
+
/*
- * We rely on DistinctExpr and OpExpr being same struct
+ * We rely on DistinctExpr and OpExpr being
+ * same struct
*/
NodeSetTag(result, T_DistinctExpr);
}
@@ -368,10 +370,12 @@ transformExpr(ParseState *pstate, Node *expr)
rexpr);
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("NULLIF requires = operator to yield boolean")));
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("NULLIF requires = operator to yield boolean")));
+
/*
- * We rely on NullIfExpr and OpExpr being same struct
+ * We rely on NullIfExpr and OpExpr being same
+ * struct
*/
NodeSetTag(result, T_NullIfExpr);
}
@@ -425,8 +429,8 @@ transformExpr(ParseState *pstate, Node *expr)
List *args;
/*
- * Transform the list of arguments. We use a shallow
- * list copy and then transform-in-place to avoid O(N^2)
+ * Transform the list of arguments. We use a shallow list
+ * copy and then transform-in-place to avoid O(N^2)
* behavior from repeated lappend's.
*/
targs = listCopy(fn->args);
@@ -489,7 +493,7 @@ transformExpr(ParseState *pstate, Node *expr)
((TargetEntry *) lfirst(tlist))->resdom->resjunk)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("sub-select must return a column")));
+ errmsg("sub-select must return a column")));
while ((tlist = lnext(tlist)) != NIL)
{
if (!((TargetEntry *) lfirst(tlist))->resdom->resjunk)
@@ -499,8 +503,9 @@ transformExpr(ParseState *pstate, Node *expr)
}
/*
- * EXPR and ARRAY need no lefthand or combining operator.
- * These fields should be NIL already, but make sure.
+ * EXPR and ARRAY need no lefthand or combining
+ * operator. These fields should be NIL already, but
+ * make sure.
*/
sublink->lefthand = NIL;
sublink->operName = NIL;
@@ -523,9 +528,10 @@ transformExpr(ParseState *pstate, Node *expr)
lfirst(elist) = transformExpr(pstate, lfirst(elist));
/*
- * If the expression is "<> ALL" (with unqualified opname)
- * then convert it to "NOT IN". This is a hack to improve
- * efficiency of expressions output by pre-7.4 Postgres.
+ * If the expression is "<> ALL" (with unqualified
+ * opname) then convert it to "NOT IN". This is a
+ * hack to improve efficiency of expressions output by
+ * pre-7.4 Postgres.
*/
if (sublink->subLinkType == ALL_SUBLINK &&
length(op) == 1 && strcmp(opname, "<>") == 0)
@@ -549,14 +555,14 @@ transformExpr(ParseState *pstate, Node *expr)
strcmp(opname, "<>") != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("row comparison cannot use operator %s",
- opname)));
+ errmsg("row comparison cannot use operator %s",
+ opname)));
/*
- * To build the list of combining operator OIDs, we must
- * scan subquery's targetlist to find values that will
- * be matched against lefthand values. We need to
- * ignore resjunk targets, so doing the outer
+ * To build the list of combining operator OIDs, we
+ * must scan subquery's targetlist to find values that
+ * will be matched against lefthand values. We need
+ * to ignore resjunk targets, so doing the outer
* iteration over right_list is easier than doing it
* over left_list.
*/
@@ -576,7 +582,7 @@ transformExpr(ParseState *pstate, Node *expr)
if (left_list == NIL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("sub-select has too many columns")));
+ errmsg("sub-select has too many columns")));
lexpr = lfirst(left_list);
left_list = lnext(left_list);
@@ -596,14 +602,14 @@ transformExpr(ParseState *pstate, Node *expr)
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("operator %s must return boolean, not type %s",
opname,
- format_type_be(opform->oprresult)),
+ format_type_be(opform->oprresult)),
errhint("The operator of a quantified predicate subquery must return boolean.")));
if (get_func_retset(opform->oprcode))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator %s must not return a set",
- opname),
+ errmsg("operator %s must not return a set",
+ opname),
errhint("The operator of a quantified predicate subquery must return boolean.")));
sublink->operOids = lappendo(sublink->operOids,
@@ -614,7 +620,7 @@ transformExpr(ParseState *pstate, Node *expr)
if (left_list != NIL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("sub-select has too few columns")));
+ errmsg("sub-select has too few columns")));
if (needNot)
{
@@ -657,7 +663,7 @@ transformExpr(ParseState *pstate, Node *expr)
neww->expr = (Expr *) transformExpr(pstate, warg);
neww->expr = (Expr *) coerce_to_boolean(pstate,
- (Node *) neww->expr,
+ (Node *) neww->expr,
"CASE/WHEN");
/*
@@ -747,8 +753,8 @@ transformExpr(ParseState *pstate, Node *expr)
/* Transform the element expressions */
foreach(element, a->elements)
{
- Node *e = (Node *) lfirst(element);
- Node *newe;
+ Node *e = (Node *) lfirst(element);
+ Node *newe;
newe = transformExpr(pstate, e);
newelems = lappend(newelems, newe);
@@ -761,8 +767,8 @@ transformExpr(ParseState *pstate, Node *expr)
/* Coerce arguments to common type if necessary */
foreach(element, newelems)
{
- Node *e = (Node *) lfirst(element);
- Node *newe;
+ Node *e = (Node *) lfirst(element);
+ Node *newe;
newe = coerce_to_common_type(pstate, e,
element_type,
@@ -837,15 +843,15 @@ transformExpr(ParseState *pstate, Node *expr)
{
CoalesceExpr *c = (CoalesceExpr *) expr;
CoalesceExpr *newc = makeNode(CoalesceExpr);
- List *newargs = NIL;
- List *newcoercedargs = NIL;
- List *typeids = NIL;
- List *args;
+ List *newargs = NIL;
+ List *newcoercedargs = NIL;
+ List *typeids = NIL;
+ List *args;
foreach(args, c->args)
{
- Node *e = (Node *) lfirst(args);
- Node *newe;
+ Node *e = (Node *) lfirst(args);
+ Node *newe;
newe = transformExpr(pstate, e);
newargs = lappend(newargs, newe);
@@ -857,8 +863,8 @@ transformExpr(ParseState *pstate, Node *expr)
/* Convert arguments if necessary */
foreach(args, newargs)
{
- Node *e = (Node *) lfirst(args);
- Node *newe;
+ Node *e = (Node *) lfirst(args);
+ Node *newe;
newe = coerce_to_common_type(pstate, e,
newc->coalescetype,
@@ -1026,9 +1032,9 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
* Not known as a column of any range-table entry.
*
* Consider the possibility that it's VALUE in a domain
- * check expression. (We handle VALUE as a name, not a
- * keyword, to avoid breaking a lot of applications that
- * have used VALUE as a column name in the past.)
+ * check expression. (We handle VALUE as a name, not
+ * a keyword, to avoid breaking a lot of applications
+ * that have used VALUE as a column name in the past.)
*/
if (pstate->p_value_substitute != NULL &&
strcmp(name, "value") == 0)
@@ -1059,7 +1065,7 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" not found", name)));
+ errmsg("attribute \"%s\" not found", name)));
}
break;
}
@@ -1175,8 +1181,8 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(cref->fields))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(cref->fields))));
node = NULL; /* keep compiler quiet */
break;
}
@@ -1246,14 +1252,15 @@ exprType(Node *expr)
Assert(!tent->resdom->resjunk);
if (sublink->subLinkType == EXPR_SUBLINK)
type = tent->resdom->restype;
- else /* ARRAY_SUBLINK */
+ else
+/* ARRAY_SUBLINK */
{
type = get_array_type(tent->resdom->restype);
if (!OidIsValid(type))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for datatype %s",
- format_type_be(tent->resdom->restype))));
+ format_type_be(tent->resdom->restype))));
}
}
else
@@ -1266,9 +1273,10 @@ exprType(Node *expr)
case T_SubPlan:
{
/*
- * Although the parser does not ever deal with already-planned
- * expression trees, we support SubPlan nodes in this routine
- * for the convenience of ruleutils.c.
+ * Although the parser does not ever deal with
+ * already-planned expression trees, we support SubPlan
+ * nodes in this routine for the convenience of
+ * ruleutils.c.
*/
SubPlan *subplan = (SubPlan *) expr;
@@ -1283,14 +1291,15 @@ exprType(Node *expr)
Assert(!tent->resdom->resjunk);
if (subplan->subLinkType == EXPR_SUBLINK)
type = tent->resdom->restype;
- else /* ARRAY_SUBLINK */
+ else
+/* ARRAY_SUBLINK */
{
type = get_array_type(tent->resdom->restype);
if (!OidIsValid(type))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("could not find array type for datatype %s",
- format_type_be(tent->resdom->restype))));
+ format_type_be(tent->resdom->restype))));
}
}
else
@@ -1337,9 +1346,10 @@ exprType(Node *expr)
type = ((SetToDefault *) expr)->typeId;
break;
case T_RangeVar:
+
/*
- * If someone uses a bare relation name in an expression,
- * we will likely first notice a problem here (see comments in
+ * If someone uses a bare relation name in an expression, we
+ * will likely first notice a problem here (see comments in
* transformColumnRef()). Issue an appropriate error message.
*/
ereport(ERROR,
@@ -1438,14 +1448,14 @@ exprTypmod(Node *expr)
* that typmod, else use -1
*/
CoalesceExpr *cexpr = (CoalesceExpr *) expr;
- Oid coalescetype = cexpr->coalescetype;
- int32 typmod;
- List *arg;
+ Oid coalescetype = cexpr->coalescetype;
+ int32 typmod;
+ List *arg;
typmod = exprTypmod((Node *) lfirst(cexpr->args));
foreach(arg, cexpr->args)
{
- Node *e = (Node *) lfirst(arg);
+ Node *e = (Node *) lfirst(arg);
if (exprType(e) != coalescetype)
return -1;
@@ -1505,9 +1515,9 @@ exprIsLengthCoercion(Node *expr, int32 *coercedTypmod)
return false;
/*
- * If it's not a two-argument or three-argument function with the second
- * argument being an int4 constant, it can't have been created from a
- * length coercion (it must be a type coercion, instead).
+ * If it's not a two-argument or three-argument function with the
+ * second argument being an int4 constant, it can't have been created
+ * from a length coercion (it must be a type coercion, instead).
*/
nargs = length(func->args);
if (nargs < 2 || nargs > 3)
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index 40197394501..836f703aae8 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_func.c,v 1.157 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_func.c,v 1.158 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,8 +86,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
if (nargs > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("cannot pass more than %d arguments to a function",
- FUNC_MAX_ARGS)));
+ errmsg("cannot pass more than %d arguments to a function",
+ FUNC_MAX_ARGS)));
if (fargs)
{
@@ -262,9 +262,9 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
if (agg_star)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("%s(*) specified, but %s is not an aggregate function",
- NameListToString(funcname),
- NameListToString(funcname))));
+ errmsg("%s(*) specified, but %s is not an aggregate function",
+ NameListToString(funcname),
+ NameListToString(funcname))));
if (agg_distinct)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -298,8 +298,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" not found in datatype %s",
- colname, format_type_be(relTypeId))));
+ errmsg("attribute \"%s\" not found in datatype %s",
+ colname, format_type_be(relTypeId))));
}
/*
@@ -311,8 +311,8 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
errmsg("function %s is not unique",
func_signature_string(funcname, nargs,
actual_arg_types)),
- errhint("Could not choose a best candidate function. "
- "You may need to add explicit typecasts.")));
+ errhint("Could not choose a best candidate function. "
+ "You may need to add explicit typecasts.")));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
@@ -392,7 +392,7 @@ int
func_match_argtypes(int nargs,
Oid *input_typeids,
FuncCandidateList raw_candidates,
- FuncCandidateList *candidates) /* return value */
+ FuncCandidateList *candidates) /* return value */
{
FuncCandidateList current_candidate;
FuncCandidateList next_candidate;
@@ -495,12 +495,13 @@ func_select_candidate(int nargs,
/*
* If any input types are domains, reduce them to their base types.
* This ensures that we will consider functions on the base type to be
- * "exact matches" in the exact-match heuristic; it also makes it possible
- * to do something useful with the type-category heuristics. Note that
- * this makes it difficult, but not impossible, to use functions declared
- * to take a domain as an input datatype. Such a function will be
- * selected over the base-type function only if it is an exact match at
- * all argument positions, and so was already chosen by our caller.
+ * "exact matches" in the exact-match heuristic; it also makes it
+ * possible to do something useful with the type-category heuristics.
+ * Note that this makes it difficult, but not impossible, to use
+ * functions declared to take a domain as an input datatype. Such a
+ * function will be selected over the base-type function only if it is
+ * an exact match at all argument positions, and so was already chosen
+ * by our caller.
*/
for (i = 0; i < nargs; i++)
input_base_typeids[i] = getBaseType(input_typeids[i]);
@@ -550,13 +551,14 @@ func_select_candidate(int nargs,
return candidates;
/*
- * Still too many candidates? Now look for candidates which have either
- * exact matches or preferred types at the args that will require coercion.
- * (Restriction added in 7.4: preferred type must be of same category as
- * input type; give no preference to cross-category conversions to
- * preferred types.) Keep all candidates if none match.
+ * Still too many candidates? Now look for candidates which have
+ * either exact matches or preferred types at the args that will
+ * require coercion. (Restriction added in 7.4: preferred type must be
+ * of same category as input type; give no preference to
+ * cross-category conversions to preferred types.) Keep all
+ * candidates if none match.
*/
- for (i = 0; i < nargs; i++) /* avoid multiple lookups */
+ for (i = 0; i < nargs; i++) /* avoid multiple lookups */
slot_category[i] = TypeCategory(input_base_typeids[i]);
ncandidates = 0;
nbestMatch = 0;
@@ -602,10 +604,11 @@ func_select_candidate(int nargs,
* Still too many candidates? Try assigning types for the unknown
* columns.
*
- * NOTE: for a binary operator with one unknown and one non-unknown input,
- * we already tried the heuristic of looking for a candidate with the
- * known input type on both sides (see binary_oper_exact()). That's
- * essentially a special case of the general algorithm we try next.
+ * NOTE: for a binary operator with one unknown and one non-unknown
+ * input, we already tried the heuristic of looking for a candidate
+ * with the known input type on both sides (see binary_oper_exact()).
+ * That's essentially a special case of the general algorithm we try
+ * next.
*
* We do this by examining each unknown argument position to see if we
* can determine a "type category" for it. If any candidate has an
@@ -815,9 +818,10 @@ func_get_detail(List *funcname,
* constant to a specific type.
*
* The reason we can restrict our check to binary-compatible
- * coercions here is that we expect non-binary-compatible coercions
- * to have an implementation function named after the target type.
- * That function will be found by normal lookup if appropriate.
+ * coercions here is that we expect non-binary-compatible
+ * coercions to have an implementation function named after the
+ * target type. That function will be found by normal lookup if
+ * appropriate.
*
* NB: it's important that this code stays in sync with what
* coerce_type can do, because the caller will try to apply
@@ -895,7 +899,7 @@ func_get_detail(List *funcname,
{
best_candidate = func_select_candidate(nargs,
current_input_typeids,
- current_candidates);
+ current_candidates);
/*
* If we were able to choose a best candidate, we're
@@ -960,7 +964,7 @@ func_get_detail(List *funcname,
* finding all superclasses of that type. A vector of new Oid type
* arrays is returned to the caller, listing possible alternative
* interpretations of the input typeids as members of their superclasses
- * rather than the actually given argument types. The vector is
+ * rather than the actually given argument types. The vector is
* terminated by a NULL pointer.
*
* The order of this vector is as follows: all superclasses of the
@@ -1123,14 +1127,14 @@ gen_cross_product(InhPaths *arginh, int nargs)
/*
* We also need an extra slot for the terminating NULL in the result
* array, but that cancels out with the fact that we don't want to
- * generate the zero-changes case. So we need exactly nanswers slots.
+ * generate the zero-changes case. So we need exactly nanswers slots.
*/
result = (Oid **) palloc(sizeof(Oid *) * nanswers);
j = 0;
/*
* Compute the cross product from right to left. When cur[i] == 0,
- * generate the original input type at position i. When cur[i] == k
+ * generate the original input type at position i. When cur[i] == k
* for k > 0, generate its k'th supertype.
*/
MemSet(cur, 0, sizeof(cur));
@@ -1138,7 +1142,7 @@ gen_cross_product(InhPaths *arginh, int nargs)
for (;;)
{
/*
- * Find a column we can increment. All the columns after it get
+ * Find a column we can increment. All the columns after it get
* reset to zero. (Essentially, we're adding one to the multi-
* digit number represented by cur[].)
*/
@@ -1263,8 +1267,8 @@ setup_field_select(Node *input, char *attname, Oid relid)
if (attno == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- attname, get_rel_name(relid))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ attname, get_rel_name(relid))));
fselect->arg = (Expr *) input;
fselect->fieldnum = attno;
@@ -1448,7 +1452,7 @@ find_aggregate_func(List *aggname, Oid basetype, bool noError)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s(%s) is not an aggregate",
- NameListToString(aggname), format_type_be(basetype))));
+ NameListToString(aggname), format_type_be(basetype))));
}
ReleaseSysCache(ftup);
@@ -1485,7 +1489,7 @@ LookupFuncName(List *funcname, int nargs, const Oid *argtypes, bool noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(funcname, nargs, argtypes))));
+ func_signature_string(funcname, nargs, argtypes))));
return InvalidOid;
}
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 5e047400a1e..74162a9f807 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_node.c,v 1.79 2003/07/19 20:20:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_node.c,v 1.80 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,8 +121,8 @@ transformArraySubscripts(ParseState *pstate,
if (elementType == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot subscript type %s because it is not an array",
- format_type_be(arrayType))));
+ errmsg("cannot subscript type %s because it is not an array",
+ format_type_be(arrayType))));
/*
* A list containing only single subscripts refers to a single array
@@ -173,14 +173,14 @@ transformArraySubscripts(ParseState *pstate,
subexpr = transformExpr(pstate, ai->lidx);
/* If it's not int4 already, try to coerce */
subexpr = coerce_to_target_type(pstate,
- subexpr, exprType(subexpr),
+ subexpr, exprType(subexpr),
INT4OID, -1,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
if (subexpr == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("array subscript must have type integer")));
+ errmsg("array subscript must have type integer")));
}
else
{
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 68225aae0a3..c3307a76587 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_oper.c,v 1.71 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_oper.c,v 1.72 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,13 +31,13 @@
static Oid binary_oper_exact(Oid arg1, Oid arg2,
FuncCandidateList candidates);
static FuncDetailCode oper_select_candidate(int nargs,
- Oid *input_typeids,
- FuncCandidateList candidates,
- Oid *operOid);
+ Oid *input_typeids,
+ FuncCandidateList candidates,
+ Oid *operOid);
static const char *op_signature_string(List *op, char oprkind,
- Oid arg1, Oid arg2);
+ Oid arg1, Oid arg2);
static void op_error(List *op, char oprkind, Oid arg1, Oid arg2,
- FuncDetailCode fdresult);
+ FuncDetailCode fdresult);
/*
@@ -140,9 +140,9 @@ equality_oper(Oid argtype, bool noError)
/*
* If the datatype is an array, then we can use array_eq ... but only
- * if there is a suitable equality operator for the element type.
- * (We must run this test first, since compatible_oper will find
- * array_eq, but would not notice the lack of an element operator.)
+ * if there is a suitable equality operator for the element type. (We
+ * must run this test first, since compatible_oper will find array_eq,
+ * but would not notice the lack of an element operator.)
*/
elem_type = get_element_type(argtype);
if (OidIsValid(elem_type))
@@ -184,8 +184,8 @@ equality_oper(Oid argtype, bool noError)
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(argtype))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(argtype))));
return NULL;
}
@@ -202,10 +202,10 @@ ordering_oper(Oid argtype, bool noError)
/*
* If the datatype is an array, then we can use array_lt ... but only
- * if there is a suitable ordering operator for the element type.
- * (We must run this test first, since the code below would find
- * array_lt if there's an element = operator, but would not notice the
- * lack of an element < operator.)
+ * if there is a suitable ordering operator for the element type. (We
+ * must run this test first, since the code below would find array_lt
+ * if there's an element = operator, but would not notice the lack of
+ * an element < operator.)
*/
elem_type = get_element_type(argtype);
if (OidIsValid(elem_type))
@@ -222,15 +222,15 @@ ordering_oper(Oid argtype, bool noError)
else
{
/*
- * Find the type's equality operator, and use its lsortop (it *must*
- * be mergejoinable). We use this definition because for sorting and
- * grouping purposes, it's important that the equality and ordering
- * operators are consistent.
+ * Find the type's equality operator, and use its lsortop (it
+ * *must* be mergejoinable). We use this definition because for
+ * sorting and grouping purposes, it's important that the equality
+ * and ordering operators are consistent.
*/
optup = equality_oper(argtype, noError);
if (optup != NULL)
{
- Oid lsortop;
+ Oid lsortop;
lsortop = ((Form_pg_operator) GETSTRUCT(optup))->oprlsortop;
ReleaseSysCache(optup);
@@ -247,8 +247,8 @@ ordering_oper(Oid argtype, bool noError)
if (!noError)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an ordering operator for type %s",
- format_type_be(argtype)),
+ errmsg("could not identify an ordering operator for type %s",
+ format_type_be(argtype)),
errhint("Use an explicit ordering operator or modify the query.")));
return NULL;
}
@@ -361,13 +361,13 @@ static FuncDetailCode
oper_select_candidate(int nargs,
Oid *input_typeids,
FuncCandidateList candidates,
- Oid *operOid) /* output argument */
+ Oid *operOid) /* output argument */
{
int ncandidates;
/*
- * Delete any candidates that cannot actually accept the given
- * input types, whether directly or by coercion.
+ * Delete any candidates that cannot actually accept the given input
+ * types, whether directly or by coercion.
*/
ncandidates = func_match_argtypes(nargs, input_typeids,
candidates, &candidates);
@@ -385,8 +385,8 @@ oper_select_candidate(int nargs,
}
/*
- * Use the same heuristics as for ambiguous functions to resolve
- * the conflict.
+ * Use the same heuristics as for ambiguous functions to resolve the
+ * conflict.
*/
candidates = func_select_candidate(nargs, input_typeids, candidates);
@@ -397,7 +397,7 @@ oper_select_candidate(int nargs,
}
*operOid = InvalidOid;
- return FUNCDETAIL_MULTIPLE; /* failed to select a best candidate */
+ return FUNCDETAIL_MULTIPLE; /* failed to select a best candidate */
}
@@ -772,6 +772,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
ltypeId = exprType(ltree);
atypeId = exprType(rtree);
+
/*
* The right-hand input of the operator will be the element type of
* the array. However, if we currently have just an untyped literal
@@ -785,7 +786,7 @@ make_scalar_array_op(ParseState *pstate, List *opname,
if (!OidIsValid(rtypeId))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires array on right side")));
+ errmsg("op ANY/ALL (array) requires array on right side")));
}
/* Now resolve the operator */
@@ -814,15 +815,15 @@ make_scalar_array_op(ParseState *pstate, List *opname,
if (rettype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator to yield boolean")));
+ errmsg("op ANY/ALL (array) requires operator to yield boolean")));
if (get_func_retset(opform->oprcode))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("op ANY/ALL (array) requires operator not to return a set")));
/*
- * Now switch back to the array type on the right, arranging for
- * any needed cast to be applied.
+ * Now switch back to the array type on the right, arranging for any
+ * needed cast to be applied.
*/
res_atypeId = get_array_type(declared_arg_types[1]);
if (!OidIsValid(res_atypeId))
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 732699e3ad7..ee368a23603 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.85 2003/07/20 21:56:35 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.86 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
#include "utils/syscache.h"
/* GUC parameter */
-bool add_missing_from;
+bool add_missing_from;
static Node *scanNameSpaceForRefname(ParseState *pstate, Node *nsnode,
const char *refname);
@@ -365,8 +365,8 @@ scanNameSpaceForConflict(ParseState *pstate, Node *nsnode,
if (strcmp(j->alias->aliasname, aliasname1) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_ALIAS),
- errmsg("table name \"%s\" specified more than once",
- aliasname1)));
+ errmsg("table name \"%s\" specified more than once",
+ aliasname1)));
/*
* Tables within an aliased join are invisible from outside
@@ -570,8 +570,8 @@ colnameToVar(ParseState *pstate, char *colname)
if (result)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_COLUMN),
- errmsg("column reference \"%s\" is ambiguous",
- colname)));
+ errmsg("column reference \"%s\" is ambiguous",
+ colname)));
result = newresult;
}
}
@@ -666,7 +666,7 @@ addRangeTableEntry(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("table \"%s\" has %d columns available but %d columns specified",
- RelationGetRelationName(rel), maxattrs, numaliases)));
+ RelationGetRelationName(rel), maxattrs, numaliases)));
/* fill in any unspecified alias columns using actual column names */
for (varattno = numaliases; varattno < maxattrs; varattno++)
@@ -761,7 +761,7 @@ addRangeTableEntryForRelation(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("table \"%s\" has %d columns available but %d columns specified",
- RelationGetRelationName(rel), maxattrs, numaliases)));
+ RelationGetRelationName(rel), maxattrs, numaliases)));
/* fill in any unspecified alias columns using actual column names */
for (varattno = numaliases; varattno < maxattrs; varattno++)
@@ -957,7 +957,8 @@ addRangeTableEntryForFunction(ParseState *pstate,
Relation rel;
int maxattrs;
- if (!OidIsValid(funcrelid)) /* shouldn't happen if typtype is 'c' */
+ if (!OidIsValid(funcrelid)) /* shouldn't happen if typtype is
+ * 'c' */
elog(ERROR, "invalid typrelid for complex type %u", funcrettype);
/*
@@ -1003,8 +1004,8 @@ addRangeTableEntryForFunction(ParseState *pstate,
if (numaliases > 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("too many column aliases specified for function %s",
- funcname)));
+ errmsg("too many column aliases specified for function %s",
+ funcname)));
if (numaliases == 0)
eref->colnames = makeList1(makeString(eref->aliasname));
}
@@ -1026,8 +1027,8 @@ addRangeTableEntryForFunction(ParseState *pstate,
else
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("function \"%s\" in FROM has unsupported return type",
- funcname)));
+ errmsg("function \"%s\" in FROM has unsupported return type",
+ funcname)));
/*----------
* Flags:
@@ -1318,7 +1319,7 @@ expandRTE(ParseState *pstate, RangeTblEntry *rte,
int maxattrs;
int numaliases;
- if (!OidIsValid(funcrelid)) /* shouldn't happen */
+ if (!OidIsValid(funcrelid)) /* shouldn't happen */
elog(ERROR, "invalid typrelid for complex type %u",
funcrettype);
@@ -1575,7 +1576,7 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
ObjectIdGetDatum(rte->relid),
Int16GetDatum(attnum),
0, 0);
- if (!HeapTupleIsValid(tp)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tp)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
attnum, rte->relid);
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
@@ -1632,7 +1633,7 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
HeapTuple tp;
Form_pg_attribute att_tup;
- if (!OidIsValid(funcrelid)) /* shouldn't happen */
+ if (!OidIsValid(funcrelid)) /* shouldn't happen */
elog(ERROR, "invalid typrelid for complex type %u",
funcrettype);
@@ -1640,7 +1641,7 @@ get_rte_attribute_type(RangeTblEntry *rte, AttrNumber attnum,
ObjectIdGetDatum(funcrelid),
Int16GetDatum(attnum),
0, 0);
- if (!HeapTupleIsValid(tp)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tp)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
attnum, funcrelid);
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
@@ -1720,7 +1721,7 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum)
ObjectIdGetDatum(rte->relid),
Int16GetDatum(attnum),
0, 0);
- if (!HeapTupleIsValid(tp)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tp)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
attnum, rte->relid);
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
@@ -1752,7 +1753,7 @@ get_rte_attribute_is_dropped(RangeTblEntry *rte, AttrNumber attnum)
ObjectIdGetDatum(funcrelid),
Int16GetDatum(attnum),
0, 0);
- if (!HeapTupleIsValid(tp)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tp)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for attribute %d of relation %u",
attnum, funcrelid);
att_tup = (Form_pg_attribute) GETSTRUCT(tp);
@@ -1927,7 +1928,7 @@ warnAutoRange(ParseState *pstate, RangeVar *relation)
else
ereport(NOTICE,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("adding missing FROM-clause entry for table \"%s\"",
- relation->relname)));
+ errmsg("adding missing FROM-clause entry for table \"%s\"",
+ relation->relname)));
}
}
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index daeb56b1101..6e10d52723e 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_target.c,v 1.108 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_target.c,v 1.109 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -66,7 +66,7 @@ transformTargetEntry(ParseState *pstate,
errmsg("relation reference \"%s\" cannot be used as a targetlist entry",
((RangeVar *) expr)->relname),
errhint("Write \"%s\".* to denote all the columns of the relation.",
- ((RangeVar *) expr)->relname)));
+ ((RangeVar *) expr)->relname)));
type_id = exprType(expr);
type_mod = exprTypmod(expr);
@@ -152,8 +152,8 @@ transformTargetList(ParseState *pstate, List *targetlist)
*/
if (strcmp(name1, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cross-database references are not implemented")));
schemaname = strVal(lsecond(fields));
relname = strVal(lthird(fields));
break;
@@ -286,7 +286,7 @@ markTargetListOrigin(ParseState *pstate, Resdom *res, Var *var)
case RTE_JOIN:
{
/* Join RTE --- recursively inspect the alias variable */
- Var *aliasvar;
+ Var *aliasvar;
Assert(attnum > 0 && attnum <= length(rte->joinaliasvars));
aliasvar = (Var *) nth(attnum - 1, rte->joinaliasvars);
@@ -322,7 +322,7 @@ updateTargetListEntry(ParseState *pstate,
int attrno,
List *indirection)
{
- Oid type_id; /* type of value provided */
+ Oid type_id; /* type of value provided */
Oid attrtype; /* type of target column */
int32 attrtypmod;
Resdom *resnode = tle->resdom;
@@ -341,13 +341,13 @@ updateTargetListEntry(ParseState *pstate,
* If the expression is a DEFAULT placeholder, insert the attribute's
* type/typmod into it so that exprType will report the right things.
* (We expect that the eventually substituted default expression will
- * in fact have this type and typmod.) Also, reject trying to update
+ * in fact have this type and typmod.) Also, reject trying to update
* an array element with DEFAULT, since there can't be any default for
* individual elements of a column.
*/
if (tle->expr && IsA(tle->expr, SetToDefault))
{
- SetToDefault *def = (SetToDefault *) tle->expr;
+ SetToDefault *def = (SetToDefault *) tle->expr;
def->typeId = attrtype;
def->typeMod = attrtypmod;
@@ -496,8 +496,8 @@ checkInsertTargets(ParseState *pstate, List *cols, List **attrnos)
if (intMember(attrno, *attrnos))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" specified more than once",
- name)));
+ errmsg("attribute \"%s\" specified more than once",
+ name)));
*attrnos = lappendi(*attrnos, attrno);
}
}
@@ -551,7 +551,7 @@ ExpandAllTables(ParseState *pstate)
if (!found_table)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("SELECT * with no tables specified is not valid")));
+ errmsg("SELECT * with no tables specified is not valid")));
return target;
}
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 5d68ef14a5b..d744823b3b7 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_type.c,v 1.59 2003/07/20 21:56:35 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_type.c,v 1.60 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,8 +91,8 @@ LookupTypeName(const TypeName *typename)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- field, rel->relname)));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ field, rel->relname)));
restype = get_atttype(relid, attnum);
/* this construct should never have an array indicator */
@@ -229,7 +229,7 @@ typenameType(const TypeName *typename)
tup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typoid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for type %u", typoid);
if (!((Form_pg_type) GETSTRUCT(tup))->typisdefined)
ereport(ERROR,
diff --git a/src/backend/port/beos/sem.c b/src/backend/port/beos/sem.c
index 06518e7db7d..5de018b22bf 100644
--- a/src/backend/port/beos/sem.c
+++ b/src/backend/port/beos/sem.c
@@ -291,8 +291,8 @@ semop(int semId, struct sembuf * sops, int nsops)
if (sops[i].sem_op < 0)
{
/*
- * Try acquiring the semaphore till we are not interrupted by a
- * signal
+ * Try acquiring the semaphore till we are not interrupted by
+ * a signal
*/
if (sops[i].sem_flg == IPC_NOWAIT)
{
diff --git a/src/backend/port/dynloader/darwin.c b/src/backend/port/dynloader/darwin.c
index 8810308a147..b297405b85c 100644
--- a/src/backend/port/dynloader/darwin.c
+++ b/src/backend/port/dynloader/darwin.c
@@ -3,7 +3,7 @@
* available with a PostgreSQL-compatible license. Kudos Wilfredo
* S�nchez <wsanchez@apple.com>.
*
- * $Header: /cvsroot/pgsql/src/backend/port/dynloader/darwin.c,v 1.7 2003/04/13 01:19:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/dynloader/darwin.c,v 1.8 2003/08/04 00:43:21 momjian Exp $
*/
#include "postgres.h"
@@ -58,7 +58,7 @@ char *
pg_dlerror(void)
{
NSLinkEditErrors c;
- int errorNumber;
+ int errorNumber;
const char *fileName;
const char *errorString = NULL;
diff --git a/src/backend/port/dynloader/linux.c b/src/backend/port/dynloader/linux.c
index f82cfadf6a1..faf06d9bc43 100644
--- a/src/backend/port/dynloader/linux.c
+++ b/src/backend/port/dynloader/linux.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/dynloader/linux.c,v 1.24 2003/07/22 23:30:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/dynloader/linux.c,v 1.25 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,4 +131,4 @@ pg_dlerror(void)
#endif
}
-#endif /* !HAVE_DLOPEN */
+#endif /* !HAVE_DLOPEN */
diff --git a/src/backend/port/dynloader/linux.h b/src/backend/port/dynloader/linux.h
index 0adbdf8edf4..fdc65a069f1 100644
--- a/src/backend/port/dynloader/linux.h
+++ b/src/backend/port/dynloader/linux.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: linux.h,v 1.19 2002/10/15 16:04:17 tgl Exp $
+ * $Id: linux.h,v 1.20 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,6 @@
#define pg_dlsym dlsym
#define pg_dlclose dlclose
#define pg_dlerror dlerror
-
#endif /* HAVE_DLOPEN */
#endif /* PORT_PROTOS_H */
diff --git a/src/backend/port/dynloader/win32.c b/src/backend/port/dynloader/win32.c
index e124ee41208..83194aa7757 100644
--- a/src/backend/port/dynloader/win32.c
+++ b/src/backend/port/dynloader/win32.c
@@ -1,4 +1,4 @@
-/* $Header: /cvsroot/pgsql/src/backend/port/dynloader/win32.c,v 1.1 2003/05/15 16:35:29 momjian Exp $ */
+/* $Header: /cvsroot/pgsql/src/backend/port/dynloader/win32.c,v 1.2 2003/08/04 00:43:21 momjian Exp $ */
#include <windows.h>
@@ -11,17 +11,17 @@ dlerror(void)
int
dlclose(void *handle)
{
- return FreeLibrary((HMODULE)handle) ? 0 : 1;
+ return FreeLibrary((HMODULE) handle) ? 0 : 1;
}
void *
dlsym(void *handle, const char *symbol)
{
- return (void *)GetProcAddress((HMODULE)handle, symbol);
+ return (void *) GetProcAddress((HMODULE) handle, symbol);
}
void *
dlopen(const char *path, int mode)
{
- return (void *)LoadLibrary(path);
+ return (void *) LoadLibrary(path);
}
diff --git a/src/backend/port/ipc_test.c b/src/backend/port/ipc_test.c
index 6d10d3bb8f2..4ae64d788e4 100644
--- a/src/backend/port/ipc_test.c
+++ b/src/backend/port/ipc_test.c
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/ipc_test.c,v 1.7 2003/07/27 21:49:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/ipc_test.c,v 1.8 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -140,13 +140,13 @@ errstart(int elevel, const char *filename, int lineno,
}
void
-errfinish(int dummy, ...)
+errfinish(int dummy,...)
{
proc_exit(1);
}
void
-elog_finish(int elevel, const char *fmt, ...)
+elog_finish(int elevel, const char *fmt,...)
{
fprintf(stderr, "ERROR: %s\n", fmt);
proc_exit(1);
@@ -159,28 +159,28 @@ errcode(int sqlerrcode)
}
int
-errmsg(const char *fmt, ...)
+errmsg(const char *fmt,...)
{
fprintf(stderr, "ERROR: %s\n", fmt);
return 0; /* return value does not matter */
}
int
-errmsg_internal(const char *fmt, ...)
+errmsg_internal(const char *fmt,...)
{
fprintf(stderr, "ERROR: %s\n", fmt);
return 0; /* return value does not matter */
}
int
-errdetail(const char *fmt, ...)
+errdetail(const char *fmt,...)
{
fprintf(stderr, "DETAIL: %s\n", fmt);
return 0; /* return value does not matter */
}
int
-errhint(const char *fmt, ...)
+errhint(const char *fmt,...)
{
fprintf(stderr, "HINT: %s\n", fmt);
return 0; /* return value does not matter */
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index ae73eff0240..3434618d93b 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/posix_sema.c,v 1.8 2003/07/27 21:49:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/posix_sema.c,v 1.9 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,7 +114,6 @@ PosixSemaphoreCreate(sem_t * sem)
if (sem_init(sem, 1, 1) < 0)
elog(FATAL, "sem_init failed: %m");
}
-
#endif /* USE_NAMED_POSIX_SEMAPHORES */
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index 23131f8a670..2ec342df85e 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/sysv_sema.c,v 1.7 2003/07/27 21:49:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/sysv_sema.c,v 1.8 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,12 +121,12 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
(errno == ENOSPC) ?
errhint("This error does *not* mean that you have run out of disk space.\n"
"It occurs when either the system limit for the maximum number of "
- "semaphore sets (SEMMNI), or the system wide maximum number of "
- "semaphores (SEMMNS), would be exceeded. You need to raise the "
+ "semaphore sets (SEMMNI), or the system wide maximum number of "
+ "semaphores (SEMMNS), would be exceeded. You need to raise the "
"respective kernel parameter. Alternatively, reduce PostgreSQL's "
"consumption of semaphores by reducing its max_connections parameter "
"(currently %d).\n"
- "The PostgreSQL documentation contains more information about "
+ "The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.",
MaxBackends) : 0));
}
@@ -149,7 +149,7 @@ IpcSemaphoreInitialize(IpcSemaphoreId semId, int semNum, int value)
semId, semNum, value),
(errno == ERANGE) ?
errhint("You possibly need to raise your kernel's SEMVMX value to be at least "
- "%d. Look into the PostgreSQL documentation for details.",
+ "%d. Look into the PostgreSQL documentation for details.",
value) : 0));
}
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 3f31f21d8e6..b7829d7c8c4 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -10,7 +10,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/port/sysv_shmem.c,v 1.13 2003/07/27 21:49:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/port/sysv_shmem.c,v 1.14 2003/08/04 00:43:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,13 +40,13 @@ typedef int IpcMemoryId; /* shared memory ID returned by shmget(2) */
IpcMemoryKey UsedShmemSegID = 0;
-void *UsedShmemSegAddr = NULL;
+void *UsedShmemSegAddr = NULL;
static void *InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size);
static void IpcMemoryDetach(int status, Datum shmaddr);
static void IpcMemoryDelete(int status, Datum shmId);
static PGShmemHeader *PGSharedMemoryAttach(IpcMemoryKey key,
- IpcMemoryId *shmid);
+ IpcMemoryId *shmid);
/*
@@ -90,15 +90,15 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size)
*/
ereport(FATAL,
(errmsg("could not create shared memory segment: %m"),
- errdetail("Failed syscall was shmget(key=%d, size=%u, 0%o).",
- (int) memKey, size,
- IPC_CREAT | IPC_EXCL | IPCProtection),
+ errdetail("Failed syscall was shmget(key=%d, size=%u, 0%o).",
+ (int) memKey, size,
+ IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == EINVAL) ?
errhint("This error usually means that PostgreSQL's request for a shared memory "
"segment exceeded your kernel's SHMMAX parameter. You can either "
"reduce the request size or reconfigure the kernel with larger SHMMAX. "
- "To reduce the request size (currently %u bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "To reduce the request size (currently %u bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
"If the request size is already small, it's possible that it is less than "
"your kernel's SHMMIN parameter, in which case raising the request size or "
@@ -108,9 +108,9 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size)
size, NBuffers, MaxBackends) : 0,
(errno == ENOMEM) ?
errhint("This error usually means that PostgreSQL's request for a shared "
- "memory segment exceeded available memory or swap space. "
- "To reduce the request size (currently %u bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "memory segment exceeded available memory or swap space. "
+ "To reduce the request size (currently %u bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
"The PostgreSQL documentation contains more information about shared "
"memory configuration.",
@@ -120,9 +120,9 @@ InternalIpcMemoryCreate(IpcMemoryKey memKey, uint32 size)
"It occurs either if all available shared memory IDs have been taken, "
"in which case you need to raise the SHMMNI parameter in your kernel, "
"or because the system's overall limit for shared memory has been "
- "reached. If you cannot increase the shared memory limit, "
- "reduce PostgreSQL's shared memory request (currently %u bytes), "
- "by reducing its shared_buffers parameter (currently %d) and/or "
+ "reached. If you cannot increase the shared memory limit, "
+ "reduce PostgreSQL's shared memory request (currently %u bytes), "
+ "by reducing its shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
"The PostgreSQL documentation contains more information about shared "
"memory configuration.",
@@ -326,13 +326,13 @@ PGSharedMemoryCreate(uint32 size, bool makePrivate, int port)
hdr->totalsize = size;
hdr->freeoffset = MAXALIGN(sizeof(PGShmemHeader));
-
+
if (ExecBackend && UsedShmemSegAddr == NULL && !makePrivate)
{
UsedShmemSegAddr = memAddress;
UsedShmemSegID = NextShmemSegID;
}
-
+
return hdr;
}
@@ -353,7 +353,7 @@ PGSharedMemoryAttach(IpcMemoryKey key, IpcMemoryId *shmid)
hdr = (PGShmemHeader *) shmat(*shmid,
UsedShmemSegAddr,
#if defined(solaris) && defined(__sparc__)
- /* use intimate shared memory on Solaris */
+ /* use intimate shared memory on Solaris */
SHM_SHARE_MMU
#else
0
diff --git a/src/backend/port/win32/sema.c b/src/backend/port/win32/sema.c
index 267b4619205..7bdd2e09776 100644
--- a/src/backend/port/win32/sema.c
+++ b/src/backend/port/win32/sema.c
@@ -17,9 +17,9 @@ typedef struct
{
int m_numSems;
off_t m_semaphoreHandles;
- //offset from beginning of header
+ /* offset from beginning of header */
off_t m_semaphoreCounts;
- //offset from beginning of header
+ /* offset from beginning of header */
} win32_sem_set_hdr;
/* Control of a semaphore pool. The pool is an area in which we stored all
@@ -131,7 +131,7 @@ semget(int semKey, int semNum, int flags)
HANDLE *sem_handles = NULL;
int *sem_counts = NULL;
int i;
-
+
sec_attrs.nLength = sizeof(sec_attrs);
sec_attrs.lpSecurityDescriptor = NULL;
sec_attrs.bInheritHandle = TRUE;
@@ -152,8 +152,8 @@ semget(int semKey, int semNum, int flags)
new_set->m_numSems = semNum;
new_set->m_semaphoreHandles = sizeof(win32_sem_set_hdr);
- //array starts after header
- new_set->m_semaphoreCounts = new_set->m_semaphoreHandles + (sizeof(HANDLE) * semNum);
+ /* array starts after header */
+ new_set->m_semaphoreCounts = new_set->m_semaphoreHandles + (sizeof(HANDLE) * semNum);
sem_handles = (HANDLE *) ((off_t) new_set + new_set->m_semaphoreHandles);
sem_counts = (int *) ((off_t) new_set + new_set->m_semaphoreCounts);
@@ -186,7 +186,8 @@ semget(int semKey, int semNum, int flags)
return MAKE_OFFSET(new_set);
else
{
- int i;
+ int i;
+
/* Blow away what we've got right now... */
for (i = 0; i < semNum; ++i)
{
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 4c8dbe5e163..5e603a00b14 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -13,7 +13,7 @@
*
* Copyright (c) 2001-2003, PostgreSQL Global Development Group
*
- * $Header: /cvsroot/pgsql/src/backend/postmaster/pgstat.c,v 1.41 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/postmaster/pgstat.c,v 1.42 2003/08/04 00:43:21 momjian Exp $
* ----------
*/
#include "postgres.h"
@@ -85,6 +85,7 @@ static bool pgStatRunningInCollector = FALSE;
static int pgStatTabstatAlloc = 0;
static int pgStatTabstatUsed = 0;
static PgStat_MsgTabstat **pgStatTabstatMessages = NULL;
+
#define TABSTAT_QUANTUM 4 /* we alloc this many at a time */
static int pgStatXactCommit = 0;
@@ -146,8 +147,10 @@ static void pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len);
void
pgstat_init(void)
{
- ACCEPT_TYPE_ARG3 alen;
- struct addrinfo *addrs = NULL, *addr, hints;
+ ACCEPT_TYPE_ARG3 alen;
+ struct addrinfo *addrs = NULL,
+ *addr,
+ hints;
int ret;
/*
@@ -197,7 +200,7 @@ pgstat_init(void)
gai_strerror(ret))));
goto startup_failed;
}
-
+
for (addr = addrs; addr; addr = addr->ai_next)
{
#ifdef HAVE_UNIX_SOCKETS
@@ -233,11 +236,11 @@ pgstat_init(void)
addrs = NULL;
alen = sizeof(pgStatAddr);
- if (getsockname(pgStatSock, (struct sockaddr *)&pgStatAddr, &alen) < 0)
+ if (getsockname(pgStatSock, (struct sockaddr *) & pgStatAddr, &alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not get address of socket for statistics: %m")));
+ errmsg("could not get address of socket for statistics: %m")));
goto startup_failed;
}
@@ -265,7 +268,7 @@ pgstat_init(void)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not set statistics socket to nonblock mode: %m")));
+ errmsg("could not set statistics socket to nonblock mode: %m")));
goto startup_failed;
}
@@ -276,7 +279,7 @@ pgstat_init(void)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create pipe for statistics collector: %m")));
+ errmsg("could not create pipe for statistics collector: %m")));
goto startup_failed;
}
@@ -320,10 +323,10 @@ pgstat_start(void)
/*
* Do nothing if too soon since last collector start. This is a
- * safety valve to protect against continuous respawn attempts if
- * the collector is dying immediately at launch. Note that since
- * we will be re-called from the postmaster main loop, we will get
- * another chance later.
+ * safety valve to protect against continuous respawn attempts if the
+ * collector is dying immediately at launch. Note that since we will
+ * be re-called from the postmaster main loop, we will get another
+ * chance later.
*/
curtime = time(NULL);
if ((unsigned int) (curtime - last_pgstat_start_time) <
@@ -338,6 +341,7 @@ pgstat_start(void)
{
ereport(LOG,
(errmsg("statistics collector startup skipped")));
+
/*
* We can only get here if someone tries to manually turn
* pgstat_collect_startcollector on after it had been off.
@@ -347,7 +351,8 @@ pgstat_start(void)
}
/*
- * Okay, fork off the collector. Remember its PID for pgstat_ispgstat.
+ * Okay, fork off the collector. Remember its PID for
+ * pgstat_ispgstat.
*/
fflush(stdout);
@@ -772,7 +777,7 @@ pgstat_reset_counters(void)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to reset statistics counters")));
+ errmsg("must be superuser to reset statistics counters")));
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_RESETCOUNTER);
pgstat_send(&msg, sizeof(msg));
@@ -897,7 +902,7 @@ pgstat_initstats(PgStat_Info *stats, Relation rel)
*/
if (pgStatTabstatUsed >= pgStatTabstatAlloc)
{
- int newAlloc = pgStatTabstatAlloc + TABSTAT_QUANTUM;
+ int newAlloc = pgStatTabstatAlloc + TABSTAT_QUANTUM;
PgStat_MsgTabstat *newMessages;
PgStat_MsgTabstat **msgArray;
@@ -1251,7 +1256,7 @@ pgstat_main(void)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create pipe for statistics buffer: %m")));
+ errmsg("could not create pipe for statistics buffer: %m")));
exit(1);
}
@@ -1316,7 +1321,7 @@ pgstat_main(void)
/* assume the problem is out-of-memory */
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
@@ -1394,7 +1399,7 @@ pgstat_main(void)
continue;
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select failed in statistics collector: %m")));
+ errmsg("select failed in statistics collector: %m")));
exit(1);
}
@@ -1436,7 +1441,7 @@ pgstat_main(void)
continue;
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not read from statistics pipe: %m")));
+ errmsg("could not read from statistics pipe: %m")));
exit(1);
}
if (len == 0) /* EOF on the pipe! */
@@ -1455,7 +1460,7 @@ pgstat_main(void)
* that we can restart both processes.
*/
ereport(LOG,
- (errmsg("invalid statistics message length")));
+ (errmsg("invalid statistics message length")));
exit(1);
}
}
@@ -1579,7 +1584,7 @@ pgstat_recvbuffer(void)
int msg_send = 0; /* next send index in buffer */
int msg_recv = 0; /* next receive index */
int msg_have = 0; /* number of bytes stored */
- struct sockaddr_storage fromaddr;
+ struct sockaddr_storage fromaddr;
int fromlen;
bool overflow = false;
@@ -1607,7 +1612,7 @@ pgstat_recvbuffer(void)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not set statistics pipe to nonblock mode: %m")));
+ errmsg("could not set statistics pipe to nonblock mode: %m")));
exit(1);
}
@@ -1619,7 +1624,7 @@ pgstat_recvbuffer(void)
{
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
@@ -1692,13 +1697,13 @@ pgstat_recvbuffer(void)
{
fromlen = sizeof(fromaddr);
len = recvfrom(pgStatSock, (char *) &input_buffer,
- sizeof(PgStat_Msg), 0,
- (struct sockaddr *) &fromaddr, &fromlen);
+ sizeof(PgStat_Msg), 0,
+ (struct sockaddr *) &fromaddr, &fromlen);
if (len < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("failed to read statistics message: %m")));
+ errmsg("failed to read statistics message: %m")));
exit(1);
}
@@ -1887,7 +1892,7 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
{
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
@@ -1919,7 +1924,7 @@ pgstat_add_backend(PgStat_MsgHdr *msg)
/* assume the problem is out-of-memory */
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
}
@@ -2234,7 +2239,7 @@ pgstat_read_statsfile(HTAB **dbhash, Oid onlydb,
{
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
/* in backend, can do normal error */
@@ -2621,7 +2626,7 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len)
{
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
@@ -2803,7 +2808,7 @@ pgstat_recv_resetcounter(PgStat_MsgResetcounter *msg, int len)
/* assume the problem is out-of-memory */
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
}
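
The pgstat.c hunks above are almost entirely layout: comma-separated declarations such as the addrinfo variables in pgstat_init() are split so each variable gets its own line with the names aligned to a common column, block comments are re-wrapped to the standard fill width, and continuation lines of ereport()/errmsg() calls are re-aligned. A rough sketch of that declaration layout and of the address-list loop shown above; the function is hypothetical, and the column alignment is approximate since the real tool aligns with tabs:

    #include <string.h>
    #include <netdb.h>

    static void
    example_layout(void)
    {
        struct addrinfo *addrs = NULL,
                   *addr,
                    hints;
        int         ret;

        memset(&hints, 0, sizeof(hints));
        ret = getaddrinfo("localhost", "5432", &hints, &addrs);
        if (ret != 0)
            return;
        for (addr = addrs; addr; addr = addr->ai_next)
            continue;               /* walk each candidate address */
        freeaddrinfo(addrs);
    }
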
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 3adea9ab404..9692c8898dd 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.338 2003/08/01 23:25:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.339 2003/08/04 00:43:21 momjian Exp $
*
* NOTES
*
@@ -169,14 +169,15 @@ int ReservedBackends;
static char *progname = (char *) NULL;
/* The socket(s) we're listening to. */
-#define MAXLISTEN 10
+#define MAXLISTEN 10
static int ListenSocket[MAXLISTEN];
/* Used to reduce macros tests */
#ifdef EXEC_BACKEND
-const bool ExecBackend = true;
+const bool ExecBackend = true;
+
#else
-const bool ExecBackend = false;
+const bool ExecBackend = false;
#endif
/*
@@ -210,15 +211,15 @@ bool LogSourcePort;
bool Log_connections = false;
bool Db_user_namespace = false;
-char *rendezvous_name;
+char *rendezvous_name;
/* For FNCTL_NONBLOCK */
#if defined(WIN32) || defined(__BEOS__)
-long ioctlsocket_ret;
+long ioctlsocket_ret;
#endif
/* list of library:init-function to be preloaded */
-char *preload_libraries_string = NULL;
+char *preload_libraries_string = NULL;
/* Startup/shutdown state */
static pid_t StartupPID = 0,
@@ -290,7 +291,8 @@ static void SignalChildren(int signal);
static int CountChildren(void);
static bool CreateOptsFile(int argc, char *argv[]);
static pid_t SSDataBase(int xlop);
-static void postmaster_error(const char *fmt,...)
+static void
+postmaster_error(const char *fmt,...)
/* This lets gcc check the format string for consistency. */
__attribute__((format(printf, 1, 2)));
@@ -327,8 +329,8 @@ checkDataDir(const char *checkdir)
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not read permissions of directory \"%s\": %m",
- checkdir)));
+ errmsg("could not read permissions of directory \"%s\": %m",
+ checkdir)));
}
/*
@@ -357,7 +359,7 @@ checkDataDir(const char *checkdir)
{
fprintf(stderr,
gettext("%s could not find the database system.\n"
- "Expected to find it in the PGDATA directory \"%s\",\n"
+ "Expected to find it in the PGDATA directory \"%s\",\n"
"but failed to open file \"%s\": %s\n"),
progname, checkdir, path, strerror(errno));
ExitPostmaster(2);
@@ -374,17 +376,16 @@ reg_reply(DNSServiceRegistrationReplyErrorType errorCode, void *context)
{
}
-
#endif
int
PostmasterMain(int argc, char *argv[])
{
- int opt;
- int status;
+ int opt;
+ int status;
char original_extraoptions[MAXPGPATH];
- char *potential_DataDir = NULL;
- int i;
+ char *potential_DataDir = NULL;
+ int i;
*original_extraoptions = '\0';
@@ -581,7 +582,7 @@ PostmasterMain(int argc, char *argv[])
default:
fprintf(stderr,
- gettext("Try '%s --help' for more information.\n"),
+ gettext("Try '%s --help' for more information.\n"),
progname);
ExitPostmaster(1);
}
@@ -682,8 +683,8 @@ PostmasterMain(int argc, char *argv[])
#endif
/*
- * process any libraries that should be preloaded and
- * optionally pre-initialized
+ * process any libraries that should be preloaded and optionally
+ * pre-initialized
*/
if (preload_libraries_string)
process_preload_libraries(preload_libraries_string);
@@ -725,13 +726,14 @@ PostmasterMain(int argc, char *argv[])
{
if (VirtualHost && VirtualHost[0])
{
- char *curhost, *endptr;
- char c = 0;
+ char *curhost,
+ *endptr;
+ char c = 0;
curhost = VirtualHost;
for (;;)
{
- while (*curhost == ' ') /* skip any extra spaces */
+ while (*curhost == ' ') /* skip any extra spaces */
curhost++;
if (*curhost == '\0')
break;
@@ -747,8 +749,8 @@ PostmasterMain(int argc, char *argv[])
ListenSocket, MAXLISTEN);
if (status != STATUS_OK)
ereport(LOG,
- (errmsg("could not create listen socket for \"%s\"",
- curhost)));
+ (errmsg("could not create listen socket for \"%s\"",
+ curhost)));
if (endptr)
{
*endptr = c;
@@ -766,10 +768,10 @@ PostmasterMain(int argc, char *argv[])
ListenSocket, MAXLISTEN);
if (status != STATUS_OK)
ereport(LOG,
- (errmsg("could not create TCP/IP listen socket")));
+ (errmsg("could not create TCP/IP listen socket")));
}
-#ifdef USE_RENDEZVOUS
+#ifdef USE_RENDEZVOUS
if (rendezvous_name != NULL)
{
DNSServiceRegistrationCreate(rendezvous_name,
@@ -777,7 +779,7 @@ PostmasterMain(int argc, char *argv[])
"",
htonl(PostPortNumber),
"",
- (DNSServiceRegistrationReply)reg_reply,
+ (DNSServiceRegistrationReply) reg_reply,
NULL);
}
#endif
@@ -842,8 +844,8 @@ PostmasterMain(int argc, char *argv[])
/*
* Reset whereToSendOutput from Debug (its starting state) to None.
- * This prevents ereport from sending log messages to stderr unless the
- * syslog/stderr switch permits. We don't do this until the
+ * This prevents ereport from sending log messages to stderr unless
+ * the syslog/stderr switch permits. We don't do this until the
* postmaster is fully launched, since startup failures may as well be
* reported to stderr.
*/
@@ -989,10 +991,11 @@ usage(const char *progname)
static int
ServerLoop(void)
{
- fd_set readmask;
+ fd_set readmask;
int nSockets;
- struct timeval now, later;
- struct timezone tz;
+ struct timeval now,
+ later;
+ struct timezone tz;
int i;
gettimeofday(&now, &tz);
@@ -1090,8 +1093,8 @@ ServerLoop(void)
}
/*
- * New connection pending on any of our sockets? If so,
- * fork a child process to deal with it.
+ * New connection pending on any of our sockets? If so, fork a
+ * child process to deal with it.
*/
for (i = 0; i < MAXLISTEN; i++)
{
@@ -1105,8 +1108,8 @@ ServerLoop(void)
BackendStartup(port);
/*
- * We no longer need the open socket
- * or port structure in this process
+ * We no longer need the open socket or port structure
+ * in this process
*/
StreamClose(port->sock);
ConnFree(port);
@@ -1136,7 +1139,7 @@ initMasks(fd_set *rmask)
for (i = 0; i < MAXLISTEN; i++)
{
- int fd = ListenSocket[i];
+ int fd = ListenSocket[i];
if (fd == -1)
break;
@@ -1173,7 +1176,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
/*
* EOF after SSLdone probably means the client didn't like our
- * response to NEGOTIATE_SSL_CODE. That's not an error condition,
+ * response to NEGOTIATE_SSL_CODE. That's not an error condition,
* so don't clutter the log with a complaint.
*/
if (!SSLdone)
@@ -1197,9 +1200,9 @@ ProcessStartupPacket(Port *port, bool SSLdone)
/*
* Allocate at least the size of an old-style startup packet, plus one
- * extra byte, and make sure all are zeroes. This ensures we will have
- * null termination of all strings, in both fixed- and variable-length
- * packet layouts.
+ * extra byte, and make sure all are zeroes. This ensures we will
+ * have null termination of all strings, in both fixed- and
+ * variable-length packet layouts.
*/
if (len <= (int32) sizeof(StartupPacket))
buf = palloc0(sizeof(StartupPacket) + 1);
@@ -1243,7 +1246,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
{
ereport(COMMERROR,
(errcode_for_socket_access(),
- errmsg("failed to send SSL negotiation response: %m")));
+ errmsg("failed to send SSL negotiation response: %m")));
return STATUS_ERROR; /* close the connection */
}
@@ -1259,41 +1262,41 @@ ProcessStartupPacket(Port *port, bool SSLdone)
/* Could add additional special packet types here */
/*
- * Set FrontendProtocol now so that ereport() knows what format to send
- * if we fail during startup.
+ * Set FrontendProtocol now so that ereport() knows what format to
+ * send if we fail during startup.
*/
FrontendProtocol = proto;
/* Check we can handle the protocol the frontend is using. */
if (PG_PROTOCOL_MAJOR(proto) < PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST) ||
- PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) ||
- (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) &&
- PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))
+ PG_PROTOCOL_MAJOR(proto) > PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) ||
+ (PG_PROTOCOL_MAJOR(proto) == PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST) &&
+ PG_PROTOCOL_MINOR(proto) > PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST)))
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsupported frontend protocol %u.%u: server supports %u.0 to %u.%u",
- PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto),
+ PG_PROTOCOL_MAJOR(proto), PG_PROTOCOL_MINOR(proto),
PG_PROTOCOL_MAJOR(PG_PROTOCOL_EARLIEST),
PG_PROTOCOL_MAJOR(PG_PROTOCOL_LATEST),
PG_PROTOCOL_MINOR(PG_PROTOCOL_LATEST))));
/*
* Now fetch parameters out of startup packet and save them into the
- * Port structure. All data structures attached to the Port struct
+ * Port structure. All data structures attached to the Port struct
* must be allocated in TopMemoryContext so that they won't disappear
- * when we pass them to PostgresMain (see BackendFork). We need not worry
- * about leaking this storage on failure, since we aren't in the postmaster
- * process anymore.
+ * when we pass them to PostgresMain (see BackendFork). We need not
+ * worry about leaking this storage on failure, since we aren't in the
+ * postmaster process anymore.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
if (PG_PROTOCOL_MAJOR(proto) >= 3)
{
- int32 offset = sizeof(ProtocolVersion);
+ int32 offset = sizeof(ProtocolVersion);
/*
- * Scan packet body for name/option pairs. We can assume any
+ * Scan packet body for name/option pairs. We can assume any
* string beginning within the packet body is null-terminated,
* thanks to zeroing extra byte above.
*/
@@ -1301,9 +1304,9 @@ ProcessStartupPacket(Port *port, bool SSLdone)
while (offset < len)
{
- char *nameptr = ((char *) buf) + offset;
- int32 valoffset;
- char *valptr;
+ char *nameptr = ((char *) buf) + offset;
+ int32 valoffset;
+ char *valptr;
if (*nameptr == '\0')
break; /* found packet terminator */
@@ -1328,11 +1331,12 @@ ProcessStartupPacket(Port *port, bool SSLdone)
}
offset = valoffset + strlen(valptr) + 1;
}
+
/*
* If we didn't find a packet terminator exactly at the end of the
* given packet length, complain.
*/
- if (offset != len-1)
+ if (offset != len - 1)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid startup packet layout: expected terminator as last byte")));
@@ -1340,10 +1344,11 @@ ProcessStartupPacket(Port *port, bool SSLdone)
else
{
/*
- * Get the parameters from the old-style, fixed-width-fields startup
- * packet as C strings. The packet destination was cleared first so a
- * short packet has zeros silently added. We have to be prepared to
- * truncate the pstrdup result for oversize fields, though.
+ * Get the parameters from the old-style, fixed-width-fields
+ * startup packet as C strings. The packet destination was
+ * cleared first so a short packet has zeros silently added. We
+ * have to be prepared to truncate the pstrdup result for oversize
+ * fields, though.
*/
StartupPacket *packet = (StartupPacket *) buf;
@@ -1363,7 +1368,7 @@ ProcessStartupPacket(Port *port, bool SSLdone)
if (port->user_name == NULL || port->user_name[0] == '\0')
ereport(FATAL,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no PostgreSQL user name specified in startup packet")));
+ errmsg("no PostgreSQL user name specified in startup packet")));
/* The database defaults to the user name. */
if (port->database_name == NULL || port->database_name[0] == '\0')
@@ -1468,10 +1473,8 @@ processCancelRequest(Port *port, void *pkt)
return;
}
else if (ExecBackend)
- {
AttachSharedMemoryAndSemaphores();
- }
-
+
/* See if we have a matching backend */
for (curr = DLGetHead(BackendList); curr; curr = DLGetSucc(curr))
@@ -1590,7 +1593,7 @@ ConnFree(Port *conn)
void
ClosePostmasterPorts(bool pgstat_too)
{
- int i;
+ int i;
/* Close the listen sockets */
for (i = 0; i < MAXLISTEN; i++)
@@ -1639,7 +1642,7 @@ SIGHUP_handler(SIGNAL_ARGS)
if (Shutdown <= SmartShutdown)
{
ereport(LOG,
- (errmsg("received SIGHUP, reloading configuration files")));
+ (errmsg("received SIGHUP, reloading configuration files")));
ProcessConfigFile(PGC_SIGHUP);
#ifdef EXEC_BACKEND
write_nondefault_variables(PGC_SIGHUP);
@@ -1806,9 +1809,9 @@ reaper(SIGNAL_ARGS)
#endif
/*
- * Check if this child was the statistics collector. If so,
- * try to start a new one. (If fail, we'll try again in
- * future cycles of the main loop.)
+ * Check if this child was the statistics collector. If so, try to
+ * start a new one. (If fail, we'll try again in future cycles of
+ * the main loop.)
*/
if (pgstat_ispgstat(pid))
{
@@ -1883,7 +1886,7 @@ reaper(SIGNAL_ARGS)
*/
CleanupProc(pid, exitstatus);
- } /* loop over pending child-death reports */
+ } /* loop over pending child-death reports */
#endif
if (FatalError)
@@ -1895,7 +1898,7 @@ reaper(SIGNAL_ARGS)
if (DLGetHead(BackendList) || StartupPID > 0 || ShutdownPID > 0)
goto reaper_done;
ereport(LOG,
- (errmsg("all server processes terminated; reinitializing")));
+ (errmsg("all server processes terminated; reinitializing")));
shmem_exit(0);
reset_shared(PostPortNumber);
@@ -1979,11 +1982,11 @@ CleanupProc(int pid,
if (!FatalError)
{
LogChildExit(LOG,
- (pid == CheckPointPID) ? gettext("checkpoint process") :
+ (pid == CheckPointPID) ? gettext("checkpoint process") :
gettext("server process"),
pid, exitstatus);
ereport(LOG,
- (errmsg("terminating any other active server processes")));
+ (errmsg("terminating any other active server processes")));
}
curr = DLGetHead(BackendList);
@@ -2045,26 +2048,29 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
{
if (WIFEXITED(exitstatus))
ereport(lev,
- /*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
- */
+
+ /*
+ * translator: %s is a noun phrase describing a child process,
+ * such as "server process"
+ */
(errmsg("%s (pid %d) exited with exit code %d",
procname, pid, WEXITSTATUS(exitstatus))));
else if (WIFSIGNALED(exitstatus))
ereport(lev,
- /*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
- */
+
+ /*
+ * translator: %s is a noun phrase describing a child process,
+ * such as "server process"
+ */
(errmsg("%s (pid %d) was terminated by signal %d",
procname, pid, WTERMSIG(exitstatus))));
else
ereport(lev,
- /*
- * translator: %s is a noun phrase describing a child process,
- * such as "server process"
- */
+
+ /*
+ * translator: %s is a noun phrase describing a child process,
+ * such as "server process"
+ */
(errmsg("%s (pid %d) exited with unexpected status %d",
procname, pid, exitstatus)));
}
@@ -2195,7 +2201,7 @@ BackendStartup(Port *port)
free(bn);
errno = save_errno;
ereport(LOG,
- (errmsg("could not fork new process for connection: %m")));
+ (errmsg("could not fork new process for connection: %m")));
report_fork_failure_to_client(port, save_errno);
return STATUS_ERROR;
}
@@ -2284,18 +2290,19 @@ split_opts(char **argv, int *argcp, char *s)
static int
BackendFork(Port *port)
{
- char **av;
- int maxac;
- int ac;
+ char **av;
+ int maxac;
+ int ac;
char debugbuf[32];
char protobuf[32];
+
#ifdef EXEC_BACKEND
char pbuf[NAMEDATALEN + 256];
#endif
- int i;
- int status;
- struct timeval now;
- struct timezone tz;
+ int i;
+ int status;
+ struct timeval now;
+ struct timezone tz;
char remote_host[NI_MAXHOST];
char remote_port[NI_MAXSERV];
@@ -2324,8 +2331,8 @@ BackendFork(Port *port)
MyProcPid = getpid();
/*
- * Initialize libpq and enable reporting of ereport errors to the client.
- * Must do this now because authentication uses libpq to send
+ * Initialize libpq and enable reporting of ereport errors to the
+ * client. Must do this now because authentication uses libpq to send
* messages.
*/
pq_init(); /* initialize libpq to talk to client */
@@ -2350,7 +2357,7 @@ BackendFork(Port *port)
if (getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
remote_port, sizeof(remote_port),
- (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
+ (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
{
getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
@@ -2366,7 +2373,7 @@ BackendFork(Port *port)
if (LogSourcePort)
{
/* modify remote_host for use in ps status */
- char tmphost[NI_MAXHOST];
+ char tmphost[NI_MAXHOST];
snprintf(tmphost, sizeof(tmphost), "%s:%s", remote_host, remote_port);
StrNCpy(remote_host, tmphost, sizeof(remote_host));
@@ -2484,14 +2491,15 @@ BackendFork(Port *port)
*/
av[ac++] = "-p";
#ifdef EXEC_BACKEND
- Assert(UsedShmemSegID != 0 && UsedShmemSegAddr != NULL);
+ Assert(UsedShmemSegID != 0 && UsedShmemSegAddr != NULL);
/* database name at the end because it might contain commas */
snprintf(pbuf, NAMEDATALEN + 256, "%d,%d,%d,%p,%s", port->sock, canAcceptConnections(),
- UsedShmemSegID, UsedShmemSegAddr, port->database_name);
+ UsedShmemSegID, UsedShmemSegAddr, port->database_name);
av[ac++] = pbuf;
#else
av[ac++] = port->database_name;
#endif
+
/*
* Pass the (insecure) option switches from the connection request.
* (It's OK to mangle port->cmdline_options now.)
@@ -2507,7 +2515,7 @@ BackendFork(Port *port)
* Release postmaster's working memory context so that backend can
* recycle the space. Note this does not trash *MyProcPort, because
* ConnCreate() allocated that space with malloc() ... else we'd need
- * to copy the Port data here. Also, subsidiary data such as the
+ * to copy the Port data here. Also, subsidiary data such as the
* username isn't lost either; see ProcessStartupPacket().
*/
MemoryContextSwitchTo(TopMemoryContext);
@@ -2565,21 +2573,21 @@ sigusr1_handler(SIGNAL_ARGS)
if (CheckPointWarning != 0)
{
/*
- * This only times checkpoints forced by running out of
- * segment files. Other checkpoints could reduce
- * the frequency of forced checkpoints.
+ * This only times checkpoints forced by running out of
+ * segment files. Other checkpoints could reduce the
+ * frequency of forced checkpoints.
*/
- time_t now = time(NULL);
+ time_t now = time(NULL);
if (LastSignalledCheckpoint != 0)
{
- int elapsed_secs = now - LastSignalledCheckpoint;
+ int elapsed_secs = now - LastSignalledCheckpoint;
if (elapsed_secs < CheckPointWarning)
ereport(LOG,
(errmsg("checkpoints are occurring too frequently (%d seconds apart)",
elapsed_secs),
- errhint("Consider increasing CHECKPOINT_SEGMENTS.")));
+ errhint("Consider increasing CHECKPOINT_SEGMENTS.")));
}
LastSignalledCheckpoint = now;
}
@@ -2763,6 +2771,7 @@ SSDataBase(int xlop)
int ac = 0;
char nbbuf[32];
char xlbuf[32];
+
#ifdef EXEC_BACKEND
char pbuf[NAMEDATALEN + 256];
#endif
@@ -2817,10 +2826,10 @@ SSDataBase(int xlop)
av[ac++] = "-p";
#ifdef EXEC_BACKEND
- Assert(UsedShmemSegID != 0 && UsedShmemSegAddr != NULL);
+ Assert(UsedShmemSegID != 0 && UsedShmemSegAddr != NULL);
/* database name at the end because it might contain commas */
snprintf(pbuf, NAMEDATALEN + 256, "%d,%p,%s", UsedShmemSegID,
- UsedShmemSegAddr, "template1");
+ UsedShmemSegAddr, "template1");
av[ac++] = pbuf;
#else
av[ac++] = "template1";
@@ -2850,7 +2859,7 @@ SSDataBase(int xlop)
break;
case BS_XLOG_CHECKPOINT:
ereport(LOG,
- (errmsg("could not fork checkpoint process: %m")));
+ (errmsg("could not fork checkpoint process: %m")));
break;
case BS_XLOG_SHUTDOWN:
ereport(LOG,
diff --git a/src/backend/regex/regc_color.c b/src/backend/regex/regc_color.c
index eb250556822..da2b79b5f09 100644
--- a/src/backend/regex/regc_color.c
+++ b/src/backend/regex/regc_color.c
@@ -2,21 +2,21 @@
* colorings of characters
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regc_color.c,v 1.1 2003/02/05 17:41:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regc_color.c,v 1.2 2003/08/04 00:43:21 momjian Exp $
*
*
* Note that there are some incestuous relationships between this code and
@@ -37,8 +37,8 @@
-#define CISERR() VISERR(cm->v)
-#define CERR(e) VERR(cm->v, (e))
+#define CISERR() VISERR(cm->v)
+#define CERR(e) VERR(cm->v, (e))
@@ -46,11 +46,11 @@
* initcm - set up new colormap
*/
static void
-initcm(struct vars *v,
- struct colormap *cm)
+initcm(struct vars * v,
+ struct colormap * cm)
{
- int i;
- int j;
+ int i;
+ int j;
union tree *t;
union tree *nextt;
struct colordesc *cd;
@@ -63,21 +63,22 @@ initcm(struct vars *v,
cm->max = 0;
cm->free = 0;
- cd = cm->cd; /* cm->cd[WHITE] */
+ cd = cm->cd; /* cm->cd[WHITE] */
cd->sub = NOSUB;
cd->arcs = NULL;
cd->flags = 0;
cd->nchrs = CHR_MAX - CHR_MIN + 1;
/* upper levels of tree */
- for (t = &cm->tree[0], j = NBYTS-1; j > 0; t = nextt, j--) {
+ for (t = &cm->tree[0], j = NBYTS - 1; j > 0; t = nextt, j--)
+ {
nextt = t + 1;
- for (i = BYTTAB-1; i >= 0; i--)
+ for (i = BYTTAB - 1; i >= 0; i--)
t->tptr[i] = nextt;
}
/* bottom level is solid white */
- t = &cm->tree[NBYTS-1];
- for (i = BYTTAB-1; i >= 0; i--)
+ t = &cm->tree[NBYTS - 1];
+ for (i = BYTTAB - 1; i >= 0; i--)
t->tcolor[i] = WHITE;
cd->block = t;
}
@@ -86,16 +87,17 @@ initcm(struct vars *v,
* freecm - free dynamically-allocated things in a colormap
*/
static void
-freecm(struct colormap *cm)
+freecm(struct colormap * cm)
{
- size_t i;
+ size_t i;
union tree *cb;
cm->magic = 0;
if (NBYTS > 1)
cmtreefree(cm, cm->tree, 0);
for (i = 1; i <= cm->max; i++) /* skip WHITE */
- if (!UNUSEDCOLOR(&cm->cd[i])) {
+ if (!UNUSEDCOLOR(&cm->cd[i]))
+ {
cb = cm->cd[i].block;
if (cb != NULL)
FREE(cb);
@@ -108,24 +110,29 @@ freecm(struct colormap *cm)
* cmtreefree - free a non-terminal part of a colormap tree
*/
static void
-cmtreefree(struct colormap *cm,
- union tree *tree,
+cmtreefree(struct colormap * cm,
+ union tree * tree,
int level) /* level number (top == 0) of this block */
{
- int i;
+ int i;
union tree *t;
- union tree *fillt = &cm->tree[level+1];
+ union tree *fillt = &cm->tree[level + 1];
union tree *cb;
- assert(level < NBYTS-1); /* this level has pointers */
- for (i = BYTTAB-1; i >= 0; i--) {
+ assert(level < NBYTS - 1); /* this level has pointers */
+ for (i = BYTTAB - 1; i >= 0; i--)
+ {
t = tree->tptr[i];
assert(t != NULL);
- if (t != fillt) {
- if (level < NBYTS-2) { /* more pointer blocks below */
- cmtreefree(cm, t, level+1);
+ if (t != fillt)
+ {
+ if (level < NBYTS - 2)
+ { /* more pointer blocks below */
+ cmtreefree(cm, t, level + 1);
FREE(t);
- } else { /* color block below */
+ }
+ else
+ { /* color block below */
cb = cm->cd[t->tcolor[0]].block;
if (t != cb) /* not a solid block */
FREE(t);
@@ -137,22 +144,22 @@ cmtreefree(struct colormap *cm,
/*
* setcolor - set the color of a character in a colormap
*/
-static color /* previous color */
-setcolor(struct colormap *cm,
+static color /* previous color */
+setcolor(struct colormap * cm,
chr c,
pcolor co)
{
- uchr uc = c;
- int shift;
- int level;
- int b;
- int bottom;
+ uchr uc = c;
+ int shift;
+ int level;
+ int b;
+ int bottom;
union tree *t;
union tree *newt;
union tree *fillt;
union tree *lastt;
union tree *cb;
- color prev;
+ color prev;
assert(cm->magic == CMMAGIC);
if (CISERR() || co == COLORLESS)
@@ -160,27 +167,30 @@ setcolor(struct colormap *cm,
t = cm->tree;
for (level = 0, shift = BYTBITS * (NBYTS - 1); shift > 0;
- level++, shift -= BYTBITS) {
+ level++, shift -= BYTBITS)
+ {
b = (uc >> shift) & BYTMASK;
lastt = t;
t = lastt->tptr[b];
assert(t != NULL);
- fillt = &cm->tree[level+1];
+ fillt = &cm->tree[level + 1];
bottom = (shift <= BYTBITS) ? 1 : 0;
cb = (bottom) ? cm->cd[t->tcolor[0]].block : fillt;
- if (t == fillt || t == cb) { /* must allocate a new block */
- newt = (union tree *)MALLOC((bottom) ?
- sizeof(struct colors) : sizeof(struct ptrs));
- if (newt == NULL) {
+ if (t == fillt || t == cb)
+ { /* must allocate a new block */
+ newt = (union tree *) MALLOC((bottom) ?
+ sizeof(struct colors) : sizeof(struct ptrs));
+ if (newt == NULL)
+ {
CERR(REG_ESPACE);
return COLORLESS;
}
if (bottom)
memcpy(VS(newt->tcolor), VS(t->tcolor),
- BYTTAB*sizeof(color));
+ BYTTAB * sizeof(color));
else
memcpy(VS(newt->tptr), VS(t->tptr),
- BYTTAB*sizeof(union tree *));
+ BYTTAB * sizeof(union tree *));
t = newt;
lastt->tptr[b] = t;
}
@@ -188,7 +198,7 @@ setcolor(struct colormap *cm,
b = uc & BYTMASK;
prev = t->tcolor[b];
- t->tcolor[b] = (color)co;
+ t->tcolor[b] = (color) co;
return prev;
}
@@ -196,51 +206,59 @@ setcolor(struct colormap *cm,
* maxcolor - report largest color number in use
*/
static color
-maxcolor(struct colormap *cm)
+maxcolor(struct colormap * cm)
{
if (CISERR())
return COLORLESS;
- return (color)cm->max;
+ return (color) cm->max;
}
/*
* newcolor - find a new color (must be subject of setcolor at once)
- * Beware: may relocate the colordescs.
+ * Beware: may relocate the colordescs.
*/
-static color /* COLORLESS for error */
-newcolor(struct colormap *cm)
+static color /* COLORLESS for error */
+newcolor(struct colormap * cm)
{
struct colordesc *cd;
struct colordesc *new;
- size_t n;
+ size_t n;
if (CISERR())
return COLORLESS;
- if (cm->free != 0) {
+ if (cm->free != 0)
+ {
assert(cm->free > 0);
- assert((size_t)cm->free < cm->ncds);
+ assert((size_t) cm->free < cm->ncds);
cd = &cm->cd[cm->free];
assert(UNUSEDCOLOR(cd));
assert(cd->arcs == NULL);
cm->free = cd->sub;
- } else if (cm->max < cm->ncds - 1) {
+ }
+ else if (cm->max < cm->ncds - 1)
+ {
cm->max++;
cd = &cm->cd[cm->max];
- } else {
+ }
+ else
+ {
/* oops, must allocate more */
n = cm->ncds * 2;
- if (cm->cd == cm->cdspace) {
- new = (struct colordesc *)MALLOC(n *
- sizeof(struct colordesc));
+ if (cm->cd == cm->cdspace)
+ {
+ new = (struct colordesc *) MALLOC(n *
+ sizeof(struct colordesc));
if (new != NULL)
memcpy(VS(new), VS(cm->cdspace), cm->ncds *
- sizeof(struct colordesc));
- } else
- new = (struct colordesc *)REALLOC(cm->cd,
- n * sizeof(struct colordesc));
- if (new == NULL) {
+ sizeof(struct colordesc));
+ }
+ else
+ new = (struct colordesc *) REALLOC(cm->cd,
+ n * sizeof(struct colordesc));
+ if (new == NULL)
+ {
CERR(REG_ESPACE);
return COLORLESS;
}
@@ -257,18 +275,19 @@ newcolor(struct colormap *cm)
cd->flags = 0;
cd->block = NULL;
- return (color)(cd - cm->cd);
+ return (color) (cd - cm->cd);
}
/*
* freecolor - free a color (must have no arcs or subcolor)
*/
static void
-freecolor(struct colormap *cm,
+freecolor(struct colormap * cm,
pcolor co)
{
struct colordesc *cd = &cm->cd[co];
- color pco, nco; /* for freelist scan */
+ color pco,
+ nco; /* for freelist scan */
assert(co >= 0);
if (co == WHITE)
@@ -278,35 +297,43 @@ freecolor(struct colormap *cm,
assert(cd->sub == NOSUB);
assert(cd->nchrs == 0);
cd->flags = FREECOL;
- if (cd->block != NULL) {
+ if (cd->block != NULL)
+ {
FREE(cd->block);
- cd->block = NULL; /* just paranoia */
+ cd->block = NULL; /* just paranoia */
}
- if ((size_t)co == cm->max) {
+ if ((size_t) co == cm->max)
+ {
while (cm->max > WHITE && UNUSEDCOLOR(&cm->cd[cm->max]))
cm->max--;
assert(cm->free >= 0);
- while ((size_t)cm->free > cm->max)
+ while ((size_t) cm->free > cm->max)
cm->free = cm->cd[cm->free].sub;
- if (cm->free > 0) {
+ if (cm->free > 0)
+ {
assert(cm->free < cm->max);
pco = cm->free;
nco = cm->cd[pco].sub;
while (nco > 0)
- if ((size_t)nco > cm->max) {
+ if ((size_t) nco > cm->max)
+ {
/* take this one out of freelist */
nco = cm->cd[nco].sub;
cm->cd[pco].sub = nco;
- } else {
+ }
+ else
+ {
assert(nco < cm->max);
pco = nco;
nco = cm->cd[pco].sub;
}
}
- } else {
+ }
+ else
+ {
cd->sub = cm->free;
- cm->free = (color)(cd - cm->cd);
+ cm->free = (color) (cd - cm->cd);
}
}
@@ -314,9 +341,9 @@ freecolor(struct colormap *cm,
* pseudocolor - allocate a false color, to be managed by other means
*/
static color
-pseudocolor(struct colormap *cm)
+pseudocolor(struct colormap * cm)
{
- color co;
+ color co;
co = newcolor(cm);
if (CISERR())
@@ -330,10 +357,10 @@ pseudocolor(struct colormap *cm)
* subcolor - allocate a new subcolor (if necessary) to this chr
*/
static color
-subcolor(struct colormap *cm, chr c)
+subcolor(struct colormap * cm, chr c)
{
- color co; /* current color of c */
- color sco; /* new subcolor */
+ color co; /* current color of c */
+ color sco; /* new subcolor */
co = GETCOLOR(cm, c);
sco = newsub(cm, co);
@@ -341,8 +368,8 @@ subcolor(struct colormap *cm, chr c)
return COLORLESS;
assert(sco != COLORLESS);
- if (co == sco) /* already in an open subcolor */
- return co; /* rest is redundant */
+ if (co == sco) /* already in an open subcolor */
+ return co; /* rest is redundant */
cm->cd[co].nchrs--;
cm->cd[sco].nchrs++;
setcolor(cm, c, sco);
@@ -353,17 +380,19 @@ subcolor(struct colormap *cm, chr c)
* newsub - allocate a new subcolor (if necessary) for a color
*/
static color
-newsub(struct colormap *cm,
+newsub(struct colormap * cm,
pcolor co)
{
- color sco; /* new subcolor */
+ color sco; /* new subcolor */
sco = cm->cd[co].sub;
- if (sco == NOSUB) { /* color has no open subcolor */
- if (cm->cd[co].nchrs == 1) /* optimization */
+ if (sco == NOSUB)
+ { /* color has no open subcolor */
+ if (cm->cd[co].nchrs == 1) /* optimization */
return co;
- sco = newcolor(cm); /* must create subcolor */
- if (sco == COLORLESS) {
+ sco = newcolor(cm); /* must create subcolor */
+ if (sco == COLORLESS)
+ {
assert(CISERR());
return COLORLESS;
}
@@ -379,23 +408,23 @@ newsub(struct colormap *cm,
* subrange - allocate new subcolors to this range of chrs, fill in arcs
*/
static void
-subrange(struct vars *v,
+subrange(struct vars * v,
chr from,
chr to,
- struct state *lp,
- struct state *rp)
+ struct state * lp,
+ struct state * rp)
{
- uchr uf;
- int i;
+ uchr uf;
+ int i;
assert(from <= to);
/* first, align "from" on a tree-block boundary */
- uf = (uchr)from;
- i = (int)( ((uf + BYTTAB-1) & (uchr)~BYTMASK) - uf );
+ uf = (uchr) from;
+ i = (int) (((uf + BYTTAB - 1) & (uchr) ~ BYTMASK) - uf);
for (; from <= to && i > 0; i--, from++)
newarc(v->nfa, PLAIN, subcolor(v->cm, from), lp, rp);
- if (from > to) /* didn't reach a boundary */
+ if (from > to) /* didn't reach a boundary */
return;
/* deal with whole blocks */
@@ -411,25 +440,25 @@ subrange(struct vars *v,
* subblock - allocate new subcolors for one tree block of chrs, fill in arcs
*/
static void
-subblock(struct vars *v,
+subblock(struct vars * v,
chr start, /* first of BYTTAB chrs */
- struct state *lp,
- struct state *rp)
+ struct state * lp,
+ struct state * rp)
{
- uchr uc = start;
+ uchr uc = start;
struct colormap *cm = v->cm;
- int shift;
- int level;
- int i;
- int b;
+ int shift;
+ int level;
+ int i;
+ int b;
union tree *t;
union tree *cb;
union tree *fillt;
union tree *lastt;
- int previ;
- int ndone;
- color co;
- color sco;
+ int previ;
+ int ndone;
+ color co;
+ color sco;
assert((uc % BYTTAB) == 0);
@@ -437,20 +466,23 @@ subblock(struct vars *v,
t = cm->tree;
fillt = NULL;
for (level = 0, shift = BYTBITS * (NBYTS - 1); shift > 0;
- level++, shift -= BYTBITS) {
+ level++, shift -= BYTBITS)
+ {
b = (uc >> shift) & BYTMASK;
lastt = t;
t = lastt->tptr[b];
assert(t != NULL);
- fillt = &cm->tree[level+1];
- if (t == fillt && shift > BYTBITS) { /* need new ptr block */
- t = (union tree *)MALLOC(sizeof(struct ptrs));
- if (t == NULL) {
+ fillt = &cm->tree[level + 1];
+ if (t == fillt && shift > BYTBITS)
+ { /* need new ptr block */
+ t = (union tree *) MALLOC(sizeof(struct ptrs));
+ if (t == NULL)
+ {
CERR(REG_ESPACE);
return;
}
memcpy(VS(t->tptr), VS(fillt->tptr),
- BYTTAB*sizeof(union tree *));
+ BYTTAB * sizeof(union tree *));
lastt->tptr[b] = t;
}
}
@@ -458,13 +490,16 @@ subblock(struct vars *v,
/* special cases: fill block or solid block */
co = t->tcolor[0];
cb = cm->cd[co].block;
- if (t == fillt || t == cb) {
+ if (t == fillt || t == cb)
+ {
/* either way, we want a subcolor solid block */
sco = newsub(cm, co);
t = cm->cd[sco].block;
- if (t == NULL) { /* must set it up */
- t = (union tree *)MALLOC(sizeof(struct colors));
- if (t == NULL) {
+ if (t == NULL)
+ { /* must set it up */
+ t = (union tree *) MALLOC(sizeof(struct colors));
+ if (t == NULL)
+ {
CERR(REG_ESPACE);
return;
}
@@ -482,12 +517,14 @@ subblock(struct vars *v,
/* general case, a mixed block to be altered */
i = 0;
- while (i < BYTTAB) {
+ while (i < BYTTAB)
+ {
co = t->tcolor[i];
sco = newsub(cm, co);
newarc(v->nfa, PLAIN, sco, lp, rp);
previ = i;
- do {
+ do
+ {
t->tcolor[i++] = sco;
} while (i < BYTTAB && t->tcolor[i] == co);
ndone = i - previ;
@@ -500,30 +537,37 @@ subblock(struct vars *v,
* okcolors - promote subcolors to full colors
*/
static void
-okcolors(struct nfa *nfa,
- struct colormap *cm)
+okcolors(struct nfa * nfa,
+ struct colormap * cm)
{
struct colordesc *cd;
struct colordesc *end = CDEND(cm);
struct colordesc *scd;
struct arc *a;
- color co;
- color sco;
+ color co;
+ color sco;
- for (cd = cm->cd, co = 0; cd < end; cd++, co++) {
+ for (cd = cm->cd, co = 0; cd < end; cd++, co++)
+ {
sco = cd->sub;
- if (UNUSEDCOLOR(cd) || sco == NOSUB) {
+ if (UNUSEDCOLOR(cd) || sco == NOSUB)
+ {
/* has no subcolor, no further action */
- } else if (sco == co) {
+ }
+ else if (sco == co)
+ {
/* is subcolor, let parent deal with it */
- } else if (cd->nchrs == 0) {
+ }
+ else if (cd->nchrs == 0)
+ {
/* parent empty, its arcs change color to subcolor */
cd->sub = NOSUB;
scd = &cm->cd[sco];
assert(scd->nchrs > 0);
assert(scd->sub == sco);
scd->sub = NOSUB;
- while ((a = cd->arcs) != NULL) {
+ while ((a = cd->arcs) != NULL)
+ {
assert(a->co == co);
/* uncolorchain(cm, a); */
cd->arcs = a->colorchain;
@@ -533,14 +577,17 @@ okcolors(struct nfa *nfa,
scd->arcs = a;
}
freecolor(cm, co);
- } else {
+ }
+ else
+ {
/* parent's arcs must gain parallel subcolor arcs */
cd->sub = NOSUB;
scd = &cm->cd[sco];
assert(scd->nchrs > 0);
assert(scd->sub == sco);
scd->sub = NOSUB;
- for (a = cd->arcs; a != NULL; a = a->colorchain) {
+ for (a = cd->arcs; a != NULL; a = a->colorchain)
+ {
assert(a->co == co);
newarc(nfa, a->type, sco, a->from, a->to);
}
@@ -552,8 +599,8 @@ okcolors(struct nfa *nfa,
* colorchain - add this arc to the color chain of its color
*/
static void
-colorchain(struct colormap *cm,
- struct arc *a)
+colorchain(struct colormap * cm,
+ struct arc * a)
{
struct colordesc *cd = &cm->cd[a->co];
@@ -565,32 +612,33 @@ colorchain(struct colormap *cm,
* uncolorchain - delete this arc from the color chain of its color
*/
static void
-uncolorchain(struct colormap *cm,
- struct arc *a)
+uncolorchain(struct colormap * cm,
+ struct arc * a)
{
struct colordesc *cd = &cm->cd[a->co];
struct arc *aa;
aa = cd->arcs;
- if (aa == a) /* easy case */
+ if (aa == a) /* easy case */
cd->arcs = a->colorchain;
- else {
+ else
+ {
for (; aa != NULL && aa->colorchain != a; aa = aa->colorchain)
continue;
assert(aa != NULL);
aa->colorchain = a->colorchain;
}
- a->colorchain = NULL; /* paranoia */
+ a->colorchain = NULL; /* paranoia */
}
/*
* singleton - is this character in its own color?
*/
-static int /* predicate */
-singleton(struct colormap *cm,
+static int /* predicate */
+singleton(struct colormap * cm,
chr c)
{
- color co; /* color of c */
+ color co; /* color of c */
co = GETCOLOR(cm, c);
if (cm->cd[co].nchrs == 1 && cm->cd[co].sub == NOSUB)
@@ -602,20 +650,20 @@ singleton(struct colormap *cm,
* rainbow - add arcs of all full colors (but one) between specified states
*/
static void
-rainbow(struct nfa *nfa,
- struct colormap *cm,
+rainbow(struct nfa * nfa,
+ struct colormap * cm,
int type,
pcolor but, /* COLORLESS if no exceptions */
- struct state *from,
- struct state *to)
+ struct state * from,
+ struct state * to)
{
struct colordesc *cd;
struct colordesc *end = CDEND(cm);
- color co;
+ color co;
for (cd = cm->cd, co = 0; cd < end && !CISERR(); cd++, co++)
if (!UNUSEDCOLOR(cd) && cd->sub != co && co != but &&
- !(cd->flags&PSEUDO))
+ !(cd->flags & PSEUDO))
newarc(nfa, type, co, from, to);
}
@@ -625,20 +673,21 @@ rainbow(struct nfa *nfa,
* The calling sequence ought to be reconciled with cloneouts().
*/
static void
-colorcomplement(struct nfa *nfa,
- struct colormap *cm,
+colorcomplement(struct nfa * nfa,
+ struct colormap * cm,
int type,
- struct state *of, /* complements of this guy's PLAIN outarcs */
- struct state *from,
- struct state *to)
+ struct state * of, /* complements of this guy's PLAIN
+ * outarcs */
+ struct state * from,
+ struct state * to)
{
struct colordesc *cd;
struct colordesc *end = CDEND(cm);
- color co;
+ color co;
assert(of != from);
for (cd = cm->cd, co = 0; cd < end && !CISERR(); cd++, co++)
- if (!UNUSEDCOLOR(cd) && !(cd->flags&PSEUDO))
+ if (!UNUSEDCOLOR(cd) && !(cd->flags & PSEUDO))
if (findarc(of, PLAIN, co) == NULL)
newarc(nfa, type, co, from, to);
}
@@ -650,28 +699,29 @@ colorcomplement(struct nfa *nfa,
* dumpcolors - debugging output
*/
static void
-dumpcolors(struct colormap *cm,
+dumpcolors(struct colormap * cm,
FILE *f)
{
struct colordesc *cd;
struct colordesc *end;
- color co;
- chr c;
- char *has;
+ color co;
+ chr c;
+ char *has;
- fprintf(f, "max %ld\n", (long)cm->max);
+ fprintf(f, "max %ld\n", (long) cm->max);
if (NBYTS > 1)
fillcheck(cm, cm->tree, 0, f);
end = CDEND(cm);
- for (cd = cm->cd + 1, co = 1; cd < end; cd++, co++) /* skip 0 */
- if (!UNUSEDCOLOR(cd)) {
+ for (cd = cm->cd + 1, co = 1; cd < end; cd++, co++) /* skip 0 */
+ if (!UNUSEDCOLOR(cd))
+ {
assert(cd->nchrs > 0);
has = (cd->block != NULL) ? "#" : "";
- if (cd->flags&PSEUDO)
- fprintf(f, "#%2ld%s(ps): ", (long)co, has);
+ if (cd->flags & PSEUDO)
+ fprintf(f, "#%2ld%s(ps): ", (long) co, has);
else
- fprintf(f, "#%2ld%s(%2d): ", (long)co,
- has, cd->nchrs);
+ fprintf(f, "#%2ld%s(%2d): ", (long) co,
+ has, cd->nchrs);
/* it's hard to do this more efficiently */
for (c = CHR_MIN; c < CHR_MAX; c++)
if (GETCOLOR(cm, c) == co)
@@ -687,24 +737,26 @@ dumpcolors(struct colormap *cm,
* fillcheck - check proper filling of a tree
*/
static void
-fillcheck(struct colormap *cm,
- union tree *tree,
+fillcheck(struct colormap * cm,
+ union tree * tree,
int level, /* level number (top == 0) of this block */
FILE *f)
{
- int i;
+ int i;
union tree *t;
- union tree *fillt = &cm->tree[level+1];
+ union tree *fillt = &cm->tree[level + 1];
- assert(level < NBYTS-1); /* this level has pointers */
- for (i = BYTTAB-1; i >= 0; i--) {
+ assert(level < NBYTS - 1); /* this level has pointers */
+ for (i = BYTTAB - 1; i >= 0; i--)
+ {
t = tree->tptr[i];
if (t == NULL)
fprintf(f, "NULL found in filled tree!\n");
else if (t == fillt)
- {}
- else if (level < NBYTS-2) /* more pointer blocks below */
- fillcheck(cm, t, level+1, f);
+ {
+ }
+ else if (level < NBYTS - 2) /* more pointer blocks below */
+ fillcheck(cm, t, level + 1, f);
}
}
@@ -720,9 +772,9 @@ dumpchr(chr c,
if (c == '\\')
fprintf(f, "\\\\");
else if (c > ' ' && c <= '~')
- putc((char)c, f);
+ putc((char) c, f);
else
- fprintf(f, "\\u%04lx", (long)c);
+ fprintf(f, "\\u%04lx", (long) c);
}
-#endif /* REG_DEBUG */
+#endif /* REG_DEBUG */
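
The regex sources (regc_color.c here and the files that follow) were imported in Henry Spencer's original layout, so pgindent's changes run deeper: cuddled braces such as "if (t == fillt || t == cb) {" are moved onto their own line, a space is added after casts and around binary operators ("(color)co" becomes "(color) co", "BYTTAB-1" becomes "BYTTAB - 1"), local declarations are re-aligned, and trailing comments are pushed out to the standard comment column. A small function written directly in the target style as a sketch; it is not part of the patch:

    static int
    example_count_nonzero(const int *arr, int n)
    {
        int         i;
        int         count = 0;

        for (i = 0; i < n; i++)
        {
            if (arr[i] != 0)
                count++;
        }
        return count;
    }

The same mechanical conversion accounts for most of the churn in regc_cvec.c and regc_lex.c below.
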
diff --git a/src/backend/regex/regc_cvec.c b/src/backend/regex/regc_cvec.c
index 3b4e6ddb61b..502bbeeca7e 100644
--- a/src/backend/regex/regc_cvec.c
+++ b/src/backend/regex/regc_cvec.c
@@ -2,21 +2,21 @@
* Utility functions for handling cvecs
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regc_cvec.c,v 1.1 2003/02/05 17:41:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regc_cvec.c,v 1.2 2003/08/04 00:43:21 momjian Exp $
*
*/
@@ -40,23 +40,24 @@ newcvec(int nchrs, /* to hold this many chrs... */
int nranges, /* ... and this many ranges... */
int nmcces) /* ... and this many MCCEs */
{
- size_t n;
- size_t nc;
- struct cvec *cv;
-
- nc = (size_t)nchrs + (size_t)nmcces*(MAXMCCE+1) + (size_t)nranges*2;
- n = sizeof(struct cvec) + (size_t)(nmcces-1)*sizeof(chr *)
- + nc*sizeof(chr);
- cv = (struct cvec *)MALLOC(n);
- if (cv == NULL) {
- return NULL;
- }
- cv->chrspace = nchrs;
- cv->chrs = (chr *)&cv->mcces[nmcces]; /* chrs just after MCCE ptrs */
- cv->mccespace = nmcces;
- cv->ranges = cv->chrs + nchrs + nmcces*(MAXMCCE+1);
- cv->rangespace = nranges;
- return clearcvec(cv);
+ size_t n;
+ size_t nc;
+ struct cvec *cv;
+
+ nc = (size_t) nchrs + (size_t) nmcces *(MAXMCCE + 1) + (size_t) nranges *2;
+
+ n = sizeof(struct cvec) + (size_t) (nmcces - 1) * sizeof(chr *)
+ + nc * sizeof(chr);
+ cv = (struct cvec *) MALLOC(n);
+ if (cv == NULL)
+ return NULL;
+ cv->chrspace = nchrs;
+ cv->chrs = (chr *) & cv->mcces[nmcces]; /* chrs just after MCCE
+ * ptrs */
+ cv->mccespace = nmcces;
+ cv->ranges = cv->chrs + nchrs + nmcces * (MAXMCCE + 1);
+ cv->rangespace = nranges;
+ return clearcvec(cv);
}
/*
@@ -64,131 +65,125 @@ newcvec(int nchrs, /* to hold this many chrs... */
* Returns pointer as convenience.
*/
static struct cvec *
-clearcvec(struct cvec *cv)
+clearcvec(struct cvec * cv)
{
- int i;
-
- assert(cv != NULL);
- cv->nchrs = 0;
- assert(cv->chrs == (chr *)&cv->mcces[cv->mccespace]);
- cv->nmcces = 0;
- cv->nmccechrs = 0;
- cv->nranges = 0;
- for (i = 0; i < cv->mccespace; i++) {
- cv->mcces[i] = NULL;
- }
-
- return cv;
+ int i;
+
+ assert(cv != NULL);
+ cv->nchrs = 0;
+ assert(cv->chrs == (chr *) & cv->mcces[cv->mccespace]);
+ cv->nmcces = 0;
+ cv->nmccechrs = 0;
+ cv->nranges = 0;
+ for (i = 0; i < cv->mccespace; i++)
+ cv->mcces[i] = NULL;
+
+ return cv;
}
/*
* addchr - add a chr to a cvec
*/
static void
-addchr(struct cvec *cv, /* character vector */
- chr c) /* character to add */
+addchr(struct cvec * cv, /* character vector */
+ chr c) /* character to add */
{
- assert(cv->nchrs < cv->chrspace - cv->nmccechrs);
- cv->chrs[cv->nchrs++] = (chr)c;
+ assert(cv->nchrs < cv->chrspace - cv->nmccechrs);
+ cv->chrs[cv->nchrs++] = (chr) c;
}
/*
* addrange - add a range to a cvec
*/
static void
-addrange(struct cvec *cv, /* character vector */
+addrange(struct cvec * cv, /* character vector */
chr from, /* first character of range */
chr to) /* last character of range */
{
- assert(cv->nranges < cv->rangespace);
- cv->ranges[cv->nranges*2] = (chr)from;
- cv->ranges[cv->nranges*2 + 1] = (chr)to;
- cv->nranges++;
+ assert(cv->nranges < cv->rangespace);
+ cv->ranges[cv->nranges * 2] = (chr) from;
+ cv->ranges[cv->nranges * 2 + 1] = (chr) to;
+ cv->nranges++;
}
/*
* addmcce - add an MCCE to a cvec
*/
static void
-addmcce(struct cvec *cv, /* character vector */
- chr *startp, /* beginning of text */
- chr *endp) /* just past end of text */
+addmcce(struct cvec * cv, /* character vector */
+ chr * startp, /* beginning of text */
+ chr * endp) /* just past end of text */
{
- int len;
- int i;
- chr *s;
- chr *d;
-
- if (startp == NULL && endp == NULL) {
- return;
- }
- len = endp - startp;
- assert(len > 0);
- assert(cv->nchrs + len < cv->chrspace - cv->nmccechrs);
- assert(cv->nmcces < cv->mccespace);
- d = &cv->chrs[cv->chrspace - cv->nmccechrs - len - 1];
- cv->mcces[cv->nmcces++] = d;
- for (s = startp, i = len; i > 0; s++, i--) {
- *d++ = *s;
- }
- *d++ = 0; /* endmarker */
- assert(d == &cv->chrs[cv->chrspace - cv->nmccechrs]);
- cv->nmccechrs += len + 1;
+ int len;
+ int i;
+ chr *s;
+ chr *d;
+
+ if (startp == NULL && endp == NULL)
+ return;
+ len = endp - startp;
+ assert(len > 0);
+ assert(cv->nchrs + len < cv->chrspace - cv->nmccechrs);
+ assert(cv->nmcces < cv->mccespace);
+ d = &cv->chrs[cv->chrspace - cv->nmccechrs - len - 1];
+ cv->mcces[cv->nmcces++] = d;
+ for (s = startp, i = len; i > 0; s++, i--)
+ *d++ = *s;
+ *d++ = 0; /* endmarker */
+ assert(d == &cv->chrs[cv->chrspace - cv->nmccechrs]);
+ cv->nmccechrs += len + 1;
}
/*
* haschr - does a cvec contain this chr?
*/
-static int /* predicate */
-haschr(struct cvec *cv, /* character vector */
- chr c) /* character to test for */
+static int /* predicate */
+haschr(struct cvec * cv, /* character vector */
+ chr c) /* character to test for */
{
- int i;
- chr *p;
+ int i;
+ chr *p;
- for (p = cv->chrs, i = cv->nchrs; i > 0; p++, i--) {
- if (*p == c) {
- return 1;
+ for (p = cv->chrs, i = cv->nchrs; i > 0; p++, i--)
+ {
+ if (*p == c)
+ return 1;
}
- }
- for (p = cv->ranges, i = cv->nranges; i > 0; p += 2, i--) {
- if ((*p <= c) && (c <= *(p+1))) {
- return 1;
+ for (p = cv->ranges, i = cv->nranges; i > 0; p += 2, i--)
+ {
+ if ((*p <= c) && (c <= *(p + 1)))
+ return 1;
}
- }
- return 0;
+ return 0;
}
/*
* getcvec - get a cvec, remembering it as v->cv
*/
static struct cvec *
-getcvec(struct vars *v, /* context */
+getcvec(struct vars * v, /* context */
int nchrs, /* to hold this many chrs... */
int nranges, /* ... and this many ranges... */
int nmcces) /* ... and this many MCCEs */
{
- if (v->cv != NULL && nchrs <= v->cv->chrspace &&
- nranges <= v->cv->rangespace && nmcces <= v->cv->mccespace) {
- return clearcvec(v->cv);
- }
-
- if (v->cv != NULL) {
- freecvec(v->cv);
- }
- v->cv = newcvec(nchrs, nranges, nmcces);
- if (v->cv == NULL) {
- ERR(REG_ESPACE);
- }
-
- return v->cv;
+ if (v->cv != NULL && nchrs <= v->cv->chrspace &&
+ nranges <= v->cv->rangespace && nmcces <= v->cv->mccespace)
+ return clearcvec(v->cv);
+
+ if (v->cv != NULL)
+ freecvec(v->cv);
+ v->cv = newcvec(nchrs, nranges, nmcces);
+ if (v->cv == NULL)
+ ERR(REG_ESPACE);
+
+ return v->cv;
}
/*
* freecvec - free a cvec
*/
static void
-freecvec(struct cvec *cv)
+freecvec(struct cvec * cv)
{
- FREE(cv);
+ FREE(cv);
}
diff --git a/src/backend/regex/regc_lex.c b/src/backend/regex/regc_lex.c
index 2f1a5840ff2..2407e06ef47 100644
--- a/src/backend/regex/regc_lex.c
+++ b/src/backend/regex/regc_lex.c
@@ -2,21 +2,21 @@
* lexical analyzer
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -28,159 +28,168 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regc_lex.c,v 1.1 2003/02/05 17:41:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regc_lex.c,v 1.2 2003/08/04 00:43:21 momjian Exp $
*
*/
/* scanning macros (know about v) */
-#define ATEOS() (v->now >= v->stop)
-#define HAVE(n) (v->stop - v->now >= (n))
-#define NEXT1(c) (!ATEOS() && *v->now == CHR(c))
-#define NEXT2(a,b) (HAVE(2) && *v->now == CHR(a) && *(v->now+1) == CHR(b))
-#define NEXT3(a,b,c) (HAVE(3) && *v->now == CHR(a) && \
+#define ATEOS() (v->now >= v->stop)
+#define HAVE(n) (v->stop - v->now >= (n))
+#define NEXT1(c) (!ATEOS() && *v->now == CHR(c))
+#define NEXT2(a,b) (HAVE(2) && *v->now == CHR(a) && *(v->now+1) == CHR(b))
+#define NEXT3(a,b,c) (HAVE(3) && *v->now == CHR(a) && \
*(v->now+1) == CHR(b) && \
*(v->now+2) == CHR(c))
-#define SET(c) (v->nexttype = (c))
-#define SETV(c, n) (v->nexttype = (c), v->nextvalue = (n))
-#define RET(c) return (SET(c), 1)
-#define RETV(c, n) return (SETV(c, n), 1)
-#define FAILW(e) return (ERR(e), 0) /* ERR does SET(EOS) */
-#define LASTTYPE(t) (v->lasttype == (t))
+#define SET(c) (v->nexttype = (c))
+#define SETV(c, n) (v->nexttype = (c), v->nextvalue = (n))
+#define RET(c) return (SET(c), 1)
+#define RETV(c, n) return (SETV(c, n), 1)
+#define FAILW(e) return (ERR(e), 0) /* ERR does SET(EOS) */
+#define LASTTYPE(t) (v->lasttype == (t))
/* lexical contexts */
-#define L_ERE 1 /* mainline ERE/ARE */
-#define L_BRE 2 /* mainline BRE */
-#define L_Q 3 /* REG_QUOTE */
-#define L_EBND 4 /* ERE/ARE bound */
-#define L_BBND 5 /* BRE bound */
-#define L_BRACK 6 /* brackets */
-#define L_CEL 7 /* collating element */
-#define L_ECL 8 /* equivalence class */
-#define L_CCL 9 /* character class */
-#define INTOCON(c) (v->lexcon = (c))
-#define INCON(con) (v->lexcon == (con))
+#define L_ERE 1 /* mainline ERE/ARE */
+#define L_BRE 2 /* mainline BRE */
+#define L_Q 3 /* REG_QUOTE */
+#define L_EBND 4 /* ERE/ARE bound */
+#define L_BBND 5 /* BRE bound */
+#define L_BRACK 6 /* brackets */
+#define L_CEL 7 /* collating element */
+#define L_ECL 8 /* equivalence class */
+#define L_CCL 9 /* character class */
+#define INTOCON(c) (v->lexcon = (c))
+#define INCON(con) (v->lexcon == (con))
/* construct pointer past end of chr array */
-#define ENDOF(array) ((array) + sizeof(array)/sizeof(chr))
+#define ENDOF(array) ((array) + sizeof(array)/sizeof(chr))
/*
* lexstart - set up lexical stuff, scan leading options
*/
static void
-lexstart(struct vars *v)
+lexstart(struct vars * v)
{
- prefixes(v); /* may turn on new type bits etc. */
+ prefixes(v); /* may turn on new type bits etc. */
NOERR();
- if (v->cflags&REG_QUOTE) {
- assert(!(v->cflags&(REG_ADVANCED|REG_EXPANDED|REG_NEWLINE)));
+ if (v->cflags & REG_QUOTE)
+ {
+ assert(!(v->cflags & (REG_ADVANCED | REG_EXPANDED | REG_NEWLINE)));
INTOCON(L_Q);
- } else if (v->cflags&REG_EXTENDED) {
- assert(!(v->cflags&REG_QUOTE));
+ }
+ else if (v->cflags & REG_EXTENDED)
+ {
+ assert(!(v->cflags & REG_QUOTE));
INTOCON(L_ERE);
- } else {
- assert(!(v->cflags&(REG_QUOTE|REG_ADVF)));
+ }
+ else
+ {
+ assert(!(v->cflags & (REG_QUOTE | REG_ADVF)));
INTOCON(L_BRE);
}
v->nexttype = EMPTY; /* remember we were at the start */
- next(v); /* set up the first token */
+ next(v); /* set up the first token */
}
/*
* prefixes - implement various special prefixes
*/
static void
-prefixes(struct vars *v)
+prefixes(struct vars * v)
{
/* literal string doesn't get any of this stuff */
- if (v->cflags&REG_QUOTE)
+ if (v->cflags & REG_QUOTE)
return;
- /* initial "***" gets special things */
+ /* initial "***" gets special things */
if (HAVE(4) && NEXT3('*', '*', '*'))
- switch (*(v->now + 3)) {
- case CHR('?'): /* "***?" error, msg shows version */
- ERR(REG_BADPAT);
- return; /* proceed no further */
- break;
- case CHR('='): /* "***=" shifts to literal string */
- NOTE(REG_UNONPOSIX);
- v->cflags |= REG_QUOTE;
- v->cflags &= ~(REG_ADVANCED|REG_EXPANDED|REG_NEWLINE);
- v->now += 4;
- return; /* and there can be no more prefixes */
- break;
- case CHR(':'): /* "***:" shifts to AREs */
- NOTE(REG_UNONPOSIX);
- v->cflags |= REG_ADVANCED;
- v->now += 4;
- break;
- default: /* otherwise *** is just an error */
- ERR(REG_BADRPT);
- return;
- break;
+ switch (*(v->now + 3))
+ {
+ case CHR('?'): /* "***?" error, msg shows version */
+ ERR(REG_BADPAT);
+ return; /* proceed no further */
+ break;
+ case CHR('='): /* "***=" shifts to literal string */
+ NOTE(REG_UNONPOSIX);
+ v->cflags |= REG_QUOTE;
+ v->cflags &= ~(REG_ADVANCED | REG_EXPANDED | REG_NEWLINE);
+ v->now += 4;
+ return; /* and there can be no more prefixes */
+ break;
+ case CHR(':'): /* "***:" shifts to AREs */
+ NOTE(REG_UNONPOSIX);
+ v->cflags |= REG_ADVANCED;
+ v->now += 4;
+ break;
+ default: /* otherwise *** is just an error */
+ ERR(REG_BADRPT);
+ return;
+ break;
}
/* BREs and EREs don't get embedded options */
- if ((v->cflags&REG_ADVANCED) != REG_ADVANCED)
+ if ((v->cflags & REG_ADVANCED) != REG_ADVANCED)
return;
/* embedded options (AREs only) */
- if (HAVE(3) && NEXT2('(', '?') && iscalpha(*(v->now + 2))) {
+ if (HAVE(3) && NEXT2('(', '?') && iscalpha(*(v->now + 2)))
+ {
NOTE(REG_UNONPOSIX);
v->now += 2;
for (; !ATEOS() && iscalpha(*v->now); v->now++)
- switch (*v->now) {
- case CHR('b'): /* BREs (but why???) */
- v->cflags &= ~(REG_ADVANCED|REG_QUOTE);
- break;
- case CHR('c'): /* case sensitive */
- v->cflags &= ~REG_ICASE;
- break;
- case CHR('e'): /* plain EREs */
- v->cflags |= REG_EXTENDED;
- v->cflags &= ~(REG_ADVF|REG_QUOTE);
- break;
- case CHR('i'): /* case insensitive */
- v->cflags |= REG_ICASE;
- break;
- case CHR('m'): /* Perloid synonym for n */
- case CHR('n'): /* \n affects ^ $ . [^ */
- v->cflags |= REG_NEWLINE;
- break;
- case CHR('p'): /* ~Perl, \n affects . [^ */
- v->cflags |= REG_NLSTOP;
- v->cflags &= ~REG_NLANCH;
- break;
- case CHR('q'): /* literal string */
- v->cflags |= REG_QUOTE;
- v->cflags &= ~REG_ADVANCED;
- break;
- case CHR('s'): /* single line, \n ordinary */
- v->cflags &= ~REG_NEWLINE;
- break;
- case CHR('t'): /* tight syntax */
- v->cflags &= ~REG_EXPANDED;
- break;
- case CHR('w'): /* weird, \n affects ^ $ only */
- v->cflags &= ~REG_NLSTOP;
- v->cflags |= REG_NLANCH;
- break;
- case CHR('x'): /* expanded syntax */
- v->cflags |= REG_EXPANDED;
- break;
- default:
- ERR(REG_BADOPT);
- return;
+ switch (*v->now)
+ {
+ case CHR('b'): /* BREs (but why???) */
+ v->cflags &= ~(REG_ADVANCED | REG_QUOTE);
+ break;
+ case CHR('c'): /* case sensitive */
+ v->cflags &= ~REG_ICASE;
+ break;
+ case CHR('e'): /* plain EREs */
+ v->cflags |= REG_EXTENDED;
+ v->cflags &= ~(REG_ADVF | REG_QUOTE);
+ break;
+ case CHR('i'): /* case insensitive */
+ v->cflags |= REG_ICASE;
+ break;
+ case CHR('m'): /* Perloid synonym for n */
+ case CHR('n'): /* \n affects ^ $ . [^ */
+ v->cflags |= REG_NEWLINE;
+ break;
+ case CHR('p'): /* ~Perl, \n affects . [^ */
+ v->cflags |= REG_NLSTOP;
+ v->cflags &= ~REG_NLANCH;
+ break;
+ case CHR('q'): /* literal string */
+ v->cflags |= REG_QUOTE;
+ v->cflags &= ~REG_ADVANCED;
+ break;
+ case CHR('s'): /* single line, \n ordinary */
+ v->cflags &= ~REG_NEWLINE;
+ break;
+ case CHR('t'): /* tight syntax */
+ v->cflags &= ~REG_EXPANDED;
+ break;
+ case CHR('w'): /* weird, \n affects ^ $ only */
+ v->cflags &= ~REG_NLSTOP;
+ v->cflags |= REG_NLANCH;
+ break;
+ case CHR('x'): /* expanded syntax */
+ v->cflags |= REG_EXPANDED;
+ break;
+ default:
+ ERR(REG_BADOPT);
+ return;
}
- if (!NEXT1(')')) {
+ if (!NEXT1(')'))
+ {
ERR(REG_BADOPT);
return;
}
v->now++;
- if (v->cflags&REG_QUOTE)
- v->cflags &= ~(REG_EXPANDED|REG_NEWLINE);
+ if (v->cflags & REG_QUOTE)
+ v->cflags &= ~(REG_EXPANDED | REG_NEWLINE);
}
}
@@ -191,11 +200,11 @@ prefixes(struct vars *v)
* implicit assumptions about what sorts of strings can be subroutines.
*/
static void
-lexnest(struct vars *v,
- chr *beginp, /* start of interpolation */
- chr *endp) /* one past end of interpolation */
+lexnest(struct vars * v,
+ chr * beginp, /* start of interpolation */
+ chr * endp) /* one past end of interpolation */
{
- assert(v->savenow == NULL); /* only one level of nesting */
+ assert(v->savenow == NULL); /* only one level of nesting */
v->savenow = v->now;
v->savestop = v->stop;
v->now = beginp;
@@ -205,47 +214,47 @@ lexnest(struct vars *v,
/*
* string constants to interpolate as expansions of things like \d
*/
-static chr backd[] = { /* \d */
+static chr backd[] = { /* \d */
CHR('['), CHR('['), CHR(':'),
CHR('d'), CHR('i'), CHR('g'), CHR('i'), CHR('t'),
CHR(':'), CHR(']'), CHR(']')
};
-static chr backD[] = { /* \D */
+static chr backD[] = { /* \D */
CHR('['), CHR('^'), CHR('['), CHR(':'),
CHR('d'), CHR('i'), CHR('g'), CHR('i'), CHR('t'),
CHR(':'), CHR(']'), CHR(']')
};
-static chr brbackd[] = { /* \d within brackets */
+static chr brbackd[] = { /* \d within brackets */
CHR('['), CHR(':'),
CHR('d'), CHR('i'), CHR('g'), CHR('i'), CHR('t'),
CHR(':'), CHR(']')
};
-static chr backs[] = { /* \s */
+static chr backs[] = { /* \s */
CHR('['), CHR('['), CHR(':'),
CHR('s'), CHR('p'), CHR('a'), CHR('c'), CHR('e'),
CHR(':'), CHR(']'), CHR(']')
};
-static chr backS[] = { /* \S */
+static chr backS[] = { /* \S */
CHR('['), CHR('^'), CHR('['), CHR(':'),
CHR('s'), CHR('p'), CHR('a'), CHR('c'), CHR('e'),
CHR(':'), CHR(']'), CHR(']')
};
-static chr brbacks[] = { /* \s within brackets */
+static chr brbacks[] = { /* \s within brackets */
CHR('['), CHR(':'),
CHR('s'), CHR('p'), CHR('a'), CHR('c'), CHR('e'),
CHR(':'), CHR(']')
};
-static chr backw[] = { /* \w */
+static chr backw[] = { /* \w */
CHR('['), CHR('['), CHR(':'),
CHR('a'), CHR('l'), CHR('n'), CHR('u'), CHR('m'),
CHR(':'), CHR(']'), CHR('_'), CHR(']')
};
-static chr backW[] = { /* \W */
+static chr backW[] = { /* \W */
CHR('['), CHR('^'), CHR('['), CHR(':'),
CHR('a'), CHR('l'), CHR('n'), CHR('u'), CHR('m'),
CHR(':'), CHR(']'), CHR('_'), CHR(']')
};
-static chr brbackw[] = { /* \w within brackets */
+static chr brbackw[] = { /* \w within brackets */
CHR('['), CHR(':'),
CHR('a'), CHR('l'), CHR('n'), CHR('u'), CHR('m'),
CHR(':'), CHR(']'), CHR('_')
@@ -256,7 +265,7 @@ static chr brbackw[] = { /* \w within brackets */
* Possibly ought to inquire whether there is a "word" character class.
*/
static void
-lexword(struct vars *v)
+lexword(struct vars * v)
{
lexnest(v, backw, ENDOF(backw));
}
@@ -264,60 +273,65 @@ lexword(struct vars *v)
/*
* next - get next token
*/
-static int /* 1 normal, 0 failure */
-next(struct vars *v)
+static int /* 1 normal, 0 failure */
+next(struct vars * v)
{
- chr c;
+ chr c;
/* errors yield an infinite sequence of failures */
if (ISERR())
- return 0; /* the error has set nexttype to EOS */
+ return 0; /* the error has set nexttype to EOS */
/* remember flavor of last token */
v->lasttype = v->nexttype;
/* REG_BOSONLY */
- if (v->nexttype == EMPTY && (v->cflags&REG_BOSONLY)) {
+ if (v->nexttype == EMPTY && (v->cflags & REG_BOSONLY))
+ {
/* at start of a REG_BOSONLY RE */
RETV(SBEGIN, 0); /* same as \A */
}
/* if we're nested and we've hit end, return to outer level */
- if (v->savenow != NULL && ATEOS()) {
+ if (v->savenow != NULL && ATEOS())
+ {
v->now = v->savenow;
v->stop = v->savestop;
v->savenow = v->savestop = NULL;
}
/* skip white space etc. if appropriate (not in literal or []) */
- if (v->cflags&REG_EXPANDED)
- switch (v->lexcon) {
- case L_ERE:
- case L_BRE:
- case L_EBND:
- case L_BBND:
- skip(v);
- break;
+ if (v->cflags & REG_EXPANDED)
+ switch (v->lexcon)
+ {
+ case L_ERE:
+ case L_BRE:
+ case L_EBND:
+ case L_BBND:
+ skip(v);
+ break;
}
/* handle EOS, depending on context */
- if (ATEOS()) {
- switch (v->lexcon) {
- case L_ERE:
- case L_BRE:
- case L_Q:
- RET(EOS);
- break;
- case L_EBND:
- case L_BBND:
- FAILW(REG_EBRACE);
- break;
- case L_BRACK:
- case L_CEL:
- case L_ECL:
- case L_CCL:
- FAILW(REG_EBRACK);
- break;
+ if (ATEOS())
+ {
+ switch (v->lexcon)
+ {
+ case L_ERE:
+ case L_BRE:
+ case L_Q:
+ RET(EOS);
+ break;
+ case L_EBND:
+ case L_BBND:
+ FAILW(REG_EBRACE);
+ break;
+ case L_BRACK:
+ case L_CEL:
+ case L_ECL:
+ case L_CCL:
+ FAILW(REG_EBRACK);
+ break;
}
assert(NOTREACHED);
}
@@ -326,314 +340,365 @@ next(struct vars *v)
c = *v->now++;
/* deal with the easy contexts, punt EREs to code below */
- switch (v->lexcon) {
- case L_BRE: /* punt BREs to separate function */
- return brenext(v, c);
- break;
- case L_ERE: /* see below */
- break;
- case L_Q: /* literal strings are easy */
- RETV(PLAIN, c);
- break;
- case L_BBND: /* bounds are fairly simple */
- case L_EBND:
- switch (c) {
- case CHR('0'): case CHR('1'): case CHR('2'): case CHR('3'):
- case CHR('4'): case CHR('5'): case CHR('6'): case CHR('7'):
- case CHR('8'): case CHR('9'):
- RETV(DIGIT, (chr)DIGITVAL(c));
+ switch (v->lexcon)
+ {
+ case L_BRE: /* punt BREs to separate function */
+ return brenext(v, c);
break;
- case CHR(','):
- RET(',');
+ case L_ERE: /* see below */
break;
- case CHR('}'): /* ERE bound ends with } */
- if (INCON(L_EBND)) {
- INTOCON(L_ERE);
- if ((v->cflags&REG_ADVF) && NEXT1('?')) {
- v->now++;
- NOTE(REG_UNONPOSIX);
- RETV('}', 0);
- }
- RETV('}', 1);
- } else
- FAILW(REG_BADBR);
- break;
- case CHR('\\'): /* BRE bound ends with \} */
- if (INCON(L_BBND) && NEXT1('}')) {
- v->now++;
- INTOCON(L_BRE);
- RET('}');
- } else
- FAILW(REG_BADBR);
- break;
- default:
- FAILW(REG_BADBR);
+ case L_Q: /* literal strings are easy */
+ RETV(PLAIN, c);
break;
- }
- assert(NOTREACHED);
- break;
- case L_BRACK: /* brackets are not too hard */
- switch (c) {
- case CHR(']'):
- if (LASTTYPE('['))
- RETV(PLAIN, c);
- else {
- INTOCON((v->cflags&REG_EXTENDED) ?
- L_ERE : L_BRE);
- RET(']');
+ case L_BBND: /* bounds are fairly simple */
+ case L_EBND:
+ switch (c)
+ {
+ case CHR('0'):
+ case CHR('1'):
+ case CHR('2'):
+ case CHR('3'):
+ case CHR('4'):
+ case CHR('5'):
+ case CHR('6'):
+ case CHR('7'):
+ case CHR('8'):
+ case CHR('9'):
+ RETV(DIGIT, (chr) DIGITVAL(c));
+ break;
+ case CHR(','):
+ RET(',');
+ break;
+ case CHR('}'): /* ERE bound ends with } */
+ if (INCON(L_EBND))
+ {
+ INTOCON(L_ERE);
+ if ((v->cflags & REG_ADVF) && NEXT1('?'))
+ {
+ v->now++;
+ NOTE(REG_UNONPOSIX);
+ RETV('}', 0);
+ }
+ RETV('}', 1);
+ }
+ else
+ FAILW(REG_BADBR);
+ break;
+ case CHR('\\'): /* BRE bound ends with \} */
+ if (INCON(L_BBND) && NEXT1('}'))
+ {
+ v->now++;
+ INTOCON(L_BRE);
+ RET('}');
+ }
+ else
+ FAILW(REG_BADBR);
+ break;
+ default:
+ FAILW(REG_BADBR);
+ break;
}
+ assert(NOTREACHED);
break;
- case CHR('\\'):
- NOTE(REG_UBBS);
- if (!(v->cflags&REG_ADVF))
- RETV(PLAIN, c);
- NOTE(REG_UNONPOSIX);
- if (ATEOS())
- FAILW(REG_EESCAPE);
- (DISCARD)lexescape(v);
- switch (v->nexttype) { /* not all escapes okay here */
- case PLAIN:
- return 1;
- break;
- case CCLASS:
- switch (v->nextvalue) {
- case 'd':
- lexnest(v, brbackd, ENDOF(brbackd));
+ case L_BRACK: /* brackets are not too hard */
+ switch (c)
+ {
+ case CHR(']'):
+ if (LASTTYPE('['))
+ RETV(PLAIN, c);
+ else
+ {
+ INTOCON((v->cflags & REG_EXTENDED) ?
+ L_ERE : L_BRE);
+ RET(']');
+ }
+ break;
+ case CHR('\\'):
+ NOTE(REG_UBBS);
+ if (!(v->cflags & REG_ADVF))
+ RETV(PLAIN, c);
+ NOTE(REG_UNONPOSIX);
+ if (ATEOS())
+ FAILW(REG_EESCAPE);
+ (DISCARD) lexescape(v);
+ switch (v->nexttype)
+ { /* not all escapes okay here */
+ case PLAIN:
+ return 1;
+ break;
+ case CCLASS:
+ switch (v->nextvalue)
+ {
+ case 'd':
+ lexnest(v, brbackd, ENDOF(brbackd));
+ break;
+ case 's':
+ lexnest(v, brbacks, ENDOF(brbacks));
+ break;
+ case 'w':
+ lexnest(v, brbackw, ENDOF(brbackw));
+ break;
+ default:
+ FAILW(REG_EESCAPE);
+ break;
+ }
+ /* lexnest done, back up and try again */
+ v->nexttype = v->lasttype;
+ return next(v);
+ break;
+ }
+ /* not one of the acceptable escapes */
+ FAILW(REG_EESCAPE);
break;
- case 's':
- lexnest(v, brbacks, ENDOF(brbacks));
+ case CHR('-'):
+ if (LASTTYPE('[') || NEXT1(']'))
+ RETV(PLAIN, c);
+ else
+ RETV(RANGE, c);
break;
- case 'w':
- lexnest(v, brbackw, ENDOF(brbackw));
+ case CHR('['):
+ if (ATEOS())
+ FAILW(REG_EBRACK);
+ switch (*v->now++)
+ {
+ case CHR('.'):
+ INTOCON(L_CEL);
+ /* might or might not be locale-specific */
+ RET(COLLEL);
+ break;
+ case CHR('='):
+ INTOCON(L_ECL);
+ NOTE(REG_ULOCALE);
+ RET(ECLASS);
+ break;
+ case CHR(':'):
+ INTOCON(L_CCL);
+ NOTE(REG_ULOCALE);
+ RET(CCLASS);
+ break;
+ default: /* oops */
+ v->now--;
+ RETV(PLAIN, c);
+ break;
+ }
+ assert(NOTREACHED);
break;
default:
- FAILW(REG_EESCAPE);
+ RETV(PLAIN, c);
break;
- }
- /* lexnest done, back up and try again */
- v->nexttype = v->lasttype;
- return next(v);
- break;
}
- /* not one of the acceptable escapes */
- FAILW(REG_EESCAPE);
+ assert(NOTREACHED);
break;
- case CHR('-'):
- if (LASTTYPE('[') || NEXT1(']'))
- RETV(PLAIN, c);
+ case L_CEL: /* collating elements are easy */
+ if (c == CHR('.') && NEXT1(']'))
+ {
+ v->now++;
+ INTOCON(L_BRACK);
+ RETV(END, '.');
+ }
else
- RETV(RANGE, c);
+ RETV(PLAIN, c);
break;
- case CHR('['):
- if (ATEOS())
- FAILW(REG_EBRACK);
- switch (*v->now++) {
- case CHR('.'):
- INTOCON(L_CEL);
- /* might or might not be locale-specific */
- RET(COLLEL);
- break;
- case CHR('='):
- INTOCON(L_ECL);
- NOTE(REG_ULOCALE);
- RET(ECLASS);
- break;
- case CHR(':'):
- INTOCON(L_CCL);
- NOTE(REG_ULOCALE);
- RET(CCLASS);
- break;
- default: /* oops */
- v->now--;
+ case L_ECL: /* ditto equivalence classes */
+ if (c == CHR('=') && NEXT1(']'))
+ {
+ v->now++;
+ INTOCON(L_BRACK);
+ RETV(END, '=');
+ }
+ else
RETV(PLAIN, c);
- break;
+ break;
+ case L_CCL: /* ditto character classes */
+ if (c == CHR(':') && NEXT1(']'))
+ {
+ v->now++;
+ INTOCON(L_BRACK);
+ RETV(END, ':');
}
- assert(NOTREACHED);
+ else
+ RETV(PLAIN, c);
break;
default:
- RETV(PLAIN, c);
+ assert(NOTREACHED);
break;
- }
- assert(NOTREACHED);
- break;
- case L_CEL: /* collating elements are easy */
- if (c == CHR('.') && NEXT1(']')) {
- v->now++;
- INTOCON(L_BRACK);
- RETV(END, '.');
- } else
- RETV(PLAIN, c);
- break;
- case L_ECL: /* ditto equivalence classes */
- if (c == CHR('=') && NEXT1(']')) {
- v->now++;
- INTOCON(L_BRACK);
- RETV(END, '=');
- } else
- RETV(PLAIN, c);
- break;
- case L_CCL: /* ditto character classes */
- if (c == CHR(':') && NEXT1(']')) {
- v->now++;
- INTOCON(L_BRACK);
- RETV(END, ':');
- } else
- RETV(PLAIN, c);
- break;
- default:
- assert(NOTREACHED);
- break;
}
/* that got rid of everything except EREs and AREs */
assert(INCON(L_ERE));
/* deal with EREs and AREs, except for backslashes */
- switch (c) {
- case CHR('|'):
- RET('|');
- break;
- case CHR('*'):
- if ((v->cflags&REG_ADVF) && NEXT1('?')) {
- v->now++;
- NOTE(REG_UNONPOSIX);
- RETV('*', 0);
- }
- RETV('*', 1);
- break;
- case CHR('+'):
- if ((v->cflags&REG_ADVF) && NEXT1('?')) {
- v->now++;
- NOTE(REG_UNONPOSIX);
- RETV('+', 0);
- }
- RETV('+', 1);
- break;
- case CHR('?'):
- if ((v->cflags&REG_ADVF) && NEXT1('?')) {
- v->now++;
- NOTE(REG_UNONPOSIX);
- RETV('?', 0);
- }
- RETV('?', 1);
- break;
- case CHR('{'): /* bounds start or plain character */
- if (v->cflags&REG_EXPANDED)
- skip(v);
- if (ATEOS() || !iscdigit(*v->now)) {
- NOTE(REG_UBRACES);
- NOTE(REG_UUNSPEC);
- RETV(PLAIN, c);
- } else {
- NOTE(REG_UBOUNDS);
- INTOCON(L_EBND);
- RET('{');
- }
- assert(NOTREACHED);
- break;
- case CHR('('): /* parenthesis, or advanced extension */
- if ((v->cflags&REG_ADVF) && NEXT1('?')) {
- NOTE(REG_UNONPOSIX);
- v->now++;
- switch (*v->now++) {
- case CHR(':'): /* non-capturing paren */
- RETV('(', 0);
- break;
- case CHR('#'): /* comment */
- while (!ATEOS() && *v->now != CHR(')'))
- v->now++;
- if (!ATEOS())
- v->now++;
- assert(v->nexttype == v->lasttype);
- return next(v);
- break;
- case CHR('='): /* positive lookahead */
- NOTE(REG_ULOOKAHEAD);
- RETV(LACON, 1);
- break;
- case CHR('!'): /* negative lookahead */
- NOTE(REG_ULOOKAHEAD);
- RETV(LACON, 0);
- break;
- default:
- FAILW(REG_BADRPT);
- break;
+ switch (c)
+ {
+ case CHR('|'):
+ RET('|');
+ break;
+ case CHR('*'):
+ if ((v->cflags & REG_ADVF) && NEXT1('?'))
+ {
+ v->now++;
+ NOTE(REG_UNONPOSIX);
+ RETV('*', 0);
+ }
+ RETV('*', 1);
+ break;
+ case CHR('+'):
+ if ((v->cflags & REG_ADVF) && NEXT1('?'))
+ {
+ v->now++;
+ NOTE(REG_UNONPOSIX);
+ RETV('+', 0);
+ }
+ RETV('+', 1);
+ break;
+ case CHR('?'):
+ if ((v->cflags & REG_ADVF) && NEXT1('?'))
+ {
+ v->now++;
+ NOTE(REG_UNONPOSIX);
+ RETV('?', 0);
+ }
+ RETV('?', 1);
+ break;
+ case CHR('{'): /* bounds start or plain character */
+ if (v->cflags & REG_EXPANDED)
+ skip(v);
+ if (ATEOS() || !iscdigit(*v->now))
+ {
+ NOTE(REG_UBRACES);
+ NOTE(REG_UUNSPEC);
+ RETV(PLAIN, c);
+ }
+ else
+ {
+ NOTE(REG_UBOUNDS);
+ INTOCON(L_EBND);
+ RET('{');
}
assert(NOTREACHED);
- }
- if (v->cflags&REG_NOSUB)
- RETV('(', 0); /* all parens non-capturing */
- else
- RETV('(', 1);
- break;
- case CHR(')'):
- if (LASTTYPE('(')) {
- NOTE(REG_UUNSPEC);
- }
- RETV(')', c);
- break;
- case CHR('['): /* easy except for [[:<:]] and [[:>:]] */
- if (HAVE(6) && *(v->now+0) == CHR('[') &&
- *(v->now+1) == CHR(':') &&
- (*(v->now+2) == CHR('<') ||
- *(v->now+2) == CHR('>')) &&
- *(v->now+3) == CHR(':') &&
- *(v->now+4) == CHR(']') &&
- *(v->now+5) == CHR(']')) {
- c = *(v->now+2);
- v->now += 6;
- NOTE(REG_UNONPOSIX);
- RET((c == CHR('<')) ? '<' : '>');
- }
- INTOCON(L_BRACK);
- if (NEXT1('^')) {
- v->now++;
- RETV('[', 0);
- }
- RETV('[', 1);
- break;
- case CHR('.'):
- RET('.');
- break;
- case CHR('^'):
- RET('^');
- break;
- case CHR('$'):
- RET('$');
- break;
- case CHR('\\'): /* mostly punt backslashes to code below */
- if (ATEOS())
- FAILW(REG_EESCAPE);
- break;
- default: /* ordinary character */
- RETV(PLAIN, c);
- break;
+ break;
+ case CHR('('): /* parenthesis, or advanced extension */
+ if ((v->cflags & REG_ADVF) && NEXT1('?'))
+ {
+ NOTE(REG_UNONPOSIX);
+ v->now++;
+ switch (*v->now++)
+ {
+ case CHR(':'): /* non-capturing paren */
+ RETV('(', 0);
+ break;
+ case CHR('#'): /* comment */
+ while (!ATEOS() && *v->now != CHR(')'))
+ v->now++;
+ if (!ATEOS())
+ v->now++;
+ assert(v->nexttype == v->lasttype);
+ return next(v);
+ break;
+ case CHR('='): /* positive lookahead */
+ NOTE(REG_ULOOKAHEAD);
+ RETV(LACON, 1);
+ break;
+ case CHR('!'): /* negative lookahead */
+ NOTE(REG_ULOOKAHEAD);
+ RETV(LACON, 0);
+ break;
+ default:
+ FAILW(REG_BADRPT);
+ break;
+ }
+ assert(NOTREACHED);
+ }
+ if (v->cflags & REG_NOSUB)
+ RETV('(', 0); /* all parens non-capturing */
+ else
+ RETV('(', 1);
+ break;
+ case CHR(')'):
+ if (LASTTYPE('('))
+ NOTE(REG_UUNSPEC);
+ RETV(')', c);
+ break;
+ case CHR('['): /* easy except for [[:<:]] and [[:>:]] */
+ if (HAVE(6) && *(v->now + 0) == CHR('[') &&
+ *(v->now + 1) == CHR(':') &&
+ (*(v->now + 2) == CHR('<') ||
+ *(v->now + 2) == CHR('>')) &&
+ *(v->now + 3) == CHR(':') &&
+ *(v->now + 4) == CHR(']') &&
+ *(v->now + 5) == CHR(']'))
+ {
+ c = *(v->now + 2);
+ v->now += 6;
+ NOTE(REG_UNONPOSIX);
+ RET((c == CHR('<')) ? '<' : '>');
+ }
+ INTOCON(L_BRACK);
+ if (NEXT1('^'))
+ {
+ v->now++;
+ RETV('[', 0);
+ }
+ RETV('[', 1);
+ break;
+ case CHR('.'):
+ RET('.');
+ break;
+ case CHR('^'):
+ RET('^');
+ break;
+ case CHR('$'):
+ RET('$');
+ break;
+ case CHR('\\'): /* mostly punt backslashes to code below */
+ if (ATEOS())
+ FAILW(REG_EESCAPE);
+ break;
+ default: /* ordinary character */
+ RETV(PLAIN, c);
+ break;
}
/* ERE/ARE backslash handling; backslash already eaten */
assert(!ATEOS());
- if (!(v->cflags&REG_ADVF)) { /* only AREs have non-trivial escapes */
- if (iscalnum(*v->now)) {
+ if (!(v->cflags & REG_ADVF))
+ { /* only AREs have non-trivial escapes */
+ if (iscalnum(*v->now))
+ {
NOTE(REG_UBSALNUM);
NOTE(REG_UUNSPEC);
}
RETV(PLAIN, *v->now++);
}
- (DISCARD)lexescape(v);
+ (DISCARD) lexescape(v);
if (ISERR())
FAILW(REG_EESCAPE);
- if (v->nexttype == CCLASS) { /* fudge at lexical level */
- switch (v->nextvalue) {
- case 'd': lexnest(v, backd, ENDOF(backd)); break;
- case 'D': lexnest(v, backD, ENDOF(backD)); break;
- case 's': lexnest(v, backs, ENDOF(backs)); break;
- case 'S': lexnest(v, backS, ENDOF(backS)); break;
- case 'w': lexnest(v, backw, ENDOF(backw)); break;
- case 'W': lexnest(v, backW, ENDOF(backW)); break;
- default:
- assert(NOTREACHED);
- FAILW(REG_ASSERT);
- break;
+ if (v->nexttype == CCLASS)
+ { /* fudge at lexical level */
+ switch (v->nextvalue)
+ {
+ case 'd':
+ lexnest(v, backd, ENDOF(backd));
+ break;
+ case 'D':
+ lexnest(v, backD, ENDOF(backD));
+ break;
+ case 's':
+ lexnest(v, backs, ENDOF(backs));
+ break;
+ case 'S':
+ lexnest(v, backS, ENDOF(backS));
+ break;
+ case 'w':
+ lexnest(v, backw, ENDOF(backw));
+ break;
+ case 'W':
+ lexnest(v, backW, ENDOF(backW));
+ break;
+ default:
+ assert(NOTREACHED);
+ FAILW(REG_ASSERT);
+ break;
}
/* lexnest done, back up and try again */
v->nexttype = v->lasttype;
@@ -647,19 +712,20 @@ next(struct vars *v)
* lexescape - parse an ARE backslash escape (backslash already eaten)
* Note slightly nonstandard use of the CCLASS type code.
*/
-static int /* not actually used, but convenient for RETV */
-lexescape(struct vars *v)
+static int /* not actually used, but convenient for
+ * RETV */
+lexescape(struct vars * v)
{
- chr c;
- static chr alert[] = {
+ chr c;
+ static chr alert[] = {
CHR('a'), CHR('l'), CHR('e'), CHR('r'), CHR('t')
};
- static chr esc[] = {
+ static chr esc[] = {
CHR('E'), CHR('S'), CHR('C')
};
- chr *save;
+ chr *save;
- assert(v->cflags&REG_ADVF);
+ assert(v->cflags & REG_ADVF);
assert(!ATEOS());
c = *v->now++;
@@ -667,132 +733,142 @@ lexescape(struct vars *v)
RETV(PLAIN, c);
NOTE(REG_UNONPOSIX);
- switch (c) {
- case CHR('a'):
- RETV(PLAIN, chrnamed(v, alert, ENDOF(alert), CHR('\007')));
- break;
- case CHR('A'):
- RETV(SBEGIN, 0);
- break;
- case CHR('b'):
- RETV(PLAIN, CHR('\b'));
- break;
- case CHR('B'):
- RETV(PLAIN, CHR('\\'));
- break;
- case CHR('c'):
- NOTE(REG_UUNPORT);
- if (ATEOS())
- FAILW(REG_EESCAPE);
- RETV(PLAIN, (chr)(*v->now++ & 037));
- break;
- case CHR('d'):
- NOTE(REG_ULOCALE);
- RETV(CCLASS, 'd');
- break;
- case CHR('D'):
- NOTE(REG_ULOCALE);
- RETV(CCLASS, 'D');
- break;
- case CHR('e'):
- NOTE(REG_UUNPORT);
- RETV(PLAIN, chrnamed(v, esc, ENDOF(esc), CHR('\033')));
- break;
- case CHR('f'):
- RETV(PLAIN, CHR('\f'));
- break;
- case CHR('m'):
- RET('<');
- break;
- case CHR('M'):
- RET('>');
- break;
- case CHR('n'):
- RETV(PLAIN, CHR('\n'));
- break;
- case CHR('r'):
- RETV(PLAIN, CHR('\r'));
- break;
- case CHR('s'):
- NOTE(REG_ULOCALE);
- RETV(CCLASS, 's');
- break;
- case CHR('S'):
- NOTE(REG_ULOCALE);
- RETV(CCLASS, 'S');
- break;
- case CHR('t'):
- RETV(PLAIN, CHR('\t'));
- break;
- case CHR('u'):
- c = lexdigits(v, 16, 4, 4);
- if (ISERR())
- FAILW(REG_EESCAPE);
- RETV(PLAIN, c);
- break;
- case CHR('U'):
- c = lexdigits(v, 16, 8, 8);
- if (ISERR())
- FAILW(REG_EESCAPE);
- RETV(PLAIN, c);
- break;
- case CHR('v'):
- RETV(PLAIN, CHR('\v'));
- break;
- case CHR('w'):
- NOTE(REG_ULOCALE);
- RETV(CCLASS, 'w');
- break;
- case CHR('W'):
- NOTE(REG_ULOCALE);
- RETV(CCLASS, 'W');
- break;
- case CHR('x'):
- NOTE(REG_UUNPORT);
- c = lexdigits(v, 16, 1, 255); /* REs >255 long outside spec */
- if (ISERR())
- FAILW(REG_EESCAPE);
- RETV(PLAIN, c);
- break;
- case CHR('y'):
- NOTE(REG_ULOCALE);
- RETV(WBDRY, 0);
- break;
- case CHR('Y'):
- NOTE(REG_ULOCALE);
- RETV(NWBDRY, 0);
- break;
- case CHR('Z'):
- RETV(SEND, 0);
- break;
- case CHR('1'): case CHR('2'): case CHR('3'): case CHR('4'):
- case CHR('5'): case CHR('6'): case CHR('7'): case CHR('8'):
- case CHR('9'):
- save = v->now;
- v->now--; /* put first digit back */
- c = lexdigits(v, 10, 1, 255); /* REs >255 long outside spec */
- if (ISERR())
- FAILW(REG_EESCAPE);
- /* ugly heuristic (first test is "exactly 1 digit?") */
- if (v->now - save == 0 || (int)c <= v->nsubexp) {
- NOTE(REG_UBACKREF);
- RETV(BACKREF, (chr)c);
- }
- /* oops, doesn't look like it's a backref after all... */
- v->now = save;
- /* and fall through into octal number */
- case CHR('0'):
- NOTE(REG_UUNPORT);
- v->now--; /* put first digit back */
- c = lexdigits(v, 8, 1, 3);
- if (ISERR())
- FAILW(REG_EESCAPE);
- RETV(PLAIN, c);
- break;
- default:
- assert(iscalpha(c));
- FAILW(REG_EESCAPE); /* unknown alphabetic escape */
- break;
+ switch (c)
+ {
+ case CHR('a'):
+ RETV(PLAIN, chrnamed(v, alert, ENDOF(alert), CHR('\007')));
+ break;
+ case CHR('A'):
+ RETV(SBEGIN, 0);
+ break;
+ case CHR('b'):
+ RETV(PLAIN, CHR('\b'));
+ break;
+ case CHR('B'):
+ RETV(PLAIN, CHR('\\'));
+ break;
+ case CHR('c'):
+ NOTE(REG_UUNPORT);
+ if (ATEOS())
+ FAILW(REG_EESCAPE);
+ RETV(PLAIN, (chr) (*v->now++ & 037));
+ break;
+ case CHR('d'):
+ NOTE(REG_ULOCALE);
+ RETV(CCLASS, 'd');
+ break;
+ case CHR('D'):
+ NOTE(REG_ULOCALE);
+ RETV(CCLASS, 'D');
+ break;
+ case CHR('e'):
+ NOTE(REG_UUNPORT);
+ RETV(PLAIN, chrnamed(v, esc, ENDOF(esc), CHR('\033')));
+ break;
+ case CHR('f'):
+ RETV(PLAIN, CHR('\f'));
+ break;
+ case CHR('m'):
+ RET('<');
+ break;
+ case CHR('M'):
+ RET('>');
+ break;
+ case CHR('n'):
+ RETV(PLAIN, CHR('\n'));
+ break;
+ case CHR('r'):
+ RETV(PLAIN, CHR('\r'));
+ break;
+ case CHR('s'):
+ NOTE(REG_ULOCALE);
+ RETV(CCLASS, 's');
+ break;
+ case CHR('S'):
+ NOTE(REG_ULOCALE);
+ RETV(CCLASS, 'S');
+ break;
+ case CHR('t'):
+ RETV(PLAIN, CHR('\t'));
+ break;
+ case CHR('u'):
+ c = lexdigits(v, 16, 4, 4);
+ if (ISERR())
+ FAILW(REG_EESCAPE);
+ RETV(PLAIN, c);
+ break;
+ case CHR('U'):
+ c = lexdigits(v, 16, 8, 8);
+ if (ISERR())
+ FAILW(REG_EESCAPE);
+ RETV(PLAIN, c);
+ break;
+ case CHR('v'):
+ RETV(PLAIN, CHR('\v'));
+ break;
+ case CHR('w'):
+ NOTE(REG_ULOCALE);
+ RETV(CCLASS, 'w');
+ break;
+ case CHR('W'):
+ NOTE(REG_ULOCALE);
+ RETV(CCLASS, 'W');
+ break;
+ case CHR('x'):
+ NOTE(REG_UUNPORT);
+ c = lexdigits(v, 16, 1, 255); /* REs >255 long outside
+ * spec */
+ if (ISERR())
+ FAILW(REG_EESCAPE);
+ RETV(PLAIN, c);
+ break;
+ case CHR('y'):
+ NOTE(REG_ULOCALE);
+ RETV(WBDRY, 0);
+ break;
+ case CHR('Y'):
+ NOTE(REG_ULOCALE);
+ RETV(NWBDRY, 0);
+ break;
+ case CHR('Z'):
+ RETV(SEND, 0);
+ break;
+ case CHR('1'):
+ case CHR('2'):
+ case CHR('3'):
+ case CHR('4'):
+ case CHR('5'):
+ case CHR('6'):
+ case CHR('7'):
+ case CHR('8'):
+ case CHR('9'):
+ save = v->now;
+ v->now--; /* put first digit back */
+ c = lexdigits(v, 10, 1, 255); /* REs >255 long outside
+ * spec */
+ if (ISERR())
+ FAILW(REG_EESCAPE);
+ /* ugly heuristic (first test is "exactly 1 digit?") */
+ if (v->now - save == 0 || (int) c <= v->nsubexp)
+ {
+ NOTE(REG_UBACKREF);
+ RETV(BACKREF, (chr) c);
+ }
+ /* oops, doesn't look like it's a backref after all... */
+ v->now = save;
+ /* and fall through into octal number */
+ case CHR('0'):
+ NOTE(REG_UUNPORT);
+ v->now--; /* put first digit back */
+ c = lexdigits(v, 8, 1, 3);
+ if (ISERR())
+ FAILW(REG_EESCAPE);
+ RETV(PLAIN, c);
+ break;
+ default:
+ assert(iscalpha(c));
+ FAILW(REG_EESCAPE); /* unknown alphabetic escape */
+ break;
}
assert(NOTREACHED);
}
@@ -800,51 +876,79 @@ lexescape(struct vars *v)
/*
* lexdigits - slurp up digits and return chr value
*/
-static chr /* chr value; errors signalled via ERR */
-lexdigits(struct vars *v,
+static chr /* chr value; errors signalled via ERR */
+lexdigits(struct vars * v,
int base,
int minlen,
int maxlen)
{
- uchr n; /* unsigned to avoid overflow misbehavior */
- int len;
- chr c;
- int d;
- const uchr ub = (uchr) base;
+ uchr n; /* unsigned to avoid overflow misbehavior */
+ int len;
+ chr c;
+ int d;
+ const uchr ub = (uchr) base;
n = 0;
- for (len = 0; len < maxlen && !ATEOS(); len++) {
+ for (len = 0; len < maxlen && !ATEOS(); len++)
+ {
c = *v->now++;
- switch (c) {
- case CHR('0'): case CHR('1'): case CHR('2'): case CHR('3'):
- case CHR('4'): case CHR('5'): case CHR('6'): case CHR('7'):
- case CHR('8'): case CHR('9'):
- d = DIGITVAL(c);
- break;
- case CHR('a'): case CHR('A'): d = 10; break;
- case CHR('b'): case CHR('B'): d = 11; break;
- case CHR('c'): case CHR('C'): d = 12; break;
- case CHR('d'): case CHR('D'): d = 13; break;
- case CHR('e'): case CHR('E'): d = 14; break;
- case CHR('f'): case CHR('F'): d = 15; break;
- default:
- v->now--; /* oops, not a digit at all */
- d = -1;
- break;
+ switch (c)
+ {
+ case CHR('0'):
+ case CHR('1'):
+ case CHR('2'):
+ case CHR('3'):
+ case CHR('4'):
+ case CHR('5'):
+ case CHR('6'):
+ case CHR('7'):
+ case CHR('8'):
+ case CHR('9'):
+ d = DIGITVAL(c);
+ break;
+ case CHR('a'):
+ case CHR('A'):
+ d = 10;
+ break;
+ case CHR('b'):
+ case CHR('B'):
+ d = 11;
+ break;
+ case CHR('c'):
+ case CHR('C'):
+ d = 12;
+ break;
+ case CHR('d'):
+ case CHR('D'):
+ d = 13;
+ break;
+ case CHR('e'):
+ case CHR('E'):
+ d = 14;
+ break;
+ case CHR('f'):
+ case CHR('F'):
+ d = 15;
+ break;
+ default:
+ v->now--; /* oops, not a digit at all */
+ d = -1;
+ break;
}
- if (d >= base) { /* not a plausible digit */
+ if (d >= base)
+ { /* not a plausible digit */
v->now--;
d = -1;
}
if (d < 0)
- break; /* NOTE BREAK OUT */
- n = n*ub + (uchr)d;
+ break; /* NOTE BREAK OUT */
+ n = n * ub + (uchr) d;
}
if (len < minlen)
ERR(REG_EESCAPE);
- return (chr)n;
+ return (chr) n;
}
/*
@@ -853,66 +957,71 @@ lexdigits(struct vars *v,
* This is much like EREs except for all the stupid backslashes and the
* context-dependency of some things.
*/
-static int /* 1 normal, 0 failure */
-brenext(struct vars *v,
+static int /* 1 normal, 0 failure */
+brenext(struct vars * v,
chr pc)
{
- chr c = (chr)pc;
+ chr c = (chr) pc;
- switch (c) {
- case CHR('*'):
- if (LASTTYPE(EMPTY) || LASTTYPE('(') || LASTTYPE('^'))
+ switch (c)
+ {
+ case CHR('*'):
+ if (LASTTYPE(EMPTY) || LASTTYPE('(') || LASTTYPE('^'))
+ RETV(PLAIN, c);
+ RET('*');
+ break;
+ case CHR('['):
+ if (HAVE(6) && *(v->now + 0) == CHR('[') &&
+ *(v->now + 1) == CHR(':') &&
+ (*(v->now + 2) == CHR('<') ||
+ *(v->now + 2) == CHR('>')) &&
+ *(v->now + 3) == CHR(':') &&
+ *(v->now + 4) == CHR(']') &&
+ *(v->now + 5) == CHR(']'))
+ {
+ c = *(v->now + 2);
+ v->now += 6;
+ NOTE(REG_UNONPOSIX);
+ RET((c == CHR('<')) ? '<' : '>');
+ }
+ INTOCON(L_BRACK);
+ if (NEXT1('^'))
+ {
+ v->now++;
+ RETV('[', 0);
+ }
+ RETV('[', 1);
+ break;
+ case CHR('.'):
+ RET('.');
+ break;
+ case CHR('^'):
+ if (LASTTYPE(EMPTY))
+ RET('^');
+ if (LASTTYPE('('))
+ {
+ NOTE(REG_UUNSPEC);
+ RET('^');
+ }
RETV(PLAIN, c);
- RET('*');
- break;
- case CHR('['):
- if (HAVE(6) && *(v->now+0) == CHR('[') &&
- *(v->now+1) == CHR(':') &&
- (*(v->now+2) == CHR('<') ||
- *(v->now+2) == CHR('>')) &&
- *(v->now+3) == CHR(':') &&
- *(v->now+4) == CHR(']') &&
- *(v->now+5) == CHR(']')) {
- c = *(v->now+2);
- v->now += 6;
- NOTE(REG_UNONPOSIX);
- RET((c == CHR('<')) ? '<' : '>');
- }
- INTOCON(L_BRACK);
- if (NEXT1('^')) {
- v->now++;
- RETV('[', 0);
- }
- RETV('[', 1);
- break;
- case CHR('.'):
- RET('.');
- break;
- case CHR('^'):
- if (LASTTYPE(EMPTY))
- RET('^');
- if (LASTTYPE('(')) {
- NOTE(REG_UUNSPEC);
- RET('^');
- }
- RETV(PLAIN, c);
- break;
- case CHR('$'):
- if (v->cflags&REG_EXPANDED)
- skip(v);
- if (ATEOS())
- RET('$');
- if (NEXT2('\\', ')')) {
- NOTE(REG_UUNSPEC);
- RET('$');
- }
- RETV(PLAIN, c);
- break;
- case CHR('\\'):
- break; /* see below */
- default:
- RETV(PLAIN, c);
- break;
+ break;
+ case CHR('$'):
+ if (v->cflags & REG_EXPANDED)
+ skip(v);
+ if (ATEOS())
+ RET('$');
+ if (NEXT2('\\', ')'))
+ {
+ NOTE(REG_UUNSPEC);
+ RET('$');
+ }
+ RETV(PLAIN, c);
+ break;
+ case CHR('\\'):
+ break; /* see below */
+ default:
+ RETV(PLAIN, c);
+ break;
}
assert(c == CHR('\\'));
@@ -921,39 +1030,47 @@ brenext(struct vars *v,
FAILW(REG_EESCAPE);
c = *v->now++;
- switch (c) {
- case CHR('{'):
- INTOCON(L_BBND);
- NOTE(REG_UBOUNDS);
- RET('{');
- break;
- case CHR('('):
- RETV('(', 1);
- break;
- case CHR(')'):
- RETV(')', c);
- break;
- case CHR('<'):
- NOTE(REG_UNONPOSIX);
- RET('<');
- break;
- case CHR('>'):
- NOTE(REG_UNONPOSIX);
- RET('>');
- break;
- case CHR('1'): case CHR('2'): case CHR('3'): case CHR('4'):
- case CHR('5'): case CHR('6'): case CHR('7'): case CHR('8'):
- case CHR('9'):
- NOTE(REG_UBACKREF);
- RETV(BACKREF, (chr)DIGITVAL(c));
- break;
- default:
- if (iscalnum(c)) {
- NOTE(REG_UBSALNUM);
- NOTE(REG_UUNSPEC);
- }
- RETV(PLAIN, c);
- break;
+ switch (c)
+ {
+ case CHR('{'):
+ INTOCON(L_BBND);
+ NOTE(REG_UBOUNDS);
+ RET('{');
+ break;
+ case CHR('('):
+ RETV('(', 1);
+ break;
+ case CHR(')'):
+ RETV(')', c);
+ break;
+ case CHR('<'):
+ NOTE(REG_UNONPOSIX);
+ RET('<');
+ break;
+ case CHR('>'):
+ NOTE(REG_UNONPOSIX);
+ RET('>');
+ break;
+ case CHR('1'):
+ case CHR('2'):
+ case CHR('3'):
+ case CHR('4'):
+ case CHR('5'):
+ case CHR('6'):
+ case CHR('7'):
+ case CHR('8'):
+ case CHR('9'):
+ NOTE(REG_UBACKREF);
+ RETV(BACKREF, (chr) DIGITVAL(c));
+ break;
+ default:
+ if (iscalnum(c))
+ {
+ NOTE(REG_UBSALNUM);
+ NOTE(REG_UUNSPEC);
+ }
+ RETV(PLAIN, c);
+ break;
}
assert(NOTREACHED);
@@ -963,13 +1080,14 @@ brenext(struct vars *v,
* skip - skip white space and comments in expanded form
*/
static void
-skip(struct vars *v)
+skip(struct vars * v)
{
- chr *start = v->now;
+ chr *start = v->now;
- assert(v->cflags&REG_EXPANDED);
+ assert(v->cflags & REG_EXPANDED);
- for (;;) {
+ for (;;)
+ {
while (!ATEOS() && iscspace(*v->now))
v->now++;
if (ATEOS() || *v->now != CHR('#'))
@@ -1002,14 +1120,14 @@ newline(void)
* use that it hardly matters.
*/
static chr
-chrnamed(struct vars *v,
- chr *startp, /* start of name */
- chr *endp, /* just past end of name */
+chrnamed(struct vars * v,
+ chr * startp, /* start of name */
+ chr * endp, /* just past end of name */
chr lastresort) /* what to return if name lookup fails */
{
- celt c;
- int errsave;
- int e;
+ celt c;
+ int errsave;
+ int e;
struct cvec *cv;
errsave = v->err;
@@ -1019,10 +1137,10 @@ chrnamed(struct vars *v,
v->err = errsave;
if (e != 0)
- return (chr)lastresort;
+ return (chr) lastresort;
cv = range(v, c, c, 0);
if (cv->nchrs == 0)
- return (chr)lastresort;
+ return (chr) lastresort;
return cv->chrs[0];
}
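
[Editor's note] The backd/backs/backw arrays and lexnest() in the hunks above implement \d, \s and \w by splicing a canned class expression such as [[:digit:]] into the input stream and re-lexing it, then restoring the saved outer position when the canned text is exhausted. A simplified standalone sketch of that one-level nesting idea follows; it uses plain char instead of chr, and the scanner/nest/unnest names are illustrative, not taken from the patch.

    #include <stdio.h>
    #include <string.h>

    /* canned expansion for \d, analogous to the backd[] array */
    static const char digit_class[] = "[[:digit:]]";

    struct scanner
    {
        const char *now;        /* current scan position */
        const char *stop;       /* end of current input */
        const char *savenow;    /* saved outer position (one nesting level) */
        const char *savestop;
    };

    /* push a canned string as the current input, like lexnest() */
    static void
    nest(struct scanner *s, const char *begin, const char *end)
    {
        s->savenow = s->now;
        s->savestop = s->stop;
        s->now = begin;
        s->stop = end;
    }

    /* pop back to the outer input once the canned string is exhausted */
    static void
    unnest(struct scanner *s)
    {
        s->now = s->savenow;
        s->stop = s->savestop;
        s->savenow = s->savestop = NULL;
    }

    int
    main(void)
    {
        const char *re = "a\\db";
        struct scanner s = {re, re + strlen(re), NULL, NULL};

        while (s.now < s.stop)
        {
            if (s.now[0] == '\\' && s.now + 1 < s.stop && s.now[1] == 'd')
            {
                s.now += 2;
                nest(&s, digit_class, digit_class + strlen(digit_class));
                while (s.now < s.stop)      /* consume the expansion */
                    putchar(*s.now++);
                unnest(&s);
            }
            else
                putchar(*s.now++);
        }
        putchar('\n');                      /* prints: a[[:digit:]]b */
        return 0;
    }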
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index 41ea9fe1f29..69c7fd7214a 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -1,4 +1,4 @@
-/*
+/*
* regc_locale.c --
*
* This file contains locale-specific regexp routines.
@@ -11,7 +11,7 @@
* Corporation and other parties. The following terms apply to all files
* associated with the software unless explicitly disclaimed in
* individual files.
- *
+ *
* The authors hereby grant permission to use, copy, modify, distribute,
* and license this software and its documentation for any purpose, provided
* that existing copyright notices are retained in all copies and that this
@@ -21,137 +21,332 @@
* and need not follow the licensing terms described here, provided that
* the new terms are clearly indicated on the first page of each file where
* they apply.
- *
+ *
* IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
* FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
* DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
- *
+ *
* THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
+ * FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
* IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
* NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
* MODIFICATIONS.
- *
+ *
* GOVERNMENT USE: If you are acquiring this software on behalf of the
* U.S. government, the Government shall have only "Restricted Rights"
- * in the software and related documentation as defined in the Federal
- * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
+ * in the software and related documentation as defined in the Federal
+ * Acquisition Regulations (FARs) in Clause 52.227.19 (c) (2). If you
* are acquiring the software on behalf of the Department of Defense, the
* software shall be classified as "Commercial Computer Software" and the
* Government shall have only "Restricted Rights" as defined in Clause
* 252.227-7013 (c) (1) of DFARs. Notwithstanding the foregoing, the
* authors grant the U.S. Government and others acting in its behalf
* permission to use and distribute the software in accordance with the
- * terms specified in this license.
+ * terms specified in this license.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regc_locale.c,v 1.1 2003/02/05 17:41:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regc_locale.c,v 1.2 2003/08/04 00:43:21 momjian Exp $
*/
/* ASCII character-name table */
-static struct cname {
- char *name;
- char code;
-} cnames[] = {
- {"NUL", '\0'},
- {"SOH", '\001'},
- {"STX", '\002'},
- {"ETX", '\003'},
- {"EOT", '\004'},
- {"ENQ", '\005'},
- {"ACK", '\006'},
- {"BEL", '\007'},
- {"alert", '\007'},
- {"BS", '\010'},
- {"backspace", '\b'},
- {"HT", '\011'},
- {"tab", '\t'},
- {"LF", '\012'},
- {"newline", '\n'},
- {"VT", '\013'},
- {"vertical-tab", '\v'},
- {"FF", '\014'},
- {"form-feed", '\f'},
- {"CR", '\015'},
- {"carriage-return", '\r'},
- {"SO", '\016'},
- {"SI", '\017'},
- {"DLE", '\020'},
- {"DC1", '\021'},
- {"DC2", '\022'},
- {"DC3", '\023'},
- {"DC4", '\024'},
- {"NAK", '\025'},
- {"SYN", '\026'},
- {"ETB", '\027'},
- {"CAN", '\030'},
- {"EM", '\031'},
- {"SUB", '\032'},
- {"ESC", '\033'},
- {"IS4", '\034'},
- {"FS", '\034'},
- {"IS3", '\035'},
- {"GS", '\035'},
- {"IS2", '\036'},
- {"RS", '\036'},
- {"IS1", '\037'},
- {"US", '\037'},
- {"space", ' '},
- {"exclamation-mark",'!'},
- {"quotation-mark", '"'},
- {"number-sign", '#'},
- {"dollar-sign", '$'},
- {"percent-sign", '%'},
- {"ampersand", '&'},
- {"apostrophe", '\''},
- {"left-parenthesis",'('},
- {"right-parenthesis", ')'},
- {"asterisk", '*'},
- {"plus-sign", '+'},
- {"comma", ','},
- {"hyphen", '-'},
- {"hyphen-minus", '-'},
- {"period", '.'},
- {"full-stop", '.'},
- {"slash", '/'},
- {"solidus", '/'},
- {"zero", '0'},
- {"one", '1'},
- {"two", '2'},
- {"three", '3'},
- {"four", '4'},
- {"five", '5'},
- {"six", '6'},
- {"seven", '7'},
- {"eight", '8'},
- {"nine", '9'},
- {"colon", ':'},
- {"semicolon", ';'},
- {"less-than-sign", '<'},
- {"equals-sign", '='},
- {"greater-than-sign", '>'},
- {"question-mark", '?'},
- {"commercial-at", '@'},
- {"left-square-bracket", '['},
- {"backslash", '\\'},
- {"reverse-solidus", '\\'},
- {"right-square-bracket", ']'},
- {"circumflex", '^'},
- {"circumflex-accent", '^'},
- {"underscore", '_'},
- {"low-line", '_'},
- {"grave-accent", '`'},
- {"left-brace", '{'},
- {"left-curly-bracket", '{'},
- {"vertical-line", '|'},
- {"right-brace", '}'},
- {"right-curly-bracket", '}'},
- {"tilde", '~'},
- {"DEL", '\177'},
- {NULL, 0}
+static struct cname
+{
+ char *name;
+ char code;
+} cnames[] =
+
+{
+ {
+ "NUL", '\0'
+ },
+ {
+ "SOH", '\001'
+ },
+ {
+ "STX", '\002'
+ },
+ {
+ "ETX", '\003'
+ },
+ {
+ "EOT", '\004'
+ },
+ {
+ "ENQ", '\005'
+ },
+ {
+ "ACK", '\006'
+ },
+ {
+ "BEL", '\007'
+ },
+ {
+ "alert", '\007'
+ },
+ {
+ "BS", '\010'
+ },
+ {
+ "backspace", '\b'
+ },
+ {
+ "HT", '\011'
+ },
+ {
+ "tab", '\t'
+ },
+ {
+ "LF", '\012'
+ },
+ {
+ "newline", '\n'
+ },
+ {
+ "VT", '\013'
+ },
+ {
+ "vertical-tab", '\v'
+ },
+ {
+ "FF", '\014'
+ },
+ {
+ "form-feed", '\f'
+ },
+ {
+ "CR", '\015'
+ },
+ {
+ "carriage-return", '\r'
+ },
+ {
+ "SO", '\016'
+ },
+ {
+ "SI", '\017'
+ },
+ {
+ "DLE", '\020'
+ },
+ {
+ "DC1", '\021'
+ },
+ {
+ "DC2", '\022'
+ },
+ {
+ "DC3", '\023'
+ },
+ {
+ "DC4", '\024'
+ },
+ {
+ "NAK", '\025'
+ },
+ {
+ "SYN", '\026'
+ },
+ {
+ "ETB", '\027'
+ },
+ {
+ "CAN", '\030'
+ },
+ {
+ "EM", '\031'
+ },
+ {
+ "SUB", '\032'
+ },
+ {
+ "ESC", '\033'
+ },
+ {
+ "IS4", '\034'
+ },
+ {
+ "FS", '\034'
+ },
+ {
+ "IS3", '\035'
+ },
+ {
+ "GS", '\035'
+ },
+ {
+ "IS2", '\036'
+ },
+ {
+ "RS", '\036'
+ },
+ {
+ "IS1", '\037'
+ },
+ {
+ "US", '\037'
+ },
+ {
+ "space", ' '
+ },
+ {
+ "exclamation-mark", '!'
+ },
+ {
+ "quotation-mark", '"'
+ },
+ {
+ "number-sign", '#'
+ },
+ {
+ "dollar-sign", '$'
+ },
+ {
+ "percent-sign", '%'
+ },
+ {
+ "ampersand", '&'
+ },
+ {
+ "apostrophe", '\''
+ },
+ {
+ "left-parenthesis", '('
+ },
+ {
+ "right-parenthesis", ')'
+ },
+ {
+ "asterisk", '*'
+ },
+ {
+ "plus-sign", '+'
+ },
+ {
+ "comma", ','
+ },
+ {
+ "hyphen", '-'
+ },
+ {
+ "hyphen-minus", '-'
+ },
+ {
+ "period", '.'
+ },
+ {
+ "full-stop", '.'
+ },
+ {
+ "slash", '/'
+ },
+ {
+ "solidus", '/'
+ },
+ {
+ "zero", '0'
+ },
+ {
+ "one", '1'
+ },
+ {
+ "two", '2'
+ },
+ {
+ "three", '3'
+ },
+ {
+ "four", '4'
+ },
+ {
+ "five", '5'
+ },
+ {
+ "six", '6'
+ },
+ {
+ "seven", '7'
+ },
+ {
+ "eight", '8'
+ },
+ {
+ "nine", '9'
+ },
+ {
+ "colon", ':'
+ },
+ {
+ "semicolon", ';'
+ },
+ {
+ "less-than-sign", '<'
+ },
+ {
+ "equals-sign", '='
+ },
+ {
+ "greater-than-sign", '>'
+ },
+ {
+ "question-mark", '?'
+ },
+ {
+ "commercial-at", '@'
+ },
+ {
+ "left-square-bracket", '['
+ },
+ {
+ "backslash", '\\'
+ },
+ {
+ "reverse-solidus", '\\'
+ },
+ {
+ "right-square-bracket", ']'
+ },
+ {
+ "circumflex", '^'
+ },
+ {
+ "circumflex-accent", '^'
+ },
+ {
+ "underscore", '_'
+ },
+ {
+ "low-line", '_'
+ },
+ {
+ "grave-accent", '`'
+ },
+ {
+ "left-brace", '{'
+ },
+ {
+ "left-curly-bracket", '{'
+ },
+ {
+ "vertical-line", '|'
+ },
+ {
+ "right-brace", '}'
+ },
+ {
+ "right-curly-bracket", '}'
+ },
+ {
+ "tilde", '~'
+ },
+ {
+ "DEL", '\177'
+ },
+ {
+ NULL, 0
+ }
};
/*
@@ -226,132 +421,134 @@ pg_tolower(pg_wchar c)
* nmcces - how many distinct MCCEs are there?
*/
static int
-nmcces(struct vars *v)
+nmcces(struct vars * v)
{
- /*
- * No multi-character collating elements defined at the moment.
- */
- return 0;
+ /*
+ * No multi-character collating elements defined at the moment.
+ */
+ return 0;
}
/*
* nleaders - how many chrs can be first chrs of MCCEs?
*/
static int
-nleaders(struct vars *v)
+nleaders(struct vars * v)
{
- return 0;
+ return 0;
}
/*
* allmcces - return a cvec with all the MCCEs of the locale
*/
static struct cvec *
-allmcces(struct vars *v, /* context */
- struct cvec *cv) /* this is supposed to have enough room */
+allmcces(struct vars * v, /* context */
+ struct cvec * cv) /* this is supposed to have enough room */
{
- return clearcvec(cv);
+ return clearcvec(cv);
}
/*
* element - map collating-element name to celt
*/
static celt
-element(struct vars *v, /* context */
- chr *startp, /* points to start of name */
- chr *endp) /* points just past end of name */
+element(struct vars * v, /* context */
+ chr * startp, /* points to start of name */
+ chr * endp) /* points just past end of name */
{
- struct cname *cn;
- size_t len;
-
- /* generic: one-chr names stand for themselves */
- assert(startp < endp);
- len = endp - startp;
- if (len == 1) {
- return *startp;
- }
-
- NOTE(REG_ULOCALE);
-
- /* search table */
- for (cn=cnames; cn->name!=NULL; cn++) {
- if (strlen(cn->name)==len &&
- pg_char_and_wchar_strncmp(cn->name, startp, len)==0) {
- break; /* NOTE BREAK OUT */
+ struct cname *cn;
+ size_t len;
+
+ /* generic: one-chr names stand for themselves */
+ assert(startp < endp);
+ len = endp - startp;
+ if (len == 1)
+ return *startp;
+
+ NOTE(REG_ULOCALE);
+
+ /* search table */
+ for (cn = cnames; cn->name != NULL; cn++)
+ {
+ if (strlen(cn->name) == len &&
+ pg_char_and_wchar_strncmp(cn->name, startp, len) == 0)
+ {
+ break; /* NOTE BREAK OUT */
+ }
}
- }
- if (cn->name != NULL) {
- return CHR(cn->code);
- }
-
- /* couldn't find it */
- ERR(REG_ECOLLATE);
- return 0;
+ if (cn->name != NULL)
+ return CHR(cn->code);
+
+ /* couldn't find it */
+ ERR(REG_ECOLLATE);
+ return 0;
}
/*
* range - supply cvec for a range, including legality check
*/
static struct cvec *
-range(struct vars *v, /* context */
- celt a, /* range start */
- celt b, /* range end, might equal a */
+range(struct vars * v, /* context */
+ celt a, /* range start */
+ celt b, /* range end, might equal a */
int cases) /* case-independent? */
{
- int nchrs;
- struct cvec *cv;
- celt c, lc, uc;
-
- if (a != b && !before(a, b)) {
- ERR(REG_ERANGE);
- return NULL;
- }
+ int nchrs;
+ struct cvec *cv;
+ celt c,
+ lc,
+ uc;
+
+ if (a != b && !before(a, b))
+ {
+ ERR(REG_ERANGE);
+ return NULL;
+ }
- if (!cases) { /* easy version */
- cv = getcvec(v, 0, 1, 0);
- NOERRN();
- addrange(cv, a, b);
- return cv;
- }
+ if (!cases)
+ { /* easy version */
+ cv = getcvec(v, 0, 1, 0);
+ NOERRN();
+ addrange(cv, a, b);
+ return cv;
+ }
- /*
- * When case-independent, it's hard to decide when cvec ranges are
- * usable, so for now at least, we won't try. We allocate enough
- * space for two case variants plus a little extra for the two
- * title case variants.
- */
+ /*
+ * When case-independent, it's hard to decide when cvec ranges are
+ * usable, so for now at least, we won't try. We allocate enough
+ * space for two case variants plus a little extra for the two title
+ * case variants.
+ */
- nchrs = (b - a + 1)*2 + 4;
+ nchrs = (b - a + 1) * 2 + 4;
- cv = getcvec(v, nchrs, 0, 0);
- NOERRN();
+ cv = getcvec(v, nchrs, 0, 0);
+ NOERRN();
- for (c=a; c<=b; c++) {
- addchr(cv, c);
- lc = pg_tolower((chr)c);
- if (c != lc) {
- addchr(cv, lc);
- }
- uc = pg_toupper((chr)c);
- if (c != uc) {
- addchr(cv, uc);
+ for (c = a; c <= b; c++)
+ {
+ addchr(cv, c);
+ lc = pg_tolower((chr) c);
+ if (c != lc)
+ addchr(cv, lc);
+ uc = pg_toupper((chr) c);
+ if (c != uc)
+ addchr(cv, uc);
}
- }
- return cv;
+ return cv;
}
/*
* before - is celt x before celt y, for purposes of range legality?
*/
-static int /* predicate */
+static int /* predicate */
before(celt x, celt y)
{
- /* trivial because no MCCEs */
- if (x < y) {
- return 1;
- }
- return 0;
+ /* trivial because no MCCEs */
+ if (x < y)
+ return 1;
+ return 0;
}
/*
@@ -359,33 +556,34 @@ before(celt x, celt y)
* Must include case counterparts on request.
*/
static struct cvec *
-eclass(struct vars *v, /* context */
- celt c, /* Collating element representing
- * the equivalence class. */
+eclass(struct vars * v, /* context */
+ celt c, /* Collating element representing the
+ * equivalence class. */
int cases) /* all cases? */
{
- struct cvec *cv;
-
- /* crude fake equivalence class for testing */
- if ((v->cflags&REG_FAKE) && c == 'x') {
- cv = getcvec(v, 4, 0, 0);
- addchr(cv, (chr)'x');
- addchr(cv, (chr)'y');
- if (cases) {
- addchr(cv, (chr)'X');
- addchr(cv, (chr)'Y');
+ struct cvec *cv;
+
+ /* crude fake equivalence class for testing */
+ if ((v->cflags & REG_FAKE) && c == 'x')
+ {
+ cv = getcvec(v, 4, 0, 0);
+ addchr(cv, (chr) 'x');
+ addchr(cv, (chr) 'y');
+ if (cases)
+ {
+ addchr(cv, (chr) 'X');
+ addchr(cv, (chr) 'Y');
+ }
+ return cv;
}
+
+ /* otherwise, none */
+ if (cases)
+ return allcases(v, c);
+ cv = getcvec(v, 1, 0, 0);
+ assert(cv != NULL);
+ addchr(cv, (chr) c);
return cv;
- }
-
- /* otherwise, none */
- if (cases) {
- return allcases(v, c);
- }
- cv = getcvec(v, 1, 0, 0);
- assert(cv != NULL);
- addchr(cv, (chr)c);
- return cv;
}
/*
@@ -394,164 +592,182 @@ eclass(struct vars *v, /* context */
* Must include case counterparts on request.
*/
static struct cvec *
-cclass(struct vars *v, /* context */
- chr *startp, /* where the name starts */
- chr *endp, /* just past the end of the name */
+cclass(struct vars * v, /* context */
+ chr * startp, /* where the name starts */
+ chr * endp, /* just past the end of the name */
int cases) /* case-independent? */
{
- size_t len;
- struct cvec *cv = NULL;
- char **namePtr;
- int i, index;
-
- /*
- * The following arrays define the valid character class names.
- */
-
- static char *classNames[] = {
- "alnum", "alpha", "ascii", "blank", "cntrl", "digit", "graph",
- "lower", "print", "punct", "space", "upper", "xdigit", NULL
- };
-
- enum classes {
- CC_ALNUM, CC_ALPHA, CC_ASCII, CC_BLANK, CC_CNTRL, CC_DIGIT, CC_GRAPH,
- CC_LOWER, CC_PRINT, CC_PUNCT, CC_SPACE, CC_UPPER, CC_XDIGIT
- };
-
- /*
- * Map the name to the corresponding enumerated value.
- */
- len = endp - startp;
- index = -1;
- for (namePtr=classNames,i=0 ; *namePtr!=NULL ; namePtr++,i++) {
- if (strlen(*namePtr) == len &&
- pg_char_and_wchar_strncmp(*namePtr, startp, len) == 0) {
- index = i;
- break;
+ size_t len;
+ struct cvec *cv = NULL;
+ char **namePtr;
+ int i,
+ index;
+
+ /*
+ * The following arrays define the valid character class names.
+ */
+
+ static char *classNames[] = {
+ "alnum", "alpha", "ascii", "blank", "cntrl", "digit", "graph",
+ "lower", "print", "punct", "space", "upper", "xdigit", NULL
+ };
+
+ enum classes
+ {
+ CC_ALNUM, CC_ALPHA, CC_ASCII, CC_BLANK, CC_CNTRL, CC_DIGIT, CC_GRAPH,
+ CC_LOWER, CC_PRINT, CC_PUNCT, CC_SPACE, CC_UPPER, CC_XDIGIT
+ };
+
+ /*
+ * Map the name to the corresponding enumerated value.
+ */
+ len = endp - startp;
+ index = -1;
+ for (namePtr = classNames, i = 0; *namePtr != NULL; namePtr++, i++)
+ {
+ if (strlen(*namePtr) == len &&
+ pg_char_and_wchar_strncmp(*namePtr, startp, len) == 0)
+ {
+ index = i;
+ break;
+ }
+ }
+ if (index == -1)
+ {
+ ERR(REG_ECTYPE);
+ return NULL;
}
- }
- if (index == -1) {
- ERR(REG_ECTYPE);
- return NULL;
- }
- /*
- * Remap lower and upper to alpha if the match is case insensitive.
- */
+ /*
+ * Remap lower and upper to alpha if the match is case insensitive.
+ */
- if (cases &&
+ if (cases &&
((enum classes) index == CC_LOWER ||
(enum classes) index == CC_UPPER))
index = (int) CC_ALPHA;
-
- /*
- * Now compute the character class contents.
+
+ /*
+ * Now compute the character class contents.
*
* For the moment, assume that only char codes < 256 can be in these
* classes.
- */
-
- switch((enum classes) index) {
- case CC_PRINT:
- case CC_ALNUM:
- cv = getcvec(v, UCHAR_MAX, 1, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_isalpha((chr) i))
- addchr(cv, (chr) i);
- }
- addrange(cv, (chr) '0', (chr) '9');
- }
- break;
- case CC_ALPHA:
- cv = getcvec(v, UCHAR_MAX, 0, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_isalpha((chr) i))
- addchr(cv, (chr) i);
- }
- }
- break;
- case CC_ASCII:
- cv = getcvec(v, 0, 1, 0);
- if (cv) {
- addrange(cv, 0, 0x7f);
- }
- break;
- case CC_BLANK:
- cv = getcvec(v, 2, 0, 0);
- addchr(cv, '\t');
- addchr(cv, ' ');
- break;
- case CC_CNTRL:
- cv = getcvec(v, 0, 2, 0);
- addrange(cv, 0x0, 0x1f);
- addrange(cv, 0x7f, 0x9f);
- break;
- case CC_DIGIT:
- cv = getcvec(v, 0, 1, 0);
- if (cv) {
- addrange(cv, (chr) '0', (chr) '9');
+ */
+
+ switch ((enum classes) index)
+ {
+ case CC_PRINT:
+ case CC_ALNUM:
+ cv = getcvec(v, UCHAR_MAX, 1, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_isalpha((chr) i))
+ addchr(cv, (chr) i);
+ }
+ addrange(cv, (chr) '0', (chr) '9');
+ }
+ break;
+ case CC_ALPHA:
+ cv = getcvec(v, UCHAR_MAX, 0, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_isalpha((chr) i))
+ addchr(cv, (chr) i);
+ }
+ }
+ break;
+ case CC_ASCII:
+ cv = getcvec(v, 0, 1, 0);
+ if (cv)
+ addrange(cv, 0, 0x7f);
+ break;
+ case CC_BLANK:
+ cv = getcvec(v, 2, 0, 0);
+ addchr(cv, '\t');
+ addchr(cv, ' ');
+ break;
+ case CC_CNTRL:
+ cv = getcvec(v, 0, 2, 0);
+ addrange(cv, 0x0, 0x1f);
+ addrange(cv, 0x7f, 0x9f);
+ break;
+ case CC_DIGIT:
+ cv = getcvec(v, 0, 1, 0);
+ if (cv)
+ addrange(cv, (chr) '0', (chr) '9');
+ break;
+ case CC_PUNCT:
+ cv = getcvec(v, UCHAR_MAX, 0, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_ispunct((chr) i))
+ addchr(cv, (chr) i);
+ }
+ }
+ break;
+ case CC_XDIGIT:
+ cv = getcvec(v, 0, 3, 0);
+ if (cv)
+ {
+ addrange(cv, '0', '9');
+ addrange(cv, 'a', 'f');
+ addrange(cv, 'A', 'F');
+ }
+ break;
+ case CC_SPACE:
+ cv = getcvec(v, UCHAR_MAX, 0, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_isspace((chr) i))
+ addchr(cv, (chr) i);
+ }
+ }
+ break;
+ case CC_LOWER:
+ cv = getcvec(v, UCHAR_MAX, 0, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_islower((chr) i))
+ addchr(cv, (chr) i);
+ }
+ }
+ break;
+ case CC_UPPER:
+ cv = getcvec(v, UCHAR_MAX, 0, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_isupper((chr) i))
+ addchr(cv, (chr) i);
+ }
+ }
+ break;
+ case CC_GRAPH:
+ cv = getcvec(v, UCHAR_MAX, 0, 0);
+ if (cv)
+ {
+ for (i = 0; i <= UCHAR_MAX; i++)
+ {
+ if (pg_isgraph((chr) i))
+ addchr(cv, (chr) i);
+ }
+ }
+ break;
}
- break;
- case CC_PUNCT:
- cv = getcvec(v, UCHAR_MAX, 0, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_ispunct((chr) i))
- addchr(cv, (chr) i);
- }
- }
- break;
- case CC_XDIGIT:
- cv = getcvec(v, 0, 3, 0);
- if (cv) {
- addrange(cv, '0', '9');
- addrange(cv, 'a', 'f');
- addrange(cv, 'A', 'F');
- }
- break;
- case CC_SPACE:
- cv = getcvec(v, UCHAR_MAX, 0, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_isspace((chr) i))
- addchr(cv, (chr) i);
- }
- }
- break;
- case CC_LOWER:
- cv = getcvec(v, UCHAR_MAX, 0, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_islower((chr) i))
- addchr(cv, (chr) i);
- }
- }
- break;
- case CC_UPPER:
- cv = getcvec(v, UCHAR_MAX, 0, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_isupper((chr) i))
- addchr(cv, (chr) i);
- }
- }
- break;
- case CC_GRAPH:
- cv = getcvec(v, UCHAR_MAX, 0, 0);
- if (cv) {
- for (i=0 ; i<= UCHAR_MAX ; i++) {
- if (pg_isgraph((chr) i))
- addchr(cv, (chr) i);
- }
- }
- break;
- }
- if (cv == NULL) {
- ERR(REG_ESPACE);
- }
- return cv;
+ if (cv == NULL)
+ ERR(REG_ESPACE);
+ return cv;
}
/*
@@ -561,37 +777,37 @@ cclass(struct vars *v, /* context */
* messy cases are done via range().
*/
static struct cvec *
-allcases(struct vars *v, /* context */
+allcases(struct vars * v, /* context */
chr pc) /* character to get case equivs of */
{
- struct cvec *cv;
- chr c = (chr)pc;
- chr lc, uc;
+ struct cvec *cv;
+ chr c = (chr) pc;
+ chr lc,
+ uc;
- lc = pg_tolower((chr)c);
- uc = pg_toupper((chr)c);
+ lc = pg_tolower((chr) c);
+ uc = pg_toupper((chr) c);
cv = getcvec(v, 2, 0, 0);
- addchr(cv, lc);
- if (lc != uc) {
- addchr(cv, uc);
- }
- return cv;
+ addchr(cv, lc);
+ if (lc != uc)
+ addchr(cv, uc);
+ return cv;
}
/*
* cmp - chr-substring compare
*
- * Backrefs need this. It should preferably be efficient.
+ * Backrefs need this. It should preferably be efficient.
* Note that it does not need to report anything except equal/unequal.
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
*/
-static int /* 0 for equal, nonzero for unequal */
-cmp(const chr *x, const chr *y, /* strings to compare */
- size_t len) /* exact length of comparison */
+static int /* 0 for equal, nonzero for unequal */
+cmp(const chr * x, const chr * y, /* strings to compare */
+ size_t len) /* exact length of comparison */
{
- return memcmp(VS(x), VS(y), len*sizeof(chr));
+ return memcmp(VS(x), VS(y), len * sizeof(chr));
}
/*
@@ -602,14 +818,14 @@ cmp(const chr *x, const chr *y, /* strings to compare */
* Note also that the length is exact, and the comparison should not
* stop at embedded NULs!
*/
-static int /* 0 for equal, nonzero for unequal */
-casecmp(const chr *x, const chr *y, /* strings to compare */
+static int /* 0 for equal, nonzero for unequal */
+casecmp(const chr * x, const chr * y, /* strings to compare */
size_t len) /* exact length of comparison */
{
- for (; len > 0; len--, x++, y++) {
- if ((*x!=*y) && (pg_tolower(*x) != pg_tolower(*y))) {
- return 1;
+ for (; len > 0; len--, x++, y++)
+ {
+ if ((*x != *y) && (pg_tolower(*x) != pg_tolower(*y)))
+ return 1;
}
- }
- return 0;
+ return 0;
}
diff --git a/src/backend/regex/regc_nfa.c b/src/backend/regex/regc_nfa.c
index 43e01ebe92b..51fd8bfb859 100644
--- a/src/backend/regex/regc_nfa.c
+++ b/src/backend/regex/regc_nfa.c
@@ -2,21 +2,21 @@
* NFA utilities.
* This file is #included by regcomp.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regc_nfa.c,v 1.1 2003/02/05 17:41:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regc_nfa.c,v 1.2 2003/08/04 00:43:21 momjian Exp $
*
*
* One or two things that technically ought to be in here
@@ -36,21 +36,21 @@
* the color chains.
*/
-#define NISERR() VISERR(nfa->v)
-#define NERR(e) VERR(nfa->v, (e))
+#define NISERR() VISERR(nfa->v)
+#define NERR(e) VERR(nfa->v, (e))
/*
* newnfa - set up an NFA
*/
-static struct nfa * /* the NFA, or NULL */
-newnfa(struct vars *v,
- struct colormap *cm,
- struct nfa *parent) /* NULL if primary NFA */
+static struct nfa * /* the NFA, or NULL */
+newnfa(struct vars * v,
+ struct colormap * cm,
+ struct nfa * parent) /* NULL if primary NFA */
{
struct nfa *nfa;
- nfa = (struct nfa *)MALLOC(sizeof(struct nfa));
+ nfa = (struct nfa *) MALLOC(sizeof(struct nfa));
if (nfa == NULL)
return NULL;
@@ -66,9 +66,10 @@ newnfa(struct vars *v,
nfa->pre = newfstate(nfa, '>'); /* number 1 */
nfa->parent = parent;
- nfa->init = newstate(nfa); /* may become invalid later */
+ nfa->init = newstate(nfa); /* may become invalid later */
nfa->final = newstate(nfa);
- if (ISERR()) {
+ if (ISERR())
+ {
freenfa(nfa);
return NULL;
}
@@ -79,7 +80,8 @@ newnfa(struct vars *v,
newarc(nfa, '$', 1, nfa->final, nfa->post);
newarc(nfa, '$', 0, nfa->final, nfa->post);
- if (ISERR()) {
+ if (ISERR())
+ {
freenfa(nfa);
return NULL;
}
@@ -90,15 +92,17 @@ newnfa(struct vars *v,
* freenfa - free an entire NFA
*/
static void
-freenfa(struct nfa *nfa)
+freenfa(struct nfa * nfa)
{
struct state *s;
- while ((s = nfa->states) != NULL) {
- s->nins = s->nouts = 0; /* don't worry about arcs */
+ while ((s = nfa->states) != NULL)
+ {
+ s->nins = s->nouts = 0; /* don't worry about arcs */
freestate(nfa, s);
}
- while ((s = nfa->free) != NULL) {
+ while ((s = nfa->free) != NULL)
+ {
nfa->free = s->next;
destroystate(nfa, s);
}
@@ -113,17 +117,21 @@ freenfa(struct nfa *nfa)
/*
* newstate - allocate an NFA state, with zero flag value
*/
-static struct state * /* NULL on error */
-newstate(struct nfa *nfa)
+static struct state * /* NULL on error */
+newstate(struct nfa * nfa)
{
struct state *s;
- if (nfa->free != NULL) {
+ if (nfa->free != NULL)
+ {
s = nfa->free;
nfa->free = s->next;
- } else {
- s = (struct state *)MALLOC(sizeof(struct state));
- if (s == NULL) {
+ }
+ else
+ {
+ s = (struct state *) MALLOC(sizeof(struct state));
+ if (s == NULL)
+ {
NERR(REG_ESPACE);
return NULL;
}
@@ -143,7 +151,8 @@ newstate(struct nfa *nfa)
s->outs = NULL;
s->tmp = NULL;
s->next = NULL;
- if (nfa->slast != NULL) {
+ if (nfa->slast != NULL)
+ {
assert(nfa->slast->next == NULL);
nfa->slast->next = s;
}
@@ -155,14 +164,14 @@ newstate(struct nfa *nfa)
/*
* newfstate - allocate an NFA state with a specified flag value
*/
-static struct state * /* NULL on error */
-newfstate(struct nfa *nfa, int flag)
+static struct state * /* NULL on error */
+newfstate(struct nfa * nfa, int flag)
{
struct state *s;
s = newstate(nfa);
if (s != NULL)
- s->flag = (char)flag;
+ s->flag = (char) flag;
return s;
}
@@ -170,8 +179,8 @@ newfstate(struct nfa *nfa, int flag)
* dropstate - delete a state's inarcs and outarcs and free it
*/
static void
-dropstate(struct nfa *nfa,
- struct state *s)
+dropstate(struct nfa * nfa,
+ struct state * s)
{
struct arc *a;
@@ -186,8 +195,8 @@ dropstate(struct nfa *nfa,
* freestate - free a state, which has no in-arcs or out-arcs
*/
static void
-freestate(struct nfa *nfa,
- struct state *s)
+freestate(struct nfa * nfa,
+ struct state * s)
{
assert(s != NULL);
assert(s->nins == 0 && s->nouts == 0);
@@ -196,18 +205,21 @@ freestate(struct nfa *nfa,
s->flag = 0;
if (s->next != NULL)
s->next->prev = s->prev;
- else {
+ else
+ {
assert(s == nfa->slast);
nfa->slast = s->prev;
}
if (s->prev != NULL)
s->prev->next = s->next;
- else {
+ else
+ {
assert(s == nfa->states);
nfa->states = s->next;
}
s->prev = NULL;
- s->next = nfa->free; /* don't delete it, put it on the free list */
+ s->next = nfa->free; /* don't delete it, put it on the free
+ * list */
nfa->free = s;
}
@@ -215,14 +227,15 @@ freestate(struct nfa *nfa,
* destroystate - really get rid of an already-freed state
*/
static void
-destroystate(struct nfa *nfa,
- struct state *s)
+destroystate(struct nfa * nfa,
+ struct state * s)
{
struct arcbatch *ab;
struct arcbatch *abnext;
assert(s->no == FREESTATE);
- for (ab = s->oas.next; ab != NULL; ab = abnext) {
+ for (ab = s->oas.next; ab != NULL; ab = abnext)
+ {
abnext = ab->next;
FREE(ab);
}
@@ -236,11 +249,11 @@ destroystate(struct nfa *nfa,
* newarc - set up a new arc within an NFA
*/
static void
-newarc(struct nfa *nfa,
+newarc(struct nfa * nfa,
int t,
pcolor co,
- struct state *from,
- struct state *to)
+ struct state * from,
+ struct state * to)
{
struct arc *a;
@@ -257,13 +270,13 @@ newarc(struct nfa *nfa,
assert(a != NULL);
a->type = t;
- a->co = (color)co;
+ a->co = (color) co;
a->to = to;
a->from = from;
/*
- * Put the new arc on the beginning, not the end, of the chains.
- * Not only is this easier, it has the very useful side effect that
+ * Put the new arc on the beginning, not the end, of the chains. Not
+ * only is this easier, it has the very useful side effect that
* deleting the most-recently-added arc is the cheapest case rather
* than the most expensive one.
*/
@@ -284,36 +297,40 @@ newarc(struct nfa *nfa,
/*
* allocarc - allocate a new out-arc within a state
*/
-static struct arc * /* NULL for failure */
-allocarc(struct nfa *nfa,
- struct state *s)
+static struct arc * /* NULL for failure */
+allocarc(struct nfa * nfa,
+ struct state * s)
{
struct arc *a;
struct arcbatch *new;
- int i;
+ int i;
/* shortcut */
- if (s->free == NULL && s->noas < ABSIZE) {
+ if (s->free == NULL && s->noas < ABSIZE)
+ {
a = &s->oas.a[s->noas];
s->noas++;
return a;
}
/* if none at hand, get more */
- if (s->free == NULL) {
- new = (struct arcbatch *)MALLOC(sizeof(struct arcbatch));
- if (new == NULL) {
+ if (s->free == NULL)
+ {
+ new = (struct arcbatch *) MALLOC(sizeof(struct arcbatch));
+ if (new == NULL)
+ {
NERR(REG_ESPACE);
return NULL;
}
new->next = s->oas.next;
s->oas.next = new;
- for (i = 0; i < ABSIZE; i++) {
+ for (i = 0; i < ABSIZE; i++)
+ {
new->a[i].type = 0;
- new->a[i].freechain = &new->a[i+1];
+ new->a[i].freechain = &new->a[i + 1];
}
- new->a[ABSIZE-1].freechain = NULL;
+ new->a[ABSIZE - 1].freechain = NULL;
s->free = &new->a[0];
}
assert(s->free != NULL);
@@ -327,8 +344,8 @@ allocarc(struct nfa *nfa,
* freearc - free an arc
*/
static void
-freearc(struct nfa *nfa,
- struct arc *victim)
+freearc(struct nfa * nfa,
+ struct arc * victim)
{
struct state *from = victim->from;
struct state *to = victim->to;
@@ -344,9 +361,10 @@ freearc(struct nfa *nfa,
assert(from != NULL);
assert(from->outs != NULL);
a = from->outs;
- if (a == victim) /* simple case: first in chain */
+ if (a == victim) /* simple case: first in chain */
from->outs = victim->outchain;
- else {
+ else
+ {
for (; a != NULL && a->outchain != victim; a = a->outchain)
continue;
assert(a != NULL);
@@ -358,9 +376,10 @@ freearc(struct nfa *nfa,
assert(to != NULL);
assert(to->ins != NULL);
a = to->ins;
- if (a == victim) /* simple case: first in chain */
+ if (a == victim) /* simple case: first in chain */
to->ins = victim->inchain;
- else {
+ else
+ {
for (; a != NULL && a->inchain != victim; a = a->inchain)
continue;
assert(a != NULL);
@@ -383,7 +402,7 @@ freearc(struct nfa *nfa,
* If there is more than one such arc, the result is random.
*/
static struct arc *
-findarc(struct state *s,
+findarc(struct state * s,
int type,
pcolor co)
{
@@ -399,10 +418,10 @@ findarc(struct state *s,
* cparc - allocate a new arc within an NFA, copying details from old one
*/
static void
-cparc(struct nfa *nfa,
- struct arc *oa,
- struct state *from,
- struct state *to)
+cparc(struct nfa * nfa,
+ struct arc * oa,
+ struct state * from,
+ struct state * to)
{
newarc(nfa, oa->type, oa->co, from, to);
}
@@ -416,15 +435,16 @@ cparc(struct nfa *nfa,
* ones to exploit the suppression built into newarc.
*/
static void
-moveins(struct nfa *nfa,
- struct state *old,
- struct state *new)
+moveins(struct nfa * nfa,
+ struct state * old,
+ struct state * new)
{
struct arc *a;
assert(old != new);
- while ((a = old->ins) != NULL) {
+ while ((a = old->ins) != NULL)
+ {
cparc(nfa, a, a->from, new);
freearc(nfa, a);
}
@@ -436,9 +456,9 @@ moveins(struct nfa *nfa,
* copyins - copy all in arcs of a state to another state
*/
static void
-copyins(struct nfa *nfa,
- struct state *old,
- struct state *new)
+copyins(struct nfa * nfa,
+ struct state * old,
+ struct state * new)
{
struct arc *a;
@@ -452,15 +472,16 @@ copyins(struct nfa *nfa,
* moveouts - move all out arcs of a state to another state
*/
static void
-moveouts(struct nfa *nfa,
- struct state *old,
- struct state *new)
+moveouts(struct nfa * nfa,
+ struct state * old,
+ struct state * new)
{
struct arc *a;
assert(old != new);
- while ((a = old->outs) != NULL) {
+ while ((a = old->outs) != NULL)
+ {
cparc(nfa, a, new, a->to);
freearc(nfa, a);
}
@@ -470,9 +491,9 @@ moveouts(struct nfa *nfa,
* copyouts - copy all out arcs of a state to another state
*/
static void
-copyouts(struct nfa *nfa,
- struct state *old,
- struct state *new)
+copyouts(struct nfa * nfa,
+ struct state * old,
+ struct state * new)
{
struct arc *a;
@@ -486,10 +507,10 @@ copyouts(struct nfa *nfa,
* cloneouts - copy out arcs of a state to another state pair, modifying type
*/
static void
-cloneouts(struct nfa *nfa,
- struct state *old,
- struct state *from,
- struct state *to,
+cloneouts(struct nfa * nfa,
+ struct state * old,
+ struct state * from,
+ struct state * to,
int type)
{
struct arc *a;
@@ -507,20 +528,20 @@ cloneouts(struct nfa *nfa,
* states using their tmp pointer.
*/
static void
-delsub(struct nfa *nfa,
- struct state *lp, /* the sub-NFA goes from here... */
- struct state *rp) /* ...to here, *not* inclusive */
+delsub(struct nfa * nfa,
+ struct state * lp, /* the sub-NFA goes from here... */
+ struct state * rp) /* ...to here, *not* inclusive */
{
assert(lp != rp);
- rp->tmp = rp; /* mark end */
+ rp->tmp = rp; /* mark end */
deltraverse(nfa, lp, lp);
assert(lp->nouts == 0 && rp->nins == 0); /* did the job */
- assert(lp->no != FREESTATE && rp->no != FREESTATE); /* no more */
+ assert(lp->no != FREESTATE && rp->no != FREESTATE); /* no more */
- rp->tmp = NULL; /* unmark end */
- lp->tmp = NULL; /* and begin, marked by deltraverse */
+ rp->tmp = NULL; /* unmark end */
+ lp->tmp = NULL; /* and begin, marked by deltraverse */
}
/*
@@ -528,36 +549,38 @@ delsub(struct nfa *nfa,
* This routine's basic job is to destroy all out-arcs of the state.
*/
static void
-deltraverse(struct nfa *nfa,
- struct state *leftend,
- struct state *s)
+deltraverse(struct nfa * nfa,
+ struct state * leftend,
+ struct state * s)
{
struct arc *a;
struct state *to;
if (s->nouts == 0)
- return; /* nothing to do */
+ return; /* nothing to do */
if (s->tmp != NULL)
- return; /* already in progress */
+ return; /* already in progress */
- s->tmp = s; /* mark as in progress */
+ s->tmp = s; /* mark as in progress */
- while ((a = s->outs) != NULL) {
+ while ((a = s->outs) != NULL)
+ {
to = a->to;
deltraverse(nfa, leftend, to);
assert(to->nouts == 0 || to->tmp != NULL);
freearc(nfa, a);
- if (to->nins == 0 && to->tmp == NULL) {
+ if (to->nins == 0 && to->tmp == NULL)
+ {
assert(to->nouts == 0);
freestate(nfa, to);
}
}
- assert(s->no != FREESTATE); /* we're still here */
- assert(s == leftend || s->nins != 0); /* and still reachable */
+ assert(s->no != FREESTATE); /* we're still here */
+ assert(s == leftend || s->nins != 0); /* and still reachable */
assert(s->nouts == 0); /* but have no outarcs */
- s->tmp = NULL; /* we're done here */
+ s->tmp = NULL; /* we're done here */
}
/*
@@ -568,13 +591,14 @@ deltraverse(struct nfa *nfa,
* it's a state pointer, didn't you? :-))
*/
static void
-dupnfa(struct nfa *nfa,
- struct state *start, /* duplicate of subNFA starting here */
- struct state *stop, /* and stopping here */
- struct state *from, /* stringing duplicate from here */
- struct state *to) /* to here */
+dupnfa(struct nfa * nfa,
+ struct state * start, /* duplicate of subNFA starting here */
+ struct state * stop, /* and stopping here */
+ struct state * from, /* stringing duplicate from here */
+ struct state * to) /* to here */
{
- if (start == stop) {
+ if (start == stop)
+ {
newarc(nfa, EMPTY, 0, from, to);
return;
}
@@ -591,23 +615,25 @@ dupnfa(struct nfa *nfa,
* duptraverse - recursive heart of dupnfa
*/
static void
-duptraverse(struct nfa *nfa,
- struct state *s,
- struct state *stmp) /* s's duplicate, or NULL */
+duptraverse(struct nfa * nfa,
+ struct state * s,
+ struct state * stmp) /* s's duplicate, or NULL */
{
struct arc *a;
if (s->tmp != NULL)
- return; /* already done */
+ return; /* already done */
s->tmp = (stmp == NULL) ? newstate(nfa) : stmp;
- if (s->tmp == NULL) {
+ if (s->tmp == NULL)
+ {
assert(NISERR());
return;
}
- for (a = s->outs; a != NULL && !NISERR(); a = a->outchain) {
- duptraverse(nfa, a->to, (struct state *)NULL);
+ for (a = s->outs; a != NULL && !NISERR(); a = a->outchain)
+ {
+ duptraverse(nfa, a->to, (struct state *) NULL);
assert(a->to->tmp != NULL);
cparc(nfa, a, s->tmp, a->to->tmp);
}
@@ -617,8 +643,8 @@ duptraverse(struct nfa *nfa,
* cleartraverse - recursive cleanup for algorithms that leave tmp ptrs set
*/
static void
-cleartraverse(struct nfa *nfa,
- struct state *s)
+cleartraverse(struct nfa * nfa,
+ struct state * s)
{
struct arc *a;
@@ -634,15 +660,18 @@ cleartraverse(struct nfa *nfa,
* specialcolors - fill in special colors for an NFA
*/
static void
-specialcolors(struct nfa *nfa)
+specialcolors(struct nfa * nfa)
{
/* false colors for BOS, BOL, EOS, EOL */
- if (nfa->parent == NULL) {
+ if (nfa->parent == NULL)
+ {
nfa->bos[0] = pseudocolor(nfa->cm);
nfa->bos[1] = pseudocolor(nfa->cm);
nfa->eos[0] = pseudocolor(nfa->cm);
nfa->eos[1] = pseudocolor(nfa->cm);
- } else {
+ }
+ else
+ {
assert(nfa->parent->bos[0] != COLORLESS);
nfa->bos[0] = nfa->parent->bos[0];
assert(nfa->parent->bos[1] != COLORLESS);
@@ -657,57 +686,60 @@ specialcolors(struct nfa *nfa)
/*
* optimize - optimize an NFA
*/
-static long /* re_info bits */
-optimize(struct nfa *nfa,
+static long /* re_info bits */
+optimize(struct nfa * nfa,
FILE *f) /* for debug output; NULL none */
{
#ifdef REG_DEBUG
- int verbose = (f != NULL) ? 1 : 0;
+ int verbose = (f != NULL) ? 1 : 0;
if (verbose)
fprintf(f, "\ninitial cleanup:\n");
#endif
- cleanup(nfa); /* may simplify situation */
+ cleanup(nfa); /* may simplify situation */
#ifdef REG_DEBUG
if (verbose)
dumpnfa(nfa, f);
if (verbose)
fprintf(f, "\nempties:\n");
#endif
- fixempties(nfa, f); /* get rid of EMPTY arcs */
+ fixempties(nfa, f); /* get rid of EMPTY arcs */
#ifdef REG_DEBUG
if (verbose)
fprintf(f, "\nconstraints:\n");
#endif
- pullback(nfa, f); /* pull back constraints backward */
- pushfwd(nfa, f); /* push fwd constraints forward */
+ pullback(nfa, f); /* pull back constraints backward */
+ pushfwd(nfa, f); /* push fwd constraints forward */
#ifdef REG_DEBUG
if (verbose)
fprintf(f, "\nfinal cleanup:\n");
#endif
- cleanup(nfa); /* final tidying */
- return analyze(nfa); /* and analysis */
+ cleanup(nfa); /* final tidying */
+ return analyze(nfa); /* and analysis */
}
/*
* pullback - pull back constraints backward to (with luck) eliminate them
*/
static void
-pullback(struct nfa *nfa,
- FILE *f) /* for debug output; NULL none */
+pullback(struct nfa * nfa,
+ FILE *f) /* for debug output; NULL none */
{
struct state *s;
struct state *nexts;
struct arc *a;
struct arc *nexta;
- int progress;
+ int progress;
/* find and pull until there are no more */
- do {
+ do
+ {
progress = 0;
- for (s = nfa->states; s != NULL && !NISERR(); s = nexts) {
+ for (s = nfa->states; s != NULL && !NISERR(); s = nexts)
+ {
nexts = s->next;
- for (a = s->outs; a != NULL && !NISERR(); a = nexta) {
+ for (a = s->outs; a != NULL && !NISERR(); a = nexta)
+ {
nexta = a->outchain;
if (a->type == '^' || a->type == BEHIND)
if (pull(nfa, a))
@@ -721,9 +753,11 @@ pullback(struct nfa *nfa,
if (NISERR())
return;
- for (a = nfa->pre->outs; a != NULL; a = nexta) {
+ for (a = nfa->pre->outs; a != NULL; a = nexta)
+ {
nexta = a->outchain;
- if (a->type == '^') {
+ if (a->type == '^')
+ {
assert(a->co == 0 || a->co == 1);
newarc(nfa, PLAIN, nfa->bos[a->co], a->from, a->to);
freearc(nfa, a);
@@ -737,9 +771,9 @@ pullback(struct nfa *nfa,
* one state -- the constraint's from state -- and only if the constraint
* was that state's last outarc.
*/
-static int /* 0 couldn't, 1 could */
-pull(struct nfa *nfa,
- struct arc *con)
+static int /* 0 couldn't, 1 could */
+pull(struct nfa * nfa,
+ struct arc * con)
{
struct state *from = con->from;
struct state *to = con->to;
@@ -747,25 +781,28 @@ pull(struct nfa *nfa,
struct arc *nexta;
struct state *s;
- if (from == to) { /* circular constraint is pointless */
+ if (from == to)
+ { /* circular constraint is pointless */
freearc(nfa, con);
return 1;
}
- if (from->flag) /* can't pull back beyond start */
+ if (from->flag) /* can't pull back beyond start */
return 0;
- if (from->nins == 0) { /* unreachable */
+ if (from->nins == 0)
+ { /* unreachable */
freearc(nfa, con);
return 1;
}
/* first, clone from state if necessary to avoid other outarcs */
- if (from->nouts > 1) {
+ if (from->nouts > 1)
+ {
s = newstate(nfa);
if (NISERR())
return 0;
assert(to != from); /* con is not an inarc */
- copyins(nfa, from, s); /* duplicate inarcs */
- cparc(nfa, con, s, to); /* move constraint arc */
+ copyins(nfa, from, s); /* duplicate inarcs */
+ cparc(nfa, con, s, to); /* move constraint arc */
freearc(nfa, con);
from = s;
con = from->outs;
@@ -773,27 +810,29 @@ pull(struct nfa *nfa,
assert(from->nouts == 1);
/* propagate the constraint into the from state's inarcs */
- for (a = from->ins; a != NULL; a = nexta) {
+ for (a = from->ins; a != NULL; a = nexta)
+ {
nexta = a->inchain;
- switch (combine(con, a)) {
- case INCOMPATIBLE: /* destroy the arc */
- freearc(nfa, a);
- break;
- case SATISFIED: /* no action needed */
- break;
- case COMPATIBLE: /* swap the two arcs, more or less */
- s = newstate(nfa);
- if (NISERR())
- return 0;
- cparc(nfa, a, s, to); /* anticipate move */
- cparc(nfa, con, a->from, s);
- if (NISERR())
- return 0;
- freearc(nfa, a);
- break;
- default:
- assert(NOTREACHED);
- break;
+ switch (combine(con, a))
+ {
+ case INCOMPATIBLE: /* destroy the arc */
+ freearc(nfa, a);
+ break;
+ case SATISFIED: /* no action needed */
+ break;
+ case COMPATIBLE: /* swap the two arcs, more or less */
+ s = newstate(nfa);
+ if (NISERR())
+ return 0;
+ cparc(nfa, a, s, to); /* anticipate move */
+ cparc(nfa, con, a->from, s);
+ if (NISERR())
+ return 0;
+ freearc(nfa, a);
+ break;
+ default:
+ assert(NOTREACHED);
+ break;
}
}
@@ -807,21 +846,24 @@ pull(struct nfa *nfa,
* pushfwd - push forward constraints forward to (with luck) eliminate them
*/
static void
-pushfwd(struct nfa *nfa,
- FILE *f) /* for debug output; NULL none */
+pushfwd(struct nfa * nfa,
+ FILE *f) /* for debug output; NULL none */
{
struct state *s;
struct state *nexts;
struct arc *a;
struct arc *nexta;
- int progress;
+ int progress;
/* find and push until there are no more */
- do {
+ do
+ {
progress = 0;
- for (s = nfa->states; s != NULL && !NISERR(); s = nexts) {
+ for (s = nfa->states; s != NULL && !NISERR(); s = nexts)
+ {
nexts = s->next;
- for (a = s->ins; a != NULL && !NISERR(); a = nexta) {
+ for (a = s->ins; a != NULL && !NISERR(); a = nexta)
+ {
nexta = a->inchain;
if (a->type == '$' || a->type == AHEAD)
if (push(nfa, a))
@@ -835,9 +877,11 @@ pushfwd(struct nfa *nfa,
if (NISERR())
return;
- for (a = nfa->post->ins; a != NULL; a = nexta) {
+ for (a = nfa->post->ins; a != NULL; a = nexta)
+ {
nexta = a->inchain;
- if (a->type == '$') {
+ if (a->type == '$')
+ {
assert(a->co == 0 || a->co == 1);
newarc(nfa, PLAIN, nfa->eos[a->co], a->from, a->to);
freearc(nfa, a);
@@ -851,9 +895,9 @@ pushfwd(struct nfa *nfa,
* one state -- the constraint's to state -- and only if the constraint
* was that state's last inarc.
*/
-static int /* 0 couldn't, 1 could */
-push(struct nfa *nfa,
- struct arc *con)
+static int /* 0 couldn't, 1 could */
+push(struct nfa * nfa,
+ struct arc * con)
{
struct state *from = con->from;
struct state *to = con->to;
@@ -861,24 +905,27 @@ push(struct nfa *nfa,
struct arc *nexta;
struct state *s;
- if (to == from) { /* circular constraint is pointless */
+ if (to == from)
+ { /* circular constraint is pointless */
freearc(nfa, con);
return 1;
}
- if (to->flag) /* can't push forward beyond end */
+ if (to->flag) /* can't push forward beyond end */
return 0;
- if (to->nouts == 0) { /* dead end */
+ if (to->nouts == 0)
+ { /* dead end */
freearc(nfa, con);
return 1;
}
/* first, clone to state if necessary to avoid other inarcs */
- if (to->nins > 1) {
+ if (to->nins > 1)
+ {
s = newstate(nfa);
if (NISERR())
return 0;
- copyouts(nfa, to, s); /* duplicate outarcs */
- cparc(nfa, con, from, s); /* move constraint */
+ copyouts(nfa, to, s); /* duplicate outarcs */
+ cparc(nfa, con, from, s); /* move constraint */
freearc(nfa, con);
to = s;
con = to->ins;
@@ -886,88 +933,91 @@ push(struct nfa *nfa,
assert(to->nins == 1);
/* propagate the constraint into the to state's outarcs */
- for (a = to->outs; a != NULL; a = nexta) {
+ for (a = to->outs; a != NULL; a = nexta)
+ {
nexta = a->outchain;
- switch (combine(con, a)) {
- case INCOMPATIBLE: /* destroy the arc */
- freearc(nfa, a);
- break;
- case SATISFIED: /* no action needed */
- break;
- case COMPATIBLE: /* swap the two arcs, more or less */
- s = newstate(nfa);
- if (NISERR())
- return 0;
- cparc(nfa, con, s, a->to); /* anticipate move */
- cparc(nfa, a, from, s);
- if (NISERR())
- return 0;
- freearc(nfa, a);
- break;
- default:
- assert(NOTREACHED);
- break;
+ switch (combine(con, a))
+ {
+ case INCOMPATIBLE: /* destroy the arc */
+ freearc(nfa, a);
+ break;
+ case SATISFIED: /* no action needed */
+ break;
+ case COMPATIBLE: /* swap the two arcs, more or less */
+ s = newstate(nfa);
+ if (NISERR())
+ return 0;
+ cparc(nfa, con, s, a->to); /* anticipate move */
+ cparc(nfa, a, from, s);
+ if (NISERR())
+ return 0;
+ freearc(nfa, a);
+ break;
+ default:
+ assert(NOTREACHED);
+ break;
}
}
/* remaining outarcs, if any, incorporate the constraint */
moveouts(nfa, to, from);
- dropstate(nfa, to); /* will free the constraint */
+ dropstate(nfa, to); /* will free the constraint */
return 1;
}
/*
* combine - constraint lands on an arc, what happens?
*
- * #def INCOMPATIBLE 1 // destroys arc
- * #def SATISFIED 2 // constraint satisfied
- * #def COMPATIBLE 3 // compatible but not satisfied yet
+ * #def INCOMPATIBLE 1 // destroys arc
+ * #def SATISFIED 2 // constraint satisfied
+ * #def COMPATIBLE 3 // compatible but not satisfied yet
*/
static int
-combine(struct arc *con,
- struct arc *a)
+combine(struct arc * con,
+ struct arc * a)
{
-# define CA(ct,at) (((ct)<<CHAR_BIT) | (at))
-
- switch (CA(con->type, a->type)) {
- case CA('^', PLAIN): /* newlines are handled separately */
- case CA('$', PLAIN):
- return INCOMPATIBLE;
- break;
- case CA(AHEAD, PLAIN): /* color constraints meet colors */
- case CA(BEHIND, PLAIN):
- if (con->co == a->co)
- return SATISFIED;
- return INCOMPATIBLE;
- break;
- case CA('^', '^'): /* collision, similar constraints */
- case CA('$', '$'):
- case CA(AHEAD, AHEAD):
- case CA(BEHIND, BEHIND):
- if (con->co == a->co) /* true duplication */
- return SATISFIED;
- return INCOMPATIBLE;
- break;
- case CA('^', BEHIND): /* collision, dissimilar constraints */
- case CA(BEHIND, '^'):
- case CA('$', AHEAD):
- case CA(AHEAD, '$'):
- return INCOMPATIBLE;
- break;
- case CA('^', '$'): /* constraints passing each other */
- case CA('^', AHEAD):
- case CA(BEHIND, '$'):
- case CA(BEHIND, AHEAD):
- case CA('$', '^'):
- case CA('$', BEHIND):
- case CA(AHEAD, '^'):
- case CA(AHEAD, BEHIND):
- case CA('^', LACON):
- case CA(BEHIND, LACON):
- case CA('$', LACON):
- case CA(AHEAD, LACON):
- return COMPATIBLE;
- break;
+#define CA(ct,at) (((ct)<<CHAR_BIT) | (at))
+
+ switch (CA(con->type, a->type))
+ {
+ case CA('^', PLAIN): /* newlines are handled separately */
+ case CA('$', PLAIN):
+ return INCOMPATIBLE;
+ break;
+ case CA(AHEAD, PLAIN): /* color constraints meet colors */
+ case CA(BEHIND, PLAIN):
+ if (con->co == a->co)
+ return SATISFIED;
+ return INCOMPATIBLE;
+ break;
+ case CA('^', '^'): /* collision, similar constraints */
+ case CA('$', '$'):
+ case CA(AHEAD, AHEAD):
+ case CA(BEHIND, BEHIND):
+ if (con->co == a->co) /* true duplication */
+ return SATISFIED;
+ return INCOMPATIBLE;
+ break;
+ case CA('^', BEHIND): /* collision, dissimilar constraints */
+ case CA(BEHIND, '^'):
+ case CA('$', AHEAD):
+ case CA(AHEAD, '$'):
+ return INCOMPATIBLE;
+ break;
+ case CA('^', '$'): /* constraints passing each other */
+ case CA('^', AHEAD):
+ case CA(BEHIND, '$'):
+ case CA(BEHIND, AHEAD):
+ case CA('$', '^'):
+ case CA('$', BEHIND):
+ case CA(AHEAD, '^'):
+ case CA(AHEAD, BEHIND):
+ case CA('^', LACON):
+ case CA(BEHIND, LACON):
+ case CA('$', LACON):
+ case CA(AHEAD, LACON):
+ return COMPATIBLE;
+ break;
}
assert(NOTREACHED);
return INCOMPATIBLE; /* for benefit of blind compilers */
@@ -977,21 +1027,24 @@ combine(struct arc *con,
* fixempties - get rid of EMPTY arcs
*/
static void
-fixempties(struct nfa *nfa,
- FILE *f) /* for debug output; NULL none */
+fixempties(struct nfa * nfa,
+ FILE *f) /* for debug output; NULL none */
{
struct state *s;
struct state *nexts;
struct arc *a;
struct arc *nexta;
- int progress;
+ int progress;
/* find and eliminate empties until there are no more */
- do {
+ do
+ {
progress = 0;
- for (s = nfa->states; s != NULL && !NISERR(); s = nexts) {
+ for (s = nfa->states; s != NULL && !NISERR(); s = nexts)
+ {
nexts = s->next;
- for (a = s->outs; a != NULL && !NISERR(); a = nexta) {
+ for (a = s->outs; a != NULL && !NISERR(); a = nexta)
+ {
nexta = a->outchain;
if (a->type == EMPTY && unempty(nfa, a))
progress = 1;
@@ -1009,46 +1062,55 @@ fixempties(struct nfa *nfa,
* Actually, as it stands this function always succeeds, but the return
* value is kept with an eye on possible future changes.
*/
-static int /* 0 couldn't, 1 could */
-unempty(struct nfa *nfa,
- struct arc *a)
+static int /* 0 couldn't, 1 could */
+unempty(struct nfa * nfa,
+ struct arc * a)
{
struct state *from = a->from;
struct state *to = a->to;
- int usefrom; /* work on from, as opposed to to? */
+ int usefrom; /* work on from, as opposed to to? */
assert(a->type == EMPTY);
assert(from != nfa->pre && to != nfa->post);
- if (from == to) { /* vacuous loop */
+ if (from == to)
+ { /* vacuous loop */
freearc(nfa, a);
return 1;
}
/* decide which end to work on */
- usefrom = 1; /* default: attack from */
+ usefrom = 1; /* default: attack from */
if (from->nouts > to->nins)
usefrom = 0;
- else if (from->nouts == to->nins) {
+ else if (from->nouts == to->nins)
+ {
/* decide on secondary issue: move/copy fewest arcs */
if (from->nins > to->nouts)
usefrom = 0;
}
-
+
freearc(nfa, a);
- if (usefrom) {
- if (from->nouts == 0) {
+ if (usefrom)
+ {
+ if (from->nouts == 0)
+ {
/* was the state's only outarc */
moveins(nfa, from, to);
freestate(nfa, from);
- } else
+ }
+ else
copyins(nfa, from, to);
- } else {
- if (to->nins == 0) {
+ }
+ else
+ {
+ if (to->nins == 0)
+ {
/* was the state's only inarc */
moveouts(nfa, to, from);
freestate(nfa, to);
- } else
+ }
+ else
copyouts(nfa, to, from);
}
@@ -1059,17 +1121,18 @@ unempty(struct nfa *nfa,
* cleanup - clean up NFA after optimizations
*/
static void
-cleanup(struct nfa *nfa)
+cleanup(struct nfa * nfa)
{
struct state *s;
struct state *nexts;
- int n;
+ int n;
/* clear out unreachable or dead-end states */
/* use pre to mark reachable, then post to mark can-reach-post */
- markreachable(nfa, nfa->pre, (struct state *)NULL, nfa->pre);
+ markreachable(nfa, nfa->pre, (struct state *) NULL, nfa->pre);
markcanreach(nfa, nfa->post, nfa->pre, nfa->post);
- for (s = nfa->states; s != NULL; s = nexts) {
+ for (s = nfa->states; s != NULL; s = nexts)
+ {
nexts = s->next;
if (s->tmp != nfa->post && !s->flag)
dropstate(nfa, s);
@@ -1090,10 +1153,11 @@ cleanup(struct nfa *nfa)
* markreachable - recursive marking of reachable states
*/
static void
-markreachable(struct nfa *nfa,
- struct state *s,
- struct state *okay, /* consider only states with this mark */
- struct state *mark) /* the value to mark with */
+markreachable(struct nfa * nfa,
+ struct state * s,
+ struct state * okay, /* consider only states with this
+ * mark */
+ struct state * mark) /* the value to mark with */
{
struct arc *a;
@@ -1109,10 +1173,11 @@ markreachable(struct nfa *nfa,
* markcanreach - recursive marking of states which can reach here
*/
static void
-markcanreach(struct nfa *nfa,
- struct state *s,
- struct state *okay, /* consider only states with this mark */
- struct state *mark) /* the value to mark with */
+markcanreach(struct nfa * nfa,
+ struct state * s,
+ struct state * okay, /* consider only states with this
+ * mark */
+ struct state * mark) /* the value to mark with */
{
struct arc *a;
@@ -1127,8 +1192,8 @@ markcanreach(struct nfa *nfa,
/*
* analyze - ascertain potentially-useful facts about an optimized NFA
*/
-static long /* re_info bits to be ORed in */
-analyze(struct nfa *nfa)
+static long /* re_info bits to be ORed in */
+analyze(struct nfa * nfa)
{
struct arc *a;
struct arc *aa;
@@ -1146,29 +1211,31 @@ analyze(struct nfa *nfa)
* compact - compact an NFA
*/
static void
-compact(struct nfa *nfa,
- struct cnfa *cnfa)
+compact(struct nfa * nfa,
+ struct cnfa * cnfa)
{
struct state *s;
struct arc *a;
- size_t nstates;
- size_t narcs;
+ size_t nstates;
+ size_t narcs;
struct carc *ca;
struct carc *first;
- assert (!NISERR());
+ assert(!NISERR());
nstates = 0;
narcs = 0;
- for (s = nfa->states; s != NULL; s = s->next) {
+ for (s = nfa->states; s != NULL; s = s->next)
+ {
nstates++;
narcs += 1 + s->nouts + 1;
/* 1 as a fake for flags, nouts for arcs, 1 as endmarker */
}
- cnfa->states = (struct carc **)MALLOC(nstates * sizeof(struct carc *));
- cnfa->arcs = (struct carc *)MALLOC(narcs * sizeof(struct carc));
- if (cnfa->states == NULL || cnfa->arcs == NULL) {
+ cnfa->states = (struct carc **) MALLOC(nstates * sizeof(struct carc *));
+ cnfa->arcs = (struct carc *) MALLOC(narcs * sizeof(struct carc));
+ if (cnfa->states == NULL || cnfa->arcs == NULL)
+ {
if (cnfa->states != NULL)
FREE(cnfa->states);
if (cnfa->arcs != NULL)
@@ -1187,31 +1254,33 @@ compact(struct nfa *nfa,
cnfa->flags = 0;
ca = cnfa->arcs;
- for (s = nfa->states; s != NULL; s = s->next) {
- assert((size_t)s->no < nstates);
+ for (s = nfa->states; s != NULL; s = s->next)
+ {
+ assert((size_t) s->no < nstates);
cnfa->states[s->no] = ca;
- ca->co = 0; /* clear and skip flags "arc" */
+ ca->co = 0; /* clear and skip flags "arc" */
ca++;
first = ca;
for (a = s->outs; a != NULL; a = a->outchain)
- switch (a->type) {
- case PLAIN:
- ca->co = a->co;
- ca->to = a->to->no;
- ca++;
- break;
- case LACON:
- assert(s->no != cnfa->pre);
- ca->co = (color)(cnfa->ncolors + a->co);
- ca->to = a->to->no;
- ca++;
- cnfa->flags |= HASLACONS;
- break;
- default:
- assert(NOTREACHED);
- break;
+ switch (a->type)
+ {
+ case PLAIN:
+ ca->co = a->co;
+ ca->to = a->to->no;
+ ca++;
+ break;
+ case LACON:
+ assert(s->no != cnfa->pre);
+ ca->co = (color) (cnfa->ncolors + a->co);
+ ca->to = a->to->no;
+ ca++;
+ cnfa->flags |= HASLACONS;
+ break;
+ default:
+ assert(NOTREACHED);
+ break;
}
- carcsort(first, ca-1);
+ carcsort(first, ca - 1);
ca->co = COLORLESS;
ca->to = 0;
ca++;
@@ -1232,8 +1301,8 @@ compact(struct nfa *nfa,
* you're in real trouble anyway.
*/
static void
-carcsort(struct carc *first,
- struct carc *last)
+carcsort(struct carc * first,
+ struct carc * last)
{
struct carc *p;
struct carc *q;
@@ -1245,7 +1314,8 @@ carcsort(struct carc *first,
for (p = first; p <= last; p++)
for (q = p; q <= last; q++)
if (p->co > q->co ||
- (p->co == q->co && p->to > q->to)) {
+ (p->co == q->co && p->to > q->to))
+ {
assert(p != q);
tmp = *p;
*p = *q;
@@ -1257,9 +1327,9 @@ carcsort(struct carc *first,
* freecnfa - free a compacted NFA
*/
static void
-freecnfa(struct cnfa *cnfa)
+freecnfa(struct cnfa * cnfa)
{
- assert(cnfa->nstates != 0); /* not empty already */
+ assert(cnfa->nstates != 0); /* not empty already */
cnfa->nstates = 0;
FREE(cnfa->states);
FREE(cnfa->arcs);
@@ -1269,7 +1339,7 @@ freecnfa(struct cnfa *cnfa)
* dumpnfa - dump an NFA in human-readable form
*/
static void
-dumpnfa(struct nfa *nfa,
+dumpnfa(struct nfa * nfa,
FILE *f)
{
#ifdef REG_DEBUG
@@ -1277,13 +1347,13 @@ dumpnfa(struct nfa *nfa,
fprintf(f, "pre %d, post %d", nfa->pre->no, nfa->post->no);
if (nfa->bos[0] != COLORLESS)
- fprintf(f, ", bos [%ld]", (long)nfa->bos[0]);
+ fprintf(f, ", bos [%ld]", (long) nfa->bos[0]);
if (nfa->bos[1] != COLORLESS)
- fprintf(f, ", bol [%ld]", (long)nfa->bos[1]);
+ fprintf(f, ", bol [%ld]", (long) nfa->bos[1]);
if (nfa->eos[0] != COLORLESS)
- fprintf(f, ", eos [%ld]", (long)nfa->eos[0]);
+ fprintf(f, ", eos [%ld]", (long) nfa->eos[0]);
if (nfa->eos[1] != COLORLESS)
- fprintf(f, ", eol [%ld]", (long)nfa->eos[1]);
+ fprintf(f, ", eol [%ld]", (long) nfa->eos[1]);
fprintf(f, "\n");
for (s = nfa->states; s != NULL; s = s->next)
dumpstate(s, f);
@@ -1293,19 +1363,19 @@ dumpnfa(struct nfa *nfa,
#endif
}
-#ifdef REG_DEBUG /* subordinates of dumpnfa */
+#ifdef REG_DEBUG /* subordinates of dumpnfa */
/*
* dumpstate - dump an NFA state in human-readable form
*/
static void
-dumpstate(struct state *s,
+dumpstate(struct state * s,
FILE *f)
{
struct arc *a;
fprintf(f, "%d%s%c", s->no, (s->tmp != NULL) ? "T" : "",
- (s->flag) ? s->flag : '.');
+ (s->flag) ? s->flag : '.');
if (s->prev != NULL && s->prev->next != s)
fprintf(f, "\tstate chain bad\n");
if (s->nouts == 0)
@@ -1313,7 +1383,8 @@ dumpstate(struct state *s,
else
dumparcs(s, f);
fflush(f);
- for (a = s->ins; a != NULL; a = a->inchain) {
+ for (a = s->ins; a != NULL; a = a->inchain)
+ {
if (a->to != s)
fprintf(f, "\tlink from %d to %d on %d's in-chain\n",
a->from->no, a->to->no, s->no);
@@ -1324,10 +1395,10 @@ dumpstate(struct state *s,
* dumparcs - dump out-arcs in human-readable form
*/
static void
-dumparcs(struct state *s,
+dumparcs(struct state * s,
FILE *f)
{
- int pos;
+ int pos;
assert(s->nouts > 0);
/* printing arcs in reverse order is usually clearer */
@@ -1339,19 +1410,21 @@ dumparcs(struct state *s,
/*
* dumprarcs - dump remaining outarcs, recursively, in reverse order
*/
-static int /* resulting print position */
-dumprarcs(struct arc *a,
- struct state *s,
+static int /* resulting print position */
+dumprarcs(struct arc * a,
+ struct state * s,
FILE *f,
- int pos) /* initial print position */
+ int pos) /* initial print position */
{
if (a->outchain != NULL)
pos = dumprarcs(a->outchain, s, f, pos);
dumparc(a, s, f);
- if (pos == 5) {
+ if (pos == 5)
+ {
fprintf(f, "\n");
pos = 1;
- } else
+ }
+ else
pos++;
return pos;
}
@@ -1360,83 +1433,85 @@ dumprarcs(struct arc *a,
* dumparc - dump one outarc in readable form, including prefixing tab
*/
static void
-dumparc(struct arc *a,
- struct state *s,
+dumparc(struct arc * a,
+ struct state * s,
FILE *f)
{
struct arc *aa;
struct arcbatch *ab;
fprintf(f, "\t");
- switch (a->type) {
- case PLAIN:
- fprintf(f, "[%ld]", (long)a->co);
- break;
- case AHEAD:
- fprintf(f, ">%ld>", (long)a->co);
- break;
- case BEHIND:
- fprintf(f, "<%ld<", (long)a->co);
- break;
- case LACON:
- fprintf(f, ":%ld:", (long)a->co);
- break;
- case '^':
- case '$':
- fprintf(f, "%c%d", a->type, (int)a->co);
- break;
- case EMPTY:
- break;
- default:
- fprintf(f, "0x%x/0%lo", a->type, (long)a->co);
- break;
+ switch (a->type)
+ {
+ case PLAIN:
+ fprintf(f, "[%ld]", (long) a->co);
+ break;
+ case AHEAD:
+ fprintf(f, ">%ld>", (long) a->co);
+ break;
+ case BEHIND:
+ fprintf(f, "<%ld<", (long) a->co);
+ break;
+ case LACON:
+ fprintf(f, ":%ld:", (long) a->co);
+ break;
+ case '^':
+ case '$':
+ fprintf(f, "%c%d", a->type, (int) a->co);
+ break;
+ case EMPTY:
+ break;
+ default:
+ fprintf(f, "0x%x/0%lo", a->type, (long) a->co);
+ break;
}
if (a->from != s)
fprintf(f, "?%d?", a->from->no);
- for (ab = &a->from->oas; ab != NULL; ab = ab->next) {
+ for (ab = &a->from->oas; ab != NULL; ab = ab->next)
+ {
for (aa = &ab->a[0]; aa < &ab->a[ABSIZE]; aa++)
if (aa == a)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
if (aa < &ab->a[ABSIZE]) /* propagate break */
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
if (ab == NULL)
- fprintf(f, "?!?"); /* not in allocated space */
+ fprintf(f, "?!?"); /* not in allocated space */
fprintf(f, "->");
- if (a->to == NULL) {
+ if (a->to == NULL)
+ {
fprintf(f, "NULL");
return;
}
fprintf(f, "%d", a->to->no);
for (aa = a->to->ins; aa != NULL; aa = aa->inchain)
if (aa == a)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
if (aa == NULL)
- fprintf(f, "?!?"); /* missing from in-chain */
+ fprintf(f, "?!?"); /* missing from in-chain */
}
-
-#endif /* REG_DEBUG */
+#endif /* REG_DEBUG */
/*
* dumpcnfa - dump a compacted NFA in human-readable form
*/
#ifdef REG_DEBUG
static void
-dumpcnfa(struct cnfa *cnfa,
+dumpcnfa(struct cnfa * cnfa,
FILE *f)
{
- int st;
+ int st;
fprintf(f, "pre %d, post %d", cnfa->pre, cnfa->post);
if (cnfa->bos[0] != COLORLESS)
- fprintf(f, ", bos [%ld]", (long)cnfa->bos[0]);
+ fprintf(f, ", bos [%ld]", (long) cnfa->bos[0]);
if (cnfa->bos[1] != COLORLESS)
- fprintf(f, ", bol [%ld]", (long)cnfa->bos[1]);
+ fprintf(f, ", bol [%ld]", (long) cnfa->bos[1]);
if (cnfa->eos[0] != COLORLESS)
- fprintf(f, ", eos [%ld]", (long)cnfa->eos[0]);
+ fprintf(f, ", eos [%ld]", (long) cnfa->eos[0]);
if (cnfa->eos[1] != COLORLESS)
- fprintf(f, ", eol [%ld]", (long)cnfa->eos[1]);
- if (cnfa->flags&HASLACONS)
+ fprintf(f, ", eol [%ld]", (long) cnfa->eos[1]);
+ if (cnfa->flags & HASLACONS)
fprintf(f, ", haslacons");
fprintf(f, "\n");
for (st = 0; st < cnfa->nstates; st++)
@@ -1445,32 +1520,35 @@ dumpcnfa(struct cnfa *cnfa,
}
#endif
-#ifdef REG_DEBUG /* subordinates of dumpcnfa */
+#ifdef REG_DEBUG /* subordinates of dumpcnfa */
/*
* dumpcstate - dump a compacted-NFA state in human-readable form
*/
static void
dumpcstate(int st,
- struct carc *ca,
- struct cnfa *cnfa,
+ struct carc * ca,
+ struct cnfa * cnfa,
FILE *f)
{
- int i;
- int pos;
+ int i;
+ int pos;
fprintf(f, "%d%s", st, (ca[0].co) ? ":" : ".");
pos = 1;
- for (i = 1; ca[i].co != COLORLESS; i++) {
+ for (i = 1; ca[i].co != COLORLESS; i++)
+ {
if (ca[i].co < cnfa->ncolors)
- fprintf(f, "\t[%ld]->%d", (long)ca[i].co, ca[i].to);
+ fprintf(f, "\t[%ld]->%d", (long) ca[i].co, ca[i].to);
else
- fprintf(f, "\t:%ld:->%d", (long)ca[i].co-cnfa->ncolors,
- ca[i].to);
- if (pos == 5) {
+ fprintf(f, "\t:%ld:->%d", (long) ca[i].co - cnfa->ncolors,
+ ca[i].to);
+ if (pos == 5)
+ {
fprintf(f, "\n");
pos = 1;
- } else
+ }
+ else
pos++;
}
if (i == 1 || pos != 1)
@@ -1478,4 +1556,4 @@ dumpcstate(int st,
fflush(f);
}
-#endif /* REG_DEBUG */
+#endif /* REG_DEBUG */
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index 099a1872a8d..58af64539d8 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -2,21 +2,21 @@
* re_*comp and friends - compile REs
* This file #includes several others (see the bottom).
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -28,7 +28,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regcomp.c,v 1.36 2003/02/05 17:41:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regcomp.c,v 1.37 2003/08/04 00:43:21 momjian Exp $
*
*/
@@ -38,243 +38,252 @@
* forward declarations, up here so forward datatypes etc. are defined early
*/
/* === regcomp.c === */
-static void moresubs (struct vars *, int);
-static int freev (struct vars *, int);
-static void makesearch (struct vars *, struct nfa *);
-static struct subre *parse (struct vars *, int, int, struct state *, struct state *);
-static struct subre *parsebranch (struct vars *, int, int, struct state *, struct state *, int);
-static void parseqatom (struct vars *, int, int, struct state *, struct state *, struct subre *);
-static void nonword (struct vars *, int, struct state *, struct state *);
-static void word (struct vars *, int, struct state *, struct state *);
-static int scannum (struct vars *);
-static void repeat (struct vars *, struct state *, struct state *, int, int);
-static void bracket (struct vars *, struct state *, struct state *);
-static void cbracket (struct vars *, struct state *, struct state *);
-static void brackpart (struct vars *, struct state *, struct state *);
-static chr *scanplain (struct vars *);
-static void leaders (struct vars *, struct cvec *);
-static void onechr (struct vars *, chr, struct state *, struct state *);
-static void dovec (struct vars *, struct cvec *, struct state *, struct state *);
-static celt nextleader (struct vars *, chr, chr);
-static void wordchrs (struct vars *);
-static struct subre *subre (struct vars *, int, int, struct state *, struct state *);
-static void freesubre (struct vars *, struct subre *);
-static void freesrnode (struct vars *, struct subre *);
-static void optst (struct vars *, struct subre *);
-static int numst (struct subre *, int);
-static void markst (struct subre *);
-static void cleanst (struct vars *);
-static long nfatree (struct vars *, struct subre *, FILE *);
-static long nfanode (struct vars *, struct subre *, FILE *);
-static int newlacon (struct vars *, struct state *, struct state *, int);
-static void freelacons (struct subre *, int);
-static void rfree (regex_t *);
+static void moresubs(struct vars *, int);
+static int freev(struct vars *, int);
+static void makesearch(struct vars *, struct nfa *);
+static struct subre *parse(struct vars *, int, int, struct state *, struct state *);
+static struct subre *parsebranch(struct vars *, int, int, struct state *, struct state *, int);
+static void parseqatom(struct vars *, int, int, struct state *, struct state *, struct subre *);
+static void nonword(struct vars *, int, struct state *, struct state *);
+static void word(struct vars *, int, struct state *, struct state *);
+static int scannum(struct vars *);
+static void repeat(struct vars *, struct state *, struct state *, int, int);
+static void bracket(struct vars *, struct state *, struct state *);
+static void cbracket(struct vars *, struct state *, struct state *);
+static void brackpart(struct vars *, struct state *, struct state *);
+static chr *scanplain(struct vars *);
+static void leaders(struct vars *, struct cvec *);
+static void onechr(struct vars *, chr, struct state *, struct state *);
+static void dovec(struct vars *, struct cvec *, struct state *, struct state *);
+static celt nextleader(struct vars *, chr, chr);
+static void wordchrs(struct vars *);
+static struct subre *subre(struct vars *, int, int, struct state *, struct state *);
+static void freesubre(struct vars *, struct subre *);
+static void freesrnode(struct vars *, struct subre *);
+static void optst(struct vars *, struct subre *);
+static int numst(struct subre *, int);
+static void markst(struct subre *);
+static void cleanst(struct vars *);
+static long nfatree(struct vars *, struct subre *, FILE *);
+static long nfanode(struct vars *, struct subre *, FILE *);
+static int newlacon(struct vars *, struct state *, struct state *, int);
+static void freelacons(struct subre *, int);
+static void rfree(regex_t *);
+
#ifdef REG_DEBUG
-static void dump (regex_t *, FILE *);
-static void dumpst (struct subre *, FILE *, int);
-static void stdump (struct subre *, FILE *, int);
-static char *stid (struct subre *, char *, size_t);
+static void dump(regex_t *, FILE *);
+static void dumpst(struct subre *, FILE *, int);
+static void stdump(struct subre *, FILE *, int);
+static char *stid(struct subre *, char *, size_t);
#endif
/* === regc_lex.c === */
-static void lexstart (struct vars *);
-static void prefixes (struct vars *);
-static void lexnest (struct vars *, chr *, chr *);
-static void lexword (struct vars *);
-static int next (struct vars *);
-static int lexescape (struct vars *);
-static chr lexdigits (struct vars *, int, int, int);
-static int brenext (struct vars *, chr);
-static void skip (struct vars *);
-static chr newline (void);
-static chr chrnamed (struct vars *, chr *, chr *, chr);
+static void lexstart(struct vars *);
+static void prefixes(struct vars *);
+static void lexnest(struct vars *, chr *, chr *);
+static void lexword(struct vars *);
+static int next(struct vars *);
+static int lexescape(struct vars *);
+static chr lexdigits(struct vars *, int, int, int);
+static int brenext(struct vars *, chr);
+static void skip(struct vars *);
+static chr newline(void);
+static chr chrnamed(struct vars *, chr *, chr *, chr);
+
/* === regc_color.c === */
-static void initcm (struct vars *, struct colormap *);
-static void freecm (struct colormap *);
-static void cmtreefree (struct colormap *, union tree *, int);
-static color setcolor (struct colormap *, chr, pcolor);
-static color maxcolor (struct colormap *);
-static color newcolor (struct colormap *);
-static void freecolor (struct colormap *, pcolor);
-static color pseudocolor (struct colormap *);
-static color subcolor (struct colormap *, chr c);
-static color newsub (struct colormap *, pcolor);
-static void subrange (struct vars *, chr, chr, struct state *, struct state *);
-static void subblock (struct vars *, chr, struct state *, struct state *);
-static void okcolors (struct nfa *, struct colormap *);
-static void colorchain (struct colormap *, struct arc *);
-static void uncolorchain (struct colormap *, struct arc *);
-static int singleton (struct colormap *, chr c);
-static void rainbow (struct nfa *, struct colormap *, int, pcolor, struct state *, struct state *);
-static void colorcomplement (struct nfa *, struct colormap *, int, struct state *, struct state *, struct state *);
+static void initcm(struct vars *, struct colormap *);
+static void freecm(struct colormap *);
+static void cmtreefree(struct colormap *, union tree *, int);
+static color setcolor(struct colormap *, chr, pcolor);
+static color maxcolor(struct colormap *);
+static color newcolor(struct colormap *);
+static void freecolor(struct colormap *, pcolor);
+static color pseudocolor(struct colormap *);
+static color subcolor(struct colormap *, chr c);
+static color newsub(struct colormap *, pcolor);
+static void subrange(struct vars *, chr, chr, struct state *, struct state *);
+static void subblock(struct vars *, chr, struct state *, struct state *);
+static void okcolors(struct nfa *, struct colormap *);
+static void colorchain(struct colormap *, struct arc *);
+static void uncolorchain(struct colormap *, struct arc *);
+static int singleton(struct colormap *, chr c);
+static void rainbow(struct nfa *, struct colormap *, int, pcolor, struct state *, struct state *);
+static void colorcomplement(struct nfa *, struct colormap *, int, struct state *, struct state *, struct state *);
+
#ifdef REG_DEBUG
-static void dumpcolors (struct colormap *, FILE *);
-static void fillcheck (struct colormap *, union tree *, int, FILE *);
-static void dumpchr (chr, FILE *);
+static void dumpcolors(struct colormap *, FILE *);
+static void fillcheck(struct colormap *, union tree *, int, FILE *);
+static void dumpchr(chr, FILE *);
#endif
/* === regc_nfa.c === */
-static struct nfa *newnfa (struct vars *, struct colormap *, struct nfa *);
-static void freenfa (struct nfa *);
-static struct state *newstate (struct nfa *);
-static struct state *newfstate (struct nfa *, int flag);
-static void dropstate (struct nfa *, struct state *);
-static void freestate (struct nfa *, struct state *);
-static void destroystate (struct nfa *, struct state *);
-static void newarc (struct nfa *, int, pcolor, struct state *, struct state *);
-static struct arc *allocarc (struct nfa *, struct state *);
-static void freearc (struct nfa *, struct arc *);
-static struct arc *findarc (struct state *, int, pcolor);
-static void cparc (struct nfa *, struct arc *, struct state *, struct state *);
-static void moveins (struct nfa *, struct state *, struct state *);
-static void copyins (struct nfa *, struct state *, struct state *);
-static void moveouts (struct nfa *, struct state *, struct state *);
-static void copyouts (struct nfa *, struct state *, struct state *);
-static void cloneouts (struct nfa *, struct state *, struct state *, struct state *, int);
-static void delsub (struct nfa *, struct state *, struct state *);
-static void deltraverse (struct nfa *, struct state *, struct state *);
-static void dupnfa (struct nfa *, struct state *, struct state *, struct state *, struct state *);
-static void duptraverse (struct nfa *, struct state *, struct state *);
-static void cleartraverse (struct nfa *, struct state *);
-static void specialcolors (struct nfa *);
-static long optimize (struct nfa *, FILE *);
-static void pullback (struct nfa *, FILE *);
-static int pull (struct nfa *, struct arc *);
-static void pushfwd (struct nfa *, FILE *);
-static int push (struct nfa *, struct arc *);
-#define INCOMPATIBLE 1 /* destroys arc */
-#define SATISFIED 2 /* constraint satisfied */
-#define COMPATIBLE 3 /* compatible but not satisfied yet */
-static int combine (struct arc *, struct arc *);
-static void fixempties (struct nfa *, FILE *);
-static int unempty (struct nfa *, struct arc *);
-static void cleanup (struct nfa *);
-static void markreachable (struct nfa *, struct state *, struct state *, struct state *);
-static void markcanreach (struct nfa *, struct state *, struct state *, struct state *);
-static long analyze (struct nfa *);
-static void compact (struct nfa *, struct cnfa *);
-static void carcsort (struct carc *, struct carc *);
-static void freecnfa (struct cnfa *);
-static void dumpnfa (struct nfa *, FILE *);
+static struct nfa *newnfa(struct vars *, struct colormap *, struct nfa *);
+static void freenfa(struct nfa *);
+static struct state *newstate(struct nfa *);
+static struct state *newfstate(struct nfa *, int flag);
+static void dropstate(struct nfa *, struct state *);
+static void freestate(struct nfa *, struct state *);
+static void destroystate(struct nfa *, struct state *);
+static void newarc(struct nfa *, int, pcolor, struct state *, struct state *);
+static struct arc *allocarc(struct nfa *, struct state *);
+static void freearc(struct nfa *, struct arc *);
+static struct arc *findarc(struct state *, int, pcolor);
+static void cparc(struct nfa *, struct arc *, struct state *, struct state *);
+static void moveins(struct nfa *, struct state *, struct state *);
+static void copyins(struct nfa *, struct state *, struct state *);
+static void moveouts(struct nfa *, struct state *, struct state *);
+static void copyouts(struct nfa *, struct state *, struct state *);
+static void cloneouts(struct nfa *, struct state *, struct state *, struct state *, int);
+static void delsub(struct nfa *, struct state *, struct state *);
+static void deltraverse(struct nfa *, struct state *, struct state *);
+static void dupnfa(struct nfa *, struct state *, struct state *, struct state *, struct state *);
+static void duptraverse(struct nfa *, struct state *, struct state *);
+static void cleartraverse(struct nfa *, struct state *);
+static void specialcolors(struct nfa *);
+static long optimize(struct nfa *, FILE *);
+static void pullback(struct nfa *, FILE *);
+static int pull(struct nfa *, struct arc *);
+static void pushfwd(struct nfa *, FILE *);
+static int push(struct nfa *, struct arc *);
+
+#define INCOMPATIBLE 1 /* destroys arc */
+#define SATISFIED 2 /* constraint satisfied */
+#define COMPATIBLE 3 /* compatible but not satisfied yet */
+static int combine(struct arc *, struct arc *);
+static void fixempties(struct nfa *, FILE *);
+static int unempty(struct nfa *, struct arc *);
+static void cleanup(struct nfa *);
+static void markreachable(struct nfa *, struct state *, struct state *, struct state *);
+static void markcanreach(struct nfa *, struct state *, struct state *, struct state *);
+static long analyze(struct nfa *);
+static void compact(struct nfa *, struct cnfa *);
+static void carcsort(struct carc *, struct carc *);
+static void freecnfa(struct cnfa *);
+static void dumpnfa(struct nfa *, FILE *);
+
#ifdef REG_DEBUG
-static void dumpstate (struct state *, FILE *);
-static void dumparcs (struct state *, FILE *);
-static int dumprarcs (struct arc *, struct state *, FILE *, int);
-static void dumparc (struct arc *, struct state *, FILE *);
-static void dumpcnfa (struct cnfa *, FILE *);
-static void dumpcstate (int, struct carc *, struct cnfa *, FILE *);
+static void dumpstate(struct state *, FILE *);
+static void dumparcs(struct state *, FILE *);
+static int dumprarcs(struct arc *, struct state *, FILE *, int);
+static void dumparc(struct arc *, struct state *, FILE *);
+static void dumpcnfa(struct cnfa *, FILE *);
+static void dumpcstate(int, struct carc *, struct cnfa *, FILE *);
#endif
/* === regc_cvec.c === */
-static struct cvec *newcvec (int, int, int);
-static struct cvec *clearcvec (struct cvec *);
-static void addchr (struct cvec *, chr);
-static void addrange (struct cvec *, chr, chr);
-static void addmcce (struct cvec *, chr *, chr *);
-static int haschr (struct cvec *, chr);
-static struct cvec *getcvec (struct vars *, int, int, int);
-static void freecvec (struct cvec *);
+static struct cvec *newcvec(int, int, int);
+static struct cvec *clearcvec(struct cvec *);
+static void addchr(struct cvec *, chr);
+static void addrange(struct cvec *, chr, chr);
+static void addmcce(struct cvec *, chr *, chr *);
+static int haschr(struct cvec *, chr);
+static struct cvec *getcvec(struct vars *, int, int, int);
+static void freecvec(struct cvec *);
+
/* === regc_locale.c === */
-static int pg_isdigit(pg_wchar c);
-static int pg_isalpha(pg_wchar c);
-static int pg_isalnum(pg_wchar c);
-static int pg_isupper(pg_wchar c);
-static int pg_islower(pg_wchar c);
-static int pg_isgraph(pg_wchar c);
-static int pg_ispunct(pg_wchar c);
-static int pg_isspace(pg_wchar c);
+static int pg_isdigit(pg_wchar c);
+static int pg_isalpha(pg_wchar c);
+static int pg_isalnum(pg_wchar c);
+static int pg_isupper(pg_wchar c);
+static int pg_islower(pg_wchar c);
+static int pg_isgraph(pg_wchar c);
+static int pg_ispunct(pg_wchar c);
+static int pg_isspace(pg_wchar c);
static pg_wchar pg_toupper(pg_wchar c);
static pg_wchar pg_tolower(pg_wchar c);
-static int nmcces (struct vars *);
-static int nleaders (struct vars *);
-static struct cvec *allmcces (struct vars *, struct cvec *);
-static celt element (struct vars *, chr *, chr *);
-static struct cvec *range (struct vars *, celt, celt, int);
-static int before (celt, celt);
-static struct cvec *eclass (struct vars *, celt, int);
-static struct cvec *cclass (struct vars *, chr *, chr *, int);
-static struct cvec *allcases (struct vars *, chr);
-static int cmp (const chr *, const chr *, size_t);
-static int casecmp (const chr *, const chr *, size_t);
+static int nmcces(struct vars *);
+static int nleaders(struct vars *);
+static struct cvec *allmcces(struct vars *, struct cvec *);
+static celt element(struct vars *, chr *, chr *);
+static struct cvec *range(struct vars *, celt, celt, int);
+static int before(celt, celt);
+static struct cvec *eclass(struct vars *, celt, int);
+static struct cvec *cclass(struct vars *, chr *, chr *, int);
+static struct cvec *allcases(struct vars *, chr);
+static int cmp(const chr *, const chr *, size_t);
+static int casecmp(const chr *, const chr *, size_t);
/* internal variables, bundled for easy passing around */
-struct vars {
- regex_t *re;
- chr *now; /* scan pointer into string */
- chr *stop; /* end of string */
- chr *savenow; /* saved now and stop for "subroutine call" */
- chr *savestop;
- int err; /* error code (0 if none) */
- int cflags; /* copy of compile flags */
- int lasttype; /* type of previous token */
- int nexttype; /* type of next token */
- chr nextvalue; /* value (if any) of next token */
- int lexcon; /* lexical context type (see lex.c) */
- int nsubexp; /* subexpression count */
- struct subre **subs; /* subRE pointer vector */
- size_t nsubs; /* length of vector */
+struct vars
+{
+ regex_t *re;
+ chr *now; /* scan pointer into string */
+ chr *stop; /* end of string */
+ chr *savenow; /* saved now and stop for "subroutine
+ * call" */
+ chr *savestop;
+ int err; /* error code (0 if none) */
+ int cflags; /* copy of compile flags */
+ int lasttype; /* type of previous token */
+ int nexttype; /* type of next token */
+ chr nextvalue; /* value (if any) of next token */
+ int lexcon; /* lexical context type (see lex.c) */
+ int nsubexp; /* subexpression count */
+ struct subre **subs; /* subRE pointer vector */
+ size_t nsubs; /* length of vector */
struct subre *sub10[10]; /* initial vector, enough for most */
- struct nfa *nfa; /* the NFA */
- struct colormap *cm; /* character color map */
- color nlcolor; /* color of newline */
- struct state *wordchrs; /* state in nfa holding word-char outarcs */
- struct subre *tree; /* subexpression tree */
+ struct nfa *nfa; /* the NFA */
+ struct colormap *cm; /* character color map */
+ color nlcolor; /* color of newline */
+ struct state *wordchrs; /* state in nfa holding word-char outarcs */
+ struct subre *tree; /* subexpression tree */
struct subre *treechain; /* all tree nodes allocated */
struct subre *treefree; /* any free tree nodes */
- int ntree; /* number of tree nodes */
- struct cvec *cv; /* interface cvec */
- struct cvec *cv2; /* utility cvec */
- struct cvec *mcces; /* collating-element information */
-# define ISCELEADER(v,c) (v->mcces != NULL && haschr(v->mcces, (c)))
+ int ntree; /* number of tree nodes */
+ struct cvec *cv; /* interface cvec */
+ struct cvec *cv2; /* utility cvec */
+ struct cvec *mcces; /* collating-element information */
+#define ISCELEADER(v,c) (v->mcces != NULL && haschr(v->mcces, (c)))
struct state *mccepbegin; /* in nfa, start of MCCE prototypes */
- struct state *mccepend; /* in nfa, end of MCCE prototypes */
- struct subre *lacons; /* lookahead-constraint vector */
- int nlacons; /* size of lacons */
+ struct state *mccepend; /* in nfa, end of MCCE prototypes */
+ struct subre *lacons; /* lookahead-constraint vector */
+ int nlacons; /* size of lacons */
};
/* parsing macros; most know that `v' is the struct vars pointer */
-#define NEXT() (next(v)) /* advance by one token */
-#define SEE(t) (v->nexttype == (t)) /* is next token this? */
-#define EAT(t) (SEE(t) && next(v)) /* if next is this, swallow it */
-#define VISERR(vv) ((vv)->err != 0) /* have we seen an error yet? */
-#define ISERR() VISERR(v)
-#define VERR(vv,e) ((vv)->nexttype = EOS, ((vv)->err) ? (vv)->err :\
+#define NEXT() (next(v)) /* advance by one token */
+#define SEE(t) (v->nexttype == (t)) /* is next token this? */
+#define EAT(t) (SEE(t) && next(v)) /* if next is this, swallow it */
+#define VISERR(vv) ((vv)->err != 0) /* have we seen an error yet? */
+#define ISERR() VISERR(v)
+#define VERR(vv,e) ((vv)->nexttype = EOS, ((vv)->err) ? (vv)->err :\
((vv)->err = (e)))
-#define ERR(e) VERR(v, e) /* record an error */
-#define NOERR() {if (ISERR()) return;} /* if error seen, return */
-#define NOERRN() {if (ISERR()) return NULL;} /* NOERR with retval */
-#define NOERRZ() {if (ISERR()) return 0;} /* NOERR with retval */
-#define INSIST(c, e) ((c) ? 0 : ERR(e)) /* if condition false, error */
-#define NOTE(b) (v->re->re_info |= (b)) /* note visible condition */
-#define EMPTYARC(x, y) newarc(v->nfa, EMPTY, 0, x, y)
+#define ERR(e) VERR(v, e) /* record an error */
+#define NOERR() {if (ISERR()) return;} /* if error seen, return */
+#define NOERRN() {if (ISERR()) return NULL;} /* NOERR with retval */
+#define NOERRZ() {if (ISERR()) return 0;} /* NOERR with retval */
+#define INSIST(c, e) ((c) ? 0 : ERR(e)) /* if condition false,
+ * error */
+#define NOTE(b) (v->re->re_info |= (b)) /* note visible condition */
+#define EMPTYARC(x, y) newarc(v->nfa, EMPTY, 0, x, y)
/* token type codes, some also used as NFA arc types */
-#define EMPTY 'n' /* no token present */
-#define EOS 'e' /* end of string */
-#define PLAIN 'p' /* ordinary character */
-#define DIGIT 'd' /* digit (in bound) */
-#define BACKREF 'b' /* back reference */
-#define COLLEL 'I' /* start of [. */
-#define ECLASS 'E' /* start of [= */
-#define CCLASS 'C' /* start of [: */
-#define END 'X' /* end of [. [= [: */
-#define RANGE 'R' /* - within [] which might be range delim. */
-#define LACON 'L' /* lookahead constraint subRE */
-#define AHEAD 'a' /* color-lookahead arc */
-#define BEHIND 'r' /* color-lookbehind arc */
-#define WBDRY 'w' /* word boundary constraint */
-#define NWBDRY 'W' /* non-word-boundary constraint */
-#define SBEGIN 'A' /* beginning of string (even if not BOL) */
-#define SEND 'Z' /* end of string (even if not EOL) */
-#define PREFER 'P' /* length preference */
+#define EMPTY 'n' /* no token present */
+#define EOS 'e' /* end of string */
+#define PLAIN 'p' /* ordinary character */
+#define DIGIT 'd' /* digit (in bound) */
+#define BACKREF 'b' /* back reference */
+#define COLLEL 'I' /* start of [. */
+#define ECLASS 'E' /* start of [= */
+#define CCLASS 'C' /* start of [: */
+#define END 'X' /* end of [. [= [: */
+#define RANGE 'R' /* - within [] which might be range delim. */
+#define LACON 'L' /* lookahead constraint subRE */
+#define AHEAD 'a' /* color-lookahead arc */
+#define BEHIND 'r' /* color-lookbehind arc */
+#define WBDRY 'w' /* word boundary constraint */
+#define NWBDRY 'W' /* non-word-boundary constraint */
+#define SBEGIN 'A' /* beginning of string (even if not BOL) */
+#define SEND 'Z' /* end of string (even if not EOL) */
+#define PREFER 'P' /* length preference */
/* is an arc colored, and hence on a color chain? */
-#define COLORED(a) ((a)->type == PLAIN || (a)->type == AHEAD || \
+#define COLORED(a) ((a)->type == PLAIN || (a)->type == AHEAD || \
(a)->type == BEHIND)
/* static function list */
static struct fns functions = {
- rfree, /* regfree insides */
+ rfree, /* regfree insides */
};
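
The ERR()/NOERR() family shown above is a latch-the-first-error pattern: VERR() records the first nonzero code in v->err and forces the next token to EOS, and every later step re-checks ISERR() through NOERR()/NOERRN()/NOERRZ() (and CNOERR() inside pg_regcomp) so compilation unwinds cheaply after one failure. A minimal sketch of the same idea with invented names (illustrative only, not from the patch):

    /* sketch: latch the first error, make later steps no-ops */
    struct ctx { int err; };                    /* stand-in for struct vars */

    #define CTX_ERR(c, e)   ((c)->err ? (c)->err : ((c)->err = (e)))
    #define CTX_NOERR(c)    do { if ((c)->err) return; } while (0)

    static void step_one(struct ctx *c) { (void) c; /* may call CTX_ERR() */ }
    static void step_two(struct ctx *c) { (void) c; }

    static void compile_sketch(struct ctx *c)
    {
        step_one(c);
        CTX_NOERR(c);           /* first failure short-circuits the rest */
        step_two(c);
        CTX_NOERR(c);
    }
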
@@ -284,36 +293,38 @@ static struct fns functions = {
*/
int
pg_regcomp(regex_t *re,
- const chr *string,
+ const chr * string,
size_t len,
int flags)
{
struct vars var;
struct vars *v = &var;
struct guts *g;
- int i;
- size_t j;
+ int i;
+ size_t j;
+
#ifdef REG_DEBUG
- FILE *debug = (flags&REG_PROGRESS) ? stdout : (FILE *)NULL;
+ FILE *debug = (flags & REG_PROGRESS) ? stdout : (FILE *) NULL;
+
#else
- FILE *debug = (FILE *) NULL;
+ FILE *debug = (FILE *) NULL;
#endif
-# define CNOERR() { if (ISERR()) return freev(v, v->err); }
+#define CNOERR() { if (ISERR()) return freev(v, v->err); }
/* sanity checks */
if (re == NULL || string == NULL)
return REG_INVARG;
- if ((flags&REG_QUOTE) &&
- (flags&(REG_ADVANCED|REG_EXPANDED|REG_NEWLINE)))
+ if ((flags & REG_QUOTE) &&
+ (flags & (REG_ADVANCED | REG_EXPANDED | REG_NEWLINE)))
return REG_INVARG;
- if (!(flags&REG_EXTENDED) && (flags&REG_ADVF))
+ if (!(flags & REG_EXTENDED) && (flags & REG_ADVF))
return REG_INVARG;
/* initial setup (after which freev() is callable) */
v->re = re;
- v->now = (chr *)string;
+ v->now = (chr *) string;
v->stop = v->now + len;
v->savenow = v->savestop = NULL;
v->err = 0;
@@ -336,7 +347,7 @@ pg_regcomp(regex_t *re,
v->lacons = NULL;
v->nlacons = 0;
re->re_magic = REMAGIC;
- re->re_info = 0; /* bits get set during parse */
+ re->re_info = 0; /* bits get set during parse */
re->re_csize = sizeof(chr);
re->re_guts = NULL;
re->re_fns = VS(&functions);
@@ -345,38 +356,40 @@ pg_regcomp(regex_t *re,
re->re_guts = VS(MALLOC(sizeof(struct guts)));
if (re->re_guts == NULL)
return freev(v, REG_ESPACE);
- g = (struct guts *)re->re_guts;
+ g = (struct guts *) re->re_guts;
g->tree = NULL;
initcm(v, &g->cmap);
v->cm = &g->cmap;
g->lacons = NULL;
g->nlacons = 0;
ZAPCNFA(g->search);
- v->nfa = newnfa(v, v->cm, (struct nfa *)NULL);
+ v->nfa = newnfa(v, v->cm, (struct nfa *) NULL);
CNOERR();
v->cv = newcvec(100, 20, 10);
if (v->cv == NULL)
return freev(v, REG_ESPACE);
i = nmcces(v);
- if (i > 0) {
+ if (i > 0)
+ {
v->mcces = newcvec(nleaders(v), 0, i);
CNOERR();
v->mcces = allmcces(v, v->mcces);
leaders(v, v->mcces);
- addmcce(v->mcces, (chr *)NULL, (chr *)NULL); /* dummy */
+ addmcce(v->mcces, (chr *) NULL, (chr *) NULL); /* dummy */
}
CNOERR();
/* parsing */
- lexstart(v); /* also handles prefixes */
- if ((v->cflags&REG_NLSTOP) || (v->cflags&REG_NLANCH)) {
+ lexstart(v); /* also handles prefixes */
+ if ((v->cflags & REG_NLSTOP) || (v->cflags & REG_NLANCH))
+ {
/* assign newline a unique color */
v->nlcolor = subcolor(v->cm, newline());
okcolors(v->nfa, v->cm);
}
CNOERR();
v->tree = parse(v, EOS, PLAIN, v->nfa->init, v->nfa->final);
- assert(SEE(EOS)); /* even if error; ISERR() => SEE(EOS) */
+ assert(SEE(EOS)); /* even if error; ISERR() => SEE(EOS) */
CNOERR();
assert(v->tree != NULL);
@@ -384,7 +397,8 @@ pg_regcomp(regex_t *re,
specialcolors(v->nfa);
CNOERR();
#ifdef REG_DEBUG
- if (debug != NULL) {
+ if (debug != NULL)
+ {
fprintf(debug, "\n\n\n========= RAW ==========\n");
dumpnfa(v->nfa, debug);
dumpst(v->tree, debug, 1);
@@ -395,7 +409,8 @@ pg_regcomp(regex_t *re,
markst(v->tree);
cleanst(v);
#ifdef REG_DEBUG
- if (debug != NULL) {
+ if (debug != NULL)
+ {
fprintf(debug, "\n\n\n========= TREE FIXED ==========\n");
dumpst(v->tree, debug, 1);
}
@@ -405,7 +420,8 @@ pg_regcomp(regex_t *re,
re->re_info |= nfatree(v, v->tree, debug);
CNOERR();
assert(v->nlacons == 0 || v->lacons != NULL);
- for (i = 1; i < v->nlacons; i++) {
+ for (i = 1; i < v->nlacons; i++)
+ {
#ifdef REG_DEBUG
if (debug != NULL)
fprintf(debug, "\n\n\n========= LA%d ==========\n", i);
@@ -413,7 +429,7 @@ pg_regcomp(regex_t *re,
nfanode(v, &v->lacons[i], debug);
}
CNOERR();
- if (v->tree->flags&SHORTER)
+ if (v->tree->flags & SHORTER)
NOTE(REG_USHORTEST);
/* build compacted NFAs for tree, lacons, fast search */
@@ -422,7 +438,7 @@ pg_regcomp(regex_t *re,
fprintf(debug, "\n\n\n========= SEARCH ==========\n");
#endif
/* can sacrifice main NFA now, so use it as work area */
- (DISCARD)optimize(v->nfa, debug);
+ (DISCARD) optimize(v->nfa, debug);
CNOERR();
makesearch(v, v->nfa);
CNOERR();
@@ -431,7 +447,7 @@ pg_regcomp(regex_t *re,
/* looks okay, package it up */
re->re_nsub = v->nsubexp;
- v->re = NULL; /* freev no longer frees re */
+ v->re = NULL; /* freev no longer frees re */
g->magic = GUTSMAGIC;
g->cflags = v->cflags;
g->info = re->re_info;
@@ -439,13 +455,13 @@ pg_regcomp(regex_t *re,
g->tree = v->tree;
v->tree = NULL;
g->ntree = v->ntree;
- g->compare = (v->cflags&REG_ICASE) ? casecmp : cmp;
+ g->compare = (v->cflags & REG_ICASE) ? casecmp : cmp;
g->lacons = v->lacons;
v->lacons = NULL;
g->nlacons = v->nlacons;
#ifdef REG_DEBUG
- if (flags&REG_DUMP)
+ if (flags & REG_DUMP)
dump(re, stdout);
#endif
@@ -457,22 +473,26 @@ pg_regcomp(regex_t *re,
* moresubs - enlarge subRE vector
*/
static void
-moresubs(struct vars *v,
+moresubs(struct vars * v,
int wanted) /* want enough room for this one */
{
struct subre **p;
- size_t n;
+ size_t n;
- assert(wanted > 0 && (size_t)wanted >= v->nsubs);
- n = (size_t)wanted * 3 / 2 + 1;
- if (v->subs == v->sub10) {
- p = (struct subre **)MALLOC(n * sizeof(struct subre *));
+ assert(wanted > 0 && (size_t) wanted >= v->nsubs);
+ n = (size_t) wanted *3 / 2 + 1;
+
+ if (v->subs == v->sub10)
+ {
+ p = (struct subre **) MALLOC(n * sizeof(struct subre *));
if (p != NULL)
memcpy(VS(p), VS(v->subs),
- v->nsubs * sizeof(struct subre *));
- } else
- p = (struct subre **)REALLOC(v->subs, n*sizeof(struct subre *));
- if (p == NULL) {
+ v->nsubs * sizeof(struct subre *));
+ }
+ else
+ p = (struct subre **) REALLOC(v->subs, n * sizeof(struct subre *));
+ if (p == NULL)
+ {
ERR(REG_ESPACE);
return;
}
@@ -480,7 +500,7 @@ moresubs(struct vars *v,
for (p = &v->subs[v->nsubs]; v->nsubs < n; p++, v->nsubs++)
*p = NULL;
assert(v->nsubs == n);
- assert((size_t)wanted < v->nsubs);
+ assert((size_t) wanted < v->nsubs);
}
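
moresubs() grows the subexpression vector geometrically (new size = wanted * 3 / 2 + 1), switching from the inline sub10[] array to heap storage only on first overflow and NULL-filling the new slots. A simplified sketch of that grow-by-roughly-1.5 strategy, leaving out the static-array special case (names are illustrative):

    #include <stdlib.h>

    /* sketch: grow a pointer vector to hold at least `wanted` + 1 slots */
    static int grow_vector(void ***vecp, size_t *np, size_t wanted)
    {
        size_t  n = wanted * 3 / 2 + 1;     /* same growth rule as moresubs() */
        void  **p = realloc(*vecp, n * sizeof(void *));

        if (p == NULL)
            return -1;                      /* caller reports REG_ESPACE */
        for (size_t i = *np; i < n; i++)
            p[i] = NULL;                    /* new slots start out empty */
        *vecp = p;
        *np = n;
        return 0;
    }
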
/*
@@ -490,7 +510,7 @@ moresubs(struct vars *v,
* (if any), to make error-handling code terser.
*/
static int
-freev(struct vars *v,
+freev(struct vars * v,
int err)
{
if (v->re != NULL)
@@ -511,7 +531,7 @@ freev(struct vars *v,
freecvec(v->mcces);
if (v->lacons != NULL)
freelacons(v->lacons, v->nlacons);
- ERR(err); /* nop if err==0 */
+ ERR(err); /* nop if err==0 */
return v->err;
}
@@ -521,8 +541,8 @@ freev(struct vars *v,
* NFA must have been optimize()d already.
*/
static void
-makesearch(struct vars *v,
- struct nfa *nfa)
+makesearch(struct vars * v,
+ struct nfa * nfa)
{
struct arc *a;
struct arc *b;
@@ -532,12 +552,14 @@ makesearch(struct vars *v,
struct state *slist;
/* no loops are needed if it's anchored */
- for (a = pre->outs; a != NULL; a = a->outchain) {
+ for (a = pre->outs; a != NULL; a = a->outchain)
+ {
assert(a->type == PLAIN);
if (a->co != nfa->bos[0] && a->co != nfa->bos[1])
break;
}
- if (a != NULL) {
+ if (a != NULL)
+ {
/* add implicit .* in front */
rainbow(nfa, v->cm, PLAIN, COLORLESS, pre, pre);
@@ -548,40 +570,45 @@ makesearch(struct vars *v,
/*
* Now here's the subtle part. Because many REs have no lookback
- * constraints, often knowing when you were in the pre state tells
- * you little; it's the next state(s) that are informative. But
- * some of them may have other inarcs, i.e. it may be possible to
- * make actual progress and then return to one of them. We must
- * de-optimize such cases, splitting each such state into progress
- * and no-progress states.
+ * constraints, often knowing when you were in the pre state tells you
+ * little; it's the next state(s) that are informative. But some of
+ * them may have other inarcs, i.e. it may be possible to make actual
+ * progress and then return to one of them. We must de-optimize such
+ * cases, splitting each such state into progress and no-progress
+ * states.
*/
/* first, make a list of the states */
slist = NULL;
- for (a = pre->outs; a != NULL; a = a->outchain) {
+ for (a = pre->outs; a != NULL; a = a->outchain)
+ {
s = a->to;
for (b = s->ins; b != NULL; b = b->inchain)
if (b->from != pre)
break;
- if (b != NULL) { /* must be split */
+ if (b != NULL)
+ { /* must be split */
s->tmp = slist;
slist = s;
}
}
/* do the splits */
- for (s = slist; s != NULL; s = s2) {
+ for (s = slist; s != NULL; s = s2)
+ {
s2 = newstate(nfa);
copyouts(nfa, s, s2);
- for (a = s->ins; a != NULL; a = b) {
+ for (a = s->ins; a != NULL; a = b)
+ {
b = a->inchain;
- if (a->from != pre) {
+ if (a->from != pre)
+ {
cparc(nfa, a, a->from, s2);
freearc(nfa, a);
}
}
s2 = s->tmp;
- s->tmp = NULL; /* clean up while we're at it */
+ s->tmp = NULL; /* clean up while we're at it */
}
}
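
makesearch() turns a matching NFA into a searching one: unless the RE is anchored it prepends an implicit .* via rainbow(), then splits any successor of the pre state that also has non-pre in-arcs into progress and no-progress copies so the runtime can tell when the match start has really advanced. For orientation only, the implicit .* prefix is what spares the engine the naive retry loop sketched below (not the engine's actual mechanism):

    /* sketch: without an implicit ".*", unanchored search degenerates into
     * retrying an anchored matcher at every start offset */
    static long naive_search(const char *text, size_t len,
                             int (*match_here)(const char *, size_t))
    {
        for (size_t i = 0; i <= len; i++)
            if (match_here(text + i, len - i))
                return (long) i;            /* leftmost start that matches */
        return -1;
    }
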
@@ -589,22 +616,22 @@ makesearch(struct vars *v,
* parse - parse an RE
*
* This is actually just the top level, which parses a bunch of branches
- * tied together with '|'. They appear in the tree as the left children
+ * tied together with '|'. They appear in the tree as the left children
* of a chain of '|' subres.
*/
static struct subre *
-parse(struct vars *v,
- int stopper, /* EOS or ')' */
- int type, /* LACON (lookahead subRE) or PLAIN */
- struct state *init, /* initial state */
- struct state *final) /* final state */
+parse(struct vars * v,
+ int stopper, /* EOS or ')' */
+ int type, /* LACON (lookahead subRE) or PLAIN */
+ struct state * init, /* initial state */
+ struct state * final) /* final state */
{
- struct state *left; /* scaffolding for branch */
+ struct state *left; /* scaffolding for branch */
struct state *right;
- struct subre *branches; /* top level */
- struct subre *branch; /* current branch */
- struct subre *t; /* temporary */
- int firstbranch; /* is this the first branch? */
+ struct subre *branches; /* top level */
+ struct subre *branch; /* current branch */
+ struct subre *t; /* temporary */
+ int firstbranch; /* is this the first branch? */
assert(stopper == ')' || stopper == EOS);
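
As the header comment says, parse() strings the branches of an alternation into a right-leaning chain of '|' subre nodes, each branch hanging off a left pointer; the single-branch and "no interesting innards" cleanups at the end of the function collapse trivial chains again. Shape of the tree built for a|b|c (illustrative diagram only):

    /*
     *        '|'              each '|' node's left child is one branch,
     *       /   \             its right child is the next '|' link
     *      a    '|'
     *          /   \
     *         b    '|'
     *             /   \
     *            c    NULL
     */
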
@@ -612,8 +639,10 @@ parse(struct vars *v,
NOERRN();
branch = branches;
firstbranch = 1;
- do { /* a branch */
- if (!firstbranch) {
+ do
+ { /* a branch */
+ if (!firstbranch)
+ {
/* need a place to hang it */
branch->right = subre(v, '|', LONGER, init, final);
NOERRN();
@@ -629,25 +658,29 @@ parse(struct vars *v,
branch->left = parsebranch(v, stopper, type, left, right, 0);
NOERRN();
branch->flags |= UP(branch->flags | branch->left->flags);
- if ((branch->flags &~ branches->flags) != 0) /* new flags */
+ if ((branch->flags & ~branches->flags) != 0) /* new flags */
for (t = branches; t != branch; t = t->right)
t->flags |= branch->flags;
} while (EAT('|'));
assert(SEE(stopper) || SEE(EOS));
- if (!SEE(stopper)) {
+ if (!SEE(stopper))
+ {
assert(stopper == ')' && SEE(EOS));
ERR(REG_EPAREN);
}
/* optimize out simple cases */
- if (branch == branches) { /* only one branch */
+ if (branch == branches)
+ { /* only one branch */
assert(branch->right == NULL);
t = branch->left;
branch->left = NULL;
freesubre(v, branches);
branches = t;
- } else if (!MESSY(branches->flags)) { /* no interesting innards */
+ }
+ else if (!MESSY(branches->flags))
+ { /* no interesting innards */
freesubre(v, branches->left);
branches->left = NULL;
freesubre(v, branches->right);
@@ -666,23 +699,25 @@ parse(struct vars *v,
* ',' nodes introduced only when necessary due to substructure.
*/
static struct subre *
-parsebranch(struct vars *v,
- int stopper, /* EOS or ')' */
+parsebranch(struct vars * v,
+ int stopper, /* EOS or ')' */
int type, /* LACON (lookahead subRE) or PLAIN */
- struct state *left, /* leftmost state */
- struct state *right, /* rightmost state */
- int partial) /* is this only part of a branch? */
+ struct state * left, /* leftmost state */
+ struct state * right, /* rightmost state */
+ int partial) /* is this only part of a branch? */
{
- struct state *lp; /* left end of current construct */
- int seencontent; /* is there anything in this branch yet? */
+ struct state *lp; /* left end of current construct */
+ int seencontent; /* is there anything in this branch yet? */
struct subre *t;
lp = left;
seencontent = 0;
t = subre(v, '=', 0, left, right); /* op '=' is tentative */
NOERRN();
- while (!SEE('|') && !SEE(stopper) && !SEE(EOS)) {
- if (seencontent) { /* implicit concat operator */
+ while (!SEE('|') && !SEE(stopper) && !SEE(EOS))
+ {
+ if (seencontent)
+ { /* implicit concat operator */
lp = newstate(v->nfa);
NOERRN();
moveins(v->nfa, right, lp);
@@ -693,7 +728,8 @@ parsebranch(struct vars *v,
parseqatom(v, stopper, type, lp, right, t);
}
- if (!seencontent) { /* empty branch */
+ if (!seencontent)
+ { /* empty branch */
if (!partial)
NOTE(REG_UUNSPEC);
assert(lp == left);
@@ -711,259 +747,273 @@ parsebranch(struct vars *v,
* of the branch, making this function's name somewhat inaccurate.
*/
static void
-parseqatom(struct vars *v,
+parseqatom(struct vars * v,
int stopper, /* EOS or ')' */
int type, /* LACON (lookahead subRE) or PLAIN */
- struct state *lp, /* left state to hang it on */
- struct state *rp, /* right state to hang it on */
- struct subre *top) /* subtree top */
+ struct state * lp, /* left state to hang it on */
+ struct state * rp, /* right state to hang it on */
+ struct subre * top) /* subtree top */
{
- struct state *s; /* temporaries for new states */
+ struct state *s; /* temporaries for new states */
struct state *s2;
-# define ARCV(t, val) newarc(v->nfa, t, val, lp, rp)
- int m, n;
- struct subre *atom; /* atom's subtree */
+
+#define ARCV(t, val) newarc(v->nfa, t, val, lp, rp)
+ int m,
+ n;
+ struct subre *atom; /* atom's subtree */
struct subre *t;
- int cap; /* capturing parens? */
- int pos; /* positive lookahead? */
- int subno; /* capturing-parens or backref number */
- int atomtype;
- int qprefer; /* quantifier short/long preference */
- int f;
- struct subre **atomp; /* where the pointer to atom is */
+ int cap; /* capturing parens? */
+ int pos; /* positive lookahead? */
+ int subno; /* capturing-parens or backref number */
+ int atomtype;
+ int qprefer; /* quantifier short/long preference */
+ int f;
+ struct subre **atomp; /* where the pointer to atom is */
/* initial bookkeeping */
atom = NULL;
- assert(lp->nouts == 0); /* must string new code */
- assert(rp->nins == 0); /* between lp and rp */
- subno = 0; /* just to shut lint up */
+ assert(lp->nouts == 0); /* must string new code */
+ assert(rp->nins == 0); /* between lp and rp */
+ subno = 0; /* just to shut lint up */
/* an atom or constraint... */
atomtype = v->nexttype;
- switch (atomtype) {
- /* first, constraints, which end by returning */
- case '^':
- ARCV('^', 1);
- if (v->cflags&REG_NLANCH)
- ARCV(BEHIND, v->nlcolor);
- NEXT();
- return;
- break;
- case '$':
- ARCV('$', 1);
- if (v->cflags&REG_NLANCH)
- ARCV(AHEAD, v->nlcolor);
- NEXT();
- return;
- break;
- case SBEGIN:
- ARCV('^', 1); /* BOL */
- ARCV('^', 0); /* or BOS */
- NEXT();
- return;
- break;
- case SEND:
- ARCV('$', 1); /* EOL */
- ARCV('$', 0); /* or EOS */
- NEXT();
- return;
- break;
- case '<':
- wordchrs(v); /* does NEXT() */
- s = newstate(v->nfa);
- NOERR();
- nonword(v, BEHIND, lp, s);
- word(v, AHEAD, s, rp);
- return;
- break;
- case '>':
- wordchrs(v); /* does NEXT() */
- s = newstate(v->nfa);
- NOERR();
- word(v, BEHIND, lp, s);
- nonword(v, AHEAD, s, rp);
- return;
- break;
- case WBDRY:
- wordchrs(v); /* does NEXT() */
- s = newstate(v->nfa);
- NOERR();
- nonword(v, BEHIND, lp, s);
- word(v, AHEAD, s, rp);
- s = newstate(v->nfa);
- NOERR();
- word(v, BEHIND, lp, s);
- nonword(v, AHEAD, s, rp);
- return;
- break;
- case NWBDRY:
- wordchrs(v); /* does NEXT() */
- s = newstate(v->nfa);
- NOERR();
- word(v, BEHIND, lp, s);
- word(v, AHEAD, s, rp);
- s = newstate(v->nfa);
- NOERR();
- nonword(v, BEHIND, lp, s);
- nonword(v, AHEAD, s, rp);
- return;
- break;
- case LACON: /* lookahead constraint */
- pos = v->nextvalue;
- NEXT();
- s = newstate(v->nfa);
- s2 = newstate(v->nfa);
- NOERR();
- t = parse(v, ')', LACON, s, s2);
- freesubre(v, t); /* internal structure irrelevant */
- assert(SEE(')') || ISERR());
- NEXT();
- n = newlacon(v, s, s2, pos);
- NOERR();
- ARCV(LACON, n);
- return;
- break;
- /* then errors, to get them out of the way */
- case '*':
- case '+':
- case '?':
- case '{':
- ERR(REG_BADRPT);
- return;
- break;
- default:
- ERR(REG_ASSERT);
- return;
- break;
- /* then plain characters, and minor variants on that theme */
- case ')': /* unbalanced paren */
- if ((v->cflags&REG_ADVANCED) != REG_EXTENDED) {
- ERR(REG_EPAREN);
+ switch (atomtype)
+ {
+ /* first, constraints, which end by returning */
+ case '^':
+ ARCV('^', 1);
+ if (v->cflags & REG_NLANCH)
+ ARCV(BEHIND, v->nlcolor);
+ NEXT();
return;
- }
- /* legal in EREs due to specification botch */
- NOTE(REG_UPBOTCH);
- /* fallthrough into case PLAIN */
- case PLAIN:
- onechr(v, v->nextvalue, lp, rp);
- okcolors(v->nfa, v->cm);
- NOERR();
- NEXT();
- break;
- case '[':
- if (v->nextvalue == 1)
- bracket(v, lp, rp);
- else
- cbracket(v, lp, rp);
- assert(SEE(']') || ISERR());
- NEXT();
- break;
- case '.':
- rainbow(v->nfa, v->cm, PLAIN,
- (v->cflags&REG_NLSTOP) ? v->nlcolor : COLORLESS,
- lp, rp);
- NEXT();
- break;
- /* and finally the ugly stuff */
- case '(': /* value flags as capturing or non */
- cap = (type == LACON) ? 0 : v->nextvalue;
- if (cap) {
- v->nsubexp++;
- subno = v->nsubexp;
- if ((size_t)subno >= v->nsubs)
- moresubs(v, subno);
- assert((size_t)subno < v->nsubs);
- } else
- atomtype = PLAIN; /* something that's not '(' */
- NEXT();
- /* need new endpoints because tree will contain pointers */
- s = newstate(v->nfa);
- s2 = newstate(v->nfa);
- NOERR();
- EMPTYARC(lp, s);
- EMPTYARC(s2, rp);
- NOERR();
- atom = parse(v, ')', PLAIN, s, s2);
- assert(SEE(')') || ISERR());
- NEXT();
- NOERR();
- if (cap) {
- v->subs[subno] = atom;
- t = subre(v, '(', atom->flags|CAP, lp, rp);
+ break;
+ case '$':
+ ARCV('$', 1);
+ if (v->cflags & REG_NLANCH)
+ ARCV(AHEAD, v->nlcolor);
+ NEXT();
+ return;
+ break;
+ case SBEGIN:
+ ARCV('^', 1); /* BOL */
+ ARCV('^', 0); /* or BOS */
+ NEXT();
+ return;
+ break;
+ case SEND:
+ ARCV('$', 1); /* EOL */
+ ARCV('$', 0); /* or EOS */
+ NEXT();
+ return;
+ break;
+ case '<':
+ wordchrs(v); /* does NEXT() */
+ s = newstate(v->nfa);
NOERR();
- t->subno = subno;
- t->left = atom;
- atom = t;
- }
- /* postpone everything else pending possible {0} */
- break;
- case BACKREF: /* the Feature From The Black Lagoon */
- INSIST(type != LACON, REG_ESUBREG);
- INSIST(v->nextvalue < v->nsubs, REG_ESUBREG);
- INSIST(v->subs[v->nextvalue] != NULL, REG_ESUBREG);
- NOERR();
- assert(v->nextvalue > 0);
- atom = subre(v, 'b', BACKR, lp, rp);
- subno = v->nextvalue;
- atom->subno = subno;
- EMPTYARC(lp, rp); /* temporarily, so there's something */
- NEXT();
- break;
+ nonword(v, BEHIND, lp, s);
+ word(v, AHEAD, s, rp);
+ return;
+ break;
+ case '>':
+ wordchrs(v); /* does NEXT() */
+ s = newstate(v->nfa);
+ NOERR();
+ word(v, BEHIND, lp, s);
+ nonword(v, AHEAD, s, rp);
+ return;
+ break;
+ case WBDRY:
+ wordchrs(v); /* does NEXT() */
+ s = newstate(v->nfa);
+ NOERR();
+ nonword(v, BEHIND, lp, s);
+ word(v, AHEAD, s, rp);
+ s = newstate(v->nfa);
+ NOERR();
+ word(v, BEHIND, lp, s);
+ nonword(v, AHEAD, s, rp);
+ return;
+ break;
+ case NWBDRY:
+ wordchrs(v); /* does NEXT() */
+ s = newstate(v->nfa);
+ NOERR();
+ word(v, BEHIND, lp, s);
+ word(v, AHEAD, s, rp);
+ s = newstate(v->nfa);
+ NOERR();
+ nonword(v, BEHIND, lp, s);
+ nonword(v, AHEAD, s, rp);
+ return;
+ break;
+ case LACON: /* lookahead constraint */
+ pos = v->nextvalue;
+ NEXT();
+ s = newstate(v->nfa);
+ s2 = newstate(v->nfa);
+ NOERR();
+ t = parse(v, ')', LACON, s, s2);
+ freesubre(v, t); /* internal structure irrelevant */
+ assert(SEE(')') || ISERR());
+ NEXT();
+ n = newlacon(v, s, s2, pos);
+ NOERR();
+ ARCV(LACON, n);
+ return;
+ break;
+ /* then errors, to get them out of the way */
+ case '*':
+ case '+':
+ case '?':
+ case '{':
+ ERR(REG_BADRPT);
+ return;
+ break;
+ default:
+ ERR(REG_ASSERT);
+ return;
+ break;
+ /* then plain characters, and minor variants on that theme */
+ case ')': /* unbalanced paren */
+ if ((v->cflags & REG_ADVANCED) != REG_EXTENDED)
+ {
+ ERR(REG_EPAREN);
+ return;
+ }
+ /* legal in EREs due to specification botch */
+ NOTE(REG_UPBOTCH);
+ /* fallthrough into case PLAIN */
+ case PLAIN:
+ onechr(v, v->nextvalue, lp, rp);
+ okcolors(v->nfa, v->cm);
+ NOERR();
+ NEXT();
+ break;
+ case '[':
+ if (v->nextvalue == 1)
+ bracket(v, lp, rp);
+ else
+ cbracket(v, lp, rp);
+ assert(SEE(']') || ISERR());
+ NEXT();
+ break;
+ case '.':
+ rainbow(v->nfa, v->cm, PLAIN,
+ (v->cflags & REG_NLSTOP) ? v->nlcolor : COLORLESS,
+ lp, rp);
+ NEXT();
+ break;
+ /* and finally the ugly stuff */
+ case '(': /* value flags as capturing or non */
+ cap = (type == LACON) ? 0 : v->nextvalue;
+ if (cap)
+ {
+ v->nsubexp++;
+ subno = v->nsubexp;
+ if ((size_t) subno >= v->nsubs)
+ moresubs(v, subno);
+ assert((size_t) subno < v->nsubs);
+ }
+ else
+ atomtype = PLAIN; /* something that's not '(' */
+ NEXT();
+ /* need new endpoints because tree will contain pointers */
+ s = newstate(v->nfa);
+ s2 = newstate(v->nfa);
+ NOERR();
+ EMPTYARC(lp, s);
+ EMPTYARC(s2, rp);
+ NOERR();
+ atom = parse(v, ')', PLAIN, s, s2);
+ assert(SEE(')') || ISERR());
+ NEXT();
+ NOERR();
+ if (cap)
+ {
+ v->subs[subno] = atom;
+ t = subre(v, '(', atom->flags | CAP, lp, rp);
+ NOERR();
+ t->subno = subno;
+ t->left = atom;
+ atom = t;
+ }
+ /* postpone everything else pending possible {0} */
+ break;
+ case BACKREF: /* the Feature From The Black Lagoon */
+ INSIST(type != LACON, REG_ESUBREG);
+ INSIST(v->nextvalue < v->nsubs, REG_ESUBREG);
+ INSIST(v->subs[v->nextvalue] != NULL, REG_ESUBREG);
+ NOERR();
+ assert(v->nextvalue > 0);
+ atom = subre(v, 'b', BACKR, lp, rp);
+ subno = v->nextvalue;
+ atom->subno = subno;
+ EMPTYARC(lp, rp); /* temporarily, so there's something */
+ NEXT();
+ break;
}
/* ...and an atom may be followed by a quantifier */
- switch (v->nexttype) {
- case '*':
- m = 0;
- n = INFINITY;
- qprefer = (v->nextvalue) ? LONGER : SHORTER;
- NEXT();
- break;
- case '+':
- m = 1;
- n = INFINITY;
- qprefer = (v->nextvalue) ? LONGER : SHORTER;
- NEXT();
- break;
- case '?':
- m = 0;
- n = 1;
- qprefer = (v->nextvalue) ? LONGER : SHORTER;
- NEXT();
- break;
- case '{':
- NEXT();
- m = scannum(v);
- if (EAT(',')) {
- if (SEE(DIGIT))
- n = scannum(v);
+ switch (v->nexttype)
+ {
+ case '*':
+ m = 0;
+ n = INFINITY;
+ qprefer = (v->nextvalue) ? LONGER : SHORTER;
+ NEXT();
+ break;
+ case '+':
+ m = 1;
+ n = INFINITY;
+ qprefer = (v->nextvalue) ? LONGER : SHORTER;
+ NEXT();
+ break;
+ case '?':
+ m = 0;
+ n = 1;
+ qprefer = (v->nextvalue) ? LONGER : SHORTER;
+ NEXT();
+ break;
+ case '{':
+ NEXT();
+ m = scannum(v);
+ if (EAT(','))
+ {
+ if (SEE(DIGIT))
+ n = scannum(v);
+ else
+ n = INFINITY;
+ if (m > n)
+ {
+ ERR(REG_BADBR);
+ return;
+ }
+ /* {m,n} exercises preference, even if it's {m,m} */
+ qprefer = (v->nextvalue) ? LONGER : SHORTER;
+ }
else
- n = INFINITY;
- if (m > n) {
+ {
+ n = m;
+ /* {m} passes operand's preference through */
+ qprefer = 0;
+ }
+ if (!SEE('}'))
+ { /* catches errors too */
ERR(REG_BADBR);
return;
}
- /* {m,n} exercises preference, even if it's {m,m} */
- qprefer = (v->nextvalue) ? LONGER : SHORTER;
- } else {
- n = m;
- /* {m} passes operand's preference through */
+ NEXT();
+ break;
+ default: /* no quantifier */
+ m = n = 1;
qprefer = 0;
- }
- if (!SEE('}')) { /* catches errors too */
- ERR(REG_BADBR);
- return;
- }
- NEXT();
- break;
- default: /* no quantifier */
- m = n = 1;
- qprefer = 0;
- break;
+ break;
}
/* annoying special case: {0} or {0,0} cancels everything */
- if (m == 0 && n == 0) {
+ if (m == 0 && n == 0)
+ {
if (atom != NULL)
freesubre(v, atom);
if (atomtype == '(')
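
The quantifier switch above normalizes every suffix to a pair (m, n) plus a length preference: '*' is {0,unbounded}, '+' is {1,unbounded}, '?' is {0,1}, '{m}' is {m,m} passing the operand's preference through, and '{m,n}' is rejected with REG_BADBR when m > n. The mapping restated as a standalone sketch (UNBOUNDED stands in for the code's INFINITY marker):

    /* sketch: quantifier suffix -> (m, n) bounds, as parsed above */
    #define UNBOUNDED (-1)

    struct bounds { int m; int n; };

    static struct bounds quantifier_bounds(int op, int m_in, int n_in)
    {
        switch (op)
        {
            case '*': return (struct bounds) {0, UNBOUNDED};
            case '+': return (struct bounds) {1, UNBOUNDED};
            case '?': return (struct bounds) {0, 1};
            case '{': return (struct bounds) {m_in, n_in};  /* {m} means n_in == m_in */
            default:  return (struct bounds) {1, 1};        /* no quantifier at all */
        }
    }
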
@@ -976,7 +1026,8 @@ parseqatom(struct vars *v,
/* if not a messy case, avoid hard part */
assert(!MESSY(top->flags));
f = top->flags | qprefer | ((atom != NULL) ? atom->flags : 0);
- if (atomtype != '(' && atomtype != BACKREF && !MESSY(UP(f))) {
+ if (atomtype != '(' && atomtype != BACKREF && !MESSY(UP(f)))
+ {
if (!(m == 1 && n == 1))
repeat(v, lp, rp, m, n);
if (atom != NULL)
@@ -986,13 +1037,14 @@ parseqatom(struct vars *v,
}
/*
- * hard part: something messy
- * That is, capturing parens, back reference, short/long clash, or
- * an atom with substructure containing one of those.
+ * hard part: something messy That is, capturing parens, back
+ * reference, short/long clash, or an atom with substructure
+ * containing one of those.
*/
/* now we'll need a subre for the contents even if they're boring */
- if (atom == NULL) {
+ if (atom == NULL)
+ {
atom = subre(v, '=', 0, lp, rp);
NOERR();
}
@@ -1000,9 +1052,8 @@ parseqatom(struct vars *v,
/*
* prepare a general-purpose state skeleton
*
- * ---> [s] ---prefix---> [begin] ---atom---> [end] ----rest---> [rp]
- * / /
- * [lp] ----> [s2] ----bypass---------------------
+ * ---> [s] ---prefix---> [begin] ---atom---> [end] ----rest---> [rp] /
+ * / [lp] ----> [s2] ----bypass---------------------
*
* where bypass is an empty, and prefix is some repetitions of atom
*/
@@ -1034,21 +1085,23 @@ parseqatom(struct vars *v,
top->right = t;
/* if it's a backref, now is the time to replicate the subNFA */
- if (atomtype == BACKREF) {
- assert(atom->begin->nouts == 1); /* just the EMPTY */
+ if (atomtype == BACKREF)
+ {
+ assert(atom->begin->nouts == 1); /* just the EMPTY */
delsub(v->nfa, atom->begin, atom->end);
assert(v->subs[subno] != NULL);
/* and here's why the recursion got postponed: it must */
/* wait until the skeleton is filled in, because it may */
/* hit a backref that wants to copy the filled-in skeleton */
dupnfa(v->nfa, v->subs[subno]->begin, v->subs[subno]->end,
- atom->begin, atom->end);
+ atom->begin, atom->end);
NOERR();
}
/* it's quantifier time; first, turn x{0,...} into x{1,...}|empty */
- if (m == 0) {
- EMPTYARC(s2, atom->end); /* the bypass */
+ if (m == 0)
+ {
+ EMPTYARC(s2, atom->end); /* the bypass */
assert(PREF(qprefer) != 0);
f = COMBINE(qprefer, atom->flags);
t = subre(v, '|', f, lp, atom->end);
@@ -1064,25 +1117,30 @@ parseqatom(struct vars *v,
}
/* deal with the rest of the quantifier */
- if (atomtype == BACKREF) {
+ if (atomtype == BACKREF)
+ {
/* special case: backrefs have internal quantifiers */
- EMPTYARC(s, atom->begin); /* empty prefix */
+ EMPTYARC(s, atom->begin); /* empty prefix */
/* just stuff everything into atom */
repeat(v, atom->begin, atom->end, m, n);
- atom->min = (short)m;
- atom->max = (short)n;
+ atom->min = (short) m;
+ atom->max = (short) n;
atom->flags |= COMBINE(qprefer, atom->flags);
- } else if (m == 1 && n == 1) {
+ }
+ else if (m == 1 && n == 1)
+ {
/* no/vacuous quantifier: done */
- EMPTYARC(s, atom->begin); /* empty prefix */
- } else {
+ EMPTYARC(s, atom->begin); /* empty prefix */
+ }
+ else
+ {
/* turn x{m,n} into x{m-1,n-1}x, with capturing */
- /* parens in only second x */
+ /* parens in only second x */
dupnfa(v->nfa, atom->begin, atom->end, s, atom->begin);
assert(m >= 1 && m != INFINITY && n >= 1);
- repeat(v, s, atom->begin, m-1, (n == INFINITY) ? n : n-1);
+ repeat(v, s, atom->begin, m - 1, (n == INFINITY) ? n : n - 1);
f = COMBINE(qprefer, atom->flags);
- t = subre(v, '.', f, s, atom->end); /* prefix and atom */
+ t = subre(v, '.', f, s, atom->end); /* prefix and atom */
NOERR();
t->left = subre(v, '=', PREF(f), s, atom->begin);
NOERR();
@@ -1094,7 +1152,8 @@ parseqatom(struct vars *v,
t = top->right;
if (!(SEE('|') || SEE(stopper) || SEE(EOS)))
t->right = parsebranch(v, stopper, type, atom->end, rp, 1);
- else {
+ else
+ {
EMPTYARC(atom->end, rp);
t->right = subre(v, '=', 0, atom->end, rp);
}
@@ -1107,12 +1166,12 @@ parseqatom(struct vars *v,
* nonword - generate arcs for non-word-character ahead or behind
*/
static void
-nonword(struct vars *v,
- int dir, /* AHEAD or BEHIND */
- struct state *lp,
- struct state *rp)
+nonword(struct vars * v,
+ int dir, /* AHEAD or BEHIND */
+ struct state * lp,
+ struct state * rp)
{
- int anchor = (dir == AHEAD) ? '$' : '^';
+ int anchor = (dir == AHEAD) ? '$' : '^';
assert(dir == AHEAD || dir == BEHIND);
newarc(v->nfa, anchor, 1, lp, rp);
@@ -1125,10 +1184,10 @@ nonword(struct vars *v,
* word - generate arcs for word character ahead or behind
*/
static void
-word(struct vars *v,
- int dir, /* AHEAD or BEHIND */
- struct state *lp,
- struct state *rp)
+word(struct vars * v,
+ int dir, /* AHEAD or BEHIND */
+ struct state * lp,
+ struct state * rp)
{
assert(dir == AHEAD || dir == BEHIND);
cloneouts(v->nfa, v->wordchrs, lp, rp, dir);
@@ -1138,16 +1197,18 @@ word(struct vars *v,
/*
* scannum - scan a number
*/
-static int /* value, <= DUPMAX */
-scannum(struct vars *v)
+static int /* value, <= DUPMAX */
+scannum(struct vars * v)
{
- int n = 0;
+ int n = 0;
- while (SEE(DIGIT) && n < DUPMAX) {
- n = n*10 + v->nextvalue;
+ while (SEE(DIGIT) && n < DUPMAX)
+ {
+ n = n * 10 + v->nextvalue;
NEXT();
}
- if (SEE(DIGIT) || n > DUPMAX) {
+ if (SEE(DIGIT) || n > DUPMAX)
+ {
ERR(REG_BADBR);
return 0;
}
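
scannum() accumulates a decimal bound one digit token at a time and reports REG_BADBR for anything past DUPMAX. The same bounded accumulation over a plain string, as a self-contained sketch (the cutoff value is the caller's choice here):

    #include <ctype.h>

    /* sketch: parse a repeat bound, failing if it would exceed `dupmax` */
    static int scan_bound(const char **sp, int dupmax, int *out)
    {
        const char *s = *sp;
        int         n = 0;

        while (isdigit((unsigned char) *s) && n < dupmax)
            n = n * 10 + (*s++ - '0');
        if (isdigit((unsigned char) *s) || n > dupmax)
            return -1;              /* caller reports REG_BADBR */
        *sp = s;
        *out = n;
        return 0;
    }
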
@@ -1165,83 +1226,84 @@ scannum(struct vars *v)
* code in parse(), and when this is called, it doesn't matter any more.
*/
static void
-repeat(struct vars *v,
- struct state *lp,
- struct state *rp,
+repeat(struct vars * v,
+ struct state * lp,
+ struct state * rp,
int m,
int n)
{
-# define SOME 2
-# define INF 3
-# define PAIR(x, y) ((x)*4 + (y))
-# define REDUCE(x) ( ((x) == INFINITY) ? INF : (((x) > 1) ? SOME : (x)) )
- const int rm = REDUCE(m);
- const int rn = REDUCE(n);
+#define SOME 2
+#define INF 3
+#define PAIR(x, y) ((x)*4 + (y))
+#define REDUCE(x) ( ((x) == INFINITY) ? INF : (((x) > 1) ? SOME : (x)) )
+ const int rm = REDUCE(m);
+ const int rn = REDUCE(n);
struct state *s;
struct state *s2;
- switch (PAIR(rm, rn)) {
- case PAIR(0, 0): /* empty string */
- delsub(v->nfa, lp, rp);
- EMPTYARC(lp, rp);
- break;
- case PAIR(0, 1): /* do as x| */
- EMPTYARC(lp, rp);
- break;
- case PAIR(0, SOME): /* do as x{1,n}| */
- repeat(v, lp, rp, 1, n);
- NOERR();
- EMPTYARC(lp, rp);
- break;
- case PAIR(0, INF): /* loop x around */
- s = newstate(v->nfa);
- NOERR();
- moveouts(v->nfa, lp, s);
- moveins(v->nfa, rp, s);
- EMPTYARC(lp, s);
- EMPTYARC(s, rp);
- break;
- case PAIR(1, 1): /* no action required */
- break;
- case PAIR(1, SOME): /* do as x{0,n-1}x = (x{1,n-1}|)x */
- s = newstate(v->nfa);
- NOERR();
- moveouts(v->nfa, lp, s);
- dupnfa(v->nfa, s, rp, lp, s);
- NOERR();
- repeat(v, lp, s, 1, n-1);
- NOERR();
- EMPTYARC(lp, s);
- break;
- case PAIR(1, INF): /* add loopback arc */
- s = newstate(v->nfa);
- s2 = newstate(v->nfa);
- NOERR();
- moveouts(v->nfa, lp, s);
- moveins(v->nfa, rp, s2);
- EMPTYARC(lp, s);
- EMPTYARC(s2, rp);
- EMPTYARC(s2, s);
- break;
- case PAIR(SOME, SOME): /* do as x{m-1,n-1}x */
- s = newstate(v->nfa);
- NOERR();
- moveouts(v->nfa, lp, s);
- dupnfa(v->nfa, s, rp, lp, s);
- NOERR();
- repeat(v, lp, s, m-1, n-1);
- break;
- case PAIR(SOME, INF): /* do as x{m-1,}x */
- s = newstate(v->nfa);
- NOERR();
- moveouts(v->nfa, lp, s);
- dupnfa(v->nfa, s, rp, lp, s);
- NOERR();
- repeat(v, lp, s, m-1, n);
- break;
- default:
- ERR(REG_ASSERT);
- break;
+ switch (PAIR(rm, rn))
+ {
+ case PAIR(0, 0): /* empty string */
+ delsub(v->nfa, lp, rp);
+ EMPTYARC(lp, rp);
+ break;
+ case PAIR(0, 1): /* do as x| */
+ EMPTYARC(lp, rp);
+ break;
+ case PAIR(0, SOME): /* do as x{1,n}| */
+ repeat(v, lp, rp, 1, n);
+ NOERR();
+ EMPTYARC(lp, rp);
+ break;
+ case PAIR(0, INF): /* loop x around */
+ s = newstate(v->nfa);
+ NOERR();
+ moveouts(v->nfa, lp, s);
+ moveins(v->nfa, rp, s);
+ EMPTYARC(lp, s);
+ EMPTYARC(s, rp);
+ break;
+ case PAIR(1, 1): /* no action required */
+ break;
+ case PAIR(1, SOME): /* do as x{0,n-1}x = (x{1,n-1}|)x */
+ s = newstate(v->nfa);
+ NOERR();
+ moveouts(v->nfa, lp, s);
+ dupnfa(v->nfa, s, rp, lp, s);
+ NOERR();
+ repeat(v, lp, s, 1, n - 1);
+ NOERR();
+ EMPTYARC(lp, s);
+ break;
+ case PAIR(1, INF): /* add loopback arc */
+ s = newstate(v->nfa);
+ s2 = newstate(v->nfa);
+ NOERR();
+ moveouts(v->nfa, lp, s);
+ moveins(v->nfa, rp, s2);
+ EMPTYARC(lp, s);
+ EMPTYARC(s2, rp);
+ EMPTYARC(s2, s);
+ break;
+ case PAIR(SOME, SOME): /* do as x{m-1,n-1}x */
+ s = newstate(v->nfa);
+ NOERR();
+ moveouts(v->nfa, lp, s);
+ dupnfa(v->nfa, s, rp, lp, s);
+ NOERR();
+ repeat(v, lp, s, m - 1, n - 1);
+ break;
+ case PAIR(SOME, INF): /* do as x{m-1,}x */
+ s = newstate(v->nfa);
+ NOERR();
+ moveouts(v->nfa, lp, s);
+ dupnfa(v->nfa, s, rp, lp, s);
+ NOERR();
+ repeat(v, lp, s, m - 1, n);
+ break;
+ default:
+ ERR(REG_ASSERT);
+ break;
}
}
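
repeat() collapses the (m, n) bounds into a tiny case space before building NFA structure: REDUCE() maps any count to 0, 1, SOME (>1) or INF, and PAIR() packs the two reduced values into one switchable integer, so x{3,7} and x{2,5} take the same PAIR(SOME, SOME) construction path. The packing pulled out on its own (UNBOUNDED again stands in for the code's INFINITY marker):

    /* sketch: the REDUCE/PAIR trick used by repeat() above */
    #define UNBOUNDED   (-1)
    #define SOME        2
    #define INF         3
    #define PAIR(x, y)  ((x) * 4 + (y))
    #define REDUCE(x)   (((x) == UNBOUNDED) ? INF : (((x) > 1) ? SOME : (x)))

    /* worked values:
     *   x{3,7}  -> PAIR(REDUCE(3), REDUCE(7))  == PAIR(SOME, SOME) == 10
     *   x{0,}   -> PAIR(REDUCE(0), REDUCE(-1)) == PAIR(0, INF)     ==  3
     *   x?      -> PAIR(REDUCE(0), REDUCE(1))  == PAIR(0, 1)       ==  1
     */
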
@@ -1250,9 +1312,9 @@ repeat(struct vars *v,
* Also called from cbracket for complemented bracket expressions.
*/
static void
-bracket(struct vars *v,
- struct state *lp,
- struct state *rp)
+bracket(struct vars * v,
+ struct state * lp,
+ struct state * rp)
{
assert(SEE('['));
NEXT();
@@ -1265,27 +1327,27 @@ bracket(struct vars *v,
/*
* cbracket - handle complemented bracket expression
* We do it by calling bracket() with dummy endpoints, and then complementing
- * the result. The alternative would be to invoke rainbow(), and then delete
+ * the result. The alternative would be to invoke rainbow(), and then delete
* arcs as the b.e. is seen... but that gets messy.
*/
static void
-cbracket(struct vars *v,
- struct state *lp,
- struct state *rp)
+cbracket(struct vars * v,
+ struct state * lp,
+ struct state * rp)
{
struct state *left = newstate(v->nfa);
struct state *right = newstate(v->nfa);
struct state *s;
- struct arc *a; /* arc from lp */
- struct arc *ba; /* arc from left, from bracket() */
- struct arc *pa; /* MCCE-prototype arc */
- color co;
- chr *p;
- int i;
+ struct arc *a; /* arc from lp */
+ struct arc *ba; /* arc from left, from bracket() */
+ struct arc *pa; /* MCCE-prototype arc */
+ color co;
+ chr *p;
+ int i;
NOERR();
bracket(v, left, right);
- if (v->cflags&REG_NLSTOP)
+ if (v->cflags & REG_NLSTOP)
newarc(v->nfa, PLAIN, v->nlcolor, left, right);
NOERR();
@@ -1294,7 +1356,8 @@ cbracket(struct vars *v,
/* easy part of complementing */
colorcomplement(v->nfa, v->cm, PLAIN, left, lp, rp);
NOERR();
- if (v->mcces == NULL) { /* no MCCEs -- we're done */
+ if (v->mcces == NULL)
+ { /* no MCCEs -- we're done */
dropstate(v->nfa, left);
assert(right->nins == 0);
freestate(v->nfa, right);
@@ -1303,33 +1366,39 @@ cbracket(struct vars *v,
/* but complementing gets messy in the presence of MCCEs... */
NOTE(REG_ULOCALE);
- for (p = v->mcces->chrs, i = v->mcces->nchrs; i > 0; p++, i--) {
+ for (p = v->mcces->chrs, i = v->mcces->nchrs; i > 0; p++, i--)
+ {
co = GETCOLOR(v->cm, *p);
a = findarc(lp, PLAIN, co);
ba = findarc(left, PLAIN, co);
- if (ba == NULL) {
+ if (ba == NULL)
+ {
assert(a != NULL);
freearc(v->nfa, a);
- } else {
- assert(a == NULL);
}
+ else
+ assert(a == NULL);
s = newstate(v->nfa);
NOERR();
newarc(v->nfa, PLAIN, co, lp, s);
NOERR();
pa = findarc(v->mccepbegin, PLAIN, co);
assert(pa != NULL);
- if (ba == NULL) { /* easy case, need all of them */
+ if (ba == NULL)
+ { /* easy case, need all of them */
cloneouts(v->nfa, pa->to, s, rp, PLAIN);
newarc(v->nfa, '$', 1, s, rp);
newarc(v->nfa, '$', 0, s, rp);
colorcomplement(v->nfa, v->cm, AHEAD, pa->to, s, rp);
- } else { /* must be selective */
- if (findarc(ba->to, '$', 1) == NULL) {
+ }
+ else
+ { /* must be selective */
+ if (findarc(ba->to, '$', 1) == NULL)
+ {
newarc(v->nfa, '$', 1, s, rp);
newarc(v->nfa, '$', 0, s, rp);
colorcomplement(v->nfa, v->cm, AHEAD, pa->to,
- s, rp);
+ s, rp);
}
for (pa = pa->to->outs; pa != NULL; pa = pa->outchain)
if (findarc(ba->to, PLAIN, pa->co) == NULL)
@@ -1346,83 +1415,39 @@ cbracket(struct vars *v,
assert(right->nins == 0);
freestate(v->nfa, right);
}
-
+
/*
* brackpart - handle one item (or range) within a bracket expression
*/
static void
-brackpart(struct vars *v,
- struct state *lp,
- struct state *rp)
+brackpart(struct vars * v,
+ struct state * lp,
+ struct state * rp)
{
- celt startc;
- celt endc;
+ celt startc;
+ celt endc;
struct cvec *cv;
- chr *startp;
- chr *endp;
- chr c[1];
+ chr *startp;
+ chr *endp;
+ chr c[1];
/* parse something, get rid of special cases, take shortcuts */
- switch (v->nexttype) {
- case RANGE: /* a-b-c or other botch */
- ERR(REG_ERANGE);
- return;
- break;
- case PLAIN:
- c[0] = v->nextvalue;
- NEXT();
- /* shortcut for ordinary chr (not range, not MCCE leader) */
- if (!SEE(RANGE) && !ISCELEADER(v, c[0])) {
- onechr(v, c[0], lp, rp);
+ switch (v->nexttype)
+ {
+ case RANGE: /* a-b-c or other botch */
+ ERR(REG_ERANGE);
return;
- }
- startc = element(v, c, c+1);
- NOERR();
- break;
- case COLLEL:
- startp = v->now;
- endp = scanplain(v);
- INSIST(startp < endp, REG_ECOLLATE);
- NOERR();
- startc = element(v, startp, endp);
- NOERR();
- break;
- case ECLASS:
- startp = v->now;
- endp = scanplain(v);
- INSIST(startp < endp, REG_ECOLLATE);
- NOERR();
- startc = element(v, startp, endp);
- NOERR();
- cv = eclass(v, startc, (v->cflags&REG_ICASE));
- NOERR();
- dovec(v, cv, lp, rp);
- return;
- break;
- case CCLASS:
- startp = v->now;
- endp = scanplain(v);
- INSIST(startp < endp, REG_ECTYPE);
- NOERR();
- cv = cclass(v, startp, endp, (v->cflags&REG_ICASE));
- NOERR();
- dovec(v, cv, lp, rp);
- return;
- break;
- default:
- ERR(REG_ASSERT);
- return;
- break;
- }
-
- if (SEE(RANGE)) {
- NEXT();
- switch (v->nexttype) {
+ break;
case PLAIN:
- case RANGE:
c[0] = v->nextvalue;
NEXT();
- endc = element(v, c, c+1);
+ /* shortcut for ordinary chr (not range, not MCCE leader) */
+ if (!SEE(RANGE) && !ISCELEADER(v, c[0]))
+ {
+ onechr(v, c[0], lp, rp);
+ return;
+ }
+ startc = element(v, c, c + 1);
NOERR();
break;
case COLLEL:
@@ -1430,25 +1455,74 @@ brackpart(struct vars *v,
endp = scanplain(v);
INSIST(startp < endp, REG_ECOLLATE);
NOERR();
- endc = element(v, startp, endp);
+ startc = element(v, startp, endp);
+ NOERR();
+ break;
+ case ECLASS:
+ startp = v->now;
+ endp = scanplain(v);
+ INSIST(startp < endp, REG_ECOLLATE);
+ NOERR();
+ startc = element(v, startp, endp);
+ NOERR();
+ cv = eclass(v, startc, (v->cflags & REG_ICASE));
NOERR();
+ dovec(v, cv, lp, rp);
+ return;
+ break;
+ case CCLASS:
+ startp = v->now;
+ endp = scanplain(v);
+ INSIST(startp < endp, REG_ECTYPE);
+ NOERR();
+ cv = cclass(v, startp, endp, (v->cflags & REG_ICASE));
+ NOERR();
+ dovec(v, cv, lp, rp);
+ return;
break;
default:
- ERR(REG_ERANGE);
+ ERR(REG_ASSERT);
return;
break;
+ }
+
+ if (SEE(RANGE))
+ {
+ NEXT();
+ switch (v->nexttype)
+ {
+ case PLAIN:
+ case RANGE:
+ c[0] = v->nextvalue;
+ NEXT();
+ endc = element(v, c, c + 1);
+ NOERR();
+ break;
+ case COLLEL:
+ startp = v->now;
+ endp = scanplain(v);
+ INSIST(startp < endp, REG_ECOLLATE);
+ NOERR();
+ endc = element(v, startp, endp);
+ NOERR();
+ break;
+ default:
+ ERR(REG_ERANGE);
+ return;
+ break;
}
- } else
+ }
+ else
endc = startc;
/*
- * Ranges are unportable. Actually, standard C does
- * guarantee that digits are contiguous, but making
- * that an exception is just too complicated.
+ * Ranges are unportable. Actually, standard C does guarantee that
+ * digits are contiguous, but making that an exception is just too
+ * complicated.
*/
if (startc != endc)
NOTE(REG_UUNPORT);
- cv = range(v, startc, endc, (v->cflags&REG_ICASE));
+ cv = range(v, startc, endc, (v->cflags & REG_ICASE));
NOERR();
dovec(v, cv, lp, rp);
}
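
The REG_UUNPORT note above exists because bracket ranges such as [a-z] depend on the execution character set's ordering; ISO C only guarantees that the decimal digits '0'..'9' are contiguous, so [0-9] is portable while [a-z] picks up extra characters on EBCDIC systems. The one guarantee that does hold, as a quick check:

    #include <assert.h>

    /* ISO C guarantees '0'..'9' are consecutive, so this always holds */
    static void digit_contiguity_check(void)
    {
        for (int d = 0; d < 10; d++)
            assert('0' + d == "0123456789"[d]);
    }
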
@@ -1459,16 +1533,17 @@ brackpart(struct vars *v,
* Certain bits of trickery in lex.c know that this code does not try
* to look past the final bracket of the [. etc.
*/
-static chr * /* just after end of sequence */
-scanplain(struct vars *v)
+static chr * /* just after end of sequence */
+scanplain(struct vars * v)
{
- chr *endp;
+ chr *endp;
assert(SEE(COLLEL) || SEE(ECLASS) || SEE(CCLASS));
NEXT();
endp = v->now;
- while (SEE(PLAIN)) {
+ while (SEE(PLAIN))
+ {
endp = v->now;
NEXT();
}
@@ -1485,12 +1560,12 @@ scanplain(struct vars *v)
* certainly necessary, and sets up little disconnected subNFA.
*/
static void
-leaders(struct vars *v,
- struct cvec *cv)
+leaders(struct vars * v,
+ struct cvec * cv)
{
- int mcce;
- chr *p;
- chr leader;
+ int mcce;
+ chr *p;
+ chr leader;
struct state *s;
struct arc *a;
@@ -1498,16 +1573,20 @@ leaders(struct vars *v,
v->mccepend = newstate(v->nfa);
NOERR();
- for (mcce = 0; mcce < cv->nmcces; mcce++) {
+ for (mcce = 0; mcce < cv->nmcces; mcce++)
+ {
p = cv->mcces[mcce];
leader = *p;
- if (!haschr(cv, leader)) {
+ if (!haschr(cv, leader))
+ {
addchr(cv, leader);
s = newstate(v->nfa);
newarc(v->nfa, PLAIN, subcolor(v->cm, leader),
- v->mccepbegin, s);
+ v->mccepbegin, s);
okcolors(v->nfa, v->cm);
- } else {
+ }
+ else
+ {
a = findarc(v->mccepbegin, PLAIN,
GETCOLOR(v->cm, leader));
assert(a != NULL);
@@ -1515,7 +1594,8 @@ leaders(struct vars *v,
assert(s != v->mccepend);
}
p++;
- assert(*p != 0 && *(p+1) == 0); /* only 2-char MCCEs for now */
+ assert(*p != 0 && *(p + 1) == 0); /* only 2-char MCCEs for
+ * now */
newarc(v->nfa, PLAIN, subcolor(v->cm, *p), s, v->mccepend);
okcolors(v->nfa, v->cm);
}
@@ -1526,12 +1606,13 @@ leaders(struct vars *v,
* This is mostly a shortcut for efficient handling of the common case.
*/
static void
-onechr(struct vars *v,
+onechr(struct vars * v,
chr c,
- struct state *lp,
- struct state *rp)
+ struct state * lp,
+ struct state * rp)
{
- if (!(v->cflags&REG_ICASE)) {
+ if (!(v->cflags & REG_ICASE))
+ {
newarc(v->nfa, PLAIN, subcolor(v->cm, c), lp, rp);
return;
}
@@ -1545,42 +1626,50 @@ onechr(struct vars *v,
* This one has to handle the messy cases, like MCCEs and MCCE leaders.
*/
static void
-dovec(struct vars *v,
- struct cvec *cv,
- struct state *lp,
- struct state *rp)
+dovec(struct vars * v,
+ struct cvec * cv,
+ struct state * lp,
+ struct state * rp)
{
- chr ch, from, to;
- celt ce;
- chr *p;
- int i;
- color co;
+ chr ch,
+ from,
+ to;
+ celt ce;
+ chr *p;
+ int i;
+ color co;
struct cvec *leads;
struct arc *a;
- struct arc *pa; /* arc in prototype */
+ struct arc *pa; /* arc in prototype */
struct state *s;
- struct state *ps; /* state in prototype */
+ struct state *ps; /* state in prototype */
/* need a place to store leaders, if any */
- if (nmcces(v) > 0) {
+ if (nmcces(v) > 0)
+ {
assert(v->mcces != NULL);
- if (v->cv2 == NULL || v->cv2->nchrs < v->mcces->nchrs) {
+ if (v->cv2 == NULL || v->cv2->nchrs < v->mcces->nchrs)
+ {
if (v->cv2 != NULL)
free(v->cv2);
v->cv2 = newcvec(v->mcces->nchrs, 0, v->mcces->nmcces);
NOERR();
leads = v->cv2;
- } else
+ }
+ else
leads = clearcvec(v->cv2);
- } else
+ }
+ else
leads = NULL;
/* first, get the ordinary characters out of the way */
- for (p = cv->chrs, i = cv->nchrs; i > 0; p++, i--) {
+ for (p = cv->chrs, i = cv->nchrs; i > 0; p++, i--)
+ {
ch = *p;
if (!ISCELEADER(v, ch))
newarc(v->nfa, PLAIN, subcolor(v->cm, ch), lp, rp);
- else {
+ else
+ {
assert(singleton(v->cm, ch));
assert(leads != NULL);
if (!haschr(leads, ch))
@@ -1589,10 +1678,12 @@ dovec(struct vars *v,
}
/* and the ranges */
- for (p = cv->ranges, i = cv->nranges; i > 0; p += 2, i--) {
+ for (p = cv->ranges, i = cv->nranges; i > 0; p += 2, i--)
+ {
from = *p;
- to = *(p+1);
- while (from <= to && (ce = nextleader(v, from, to)) != NOCELT) {
+ to = *(p + 1);
+ while (from <= to && (ce = nextleader(v, from, to)) != NOCELT)
+ {
if (from < ce)
subrange(v, from, ce - 1, lp, rp);
assert(singleton(v->cm, ce));
@@ -1610,12 +1701,14 @@ dovec(struct vars *v,
/* deal with the MCCE leaders */
NOTE(REG_ULOCALE);
- for (p = leads->chrs, i = leads->nchrs; i > 0; p++, i--) {
+ for (p = leads->chrs, i = leads->nchrs; i > 0; p++, i--)
+ {
co = GETCOLOR(v->cm, *p);
a = findarc(lp, PLAIN, co);
if (a != NULL)
s = a->to;
- else {
+ else
+ {
s = newstate(v->nfa);
NOERR();
newarc(v->nfa, PLAIN, co, lp, s);
@@ -1631,10 +1724,12 @@ dovec(struct vars *v,
}
/* and the MCCEs */
- for (i = 0; i < cv->nmcces; i++) {
+ for (i = 0; i < cv->nmcces; i++)
+ {
p = cv->mcces[i];
assert(singleton(v->cm, *p));
- if (!singleton(v->cm, *p)) {
+ if (!singleton(v->cm, *p))
+ {
ERR(REG_ASSERT);
return;
}
@@ -1643,17 +1738,18 @@ dovec(struct vars *v,
a = findarc(lp, PLAIN, co);
if (a != NULL)
s = a->to;
- else {
+ else
+ {
s = newstate(v->nfa);
NOERR();
newarc(v->nfa, PLAIN, co, lp, s);
NOERR();
}
- assert(*p != 0); /* at least two chars */
+ assert(*p != 0); /* at least two chars */
assert(singleton(v->cm, *p));
ch = *p++;
co = GETCOLOR(v->cm, ch);
- assert(*p == 0); /* and only two, for now */
+ assert(*p == 0); /* and only two, for now */
newarc(v->nfa, PLAIN, co, s, rp);
NOERR();
}
@@ -1662,20 +1758,21 @@ dovec(struct vars *v,
/*
* nextleader - find next MCCE leader within range
*/
-static celt /* NOCELT means none */
-nextleader(struct vars *v,
+static celt /* NOCELT means none */
+nextleader(struct vars * v,
chr from,
chr to)
{
- int i;
- chr *p;
- chr ch;
- celt it = NOCELT;
+ int i;
+ chr *p;
+ chr ch;
+ celt it = NOCELT;
if (v->mcces == NULL)
return it;
- for (i = v->mcces->nchrs, p = v->mcces->chrs; i > 0; i--, p++) {
+ for (i = v->mcces->nchrs, p = v->mcces->chrs; i > 0; i--, p++)
+ {
ch = *p;
if (from <= ch && ch <= to)
if (it == NOCELT || ch < it)
@@ -1694,20 +1791,21 @@ nextleader(struct vars *v,
* should be cleaned up to reduce dependencies on input scanning.
*/
static void
-wordchrs(struct vars *v)
+wordchrs(struct vars * v)
{
struct state *left;
struct state *right;
- if (v->wordchrs != NULL) {
- NEXT(); /* for consistency */
+ if (v->wordchrs != NULL)
+ {
+ NEXT(); /* for consistency */
return;
}
left = newstate(v->nfa);
right = newstate(v->nfa);
NOERR();
- /* fine point: implemented with [::], and lexer will set REG_ULOCALE */
+ /* fine point: implemented with [::], and lexer will set REG_ULOCALE */
lexword(v);
NEXT();
assert(v->savenow != NULL && SEE('['));
@@ -1722,20 +1820,22 @@ wordchrs(struct vars *v)
* subre - allocate a subre
*/
static struct subre *
-subre(struct vars *v,
+subre(struct vars * v,
int op,
int flags,
- struct state *begin,
- struct state *end)
+ struct state * begin,
+ struct state * end)
{
struct subre *ret;
ret = v->treefree;
if (ret != NULL)
v->treefree = ret->left;
- else {
- ret = (struct subre *)MALLOC(sizeof(struct subre));
- if (ret == NULL) {
+ else
+ {
+ ret = (struct subre *) MALLOC(sizeof(struct subre));
+ if (ret == NULL)
+ {
ERR(REG_ESPACE);
return NULL;
}
@@ -1763,8 +1863,8 @@ subre(struct vars *v,
* freesubre - free a subRE subtree
*/
static void
-freesubre(struct vars *v, /* might be NULL */
- struct subre *sr)
+freesubre(struct vars * v, /* might be NULL */
+ struct subre * sr)
{
if (sr == NULL)
return;
@@ -1781,8 +1881,8 @@ freesubre(struct vars *v, /* might be NULL */
* freesrnode - free one node in a subRE subtree
*/
static void
-freesrnode(struct vars *v, /* might be NULL */
- struct subre *sr)
+freesrnode(struct vars * v, /* might be NULL */
+ struct subre * sr)
{
if (sr == NULL)
return;
@@ -1791,10 +1891,12 @@ freesrnode(struct vars *v, /* might be NULL */
freecnfa(&sr->cnfa);
sr->flags = 0;
- if (v != NULL) {
+ if (v != NULL)
+ {
sr->left = v->treefree;
v->treefree = sr;
- } else
+ }
+ else
FREE(sr);
}
@@ -1802,8 +1904,8 @@ freesrnode(struct vars *v, /* might be NULL */
* optst - optimize a subRE subtree
*/
static void
-optst(struct vars *v,
- struct subre *t)
+optst(struct vars * v,
+ struct subre * t)
{
if (t == NULL)
return;
@@ -1818,16 +1920,16 @@ optst(struct vars *v,
/*
* numst - number tree nodes (assigning retry indexes)
*/
-static int /* next number */
-numst(struct subre *t,
- int start) /* starting point for subtree numbers */
+static int /* next number */
+numst(struct subre * t,
+ int start) /* starting point for subtree numbers */
{
- int i;
+ int i;
assert(t != NULL);
i = start;
- t->retry = (short)i++;
+ t->retry = (short) i++;
if (t->left != NULL)
i = numst(t->left, i);
if (t->right != NULL)
@@ -1839,7 +1941,7 @@ numst(struct subre *t,
* markst - mark tree nodes as INUSE
*/
static void
-markst(struct subre *t)
+markst(struct subre * t)
{
assert(t != NULL);
@@ -1854,34 +1956,35 @@ markst(struct subre *t)
* cleanst - free any tree nodes not marked INUSE
*/
static void
-cleanst(struct vars *v)
+cleanst(struct vars * v)
{
struct subre *t;
struct subre *next;
- for (t = v->treechain; t != NULL; t = next) {
+ for (t = v->treechain; t != NULL; t = next)
+ {
next = t->chain;
- if (!(t->flags&INUSE))
+ if (!(t->flags & INUSE))
FREE(t);
}
v->treechain = NULL;
- v->treefree = NULL; /* just on general principles */
+ v->treefree = NULL; /* just on general principles */
}
/*
* nfatree - turn a subRE subtree into a tree of compacted NFAs
*/
-static long /* optimize results from top node */
-nfatree(struct vars *v,
- struct subre *t,
+static long /* optimize results from top node */
+nfatree(struct vars * v,
+ struct subre * t,
FILE *f) /* for debug output */
{
assert(t != NULL && t->begin != NULL);
if (t->left != NULL)
- (DISCARD)nfatree(v, t->left, f);
+ (DISCARD) nfatree(v, t->left, f);
if (t->right != NULL)
- (DISCARD)nfatree(v, t->right, f);
+ (DISCARD) nfatree(v, t->right, f);
return nfanode(v, t, f);
}
@@ -1889,29 +1992,30 @@ nfatree(struct vars *v,
/*
* nfanode - do one NFA for nfatree
*/
-static long /* optimize results */
-nfanode(struct vars *v,
- struct subre *t,
+static long /* optimize results */
+nfanode(struct vars * v,
+ struct subre * t,
FILE *f) /* for debug output */
{
struct nfa *nfa;
- long ret = 0;
+ long ret = 0;
assert(t->begin != NULL);
#ifdef REG_DEBUG
if (f != NULL)
{
- char idbuf[50];
+ char idbuf[50];
fprintf(f, "\n\n\n========= TREE NODE %s ==========\n",
- stid(t, idbuf, sizeof(idbuf)));
+ stid(t, idbuf, sizeof(idbuf)));
}
#endif
nfa = newnfa(v, v->cm, v->nfa);
NOERRZ();
dupnfa(nfa, t->begin, t->end, nfa->init, nfa->final);
- if (!ISERR()) {
+ if (!ISERR())
+ {
specialcolors(nfa);
ret = optimize(nfa, f);
}
@@ -1925,25 +2029,29 @@ nfanode(struct vars *v,
/*
* newlacon - allocate a lookahead-constraint subRE
*/
-static int /* lacon number */
-newlacon(struct vars *v,
- struct state *begin,
- struct state *end,
+static int /* lacon number */
+newlacon(struct vars * v,
+ struct state * begin,
+ struct state * end,
int pos)
{
- int n;
+ int n;
struct subre *sub;
- if (v->nlacons == 0) {
- v->lacons = (struct subre *)MALLOC(2 * sizeof(struct subre));
- n = 1; /* skip 0th */
+ if (v->nlacons == 0)
+ {
+ v->lacons = (struct subre *) MALLOC(2 * sizeof(struct subre));
+ n = 1; /* skip 0th */
v->nlacons = 2;
- } else {
- v->lacons = (struct subre *)REALLOC(v->lacons,
- (v->nlacons+1)*sizeof(struct subre));
+ }
+ else
+ {
+ v->lacons = (struct subre *) REALLOC(v->lacons,
+ (v->nlacons + 1) * sizeof(struct subre));
n = v->nlacons++;
}
- if (v->lacons == NULL) {
+ if (v->lacons == NULL)
+ {
ERR(REG_ESPACE);
return 0;
}
@@ -1959,11 +2067,11 @@ newlacon(struct vars *v,
* freelacons - free lookahead-constraint subRE vector
*/
static void
-freelacons(struct subre *subs,
+freelacons(struct subre * subs,
int n)
{
struct subre *sub;
- int i;
+ int i;
assert(n > 0);
for (sub = subs + 1, i = n - 1; i > 0; sub++, i--) /* no 0th */
@@ -1983,14 +2091,14 @@ rfree(regex_t *re)
if (re == NULL || re->re_magic != REMAGIC)
return;
- re->re_magic = 0; /* invalidate RE */
- g = (struct guts *)re->re_guts;
+ re->re_magic = 0; /* invalidate RE */
+ g = (struct guts *) re->re_guts;
re->re_guts = NULL;
re->re_fns = NULL;
g->magic = 0;
freecm(&g->cmap);
if (g->tree != NULL)
- freesubre((struct vars *)NULL, g->tree);
+ freesubre((struct vars *) NULL, g->tree);
if (g->lacons != NULL)
freelacons(g->lacons, g->nlacons);
if (!NULLCNFA(g->search))
@@ -2008,30 +2116,33 @@ dump(regex_t *re,
FILE *f)
{
struct guts *g;
- int i;
+ int i;
if (re->re_magic != REMAGIC)
fprintf(f, "bad magic number (0x%x not 0x%x)\n", re->re_magic,
- REMAGIC);
- if (re->re_guts == NULL) {
+ REMAGIC);
+ if (re->re_guts == NULL)
+ {
fprintf(f, "NULL guts!!!\n");
return;
}
- g = (struct guts *)re->re_guts;
+ g = (struct guts *) re->re_guts;
if (g->magic != GUTSMAGIC)
fprintf(f, "bad guts magic number (0x%x not 0x%x)\n", g->magic,
- GUTSMAGIC);
+ GUTSMAGIC);
fprintf(f, "\n\n\n========= DUMP ==========\n");
- fprintf(f, "nsub %d, info 0%lo, csize %d, ntree %d\n",
- re->re_nsub, re->re_info, re->re_csize, g->ntree);
+ fprintf(f, "nsub %d, info 0%lo, csize %d, ntree %d\n",
+ re->re_nsub, re->re_info, re->re_csize, g->ntree);
dumpcolors(&g->cmap, f);
- if (!NULLCNFA(g->search)) {
+ if (!NULLCNFA(g->search))
+ {
printf("\nsearch:\n");
dumpcnfa(&g->search, f);
}
- for (i = 1; i < g->nlacons; i++) {
+ for (i = 1; i < g->nlacons; i++)
+ {
fprintf(f, "\nla%d (%s):\n", i,
(g->lacons[i].subno) ? "positive" : "negative");
dumpcnfa(&g->lacons[i].cnfa, f);
@@ -2044,7 +2155,7 @@ dump(regex_t *re,
* dumpst - dump a subRE tree
*/
static void
-dumpst(struct subre *t,
+dumpst(struct subre * t,
FILE *f,
int nfapresent) /* is the original NFA still around? */
{
@@ -2059,40 +2170,42 @@ dumpst(struct subre *t,
* stdump - recursive guts of dumpst
*/
static void
-stdump(struct subre *t,
+stdump(struct subre * t,
FILE *f,
int nfapresent) /* is the original NFA still around? */
{
- char idbuf[50];
+ char idbuf[50];
fprintf(f, "%s. `%c'", stid(t, idbuf, sizeof(idbuf)), t->op);
- if (t->flags&LONGER)
+ if (t->flags & LONGER)
fprintf(f, " longest");
- if (t->flags&SHORTER)
+ if (t->flags & SHORTER)
fprintf(f, " shortest");
- if (t->flags&MIXED)
+ if (t->flags & MIXED)
fprintf(f, " hasmixed");
- if (t->flags&CAP)
+ if (t->flags & CAP)
fprintf(f, " hascapture");
- if (t->flags&BACKR)
+ if (t->flags & BACKR)
fprintf(f, " hasbackref");
- if (!(t->flags&INUSE))
+ if (!(t->flags & INUSE))
fprintf(f, " UNUSED");
if (t->subno != 0)
fprintf(f, " (#%d)", t->subno);
- if (t->min != 1 || t->max != 1) {
+ if (t->min != 1 || t->max != 1)
+ {
fprintf(f, " {%d,", t->min);
if (t->max != INFINITY)
fprintf(f, "%d", t->max);
fprintf(f, "}");
}
if (nfapresent)
- fprintf(f, " %ld-%ld", (long)t->begin->no, (long)t->end->no);
+ fprintf(f, " %ld-%ld", (long) t->begin->no, (long) t->end->no);
if (t->left != NULL)
fprintf(f, " L:%s", stid(t->left, idbuf, sizeof(idbuf)));
if (t->right != NULL)
fprintf(f, " R:%s", stid(t->right, idbuf, sizeof(idbuf)));
- if (!NULLCNFA(t->cnfa)) {
+ if (!NULLCNFA(t->cnfa))
+ {
fprintf(f, "\n");
dumpcnfa(&t->cnfa, f);
fprintf(f, "\n");
@@ -2106,22 +2219,21 @@ stdump(struct subre *t,
/*
* stid - identify a subtree node for dumping
*/
-static char * /* points to buf or constant string */
-stid(struct subre *t,
+static char * /* points to buf or constant string */
+stid(struct subre * t,
char *buf,
size_t bufsize)
{
/* big enough for hex int or decimal t->retry? */
- if (bufsize < sizeof(int)*2 + 3 || bufsize < sizeof(t->retry)*3 + 1)
+ if (bufsize < sizeof(int) * 2 + 3 || bufsize < sizeof(t->retry) * 3 + 1)
return "unable";
if (t->retry != 0)
sprintf(buf, "%d", t->retry);
else
- sprintf(buf, "0x%x", (int)t); /* may lose bits, that's okay */
+ sprintf(buf, "0x%x", (int) t); /* may lose bits, that's okay */
return buf;
}
-
-#endif /* REG_DEBUG */
+#endif /* REG_DEBUG */
#include "regc_lex.c"
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index 3bdfc2ab182..6004462c934 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -2,21 +2,21 @@
* DFA routines
* This file is #included by regexec.c.
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -28,27 +28,27 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/rege_dfa.c,v 1.1 2003/02/05 17:41:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/rege_dfa.c,v 1.2 2003/08/04 00:43:21 momjian Exp $
*
*/
/*
* longest - longest-preferred matching engine
*/
-static chr * /* endpoint, or NULL */
-longest(struct vars *v, /* used only for debug and exec flags */
- struct dfa *d,
- chr *start, /* where the match should start */
- chr *stop, /* match must end at or before here */
+static chr * /* endpoint, or NULL */
+longest(struct vars * v, /* used only for debug and exec flags */
+ struct dfa * d,
+ chr * start, /* where the match should start */
+ chr * stop, /* match must end at or before here */
int *hitstopp) /* record whether hit v->stop, if non-NULL */
{
- chr *cp;
- chr *realstop = (stop == v->stop) ? stop : stop + 1;
- color co;
+ chr *cp;
+ chr *realstop = (stop == v->stop) ? stop : stop + 1;
+ color co;
struct sset *css;
struct sset *ss;
- chr *post;
- int i;
+ chr *post;
+ int i;
struct colormap *cm = d->cm;
/* initialize */
@@ -59,12 +59,15 @@ longest(struct vars *v, /* used only for debug and exec flags */
/* startup */
FDEBUG(("+++ startup +++\n"));
- if (cp == v->start) {
- co = d->cnfa->bos[(v->eflags&REG_NOTBOL) ? 0 : 1];
- FDEBUG(("color %ld\n", (long)co));
- } else {
+ if (cp == v->start)
+ {
+ co = d->cnfa->bos[(v->eflags & REG_NOTBOL) ? 0 : 1];
+ FDEBUG(("color %ld\n", (long) co));
+ }
+ else
+ {
co = GETCOLOR(cm, *(cp - 1));
- FDEBUG(("char %c, color %ld\n", (char)*(cp-1), (long)co));
+ FDEBUG(("char %c, color %ld\n", (char) *(cp - 1), (long) co));
}
css = miss(v, d, css, co, cp, start);
if (css == NULL)
@@ -72,29 +75,33 @@ longest(struct vars *v, /* used only for debug and exec flags */
css->lastseen = cp;
/* main loop */
- if (v->eflags&REG_FTRACE)
- while (cp < realstop) {
+ if (v->eflags & REG_FTRACE)
+ while (cp < realstop)
+ {
FDEBUG(("+++ at c%d +++\n", css - d->ssets));
co = GETCOLOR(cm, *cp);
- FDEBUG(("char %c, color %ld\n", (char)*cp, (long)co));
+ FDEBUG(("char %c, color %ld\n", (char) *cp, (long) co));
ss = css->outs[co];
- if (ss == NULL) {
- ss = miss(v, d, css, co, cp+1, start);
+ if (ss == NULL)
+ {
+ ss = miss(v, d, css, co, cp + 1, start);
if (ss == NULL)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
cp++;
ss->lastseen = cp;
css = ss;
}
else
- while (cp < realstop) {
+ while (cp < realstop)
+ {
co = GETCOLOR(cm, *cp);
ss = css->outs[co];
- if (ss == NULL) {
- ss = miss(v, d, css, co, cp+1, start);
+ if (ss == NULL)
+ {
+ ss = miss(v, d, css, co, cp + 1, start);
if (ss == NULL)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
cp++;
ss->lastseen = cp;
@@ -103,14 +110,15 @@ longest(struct vars *v, /* used only for debug and exec flags */
/* shutdown */
FDEBUG(("+++ shutdown at c%d +++\n", css - d->ssets));
- if (cp == v->stop && stop == v->stop) {
+ if (cp == v->stop && stop == v->stop)
+ {
if (hitstopp != NULL)
*hitstopp = 1;
- co = d->cnfa->eos[(v->eflags&REG_NOTEOL) ? 0 : 1];
- FDEBUG(("color %ld\n", (long)co));
+ co = d->cnfa->eos[(v->eflags & REG_NOTEOL) ? 0 : 1];
+ FDEBUG(("color %ld\n", (long) co));
ss = miss(v, d, css, co, cp, start);
/* special case: match ended at eol? */
- if (ss != NULL && (ss->flags&POSTSTATE))
+ if (ss != NULL && (ss->flags & POSTSTATE))
return cp;
else if (ss != NULL)
ss->lastseen = cp; /* to be tidy */
@@ -119,10 +127,10 @@ longest(struct vars *v, /* used only for debug and exec flags */
/* find last match, if any */
post = d->lastpost;
for (ss = d->ssets, i = d->nssused; i > 0; ss++, i--)
- if ((ss->flags&POSTSTATE) && post != ss->lastseen &&
- (post == NULL || post < ss->lastseen))
+ if ((ss->flags & POSTSTATE) && post != ss->lastseen &&
+ (post == NULL || post < ss->lastseen))
post = ss->lastseen;
- if (post != NULL) /* found one */
+ if (post != NULL) /* found one */
return post - 1;
return NULL;
@@ -131,19 +139,20 @@ longest(struct vars *v, /* used only for debug and exec flags */
/*
* shortest - shortest-preferred matching engine
*/
-static chr * /* endpoint, or NULL */
-shortest(struct vars *v,
- struct dfa *d,
- chr *start, /* where the match should start */
- chr *min, /* match must end at or after here */
- chr *max, /* match must end at or before here */
- chr **coldp, /* store coldstart pointer here, if nonNULL */
+static chr * /* endpoint, or NULL */
+shortest(struct vars * v,
+ struct dfa * d,
+ chr * start, /* where the match should start */
+ chr * min, /* match must end at or after here */
+ chr * max, /* match must end at or before here */
+ chr ** coldp, /* store coldstart pointer here, if
+ * nonNULL */
int *hitstopp) /* record whether hit v->stop, if non-NULL */
{
- chr *cp;
- chr *realmin = (min == v->stop) ? min : min + 1;
- chr *realmax = (max == v->stop) ? max : max + 1;
- color co;
+ chr *cp;
+ chr *realmin = (min == v->stop) ? min : min + 1;
+ chr *realmax = (max == v->stop) ? max : max + 1;
+ color co;
struct sset *css;
struct sset *ss;
struct colormap *cm = d->cm;
@@ -156,12 +165,15 @@ shortest(struct vars *v,
/* startup */
FDEBUG(("--- startup ---\n"));
- if (cp == v->start) {
- co = d->cnfa->bos[(v->eflags&REG_NOTBOL) ? 0 : 1];
- FDEBUG(("color %ld\n", (long)co));
- } else {
+ if (cp == v->start)
+ {
+ co = d->cnfa->bos[(v->eflags & REG_NOTBOL) ? 0 : 1];
+ FDEBUG(("color %ld\n", (long) co));
+ }
+ else
+ {
co = GETCOLOR(cm, *(cp - 1));
- FDEBUG(("char %c, color %ld\n", (char)*(cp-1), (long)co));
+ FDEBUG(("char %c, color %ld\n", (char) *(cp - 1), (long) co));
}
css = miss(v, d, css, co, cp, start);
if (css == NULL)
@@ -170,58 +182,66 @@ shortest(struct vars *v,
ss = css;
/* main loop */
- if (v->eflags&REG_FTRACE)
- while (cp < realmax) {
+ if (v->eflags & REG_FTRACE)
+ while (cp < realmax)
+ {
FDEBUG(("--- at c%d ---\n", css - d->ssets));
co = GETCOLOR(cm, *cp);
- FDEBUG(("char %c, color %ld\n", (char)*cp, (long)co));
+ FDEBUG(("char %c, color %ld\n", (char) *cp, (long) co));
ss = css->outs[co];
- if (ss == NULL) {
- ss = miss(v, d, css, co, cp+1, start);
+ if (ss == NULL)
+ {
+ ss = miss(v, d, css, co, cp + 1, start);
if (ss == NULL)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
cp++;
ss->lastseen = cp;
css = ss;
- if ((ss->flags&POSTSTATE) && cp >= realmin)
- break; /* NOTE BREAK OUT */
+ if ((ss->flags & POSTSTATE) && cp >= realmin)
+ break; /* NOTE BREAK OUT */
}
else
- while (cp < realmax) {
+ while (cp < realmax)
+ {
co = GETCOLOR(cm, *cp);
ss = css->outs[co];
- if (ss == NULL) {
- ss = miss(v, d, css, co, cp+1, start);
+ if (ss == NULL)
+ {
+ ss = miss(v, d, css, co, cp + 1, start);
if (ss == NULL)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
cp++;
ss->lastseen = cp;
css = ss;
- if ((ss->flags&POSTSTATE) && cp >= realmin)
- break; /* NOTE BREAK OUT */
+ if ((ss->flags & POSTSTATE) && cp >= realmin)
+ break; /* NOTE BREAK OUT */
}
if (ss == NULL)
return NULL;
- if (coldp != NULL) /* report last no-progress state set, if any */
+ if (coldp != NULL) /* report last no-progress state set, if
+ * any */
*coldp = lastcold(v, d);
- if ((ss->flags&POSTSTATE) && cp > min) {
+ if ((ss->flags & POSTSTATE) && cp > min)
+ {
assert(cp >= realmin);
cp--;
- } else if (cp == v->stop && max == v->stop) {
- co = d->cnfa->eos[(v->eflags&REG_NOTEOL) ? 0 : 1];
- FDEBUG(("color %ld\n", (long)co));
+ }
+ else if (cp == v->stop && max == v->stop)
+ {
+ co = d->cnfa->eos[(v->eflags & REG_NOTEOL) ? 0 : 1];
+ FDEBUG(("color %ld\n", (long) co));
ss = miss(v, d, css, co, cp, start);
/* match might have ended at eol */
- if ((ss == NULL || !(ss->flags&POSTSTATE)) && hitstopp != NULL)
+ if ((ss == NULL || !(ss->flags & POSTSTATE)) && hitstopp != NULL)
*hitstopp = 1;
}
- if (ss == NULL || !(ss->flags&POSTSTATE))
+ if (ss == NULL || !(ss->flags & POSTSTATE))
return NULL;
return cp;
@@ -230,19 +250,19 @@ shortest(struct vars *v,
/*
* lastcold - determine last point at which no progress had been made
*/
-static chr * /* endpoint, or NULL */
-lastcold(struct vars *v,
- struct dfa *d)
+static chr * /* endpoint, or NULL */
+lastcold(struct vars * v,
+ struct dfa * d)
{
struct sset *ss;
- chr *nopr;
- int i;
+ chr *nopr;
+ int i;
nopr = d->lastnopr;
if (nopr == NULL)
nopr = v->start;
for (ss = d->ssets, i = d->nssused; i > 0; ss++, i--)
- if ((ss->flags&NOPROGRESS) && nopr < ss->lastseen)
+ if ((ss->flags & NOPROGRESS) && nopr < ss->lastseen)
nopr = ss->lastseen;
return nopr;
}
@@ -251,24 +271,27 @@ lastcold(struct vars *v,
* newdfa - set up a fresh DFA
*/
static struct dfa *
-newdfa(struct vars *v,
- struct cnfa *cnfa,
- struct colormap *cm,
- struct smalldfa *small) /* preallocated space, may be NULL */
+newdfa(struct vars * v,
+ struct cnfa * cnfa,
+ struct colormap * cm,
+ struct smalldfa * small) /* preallocated space, may be NULL */
{
struct dfa *d;
- size_t nss = cnfa->nstates * 2;
- int wordsper = (cnfa->nstates + UBITS - 1) / UBITS;
+ size_t nss = cnfa->nstates * 2;
+ int wordsper = (cnfa->nstates + UBITS - 1) / UBITS;
struct smalldfa *smallwas = small;
assert(cnfa != NULL && cnfa->nstates != 0);
- if (nss <= FEWSTATES && cnfa->ncolors <= FEWCOLORS) {
+ if (nss <= FEWSTATES && cnfa->ncolors <= FEWCOLORS)
+ {
assert(wordsper == 1);
- if (small == NULL) {
- small = (struct smalldfa *)MALLOC(
- sizeof(struct smalldfa));
- if (small == NULL) {
+ if (small == NULL)
+ {
+ small = (struct smalldfa *) MALLOC(
+ sizeof(struct smalldfa));
+ if (small == NULL)
+ {
ERR(REG_ESPACE);
return NULL;
}
@@ -280,32 +303,36 @@ newdfa(struct vars *v,
d->outsarea = small->outsarea;
d->incarea = small->incarea;
d->cptsmalloced = 0;
- d->mallocarea = (smallwas == NULL) ? (char *)small : NULL;
- } else {
- d = (struct dfa *)MALLOC(sizeof(struct dfa));
- if (d == NULL) {
+ d->mallocarea = (smallwas == NULL) ? (char *) small : NULL;
+ }
+ else
+ {
+ d = (struct dfa *) MALLOC(sizeof(struct dfa));
+ if (d == NULL)
+ {
ERR(REG_ESPACE);
return NULL;
}
- d->ssets = (struct sset *)MALLOC(nss * sizeof(struct sset));
- d->statesarea = (unsigned *)MALLOC((nss+WORK) * wordsper *
- sizeof(unsigned));
+ d->ssets = (struct sset *) MALLOC(nss * sizeof(struct sset));
+ d->statesarea = (unsigned *) MALLOC((nss + WORK) * wordsper *
+ sizeof(unsigned));
d->work = &d->statesarea[nss * wordsper];
- d->outsarea = (struct sset **)MALLOC(nss * cnfa->ncolors *
- sizeof(struct sset *));
- d->incarea = (struct arcp *)MALLOC(nss * cnfa->ncolors *
- sizeof(struct arcp));
+ d->outsarea = (struct sset **) MALLOC(nss * cnfa->ncolors *
+ sizeof(struct sset *));
+ d->incarea = (struct arcp *) MALLOC(nss * cnfa->ncolors *
+ sizeof(struct arcp));
d->cptsmalloced = 1;
- d->mallocarea = (char *)d;
+ d->mallocarea = (char *) d;
if (d->ssets == NULL || d->statesarea == NULL ||
- d->outsarea == NULL || d->incarea == NULL) {
+ d->outsarea == NULL || d->incarea == NULL)
+ {
freedfa(d);
ERR(REG_ESPACE);
return NULL;
}
}
- d->nssets = (v->eflags&REG_SMALL) ? 7 : nss;
+ d->nssets = (v->eflags & REG_SMALL) ? 7 : nss;
d->nssused = 0;
d->nstates = cnfa->nstates;
d->ncolors = cnfa->ncolors;
@@ -325,9 +352,10 @@ newdfa(struct vars *v,
* freedfa - free a DFA
*/
static void
-freedfa(struct dfa *d)
+freedfa(struct dfa * d)
{
- if (d->cptsmalloced) {
+ if (d->cptsmalloced)
+ {
if (d->ssets != NULL)
FREE(d->ssets);
if (d->statesarea != NULL)
@@ -351,8 +379,8 @@ static unsigned
hash(unsigned *uv,
int n)
{
- int i;
- unsigned h;
+ int i;
+ unsigned h;
h = 0;
for (i = 0; i < n; i++)
@@ -364,24 +392,25 @@ hash(unsigned *uv,
* initialize - hand-craft a cache entry for startup, otherwise get ready
*/
static struct sset *
-initialize(struct vars *v, /* used only for debug flags */
- struct dfa *d,
- chr *start)
+initialize(struct vars * v, /* used only for debug flags */
+ struct dfa * d,
+ chr * start)
{
struct sset *ss;
- int i;
+ int i;
/* is previous one still there? */
- if (d->nssused > 0 && (d->ssets[0].flags&STARTER))
+ if (d->nssused > 0 && (d->ssets[0].flags & STARTER))
ss = &d->ssets[0];
- else { /* no, must (re)build it */
+ else
+ { /* no, must (re)build it */
ss = getvacant(v, d, start, start);
for (i = 0; i < d->wordsper; i++)
ss->states[i] = 0;
BSET(ss->states, d->cnfa->pre);
ss->hash = HASH(ss->states, d->wordsper);
assert(d->cnfa->pre != d->cnfa->post);
- ss->flags = STARTER|LOCKED|NOPROGRESS;
+ ss->flags = STARTER | LOCKED | NOPROGRESS;
/* lastseen dealt with below */
}
@@ -396,27 +425,28 @@ initialize(struct vars *v, /* used only for debug flags */
/*
* miss - handle a cache miss
*/
-static struct sset * /* NULL if goes to empty set */
-miss(struct vars *v, /* used only for debug flags */
- struct dfa *d,
- struct sset *css,
+static struct sset * /* NULL if goes to empty set */
+miss(struct vars * v, /* used only for debug flags */
+ struct dfa * d,
+ struct sset * css,
pcolor co,
- chr *cp, /* next chr */
- chr *start) /* where the attempt got started */
+ chr * cp, /* next chr */
+ chr * start) /* where the attempt got started */
{
struct cnfa *cnfa = d->cnfa;
- int i;
- unsigned h;
+ int i;
+ unsigned h;
struct carc *ca;
struct sset *p;
- int ispost;
- int noprogress;
- int gotstate;
- int dolacons;
- int sawlacons;
+ int ispost;
+ int noprogress;
+ int gotstate;
+ int dolacons;
+ int sawlacons;
/* for convenience, we can be called even if it might not be a miss */
- if (css->outs[co] != NULL) {
+ if (css->outs[co] != NULL)
+ {
FDEBUG(("hit\n"));
return css->outs[co];
}
@@ -430,8 +460,9 @@ miss(struct vars *v, /* used only for debug flags */
gotstate = 0;
for (i = 0; i < d->nstates; i++)
if (ISBSET(css->states, i))
- for (ca = cnfa->states[i]+1; ca->co != COLORLESS; ca++)
- if (ca->co == co) {
+ for (ca = cnfa->states[i] + 1; ca->co != COLORLESS; ca++)
+ if (ca->co == co)
+ {
BSET(d->work, ca->to);
gotstate = 1;
if (ca->to == cnfa->post)
@@ -440,21 +471,23 @@ miss(struct vars *v, /* used only for debug flags */
noprogress = 0;
FDEBUG(("%d -> %d\n", i, ca->to));
}
- dolacons = (gotstate) ? (cnfa->flags&HASLACONS) : 0;
+ dolacons = (gotstate) ? (cnfa->flags & HASLACONS) : 0;
sawlacons = 0;
- while (dolacons) { /* transitive closure */
+ while (dolacons)
+ { /* transitive closure */
dolacons = 0;
for (i = 0; i < d->nstates; i++)
if (ISBSET(d->work, i))
- for (ca = cnfa->states[i]+1; ca->co != COLORLESS;
- ca++) {
+ for (ca = cnfa->states[i] + 1; ca->co != COLORLESS;
+ ca++)
+ {
if (ca->co <= cnfa->ncolors)
- continue; /* NOTE CONTINUE */
+ continue; /* NOTE CONTINUE */
sawlacons = 1;
if (ISBSET(d->work, ca->to))
- continue; /* NOTE CONTINUE */
+ continue; /* NOTE CONTINUE */
if (!lacon(v, cnfa, cp, ca->co))
- continue; /* NOTE CONTINUE */
+ continue; /* NOTE CONTINUE */
BSET(d->work, ca->to);
dolacons = 1;
if (ca->to == cnfa->post)
@@ -470,11 +503,13 @@ miss(struct vars *v, /* used only for debug flags */
/* next, is that in the cache? */
for (p = d->ssets, i = d->nssused; i > 0; p++, i--)
- if (HIT(h, d->work, p, d->wordsper)) {
+ if (HIT(h, d->work, p, d->wordsper))
+ {
FDEBUG(("cached c%d\n", p - d->ssets));
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
- if (i == 0) { /* nope, need a new cache entry */
+ if (i == 0)
+ { /* nope, need a new cache entry */
p = getvacant(v, d, cp, start);
assert(p != css);
for (i = 0; i < d->wordsper; i++)
@@ -486,12 +521,13 @@ miss(struct vars *v, /* used only for debug flags */
/* lastseen to be dealt with by caller */
}
- if (!sawlacons) { /* lookahead conds. always cache miss */
+ if (!sawlacons)
+ { /* lookahead conds. always cache miss */
FDEBUG(("c%d[%d]->c%d\n", css - d->ssets, co, p - d->ssets));
css->outs[co] = p;
css->inchain[co] = p->ins;
p->ins.ss = css;
- p->ins.co = (color)co;
+ p->ins.co = (color) co;
}
return p;
}
@@ -499,28 +535,29 @@ miss(struct vars *v, /* used only for debug flags */
/*
* lacon - lookahead-constraint checker for miss()
*/
-static int /* predicate: constraint satisfied? */
-lacon(struct vars *v,
- struct cnfa *pcnfa, /* parent cnfa */
- chr *cp,
- pcolor co) /* "color" of the lookahead constraint */
+static int /* predicate: constraint satisfied? */
+lacon(struct vars * v,
+ struct cnfa * pcnfa, /* parent cnfa */
+ chr * cp,
+ pcolor co) /* "color" of the lookahead constraint */
{
- int n;
+ int n;
struct subre *sub;
struct dfa *d;
struct smalldfa sd;
- chr *end;
+ chr *end;
n = co - pcnfa->ncolors;
assert(n < v->g->nlacons && v->g->lacons != NULL);
FDEBUG(("=== testing lacon %d\n", n));
sub = &v->g->lacons[n];
d = newdfa(v, &sub->cnfa, &v->g->cmap, &sd);
- if (d == NULL) {
+ if (d == NULL)
+ {
ERR(REG_ESPACE);
return 0;
}
- end = longest(v, d, cp, v->stop, (int *)NULL);
+ end = longest(v, d, cp, v->stop, (int *) NULL);
freedfa(d);
FDEBUG(("=== lacon %d match %d\n", n, (end != NULL)));
return (sub->subno) ? (end != NULL) : (end == NULL);
@@ -532,46 +569,49 @@ lacon(struct vars *v,
* clear the innards of the state set -- that's up to the caller.
*/
static struct sset *
-getvacant(struct vars *v, /* used only for debug flags */
- struct dfa *d,
- chr *cp,
- chr *start)
+getvacant(struct vars * v, /* used only for debug flags */
+ struct dfa * d,
+ chr * cp,
+ chr * start)
{
- int i;
+ int i;
struct sset *ss;
struct sset *p;
struct arcp ap;
struct arcp lastap;
- color co;
+ color co;
ss = pickss(v, d, cp, start);
- assert(!(ss->flags&LOCKED));
+ assert(!(ss->flags & LOCKED));
/* clear out its inarcs, including self-referential ones */
ap = ss->ins;
- while ((p = ap.ss) != NULL) {
+ while ((p = ap.ss) != NULL)
+ {
co = ap.co;
- FDEBUG(("zapping c%d's %ld outarc\n", p - d->ssets, (long)co));
+ FDEBUG(("zapping c%d's %ld outarc\n", p - d->ssets, (long) co));
p->outs[co] = NULL;
ap = p->inchain[co];
- p->inchain[co].ss = NULL; /* paranoia */
+ p->inchain[co].ss = NULL; /* paranoia */
}
ss->ins.ss = NULL;
/* take it off the inarc chains of the ssets reached by its outarcs */
- for (i = 0; i < d->ncolors; i++) {
+ for (i = 0; i < d->ncolors; i++)
+ {
p = ss->outs[i];
assert(p != ss); /* not self-referential */
if (p == NULL)
- continue; /* NOTE CONTINUE */
+ continue; /* NOTE CONTINUE */
FDEBUG(("del outarc %d from c%d's in chn\n", i, p - d->ssets));
if (p->ins.ss == ss && p->ins.co == i)
p->ins = ss->inchain[i];
- else {
+ else
+ {
assert(p->ins.ss != NULL);
for (ap = p->ins; ap.ss != NULL &&
- !(ap.ss == ss && ap.co == i);
- ap = ap.ss->inchain[ap.co])
+ !(ap.ss == ss && ap.co == i);
+ ap = ap.ss->inchain[ap.co])
lastap = ap;
assert(ap.ss != NULL);
lastap.ss->inchain[lastap.co] = ss->inchain[i];
@@ -581,13 +621,13 @@ getvacant(struct vars *v, /* used only for debug flags */
}
/* if ss was a success state, may need to remember location */
- if ((ss->flags&POSTSTATE) && ss->lastseen != d->lastpost &&
- (d->lastpost == NULL || d->lastpost < ss->lastseen))
+ if ((ss->flags & POSTSTATE) && ss->lastseen != d->lastpost &&
+ (d->lastpost == NULL || d->lastpost < ss->lastseen))
d->lastpost = ss->lastseen;
/* likewise for a no-progress state */
- if ((ss->flags&NOPROGRESS) && ss->lastseen != d->lastnopr &&
- (d->lastnopr == NULL || d->lastnopr < ss->lastseen))
+ if ((ss->flags & NOPROGRESS) && ss->lastseen != d->lastnopr &&
+ (d->lastnopr == NULL || d->lastnopr < ss->lastseen))
d->lastnopr = ss->lastseen;
return ss;
@@ -597,18 +637,19 @@ getvacant(struct vars *v, /* used only for debug flags */
* pickss - pick the next stateset to be used
*/
static struct sset *
-pickss(struct vars *v, /* used only for debug flags */
- struct dfa *d,
- chr *cp,
- chr *start)
+pickss(struct vars * v, /* used only for debug flags */
+ struct dfa * d,
+ chr * cp,
+ chr * start)
{
- int i;
+ int i;
struct sset *ss;
struct sset *end;
- chr *ancient;
+ chr *ancient;
/* shortcut for cases where cache isn't full */
- if (d->nssused < d->nssets) {
+ if (d->nssused < d->nssets)
+ {
i = d->nssused;
d->nssused++;
ss = &d->ssets[i];
@@ -620,7 +661,8 @@ pickss(struct vars *v, /* used only for debug flags */
ss->ins.co = WHITE; /* give it some value */
ss->outs = &d->outsarea[i * d->ncolors];
ss->inchain = &d->incarea[i * d->ncolors];
- for (i = 0; i < d->ncolors; i++) {
+ for (i = 0; i < d->ncolors; i++)
+ {
ss->outs[i] = NULL;
ss->inchain[i].ss = NULL;
}
@@ -628,20 +670,22 @@ pickss(struct vars *v, /* used only for debug flags */
}
/* look for oldest, or old enough anyway */
- if (cp - start > d->nssets*2/3) /* oldest 33% are expendable */
- ancient = cp - d->nssets*2/3;
+ if (cp - start > d->nssets * 2 / 3) /* oldest 33% are expendable */
+ ancient = cp - d->nssets * 2 / 3;
else
ancient = start;
for (ss = d->search, end = &d->ssets[d->nssets]; ss < end; ss++)
if ((ss->lastseen == NULL || ss->lastseen < ancient) &&
- !(ss->flags&LOCKED)) {
+ !(ss->flags & LOCKED))
+ {
d->search = ss + 1;
FDEBUG(("replacing c%d\n", ss - d->ssets));
return ss;
}
for (ss = d->ssets, end = d->search; ss < end; ss++)
if ((ss->lastseen == NULL || ss->lastseen < ancient) &&
- !(ss->flags&LOCKED)) {
+ !(ss->flags & LOCKED))
+ {
d->search = ss + 1;
FDEBUG(("replacing c%d\n", ss - d->ssets));
return ss;
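
Illustrative aside, not part of the patch: the rege_dfa.c hunks above only re-indent longest()/shortest(), but their shape is easy to miss under the whitespace churn. A minimal sketch of the longest-preferred scan, reduced to a toy DFA over plain bytes; the real code steps by "color", consults a cache of state sets, and calls miss() on a cache miss. The toy_* types and names below are invented for illustration.

/*
 * Sketch only: one transition per input byte, remember the rightmost
 * position at which an accepting state (analogue of POSTSTATE) was seen.
 */
#include <stddef.h>

struct toy_state
{
	struct toy_state *outs[256];	/* transition per input byte */
	int			accepting;		/* analogue of POSTSTATE */
};

struct toy_dfa
{
	struct toy_state *start;
};

/* return pointer just past the longest match starting at 'start', or NULL */
static const char *
toy_longest(const struct toy_dfa * d, const char *start, const char *stop)
{
	const struct toy_state *s = d->start;
	const char *cp = start;
	const char *post = NULL;	/* rightmost accepting position seen */

	if (s->accepting)
		post = cp;
	while (cp < stop)
	{
		const struct toy_state *next = s->outs[(unsigned char) *cp];

		if (next == NULL)
			break;				/* NOTE BREAK OUT -- no transition */
		cp++;
		s = next;
		if (s->accepting)
			post = cp;
	}
	return post;
}
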
diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c
index 94693eba211..a0a9d3f0410 100644
--- a/src/backend/regex/regerror.c
+++ b/src/backend/regex/regerror.c
@@ -1,21 +1,21 @@
/*
* regerror - error-code expansion
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -27,7 +27,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regerror.c,v 1.25 2003/02/05 17:41:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regerror.c,v 1.26 2003/08/04 00:43:21 momjian Exp $
*
*/
@@ -37,72 +37,82 @@
static char unk[] = "*** unknown regex error code 0x%x ***";
/* struct to map among codes, code names, and explanations */
-static struct rerr {
- int code;
- char *name;
- char *explain;
-} rerrs[] = {
+static struct rerr
+{
+ int code;
+ char *name;
+ char *explain;
+} rerrs[] =
+
+{
/* the actual table is built from regex.h */
#include "regex/regerrs.h"
- { -1, "", "oops" }, /* explanation special-cased in code */
+ {
+ -1, "", "oops"
+ }, /* explanation special-cased in code */
};
/*
* pg_regerror - the interface to error numbers
*/
/* ARGSUSED */
-size_t /* actual space needed (including NUL) */
+size_t /* actual space needed (including NUL) */
pg_regerror(int errcode, /* error code, or REG_ATOI or REG_ITOA */
- const regex_t *preg, /* associated regex_t (unused at present) */
+ const regex_t *preg, /* associated regex_t (unused at present) */
char *errbuf, /* result buffer (unless errbuf_size==0) */
- size_t errbuf_size) /* available space in errbuf, can be 0 */
+ size_t errbuf_size) /* available space in errbuf, can be 0 */
{
struct rerr *r;
- char *msg;
- char convbuf[sizeof(unk)+50]; /* 50 = plenty for int */
- size_t len;
- int icode;
+ char *msg;
+ char convbuf[sizeof(unk) + 50]; /* 50 = plenty for int */
+ size_t len;
+ int icode;
- switch (errcode) {
- case REG_ATOI: /* convert name to number */
- for (r = rerrs; r->code >= 0; r++)
- if (strcmp(r->name, errbuf) == 0)
- break;
- sprintf(convbuf, "%d", r->code); /* -1 for unknown */
- msg = convbuf;
- break;
- case REG_ITOA: /* convert number to name */
- icode = atoi(errbuf); /* not our problem if this fails */
- for (r = rerrs; r->code >= 0; r++)
- if (r->code == icode)
- break;
- if (r->code >= 0)
- msg = r->name;
- else { /* unknown; tell him the number */
- sprintf(convbuf, "REG_%u", (unsigned)icode);
+ switch (errcode)
+ {
+ case REG_ATOI: /* convert name to number */
+ for (r = rerrs; r->code >= 0; r++)
+ if (strcmp(r->name, errbuf) == 0)
+ break;
+ sprintf(convbuf, "%d", r->code); /* -1 for unknown */
msg = convbuf;
- }
- break;
- default: /* a real, normal error code */
- for (r = rerrs; r->code >= 0; r++)
- if (r->code == errcode)
- break;
- if (r->code >= 0)
- msg = r->explain;
- else { /* unknown; say so */
- sprintf(convbuf, unk, errcode);
- msg = convbuf;
- }
- break;
+ break;
+ case REG_ITOA: /* convert number to name */
+ icode = atoi(errbuf); /* not our problem if this fails */
+ for (r = rerrs; r->code >= 0; r++)
+ if (r->code == icode)
+ break;
+ if (r->code >= 0)
+ msg = r->name;
+ else
+ { /* unknown; tell him the number */
+ sprintf(convbuf, "REG_%u", (unsigned) icode);
+ msg = convbuf;
+ }
+ break;
+ default: /* a real, normal error code */
+ for (r = rerrs; r->code >= 0; r++)
+ if (r->code == errcode)
+ break;
+ if (r->code >= 0)
+ msg = r->explain;
+ else
+ { /* unknown; say so */
+ sprintf(convbuf, unk, errcode);
+ msg = convbuf;
+ }
+ break;
}
len = strlen(msg) + 1; /* space needed, including NUL */
- if (errbuf_size > 0) {
+ if (errbuf_size > 0)
+ {
if (errbuf_size > len)
strcpy(errbuf, msg);
- else { /* truncate to fit */
- strncpy(errbuf, msg, errbuf_size-1);
- errbuf[errbuf_size-1] = '\0';
+ else
+ { /* truncate to fit */
+ strncpy(errbuf, msg, errbuf_size - 1);
+ errbuf[errbuf_size - 1] = '\0';
}
}
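
Illustrative aside, not part of the patch: the regerror.c hunk above reformats pg_regerror's lookup-and-truncate logic. A minimal sketch of that pattern under invented names: scan a sentinel-terminated table for the code, fall back to a formatted "unknown" message, copy into the caller's buffer with NUL-safe truncation, and return the space needed. The real table is generated from regex/regerrs.h.

/*
 * Sketch only: table lookup plus truncate-to-fit, mirroring pg_regerror.
 */
#include <stdio.h>
#include <string.h>

struct toy_err
{
	int			code;
	const char *explain;
};

static const struct toy_err toy_errs[] =
{
	{1, "no match"},
	{2, "out of memory"},
	{-1, "oops"}				/* sentinel, like the real table */
};

/* return the space needed (including NUL), filling buf if it has room */
static size_t
toy_errmsg(int code, char *buf, size_t bufsize)
{
	const struct toy_err *r;
	const char *msg;
	char		convbuf[64];
	size_t		len;

	for (r = toy_errs; r->code >= 0; r++)
		if (r->code == code)
			break;
	if (r->code >= 0)
		msg = r->explain;
	else
	{
		snprintf(convbuf, sizeof(convbuf), "unknown error %d", code);
		msg = convbuf;
	}
	len = strlen(msg) + 1;
	if (bufsize > 0)
	{
		if (bufsize > len)
			strcpy(buf, msg);
		else
		{
			strncpy(buf, msg, bufsize - 1);
			buf[bufsize - 1] = '\0';	/* truncate to fit */
		}
	}
	return len;
}
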
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index eef01b0bd58..535501ff0b7 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -1,21 +1,21 @@
/*
* re_*exec and friends - match REs
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -27,7 +27,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regexec.c,v 1.21 2003/02/05 17:41:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regexec.c,v 1.22 2003/08/04 00:43:21 momjian Exp $
*
*/
@@ -36,87 +36,95 @@
/* lazy-DFA representation */
-struct arcp { /* "pointer" to an outarc */
+struct arcp
+{ /* "pointer" to an outarc */
struct sset *ss;
- color co;
+ color co;
};
-struct sset { /* state set */
- unsigned *states; /* pointer to bitvector */
- unsigned hash; /* hash of bitvector */
-# define HASH(bv, nw) (((nw) == 1) ? *(bv) : hash(bv, nw))
-# define HIT(h,bv,ss,nw) ((ss)->hash == (h) && ((nw) == 1 || \
+struct sset
+{ /* state set */
+ unsigned *states; /* pointer to bitvector */
+ unsigned hash; /* hash of bitvector */
+#define HASH(bv, nw) (((nw) == 1) ? *(bv) : hash(bv, nw))
+#define HIT(h,bv,ss,nw) ((ss)->hash == (h) && ((nw) == 1 || \
memcmp(VS(bv), VS((ss)->states), (nw)*sizeof(unsigned)) == 0))
- int flags;
-# define STARTER 01 /* the initial state set */
-# define POSTSTATE 02 /* includes the goal state */
-# define LOCKED 04 /* locked in cache */
-# define NOPROGRESS 010 /* zero-progress state set */
- struct arcp ins; /* chain of inarcs pointing here */
- chr *lastseen; /* last entered on arrival here */
- struct sset **outs; /* outarc vector indexed by color */
- struct arcp *inchain; /* chain-pointer vector for outarcs */
+ int flags;
+#define STARTER 01 /* the initial state set */
+#define POSTSTATE 02 /* includes the goal state */
+#define LOCKED 04 /* locked in cache */
+#define NOPROGRESS 010 /* zero-progress state set */
+ struct arcp ins; /* chain of inarcs pointing here */
+ chr *lastseen; /* last entered on arrival here */
+ struct sset **outs; /* outarc vector indexed by color */
+ struct arcp *inchain; /* chain-pointer vector for outarcs */
};
-struct dfa {
- int nssets; /* size of cache */
- int nssused; /* how many entries occupied yet */
- int nstates; /* number of states */
- int ncolors; /* length of outarc and inchain vectors */
- int wordsper; /* length of state-set bitvectors */
- struct sset *ssets; /* state-set cache */
- unsigned *statesarea; /* bitvector storage */
- unsigned *work; /* pointer to work area within statesarea */
- struct sset **outsarea; /* outarc-vector storage */
- struct arcp *incarea; /* inchain storage */
+struct dfa
+{
+ int nssets; /* size of cache */
+ int nssused; /* how many entries occupied yet */
+ int nstates; /* number of states */
+ int ncolors; /* length of outarc and inchain vectors */
+ int wordsper; /* length of state-set bitvectors */
+ struct sset *ssets; /* state-set cache */
+ unsigned *statesarea; /* bitvector storage */
+ unsigned *work; /* pointer to work area within statesarea */
+ struct sset **outsarea; /* outarc-vector storage */
+ struct arcp *incarea; /* inchain storage */
struct cnfa *cnfa;
struct colormap *cm;
- chr *lastpost; /* location of last cache-flushed success */
- chr *lastnopr; /* location of last cache-flushed NOPROGRESS */
- struct sset *search; /* replacement-search-pointer memory */
- int cptsmalloced; /* were the areas individually malloced? */
- char *mallocarea; /* self, or master malloced area, or NULL */
+ chr *lastpost; /* location of last cache-flushed success */
+ chr *lastnopr; /* location of last cache-flushed
+ * NOPROGRESS */
+ struct sset *search; /* replacement-search-pointer memory */
+ int cptsmalloced; /* were the areas individually malloced? */
+ char *mallocarea; /* self, or master malloced area, or NULL */
};
-#define WORK 1 /* number of work bitvectors needed */
+#define WORK 1 /* number of work bitvectors needed */
/* setup for non-malloc allocation for small cases */
-#define FEWSTATES 20 /* must be less than UBITS */
-#define FEWCOLORS 15
-struct smalldfa {
- struct dfa dfa;
- struct sset ssets[FEWSTATES*2];
- unsigned statesarea[FEWSTATES*2 + WORK];
- struct sset *outsarea[FEWSTATES*2 * FEWCOLORS];
- struct arcp incarea[FEWSTATES*2 * FEWCOLORS];
+#define FEWSTATES 20 /* must be less than UBITS */
+#define FEWCOLORS 15
+struct smalldfa
+{
+ struct dfa dfa;
+ struct sset ssets[FEWSTATES * 2];
+ unsigned statesarea[FEWSTATES * 2 + WORK];
+ struct sset *outsarea[FEWSTATES * 2 * FEWCOLORS];
+ struct arcp incarea[FEWSTATES * 2 * FEWCOLORS];
};
-#define DOMALLOC ((struct smalldfa *)NULL) /* force malloc */
+
+#define DOMALLOC ((struct smalldfa *)NULL) /* force malloc */
/* internal variables, bundled for easy passing around */
-struct vars {
- regex_t *re;
+struct vars
+{
+ regex_t *re;
struct guts *g;
- int eflags; /* copies of arguments */
- size_t nmatch;
+ int eflags; /* copies of arguments */
+ size_t nmatch;
regmatch_t *pmatch;
rm_detail_t *details;
- chr *start; /* start of string */
- chr *stop; /* just past end of string */
- int err; /* error code if any (0 none) */
- regoff_t *mem; /* memory vector for backtracking */
+ chr *start; /* start of string */
+ chr *stop; /* just past end of string */
+ int err; /* error code if any (0 none) */
+ regoff_t *mem; /* memory vector for backtracking */
struct smalldfa dfa1;
struct smalldfa dfa2;
};
-#define VISERR(vv) ((vv)->err != 0) /* have we seen an error yet? */
-#define ISERR() VISERR(v)
-#define VERR(vv,e) (((vv)->err) ? (vv)->err : ((vv)->err = (e)))
-#define ERR(e) VERR(v, e) /* record an error */
-#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return it */
-#define OFF(p) ((p) - v->start)
-#define LOFF(p) ((long)OFF(p))
+#define VISERR(vv) ((vv)->err != 0) /* have we seen an error yet? */
+#define ISERR() VISERR(v)
+#define VERR(vv,e) (((vv)->err) ? (vv)->err : ((vv)->err = (e)))
+#define ERR(e) VERR(v, e) /* record an error */
+#define NOERR() {if (ISERR()) return v->err;} /* if error seen, return
+ * it */
+#define OFF(p) ((p) - v->start)
+#define LOFF(p) ((long)OFF(p))
@@ -124,32 +132,33 @@ struct vars {
* forward declarations
*/
/* === regexec.c === */
-static int find (struct vars *, struct cnfa *, struct colormap *);
-static int cfind (struct vars *, struct cnfa *, struct colormap *);
-static int cfindloop (struct vars *, struct cnfa *, struct colormap *, struct dfa *, struct dfa *, chr **);
-static void zapsubs (regmatch_t *, size_t);
-static void zapmem (struct vars *, struct subre *);
-static void subset (struct vars *, struct subre *, chr *, chr *);
-static int dissect (struct vars *, struct subre *, chr *, chr *);
-static int condissect (struct vars *, struct subre *, chr *, chr *);
-static int altdissect (struct vars *, struct subre *, chr *, chr *);
-static int cdissect (struct vars *, struct subre *, chr *, chr *);
-static int ccondissect (struct vars *, struct subre *, chr *, chr *);
-static int crevdissect (struct vars *, struct subre *, chr *, chr *);
-static int cbrdissect (struct vars *, struct subre *, chr *, chr *);
-static int caltdissect (struct vars *, struct subre *, chr *, chr *);
+static int find(struct vars *, struct cnfa *, struct colormap *);
+static int cfind(struct vars *, struct cnfa *, struct colormap *);
+static int cfindloop(struct vars *, struct cnfa *, struct colormap *, struct dfa *, struct dfa *, chr **);
+static void zapsubs(regmatch_t *, size_t);
+static void zapmem(struct vars *, struct subre *);
+static void subset(struct vars *, struct subre *, chr *, chr *);
+static int dissect(struct vars *, struct subre *, chr *, chr *);
+static int condissect(struct vars *, struct subre *, chr *, chr *);
+static int altdissect(struct vars *, struct subre *, chr *, chr *);
+static int cdissect(struct vars *, struct subre *, chr *, chr *);
+static int ccondissect(struct vars *, struct subre *, chr *, chr *);
+static int crevdissect(struct vars *, struct subre *, chr *, chr *);
+static int cbrdissect(struct vars *, struct subre *, chr *, chr *);
+static int caltdissect(struct vars *, struct subre *, chr *, chr *);
+
/* === rege_dfa.c === */
-static chr *longest (struct vars *, struct dfa *, chr *, chr *, int *);
-static chr *shortest (struct vars *, struct dfa *, chr *, chr *, chr *, chr **, int *);
-static chr *lastcold (struct vars *, struct dfa *);
-static struct dfa *newdfa (struct vars *, struct cnfa *, struct colormap *, struct smalldfa *);
-static void freedfa (struct dfa *);
-static unsigned hash (unsigned *, int);
-static struct sset *initialize (struct vars *, struct dfa *, chr *);
-static struct sset *miss (struct vars *, struct dfa *, struct sset *, pcolor, chr *, chr *);
-static int lacon (struct vars *, struct cnfa *, chr *, pcolor);
-static struct sset *getvacant (struct vars *, struct dfa *, chr *, chr *);
-static struct sset *pickss (struct vars *, struct dfa *, chr *, chr *);
+static chr *longest(struct vars *, struct dfa *, chr *, chr *, int *);
+static chr *shortest(struct vars *, struct dfa *, chr *, chr *, chr *, chr **, int *);
+static chr *lastcold(struct vars *, struct dfa *);
+static struct dfa *newdfa(struct vars *, struct cnfa *, struct colormap *, struct smalldfa *);
+static void freedfa(struct dfa *);
+static unsigned hash(unsigned *, int);
+static struct sset *initialize(struct vars *, struct dfa *, chr *);
+static struct sset *miss(struct vars *, struct dfa *, struct sset *, pcolor, chr *, chr *);
+static int lacon(struct vars *, struct cnfa *, chr *, pcolor);
+static struct sset *getvacant(struct vars *, struct dfa *, chr *, chr *);
+static struct sset *pickss(struct vars *, struct dfa *, chr *, chr *);
/*
@@ -157,22 +166,24 @@ static struct sset *pickss (struct vars *, struct dfa *, chr *, chr *);
*/
int
pg_regexec(regex_t *re,
- const chr *string,
+ const chr * string,
size_t len,
- rm_detail_t *details,
+ rm_detail_t * details,
size_t nmatch,
regmatch_t pmatch[],
int flags)
{
struct vars var;
register struct vars *v = &var;
- int st;
- size_t n;
- int backref;
-# define LOCALMAT 20
- regmatch_t mat[LOCALMAT];
-# define LOCALMEM 40
- regoff_t mem[LOCALMEM];
+ int st;
+ size_t n;
+ int backref;
+
+#define LOCALMAT 20
+ regmatch_t mat[LOCALMAT];
+
+#define LOCALMEM 40
+ regoff_t mem[LOCALMEM];
/* sanity checks */
if (re == NULL || string == NULL || re->re_magic != REMAGIC)
@@ -182,46 +193,51 @@ pg_regexec(regex_t *re,
/* setup */
v->re = re;
- v->g = (struct guts *)re->re_guts;
- if ((v->g->cflags&REG_EXPECT) && details == NULL)
+ v->g = (struct guts *) re->re_guts;
+ if ((v->g->cflags & REG_EXPECT) && details == NULL)
return REG_INVARG;
- if (v->g->info&REG_UIMPOSSIBLE)
+ if (v->g->info & REG_UIMPOSSIBLE)
return REG_NOMATCH;
- backref = (v->g->info&REG_UBACKREF) ? 1 : 0;
+ backref = (v->g->info & REG_UBACKREF) ? 1 : 0;
v->eflags = flags;
- if (v->g->cflags&REG_NOSUB)
- nmatch = 0; /* override client */
+ if (v->g->cflags & REG_NOSUB)
+ nmatch = 0; /* override client */
v->nmatch = nmatch;
- if (backref) {
+ if (backref)
+ {
/* need work area */
if (v->g->nsub + 1 <= LOCALMAT)
v->pmatch = mat;
else
- v->pmatch = (regmatch_t *)MALLOC((v->g->nsub + 1) *
- sizeof(regmatch_t));
+ v->pmatch = (regmatch_t *) MALLOC((v->g->nsub + 1) *
+ sizeof(regmatch_t));
if (v->pmatch == NULL)
return REG_ESPACE;
v->nmatch = v->g->nsub + 1;
- } else
+ }
+ else
v->pmatch = pmatch;
v->details = details;
- v->start = (chr *)string;
- v->stop = (chr *)string + len;
+ v->start = (chr *) string;
+ v->stop = (chr *) string + len;
v->err = 0;
- if (backref) {
+ if (backref)
+ {
/* need retry memory */
assert(v->g->ntree >= 0);
- n = (size_t)v->g->ntree;
+ n = (size_t) v->g->ntree;
if (n <= LOCALMEM)
v->mem = mem;
else
- v->mem = (regoff_t *)MALLOC(n*sizeof(regoff_t));
- if (v->mem == NULL) {
+ v->mem = (regoff_t *) MALLOC(n * sizeof(regoff_t));
+ if (v->mem == NULL)
+ {
if (v->pmatch != pmatch && v->pmatch != mat)
FREE(v->pmatch);
return REG_ESPACE;
}
- } else
+ }
+ else
v->mem = NULL;
/* do it */
@@ -232,10 +248,11 @@ pg_regexec(regex_t *re,
st = find(v, &v->g->tree->cnfa, &v->g->cmap);
/* copy (portion of) match vector over if necessary */
- if (st == REG_OKAY && v->pmatch != pmatch && nmatch > 0) {
+ if (st == REG_OKAY && v->pmatch != pmatch && nmatch > 0)
+ {
zapsubs(pmatch, nmatch);
n = (nmatch < v->nmatch) ? nmatch : v->nmatch;
- memcpy(VS(pmatch), VS(v->pmatch), n*sizeof(regmatch_t));
+ memcpy(VS(pmatch), VS(v->pmatch), n * sizeof(regmatch_t));
}
/* clean up */
@@ -250,19 +267,20 @@ pg_regexec(regex_t *re,
* find - find a match for the main NFA (no-complications case)
*/
static int
-find(struct vars *v,
- struct cnfa *cnfa,
- struct colormap *cm)
+find(struct vars * v,
+ struct cnfa * cnfa,
+ struct colormap * cm)
{
struct dfa *s;
struct dfa *d;
- chr *begin;
- chr *end = NULL;
- chr *cold;
- chr *open; /* open and close of range of possible starts */
- chr *close;
- int hitend;
- int shorter = (v->g->tree->flags&SHORTER) ? 1 : 0;
+ chr *begin;
+ chr *end = NULL;
+ chr *cold;
+ chr *open; /* open and close of range of possible
+ * starts */
+ chr *close;
+ int hitend;
+ int shorter = (v->g->tree->flags & SHORTER) ? 1 : 0;
/* first, a shot with the search RE */
s = newdfa(v, &v->g->search, cm, &v->dfa1);
@@ -270,20 +288,21 @@ find(struct vars *v,
NOERR();
MDEBUG(("\nsearch at %ld\n", LOFF(v->start)));
cold = NULL;
- close = shortest(v, s, v->start, v->start, v->stop, &cold, (int *)NULL);
+ close = shortest(v, s, v->start, v->start, v->stop, &cold, (int *) NULL);
freedfa(s);
NOERR();
- if (v->g->cflags&REG_EXPECT) {
+ if (v->g->cflags & REG_EXPECT)
+ {
assert(v->details != NULL);
if (cold != NULL)
v->details->rm_extend.rm_so = OFF(cold);
else
v->details->rm_extend.rm_so = OFF(v->stop);
- v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
+ v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
}
- if (close == NULL) /* not found */
+ if (close == NULL) /* not found */
return REG_NOMATCH;
- if (v->nmatch == 0) /* found, don't need exact location */
+ if (v->nmatch == 0) /* found, don't need exact location */
return REG_OKAY;
/* find starting point and match */
@@ -294,18 +313,19 @@ find(struct vars *v,
d = newdfa(v, cnfa, cm, &v->dfa1);
assert(!(ISERR() && d != NULL));
NOERR();
- for (begin = open; begin <= close; begin++) {
+ for (begin = open; begin <= close; begin++)
+ {
MDEBUG(("\nfind trying at %ld\n", LOFF(begin)));
if (shorter)
end = shortest(v, d, begin, begin, v->stop,
- (chr **)NULL, &hitend);
+ (chr **) NULL, &hitend);
else
end = longest(v, d, begin, v->stop, &hitend);
NOERR();
if (hitend && cold == NULL)
cold = begin;
if (end != NULL)
- break; /* NOTE BREAK OUT */
+ break; /* NOTE BREAK OUT */
}
assert(end != NULL); /* search RE succeeded so loop should */
freedfa(d);
@@ -314,14 +334,15 @@ find(struct vars *v,
assert(v->nmatch > 0);
v->pmatch[0].rm_so = OFF(begin);
v->pmatch[0].rm_eo = OFF(end);
- if (v->g->cflags&REG_EXPECT) {
+ if (v->g->cflags & REG_EXPECT)
+ {
if (cold != NULL)
v->details->rm_extend.rm_so = OFF(cold);
else
v->details->rm_extend.rm_so = OFF(v->stop);
- v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
+ v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
}
- if (v->nmatch == 1) /* no need for submatches */
+ if (v->nmatch == 1) /* no need for submatches */
return REG_OKAY;
/* submatches */
@@ -333,19 +354,20 @@ find(struct vars *v,
* cfind - find a match for the main NFA (with complications)
*/
static int
-cfind(struct vars *v,
- struct cnfa *cnfa,
- struct colormap *cm)
+cfind(struct vars * v,
+ struct cnfa * cnfa,
+ struct colormap * cm)
{
struct dfa *s;
struct dfa *d;
- chr *cold;
- int ret;
+ chr *cold;
+ int ret;
s = newdfa(v, &v->g->search, cm, &v->dfa1);
NOERR();
d = newdfa(v, cnfa, cm, &v->dfa2);
- if (ISERR()) {
+ if (ISERR())
+ {
assert(d == NULL);
freedfa(s);
return v->err;
@@ -356,13 +378,14 @@ cfind(struct vars *v,
freedfa(d);
freedfa(s);
NOERR();
- if (v->g->cflags&REG_EXPECT) {
+ if (v->g->cflags & REG_EXPECT)
+ {
assert(v->details != NULL);
if (cold != NULL)
v->details->rm_extend.rm_so = OFF(cold);
else
v->details->rm_extend.rm_so = OFF(v->stop);
- v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
+ v->details->rm_extend.rm_eo = OFF(v->stop); /* unknown */
}
return ret;
}
@@ -371,47 +394,51 @@ cfind(struct vars *v,
* cfindloop - the heart of cfind
*/
static int
-cfindloop(struct vars *v,
- struct cnfa *cnfa,
- struct colormap *cm,
- struct dfa *d,
- struct dfa *s,
- chr **coldp) /* where to put coldstart pointer */
+cfindloop(struct vars * v,
+ struct cnfa * cnfa,
+ struct colormap * cm,
+ struct dfa * d,
+ struct dfa * s,
+ chr ** coldp) /* where to put coldstart pointer */
{
- chr *begin;
- chr *end;
- chr *cold;
- chr *open; /* open and close of range of possible starts */
- chr *close;
- chr *estart;
- chr *estop;
- int er;
- int shorter = v->g->tree->flags&SHORTER;
- int hitend;
+ chr *begin;
+ chr *end;
+ chr *cold;
+ chr *open; /* open and close of range of possible
+ * starts */
+ chr *close;
+ chr *estart;
+ chr *estop;
+ int er;
+ int shorter = v->g->tree->flags & SHORTER;
+ int hitend;
assert(d != NULL && s != NULL);
cold = NULL;
close = v->start;
- do {
+ do
+ {
MDEBUG(("\ncsearch at %ld\n", LOFF(close)));
- close = shortest(v, s, close, close, v->stop, &cold, (int *)NULL);
+ close = shortest(v, s, close, close, v->stop, &cold, (int *) NULL);
if (close == NULL)
break; /* NOTE BREAK */
assert(cold != NULL);
open = cold;
cold = NULL;
MDEBUG(("cbetween %ld and %ld\n", LOFF(open), LOFF(close)));
- for (begin = open; begin <= close; begin++) {
+ for (begin = open; begin <= close; begin++)
+ {
MDEBUG(("\ncfind trying at %ld\n", LOFF(begin)));
estart = begin;
estop = v->stop;
- for (;;) {
+ for (;;)
+ {
if (shorter)
end = shortest(v, d, begin, estart,
- estop, (chr **)NULL, &hitend);
+ estop, (chr **) NULL, &hitend);
else
end = longest(v, d, begin, estop,
- &hitend);
+ &hitend);
if (hitend && cold == NULL)
cold = begin;
if (end == NULL)
@@ -420,19 +447,23 @@ cfindloop(struct vars *v,
zapsubs(v->pmatch, v->nmatch);
zapmem(v, v->g->tree);
er = cdissect(v, v->g->tree, begin, end);
- if (er == REG_OKAY) {
- if (v->nmatch > 0) {
+ if (er == REG_OKAY)
+ {
+ if (v->nmatch > 0)
+ {
v->pmatch[0].rm_so = OFF(begin);
v->pmatch[0].rm_eo = OFF(end);
}
*coldp = cold;
return REG_OKAY;
}
- if (er != REG_NOMATCH) {
+ if (er != REG_NOMATCH)
+ {
ERR(er);
return er;
}
- if ((shorter) ? end == estop : end == begin) {
+ if ((shorter) ? end == estop : end == begin)
+ {
/* no point in trying again */
*coldp = cold;
return REG_NOMATCH;
@@ -457,9 +488,10 @@ static void
zapsubs(regmatch_t *p,
size_t n)
{
- size_t i;
+ size_t i;
- for (i = n-1; i > 0; i--) {
+ for (i = n - 1; i > 0; i--)
+ {
p[i].rm_so = -1;
p[i].rm_eo = -1;
}
@@ -469,15 +501,16 @@ zapsubs(regmatch_t *p,
* zapmem - initialize the retry memory of a subtree to zeros
*/
static void
-zapmem(struct vars *v,
- struct subre *t)
+zapmem(struct vars * v,
+ struct subre * t)
{
if (t == NULL)
return;
assert(v->mem != NULL);
v->mem[t->retry] = 0;
- if (t->op == '(') {
+ if (t->op == '(')
+ {
assert(t->subno > 0);
v->pmatch[t->subno].rm_so = -1;
v->pmatch[t->subno].rm_eo = -1;
@@ -493,15 +526,15 @@ zapmem(struct vars *v,
* subset - set any subexpression relevant to a successful subre
*/
static void
-subset(struct vars *v,
- struct subre *sub,
- chr *begin,
- chr *end)
+subset(struct vars * v,
+ struct subre * sub,
+ chr * begin,
+ chr * end)
{
- int n = sub->subno;
+ int n = sub->subno;
assert(n > 0);
- if ((size_t)n >= v->nmatch)
+ if ((size_t) n >= v->nmatch)
return;
MDEBUG(("setting %d\n", n));
@@ -512,58 +545,59 @@ subset(struct vars *v,
/*
* dissect - determine subexpression matches (uncomplicated case)
*/
-static int /* regexec return code */
-dissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+dissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
assert(t != NULL);
MDEBUG(("dissect %ld-%ld\n", LOFF(begin), LOFF(end)));
- switch (t->op) {
- case '=': /* terminal node */
- assert(t->left == NULL && t->right == NULL);
- return REG_OKAY; /* no action, parent did the work */
- break;
- case '|': /* alternation */
- assert(t->left != NULL);
- return altdissect(v, t, begin, end);
- break;
- case 'b': /* back ref -- shouldn't be calling us! */
- return REG_ASSERT;
- break;
- case '.': /* concatenation */
- assert(t->left != NULL && t->right != NULL);
- return condissect(v, t, begin, end);
- break;
- case '(': /* capturing */
- assert(t->left != NULL && t->right == NULL);
- assert(t->subno > 0);
- subset(v, t, begin, end);
- return dissect(v, t->left, begin, end);
- break;
- default:
- return REG_ASSERT;
- break;
+ switch (t->op)
+ {
+ case '=': /* terminal node */
+ assert(t->left == NULL && t->right == NULL);
+ return REG_OKAY; /* no action, parent did the work */
+ break;
+ case '|': /* alternation */
+ assert(t->left != NULL);
+ return altdissect(v, t, begin, end);
+ break;
+ case 'b': /* back ref -- shouldn't be calling us! */
+ return REG_ASSERT;
+ break;
+ case '.': /* concatenation */
+ assert(t->left != NULL && t->right != NULL);
+ return condissect(v, t, begin, end);
+ break;
+ case '(': /* capturing */
+ assert(t->left != NULL && t->right == NULL);
+ assert(t->subno > 0);
+ subset(v, t, begin, end);
+ return dissect(v, t->left, begin, end);
+ break;
+ default:
+ return REG_ASSERT;
+ break;
}
}
/*
* condissect - determine concatenation subexpression matches (uncomplicated)
*/
-static int /* regexec return code */
-condissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+condissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
struct dfa *d;
struct dfa *d2;
- chr *mid;
- int i;
- int shorter = (t->left->flags&SHORTER) ? 1 : 0;
- chr *stop = (shorter) ? end : begin;
+ chr *mid;
+ int i;
+ int shorter = (t->left->flags & SHORTER) ? 1 : 0;
+ chr *stop = (shorter) ? end : begin;
assert(t->op == '.');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
@@ -572,7 +606,8 @@ condissect(struct vars *v,
d = newdfa(v, &t->left->cnfa, &v->g->cmap, &v->dfa1);
NOERR();
d2 = newdfa(v, &t->right->cnfa, &v->g->cmap, &v->dfa2);
- if (ISERR()) {
+ if (ISERR())
+ {
assert(d2 == NULL);
freedfa(d);
return v->err;
@@ -580,11 +615,12 @@ condissect(struct vars *v,
/* pick a tentative midpoint */
if (shorter)
- mid = shortest(v, d, begin, begin, end, (chr **)NULL,
- (int *)NULL);
+ mid = shortest(v, d, begin, begin, end, (chr **) NULL,
+ (int *) NULL);
else
- mid = longest(v, d, begin, end, (int *)NULL);
- if (mid == NULL) {
+ mid = longest(v, d, begin, end, (int *) NULL);
+ if (mid == NULL)
+ {
freedfa(d);
freedfa(d2);
return REG_ASSERT;
@@ -592,9 +628,11 @@ condissect(struct vars *v,
MDEBUG(("tentative midpoint %ld\n", LOFF(mid)));
/* iterate until satisfaction or failure */
- while (longest(v, d2, mid, end, (int *)NULL) != end) {
+ while (longest(v, d2, mid, end, (int *) NULL) != end)
+ {
/* that midpoint didn't work, find a new one */
- if (mid == stop) {
+ if (mid == stop)
+ {
/* all possibilities exhausted! */
MDEBUG(("no midpoint!\n"));
freedfa(d);
@@ -602,11 +640,12 @@ condissect(struct vars *v,
return REG_ASSERT;
}
if (shorter)
- mid = shortest(v, d, begin, mid+1, end, (chr **)NULL,
- (int *)NULL);
+ mid = shortest(v, d, begin, mid + 1, end, (chr **) NULL,
+ (int *) NULL);
else
- mid = longest(v, d, begin, mid-1, (int *)NULL);
- if (mid == NULL) {
+ mid = longest(v, d, begin, mid - 1, (int *) NULL);
+ if (mid == NULL)
+ {
/* failed to find a new one! */
MDEBUG(("failed midpoint!\n"));
freedfa(d);
@@ -629,154 +668,166 @@ condissect(struct vars *v,
/*
* altdissect - determine alternative subexpression matches (uncomplicated)
*/
-static int /* regexec return code */
-altdissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+altdissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
struct dfa *d;
- int i;
+ int i;
assert(t != NULL);
assert(t->op == '|');
- for (i = 0; t != NULL; t = t->right, i++) {
+ for (i = 0; t != NULL; t = t->right, i++)
+ {
MDEBUG(("trying %dth\n", i));
assert(t->left != NULL && t->left->cnfa.nstates > 0);
d = newdfa(v, &t->left->cnfa, &v->g->cmap, &v->dfa1);
if (ISERR())
return v->err;
- if (longest(v, d, begin, end, (int *)NULL) == end) {
+ if (longest(v, d, begin, end, (int *) NULL) == end)
+ {
MDEBUG(("success\n"));
freedfa(d);
return dissect(v, t->left, begin, end);
}
freedfa(d);
}
- return REG_ASSERT; /* none of them matched?!? */
+ return REG_ASSERT; /* none of them matched?!? */
}
/*
* cdissect - determine subexpression matches (with complications)
- * The retry memory stores the offset of the trial midpoint from begin,
+ * The retry memory stores the offset of the trial midpoint from begin,
* plus 1 so that 0 uniquely means "clean slate".
*/
-static int /* regexec return code */
-cdissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+cdissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
- int er;
+ int er;
assert(t != NULL);
MDEBUG(("cdissect %ld-%ld %c\n", LOFF(begin), LOFF(end), t->op));
- switch (t->op) {
- case '=': /* terminal node */
- assert(t->left == NULL && t->right == NULL);
- return REG_OKAY; /* no action, parent did the work */
- break;
- case '|': /* alternation */
- assert(t->left != NULL);
- return caltdissect(v, t, begin, end);
- break;
- case 'b': /* back ref -- shouldn't be calling us! */
- assert(t->left == NULL && t->right == NULL);
- return cbrdissect(v, t, begin, end);
- break;
- case '.': /* concatenation */
- assert(t->left != NULL && t->right != NULL);
- return ccondissect(v, t, begin, end);
- break;
- case '(': /* capturing */
- assert(t->left != NULL && t->right == NULL);
- assert(t->subno > 0);
- er = cdissect(v, t->left, begin, end);
- if (er == REG_OKAY)
- subset(v, t, begin, end);
- return er;
- break;
- default:
- return REG_ASSERT;
- break;
+ switch (t->op)
+ {
+ case '=': /* terminal node */
+ assert(t->left == NULL && t->right == NULL);
+ return REG_OKAY; /* no action, parent did the work */
+ break;
+ case '|': /* alternation */
+ assert(t->left != NULL);
+ return caltdissect(v, t, begin, end);
+ break;
+ case 'b': /* back ref -- shouldn't be calling us! */
+ assert(t->left == NULL && t->right == NULL);
+ return cbrdissect(v, t, begin, end);
+ break;
+ case '.': /* concatenation */
+ assert(t->left != NULL && t->right != NULL);
+ return ccondissect(v, t, begin, end);
+ break;
+ case '(': /* capturing */
+ assert(t->left != NULL && t->right == NULL);
+ assert(t->subno > 0);
+ er = cdissect(v, t->left, begin, end);
+ if (er == REG_OKAY)
+ subset(v, t, begin, end);
+ return er;
+ break;
+ default:
+ return REG_ASSERT;
+ break;
}
}
/*
* ccondissect - concatenation subexpression matches (with complications)
- * The retry memory stores the offset of the trial midpoint from begin,
+ * The retry memory stores the offset of the trial midpoint from begin,
* plus 1 so that 0 uniquely means "clean slate".
*/
-static int /* regexec return code */
-ccondissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+ccondissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
struct dfa *d;
struct dfa *d2;
- chr *mid;
- int er;
+ chr *mid;
+ int er;
assert(t->op == '.');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
assert(t->right != NULL && t->right->cnfa.nstates > 0);
- if (t->left->flags&SHORTER) /* reverse scan */
+ if (t->left->flags & SHORTER) /* reverse scan */
return crevdissect(v, t, begin, end);
d = newdfa(v, &t->left->cnfa, &v->g->cmap, DOMALLOC);
if (ISERR())
return v->err;
d2 = newdfa(v, &t->right->cnfa, &v->g->cmap, DOMALLOC);
- if (ISERR()) {
+ if (ISERR())
+ {
freedfa(d);
return v->err;
}
MDEBUG(("cconcat %d\n", t->retry));
/* pick a tentative midpoint */
- if (v->mem[t->retry] == 0) {
- mid = longest(v, d, begin, end, (int *)NULL);
- if (mid == NULL) {
+ if (v->mem[t->retry] == 0)
+ {
+ mid = longest(v, d, begin, end, (int *) NULL);
+ if (mid == NULL)
+ {
freedfa(d);
freedfa(d2);
return REG_NOMATCH;
}
MDEBUG(("tentative midpoint %ld\n", LOFF(mid)));
v->mem[t->retry] = (mid - begin) + 1;
- } else {
+ }
+ else
+ {
mid = begin + (v->mem[t->retry] - 1);
MDEBUG(("working midpoint %ld\n", LOFF(mid)));
}
/* iterate until satisfaction or failure */
- for (;;) {
+ for (;;)
+ {
/* try this midpoint on for size */
er = cdissect(v, t->left, begin, mid);
if (er == REG_OKAY &&
- longest(v, d2, mid, end, (int *)NULL) == end &&
- (er = cdissect(v, t->right, mid, end)) ==
- REG_OKAY)
- break; /* NOTE BREAK OUT */
- if (er != REG_OKAY && er != REG_NOMATCH) {
+ longest(v, d2, mid, end, (int *) NULL) == end &&
+ (er = cdissect(v, t->right, mid, end)) ==
+ REG_OKAY)
+ break; /* NOTE BREAK OUT */
+ if (er != REG_OKAY && er != REG_NOMATCH)
+ {
freedfa(d);
freedfa(d2);
return er;
}
/* that midpoint didn't work, find a new one */
- if (mid == begin) {
+ if (mid == begin)
+ {
/* all possibilities exhausted */
MDEBUG(("%d no midpoint\n", t->retry));
freedfa(d);
freedfa(d2);
return REG_NOMATCH;
}
- mid = longest(v, d, begin, mid-1, (int *)NULL);
- if (mid == NULL) {
+ mid = longest(v, d, begin, mid - 1, (int *) NULL);
+ if (mid == NULL)
+ {
/* failed to find a new one */
MDEBUG(("%d failed midpoint\n", t->retry));
freedfa(d);
@@ -798,76 +849,85 @@ ccondissect(struct vars *v,
/*
* crevdissect - determine backref shortest-first subexpression matches
- * The retry memory stores the offset of the trial midpoint from begin,
+ * The retry memory stores the offset of the trial midpoint from begin,
* plus 1 so that 0 uniquely means "clean slate".
*/
-static int /* regexec return code */
-crevdissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+crevdissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
struct dfa *d;
struct dfa *d2;
- chr *mid;
- int er;
+ chr *mid;
+ int er;
assert(t->op == '.');
assert(t->left != NULL && t->left->cnfa.nstates > 0);
assert(t->right != NULL && t->right->cnfa.nstates > 0);
- assert(t->left->flags&SHORTER);
+ assert(t->left->flags & SHORTER);
/* concatenation -- need to split the substring between parts */
d = newdfa(v, &t->left->cnfa, &v->g->cmap, DOMALLOC);
if (ISERR())
return v->err;
d2 = newdfa(v, &t->right->cnfa, &v->g->cmap, DOMALLOC);
- if (ISERR()) {
+ if (ISERR())
+ {
freedfa(d);
return v->err;
}
MDEBUG(("crev %d\n", t->retry));
/* pick a tentative midpoint */
- if (v->mem[t->retry] == 0) {
- mid = shortest(v, d, begin, begin, end, (chr **)NULL, (int *)NULL);
- if (mid == NULL) {
+ if (v->mem[t->retry] == 0)
+ {
+ mid = shortest(v, d, begin, begin, end, (chr **) NULL, (int *) NULL);
+ if (mid == NULL)
+ {
freedfa(d);
freedfa(d2);
return REG_NOMATCH;
}
MDEBUG(("tentative midpoint %ld\n", LOFF(mid)));
v->mem[t->retry] = (mid - begin) + 1;
- } else {
+ }
+ else
+ {
mid = begin + (v->mem[t->retry] - 1);
MDEBUG(("working midpoint %ld\n", LOFF(mid)));
}
/* iterate until satisfaction or failure */
- for (;;) {
+ for (;;)
+ {
/* try this midpoint on for size */
er = cdissect(v, t->left, begin, mid);
if (er == REG_OKAY &&
- longest(v, d2, mid, end, (int *)NULL) == end &&
- (er = cdissect(v, t->right, mid, end)) ==
- REG_OKAY)
- break; /* NOTE BREAK OUT */
- if (er != REG_OKAY && er != REG_NOMATCH) {
+ longest(v, d2, mid, end, (int *) NULL) == end &&
+ (er = cdissect(v, t->right, mid, end)) ==
+ REG_OKAY)
+ break; /* NOTE BREAK OUT */
+ if (er != REG_OKAY && er != REG_NOMATCH)
+ {
freedfa(d);
freedfa(d2);
return er;
}
/* that midpoint didn't work, find a new one */
- if (mid == end) {
+ if (mid == end)
+ {
/* all possibilities exhausted */
MDEBUG(("%d no midpoint\n", t->retry));
freedfa(d);
freedfa(d2);
return REG_NOMATCH;
}
- mid = shortest(v, d, begin, mid+1, end, (chr **)NULL, (int *)NULL);
- if (mid == NULL) {
+ mid = shortest(v, d, begin, mid + 1, end, (chr **) NULL, (int *) NULL);
+ if (mid == NULL)
+ {
/* failed to find a new one */
MDEBUG(("%d failed midpoint\n", t->retry));
freedfa(d);
@@ -890,25 +950,25 @@ crevdissect(struct vars *v,
/*
* cbrdissect - determine backref subexpression matches
*/
-static int /* regexec return code */
-cbrdissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+cbrdissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
- int i;
- int n = t->subno;
- size_t len;
- chr *paren;
- chr *p;
- chr *stop;
- int min = t->min;
- int max = t->max;
+ int i;
+ int n = t->subno;
+ size_t len;
+ chr *paren;
+ chr *p;
+ chr *stop;
+ int min = t->min;
+ int max = t->max;
assert(t != NULL);
assert(t->op == 'b');
assert(n >= 0);
- assert((size_t)n < v->nmatch);
+ assert((size_t) n < v->nmatch);
MDEBUG(("cbackref n%d %d{%d-%d}\n", t->retry, n, min, max));
@@ -923,7 +983,8 @@ cbrdissect(struct vars *v,
v->mem[t->retry] = 1;
/* special-case zero-length string */
- if (len == 0) {
+ if (len == 0)
+ {
if (begin == end)
return REG_OKAY;
return REG_NOMATCH;
@@ -931,41 +992,44 @@ cbrdissect(struct vars *v,
/* and too-short string */
assert(end >= begin);
- if ((size_t)(end - begin) < len)
+ if ((size_t) (end - begin) < len)
return REG_NOMATCH;
stop = end - len;
/* count occurrences */
i = 0;
- for (p = begin; p <= stop && (i < max || max == INFINITY); p += len) {
- if ((*v->g->compare)(paren, p, len) != 0)
- break;
+ for (p = begin; p <= stop && (i < max || max == INFINITY); p += len)
+ {
+ if ((*v->g->compare) (paren, p, len) != 0)
+ break;
i++;
}
MDEBUG(("cbackref found %d\n", i));
/* and sort it out */
- if (p != end) /* didn't consume all of it */
+ if (p != end) /* didn't consume all of it */
return REG_NOMATCH;
if (min <= i && (i <= max || max == INFINITY))
return REG_OKAY;
- return REG_NOMATCH; /* out of range */
+ return REG_NOMATCH; /* out of range */
}
/*
* caltdissect - determine alternative subexpression matches (w. complications)
*/
-static int /* regexec return code */
-caltdissect(struct vars *v,
- struct subre *t,
- chr *begin, /* beginning of relevant substring */
- chr *end) /* end of same */
+static int /* regexec return code */
+caltdissect(struct vars * v,
+ struct subre * t,
+ chr * begin, /* beginning of relevant substring */
+ chr * end) /* end of same */
{
struct dfa *d;
- int er;
-# define UNTRIED 0 /* not yet tried at all */
-# define TRYING 1 /* top matched, trying submatches */
-# define TRIED 2 /* top didn't match or submatches exhausted */
+ int er;
+
+#define UNTRIED 0 /* not yet tried at all */
+#define TRYING 1 /* top matched, trying submatches */
+#define TRIED 2 /* top didn't match or submatches
+ * exhausted */
if (t == NULL)
return REG_NOMATCH;
@@ -976,11 +1040,13 @@ caltdissect(struct vars *v,
MDEBUG(("calt n%d\n", t->retry));
assert(t->left != NULL);
- if (v->mem[t->retry] == UNTRIED) {
+ if (v->mem[t->retry] == UNTRIED)
+ {
d = newdfa(v, &t->left->cnfa, &v->g->cmap, DOMALLOC);
if (ISERR())
return v->err;
- if (longest(v, d, begin, end, (int *)NULL) != end) {
+ if (longest(v, d, begin, end, (int *) NULL) != end)
+ {
freedfa(d);
v->mem[t->retry] = TRIED;
return caltdissect(v, t->right, begin, end);
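The regexec.c hunks above are pure pgindent reformatting, but the code they touch is the midpoint-splitting search used by condissect()/ccondissect(): match the left child's DFA against the longest (or, for non-greedy nodes, the shortest) prefix, test whether the right child matches the remainder, and if not, back the midpoint off by one position and try again until begin is reached ("all possibilities exhausted"). Here is a minimal, self-contained sketch of that loop; split_concat, left_ok and right_ok are hypothetical stand-ins, and none of the real DFA machinery (newdfa, longest, shortest, the retry memory) is reproduced.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the left/right DFA match tests. */
typedef int (*match_fn) (const char *begin, const char *end);

/*
 * Split [begin,end) into left|right by trying the longest left match
 * first and shrinking it until the right side also matches, mirroring
 * the greedy loop in condissect().  Returns the midpoint or NULL.
 */
static const char *
split_concat(const char *begin, const char *end,
             match_fn left_ok, match_fn right_ok)
{
    const char *mid = end;          /* tentative midpoint: longest first */

    for (;;)
    {
        if (left_ok(begin, mid) && right_ok(mid, end))
            return mid;             /* both halves satisfied */
        if (mid == begin)
            return NULL;            /* all possibilities exhausted */
        mid--;                      /* back the midpoint off and retry */
    }
}

/* Toy predicates: left wants a leading "ab", right wants exactly "c". */
static int left_ok(const char *b, const char *e)  { return e - b >= 2 && b[0] == 'a' && b[1] == 'b'; }
static int right_ok(const char *b, const char *e) { return e - b == 1 && b[0] == 'c'; }

int
main(void)
{
    const char *s = "abc";
    const char *mid = split_concat(s, s + strlen(s), left_ok, right_ok);

    printf("midpoint at offset %ld\n", mid ? (long) (mid - s) : -1L);
    return 0;
}

The complicated variant shown in the hunks differs mainly in that it records (mid - begin) + 1 in v->mem[t->retry], so a later retry can resume from the last midpoint instead of starting over.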
diff --git a/src/backend/regex/regfree.c b/src/backend/regex/regfree.c
index 88f3da32287..1bb9057a916 100644
--- a/src/backend/regex/regfree.c
+++ b/src/backend/regex/regfree.c
@@ -1,21 +1,21 @@
/*
* regfree - free an RE
*
- * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
- *
+ * Copyright (c) 1998, 1999 Henry Spencer. All rights reserved.
+ *
* Development of this software was funded, in part, by Cray Research Inc.,
* UUNET Communications Services Inc., Sun Microsystems Inc., and Scriptics
* Corporation, none of whom are responsible for the results. The author
- * thanks all of them.
- *
+ * thanks all of them.
+ *
* Redistribution and use in source and binary forms -- with or without
* modification -- are permitted for any purpose, provided that
* redistributions in source form retain this entire copyright notice and
* indicate the origin and nature of any modifications.
- *
+ *
* I'd appreciate being given credit for this package in the documentation
* of software which uses it, but that is not a requirement.
- *
+ *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
@@ -27,7 +27,7 @@
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * $Header: /cvsroot/pgsql/src/backend/regex/regfree.c,v 1.16 2003/02/05 17:41:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/regex/regfree.c,v 1.17 2003/08/04 00:43:21 momjian Exp $
*
*
* You might think that this could be incorporated into regcomp.c, and
@@ -50,5 +50,5 @@ pg_regfree(regex_t *re)
{
if (re == NULL)
return;
- (*((struct fns *)re->re_fns)->free)(re);
+ (*((struct fns *) re->re_fns)->free) (re);
}
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index e7039e85154..0c01a41cb9e 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteDefine.c,v 1.84 2003/08/01 00:15:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteDefine.c,v 1.85 2003/08/04 00:43:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -100,8 +100,8 @@ InsertRule(char *rulname,
if (!replace)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" already exists",
- rulname, get_rel_name(eventrel_oid))));
+ errmsg("rule \"%s\" for relation \"%s\" already exists",
+ rulname, get_rel_name(eventrel_oid))));
/*
* When replacing, we don't need to replace every attribute
@@ -253,7 +253,7 @@ DefineQueryRewrite(RuleStmt *stmt)
if (length(action) == 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("INSTEAD NOTHING rules on select are not implemented"),
+ errmsg("INSTEAD NOTHING rules on select are not implemented"),
errhint("Use views instead.")));
/*
@@ -344,7 +344,7 @@ DefineQueryRewrite(RuleStmt *stmt)
if (i != event_relation->rd_att->natts)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("select rule's target list has too few entries")));
+ errmsg("select rule's target list has too few entries")));
/*
* ... there must not be another ON SELECT rule already ...
@@ -358,9 +358,9 @@ DefineQueryRewrite(RuleStmt *stmt)
rule = event_relation->rd_rules->rules[i];
if (rule->event == CMD_SELECT)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("\"%s\" is already a view",
- RelationGetRelationName(event_relation))));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("\"%s\" is already a view",
+ RelationGetRelationName(event_relation))));
}
}
@@ -383,8 +383,8 @@ DefineQueryRewrite(RuleStmt *stmt)
NAMEDATALEN - 4 - 4) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("view rule for \"%s\" must be named \"%s\"",
- event_obj->relname, ViewSelectRuleName)));
+ errmsg("view rule for \"%s\" must be named \"%s\"",
+ event_obj->relname, ViewSelectRuleName)));
stmt->rulename = pstrdup(ViewSelectRuleName);
}
@@ -401,9 +401,9 @@ DefineQueryRewrite(RuleStmt *stmt)
scanDesc = heap_beginscan(event_relation, SnapshotNow, 0, NULL);
if (heap_getnext(scanDesc, ForwardScanDirection) != NULL)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot convert non-empty table \"%s\" to a view",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot convert non-empty table \"%s\" to a view",
+ event_obj->relname)));
heap_endscan(scanDesc);
RelisBecomingView = true;
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 440e8ba7133..d7efa9e2013 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteHandler.c,v 1.125 2003/07/29 17:21:24 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteHandler.c,v 1.126 2003/08/04 00:43:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,10 +34,11 @@
/* We use a list of these to detect recursion in RewriteQuery */
-typedef struct rewrite_event {
+typedef struct rewrite_event
+{
Oid relation; /* OID of relation having rules */
CmdType event; /* type of rule being fired */
-} rewrite_event;
+} rewrite_event;
static Query *rewriteRuleAction(Query *parsetree,
Query *rule_action,
@@ -107,20 +108,21 @@ rewriteRuleAction(Query *parsetree,
* Generate expanded rtable consisting of main parsetree's rtable plus
* rule action's rtable; this becomes the complete rtable for the rule
* action. Some of the entries may be unused after we finish
- * rewriting, but if we tried to remove them we'd have a much
- * harder job to adjust RT indexes in the query's Vars. It's OK to
- * have unused RT entries, since planner will ignore them.
+ * rewriting, but if we tried to remove them we'd have a much harder
+ * job to adjust RT indexes in the query's Vars. It's OK to have
+ * unused RT entries, since planner will ignore them.
*
* NOTE: because planner will destructively alter rtable, we must ensure
* that rule action's rtable is separate and shares no substructure
* with the main rtable. Hence do a deep copy here.
*
* Also, we must disable write-access checking in all the RT entries
- * copied from the main query. This is safe since in fact the rule action
- * won't write on them, and it's necessary because the rule action may
- * have a different commandType than the main query, causing
- * ExecCheckRTEPerms() to make an inappropriate check. The read-access
- * checks can be left enabled, although they're probably redundant.
+ * copied from the main query. This is safe since in fact the rule
+ * action won't write on them, and it's necessary because the rule
+ * action may have a different commandType than the main query,
+ * causing ExecCheckRTEPerms() to make an inappropriate check. The
+ * read-access checks can be left enabled, although they're probably
+ * redundant.
*/
main_rtable = (List *) copyObject(parsetree->rtable);
@@ -330,12 +332,12 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
}
/*
- * Handle the two cases where we need to insert a default expression:
- * it's an INSERT and there's no tlist entry for the column, or the
- * tlist entry is a DEFAULT placeholder node.
+ * Handle the two cases where we need to insert a default
+ * expression: it's an INSERT and there's no tlist entry for the
+ * column, or the tlist entry is a DEFAULT placeholder node.
*/
if ((new_tle == NULL && commandType == CMD_INSERT) ||
- (new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault)))
+ (new_tle && new_tle->expr && IsA(new_tle->expr, SetToDefault)))
{
Node *new_expr;
@@ -345,8 +347,9 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
* If there is no default (ie, default is effectively NULL),
* we can omit the tlist entry in the INSERT case, since the
* planner can insert a NULL for itself, and there's no point
- * in spending any more rewriter cycles on the entry. But in the
- * UPDATE case we've got to explicitly set the column to NULL.
+ * in spending any more rewriter cycles on the entry. But in
+ * the UPDATE case we've got to explicitly set the column to
+ * NULL.
*/
if (!new_expr)
{
@@ -540,13 +543,13 @@ build_column_default(Relation rel, int attrno)
/*
* Make sure the value is coerced to the target column type; this will
* generally be true already, but there seem to be some corner cases
- * involving domain defaults where it might not be true.
- * This should match the parser's processing of non-defaulted expressions
- * --- see updateTargetListEntry().
+ * involving domain defaults where it might not be true. This should
+ * match the parser's processing of non-defaulted expressions --- see
+ * updateTargetListEntry().
*/
exprtype = exprType(expr);
- expr = coerce_to_target_type(NULL, /* no UNKNOWN params here */
+ expr = coerce_to_target_type(NULL, /* no UNKNOWN params here */
expr, exprtype,
atttype, atttypmod,
COERCION_ASSIGNMENT,
@@ -559,7 +562,7 @@ build_column_default(Relation rel, int attrno)
NameStr(att_tup->attname),
format_type_be(atttype),
format_type_be(exprtype)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
return expr;
}
@@ -990,7 +993,7 @@ CopyAndAddInvertedQual(Query *parsetree,
* rows that the qualified action doesn't act on. (If there are multiple
* qualified INSTEAD rules, we AND all the negated quals onto a single
* modified original query.) We won't execute the original, unmodified
- * query if we find either qualified or unqualified INSTEAD rules. If
+ * query if we find either qualified or unqualified INSTEAD rules. If
* we find both, the modified original query is discarded too.
*/
static List *
@@ -1009,7 +1012,7 @@ fireRules(Query *parsetree,
RewriteRule *rule_lock = (RewriteRule *) lfirst(i);
Node *event_qual = rule_lock->qual;
List *actions = rule_lock->actions;
- QuerySource qsrc;
+ QuerySource qsrc;
List *r;
/* Determine correct QuerySource value for actions */
@@ -1020,7 +1023,7 @@ fireRules(Query *parsetree,
else
{
qsrc = QSRC_INSTEAD_RULE;
- *instead_flag = true; /* report unqualified INSTEAD */
+ *instead_flag = true; /* report unqualified INSTEAD */
}
}
else
@@ -1034,14 +1037,13 @@ fireRules(Query *parsetree,
* qualifications of the INSTEAD rules are added so it does
* its actions only in cases where the rule quals of all
* INSTEAD rules are false. Think of it as the default action
- * in a case. We save this in *qual_product so
- * RewriteQuery() can add it to the query list after we
- * mangled it up enough.
+ * in a case. We save this in *qual_product so RewriteQuery()
+ * can add it to the query list after we mangled it up enough.
*
- * If we have already found an unqualified INSTEAD rule,
- * then *qual_product won't be used, so don't bother building it.
+ * If we have already found an unqualified INSTEAD rule, then
+ * *qual_product won't be used, so don't bother building it.
*/
- if (! *instead_flag)
+ if (!*instead_flag)
{
if (*qual_product == NULL)
*qual_product = parsetree;
@@ -1093,8 +1095,8 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
* If the statement is an update, insert or delete - fire rules on it.
*
* SELECT rules are handled later when we have all the queries that
- * should get executed. Also, utilities aren't rewritten at all
- * (do we still need that check?)
+ * should get executed. Also, utilities aren't rewritten at all (do
+ * we still need that check?)
*/
if (event != CMD_SELECT && event != CMD_UTILITY)
{
@@ -1109,19 +1111,21 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
Assert(rt_entry->rtekind == RTE_RELATION);
/*
- * This may well be the first access to the result relation during the
- * current statement (it will be, if this Query was extracted from a
- * rule or somehow got here other than via the parser). Therefore,
- * grab the appropriate lock type for a result relation, and do not
- * release it until end of transaction. This protects the rewriter
- * and planner against schema changes mid-query.
+ * This may well be the first access to the result relation during
+ * the current statement (it will be, if this Query was extracted
+ * from a rule or somehow got here other than via the parser).
+ * Therefore, grab the appropriate lock type for a result
+ * relation, and do not release it until end of transaction. This
+ * protects the rewriter and planner against schema changes
+ * mid-query.
*/
rt_entry_relation = heap_open(rt_entry->relid, RowExclusiveLock);
/*
- * If it's an INSERT or UPDATE, rewrite the targetlist into standard
- * form. This will be needed by the planner anyway, and doing it now
- * ensures that any references to NEW.field will behave sanely.
+ * If it's an INSERT or UPDATE, rewrite the targetlist into
+ * standard form. This will be needed by the planner anyway, and
+ * doing it now ensures that any references to NEW.field will
+ * behave sanely.
*/
if (event == CMD_INSERT || event == CMD_UPDATE)
rewriteTargetList(parsetree, rt_entry_relation);
@@ -1144,8 +1148,8 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
&qual_product);
/*
- * If we got any product queries, recursively rewrite them
- * --- but first check for recursion!
+ * If we got any product queries, recursively rewrite them ---
+ * but first check for recursion!
*/
if (product_queries != NIL)
{
@@ -1158,9 +1162,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
if (rev->relation == RelationGetRelid(rt_entry_relation) &&
rev->event == event)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("infinite recursion detected in rules for relation \"%s\"",
- RelationGetRelationName(rt_entry_relation))));
+ (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("infinite recursion detected in rules for relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation))));
}
rev = (rewrite_event *) palloc(sizeof(rewrite_event));
@@ -1179,7 +1183,7 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
}
}
- heap_close(rt_entry_relation, NoLock); /* keep lock! */
+ heap_close(rt_entry_relation, NoLock); /* keep lock! */
}
/*
@@ -1191,9 +1195,9 @@ RewriteQuery(Query *parsetree, List *rewrite_events)
* disappear so the scans for them in the rule actions cannot find
* them.
*
- * If we found any unqualified INSTEAD, the original query is not
- * done at all, in any form. Otherwise, we add the modified form
- * if qualified INSTEADs were found, else the unmodified form.
+ * If we found any unqualified INSTEAD, the original query is not done at
+ * all, in any form. Otherwise, we add the modified form if qualified
+ * INSTEADs were found, else the unmodified form.
*/
if (!instead)
{
@@ -1299,7 +1303,8 @@ QueryRewrite(Query *parsetree)
* Step 3
*
* Determine which, if any, of the resulting queries is supposed to set
- * the command-result tag; and update the canSetTag fields accordingly.
+ * the command-result tag; and update the canSetTag fields
+ * accordingly.
*
* If the original query is still in the list, it sets the command tag.
* Otherwise, the last INSTEAD query of the same kind as the original
@@ -1308,8 +1313,8 @@ QueryRewrite(Query *parsetree)
* setting up a default tag based on the original un-rewritten query.)
*
* The Asserts verify that at most one query in the result list is marked
- * canSetTag. If we aren't checking asserts, we can fall out of the loop
- * as soon as we find the original query.
+ * canSetTag. If we aren't checking asserts, we can fall out of the
+ * loop as soon as we find the original query.
*/
origCmdType = parsetree->commandType;
foundOriginalQuery = false;
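The rewriteHandler.c hunks above include the recursion guard in RewriteQuery(): before rewriting the product queries of a fired rule, the rewriter scans a list of rewrite_event entries (relation OID plus command type) and raises the "infinite recursion detected" error if the same pair is already being expanded. A small sketch of that check, using a plain singly linked list and int keys as hypothetical stand-ins for PostgreSQL's List, Oid and CmdType:

#include <stdio.h>

/* Simplified stand-in for PostgreSQL's rewrite_event. */
typedef struct rewrite_event
{
    int         relation;           /* "OID" of relation having rules */
    int         event;              /* type of rule being fired */
    struct rewrite_event *next;     /* enclosing entries on the stack */
} rewrite_event;

/*
 * Return 1 if (relation, event) already appears on the stack, i.e. the
 * same kind of rule on the same relation is firing inside itself.
 */
static int
would_recurse(const rewrite_event *stack, int relation, int event)
{
    const rewrite_event *rev;

    for (rev = stack; rev != NULL; rev = rev->next)
        if (rev->relation == relation && rev->event == event)
            return 1;
    return 0;
}

int
main(void)
{
    rewrite_event outer = {42, 1, NULL};        /* rule already firing */

    if (would_recurse(&outer, 42, 1))
        fprintf(stderr, "infinite recursion detected in rules for relation 42\n");
    /* A different event on the same relation is allowed. */
    printf("other event recurses: %d\n", would_recurse(&outer, 42, 2));
    return 0;
}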
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index f36f96c3491..3f06b3f722e 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteManip.c,v 1.74 2003/07/25 00:01:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/rewrite/rewriteManip.c,v 1.75 2003/08/04 00:43:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,10 +25,10 @@
typedef struct
{
int sublevels_up;
-} checkExprHasAggs_context;
+} checkExprHasAggs_context;
static bool checkExprHasAggs_walker(Node *node,
- checkExprHasAggs_context *context);
+ checkExprHasAggs_context * context);
static bool checkExprHasSubLink_walker(Node *node, void *context);
static Relids offset_relid_set(Relids relids, int offset);
static Relids adjust_relid_set(Relids relids, int oldrelid, int newrelid);
@@ -51,6 +51,7 @@ checkExprHasAggs(Node *node)
checkExprHasAggs_context context;
context.sublevels_up = 0;
+
/*
* Must be prepared to start with a Query or a bare expression tree;
* if it's a Query, we don't want to increment sublevels_up.
@@ -62,7 +63,7 @@ checkExprHasAggs(Node *node)
}
static bool
-checkExprHasAggs_walker(Node *node, checkExprHasAggs_context *context)
+checkExprHasAggs_walker(Node *node, checkExprHasAggs_context * context)
{
if (node == NULL)
return false;
@@ -173,7 +174,7 @@ OffsetVarNodes_walker(Node *node, OffsetVarNodes_context *context)
}
if (IsA(node, InClauseInfo))
{
- InClauseInfo *ininfo = (InClauseInfo *) node;
+ InClauseInfo *ininfo = (InClauseInfo *) node;
if (context->sublevels_up == 0)
{
@@ -247,9 +248,7 @@ offset_relid_set(Relids relids, int offset)
tmprelids = bms_copy(relids);
while ((rtindex = bms_first_member(tmprelids)) >= 0)
- {
result = bms_add_member(result, rtindex + offset);
- }
bms_free(tmprelids);
return result;
}
@@ -312,7 +311,7 @@ ChangeVarNodes_walker(Node *node, ChangeVarNodes_context *context)
}
if (IsA(node, InClauseInfo))
{
- InClauseInfo *ininfo = (InClauseInfo *) node;
+ InClauseInfo *ininfo = (InClauseInfo *) node;
if (context->sublevels_up == 0)
{
@@ -530,7 +529,7 @@ rangeTableEntry_used_walker(Node *node,
}
if (IsA(node, InClauseInfo))
{
- InClauseInfo *ininfo = (InClauseInfo *) node;
+ InClauseInfo *ininfo = (InClauseInfo *) node;
if (context->sublevels_up == 0 &&
(bms_is_member(context->rt_index, ininfo->lefthand) ||
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 857e9d4f4f6..c1a4666907b 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.137 2003/07/24 22:04:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.138 2003/08/04 00:43:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@
/* GUC variable */
-bool zero_damaged_pages = false;
+bool zero_damaged_pages = false;
static void WaitIO(BufferDesc *buf);
@@ -232,14 +232,14 @@ ReadBufferInternal(Relation reln, BlockNumber blockNum,
ereport(WARNING,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page header in block %u of \"%s\"; zeroing out page",
- blockNum, RelationGetRelationName(reln))));
+ blockNum, RelationGetRelationName(reln))));
MemSet((char *) MAKE_PTR(bufHdr->data), 0, BLCKSZ);
}
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of \"%s\"",
- blockNum, RelationGetRelationName(reln))));
+ errmsg("invalid page header in block %u of \"%s\"",
+ blockNum, RelationGetRelationName(reln))));
}
}
@@ -959,8 +959,8 @@ AtEOXact_Buffers(bool isCommit)
if (isCommit)
elog(WARNING,
- "buffer refcount leak: [%03d] (freeNext=%d, freePrev=%d, "
- "rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
+ "buffer refcount leak: [%03d] (freeNext=%d, freePrev=%d, "
+ "rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
i, buf->freeNext, buf->freePrev,
buf->tag.rnode.tblNode, buf->tag.rnode.relNode,
buf->tag.blockNum, buf->flags,
@@ -1509,10 +1509,10 @@ FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock)
if (status == SM_FAIL) /* disk failure ?! */
ereport(PANIC,
(errcode(ERRCODE_IO_ERROR),
- errmsg("could not write block %u of %u/%u",
- bufHdr->tag.blockNum,
- bufHdr->tag.rnode.tblNode,
- bufHdr->tag.rnode.relNode)));
+ errmsg("could not write block %u of %u/%u",
+ bufHdr->tag.blockNum,
+ bufHdr->tag.rnode.tblNode,
+ bufHdr->tag.rnode.relNode)));
BufferFlushCount++;
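The bufmgr.c hunk above rewraps the messages around the zero_damaged_pages GUC: when a page header fails validation, ReadBufferInternal() either raises an ERROR or, if the escape hatch is enabled, issues a WARNING and zeroes the block so processing can continue. A stripped-down sketch of that decision, assuming a trivial header check and plain memset/fprintf in place of PageHeaderIsValid(), MemSet and ereport():

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BLCKSZ 8192

/* GUC-style escape hatch, normally off. */
static bool zero_damaged_pages = false;

/* Hypothetical, much weaker stand-in for the real header validation. */
static bool
page_header_is_valid(const unsigned char *page)
{
    return page[0] != 0xFF;         /* pretend 0xFF marks corruption */
}

/* Returns false when the page was damaged (zeroed or would have errored). */
static bool
check_or_zero_page(unsigned char *page, unsigned blockNum)
{
    if (page_header_is_valid(page))
        return true;

    if (zero_damaged_pages)
    {
        fprintf(stderr,
                "WARNING: invalid page header in block %u; zeroing out page\n",
                blockNum);
        memset(page, 0, BLCKSZ);
        return false;
    }
    fprintf(stderr, "ERROR: invalid page header in block %u\n", blockNum);
    /* the real code longjmps out via ereport(ERROR) here */
    return false;
}

int
main(void)
{
    unsigned char page[BLCKSZ] = {0xFF};

    zero_damaged_pages = true;
    check_or_zero_page(page, 7);
    printf("first byte after recovery: %u\n", (unsigned) page[0]);
    return 0;
}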
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 06606990c51..271a752a623 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.99 2003/07/24 22:04:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.100 2003/08/04 00:43:23 momjian Exp $
*
* NOTES:
*
@@ -113,8 +113,8 @@ int max_files_per_process = 1000;
#define FileUnknownPos (-1L)
/* these are the assigned bits in fdstate below: */
-#define FD_TEMPORARY (1 << 0) /* T = delete when closed */
-#define FD_XACT_TEMPORARY (1 << 1) /* T = delete at eoXact */
+#define FD_TEMPORARY (1 << 0) /* T = delete when closed */
+#define FD_XACT_TEMPORARY (1 << 1) /* T = delete at eoXact */
typedef struct vfd
{
@@ -247,7 +247,7 @@ pg_fdatasync(int fd)
* This is exported for use by places that really want a plain kernel FD,
* but need to be proof against running out of FDs. Once an FD has been
* successfully returned, it is the caller's responsibility to ensure that
- * it will not be leaked on ereport()! Most users should *not* call this
+ * it will not be leaked on ereport()! Most users should *not* call this
* routine directly, but instead use the VFD abstraction level, which
* provides protection against descriptor leaks as well as management of
* files that need to be open for more than a short period of time.
@@ -274,7 +274,7 @@ tryAgain:
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto tryAgain;
@@ -1064,7 +1064,7 @@ TryAgain:
ereport(LOG,
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
- errmsg("out of file descriptors: %m; release and retry")));
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto TryAgain;
@@ -1158,7 +1158,7 @@ AtProcExit_Files(void)
static void
CleanupTempFiles(bool isProcExit)
{
- Index i;
+ Index i;
if (SizeVfdCache > 0)
{
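The fd.c hunks above reindent the "out of file descriptors: %m; release and retry" path: when the kernel open() fails because the process (EMFILE) or system (ENFILE) descriptor table is full, the fd manager closes a least-recently-used virtual file descriptor and jumps back to retry. A simplified sketch of that loop; open_with_retry and the release callback are hypothetical stand-ins for BasicOpenFile() and ReleaseLruFile(), and the VFD cache itself is not modeled:

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Hypothetical stand-in for ReleaseLruFile(): returns true if it freed one. */
typedef bool (*release_fn) (void);

/*
 * Open fileName, retrying after releasing an LRU descriptor whenever the
 * kernel reports that the FD table is full -- the same shape as the
 * tryAgain loops in fd.c.
 */
static int
open_with_retry(const char *fileName, int fileFlags, mode_t fileMode,
                release_fn release_lru)
{
    for (;;)
    {
        int fd = open(fileName, fileFlags, fileMode);

        if (fd >= 0 || (errno != EMFILE && errno != ENFILE))
            return fd;              /* success, or an unrelated failure */

        fprintf(stderr, "LOG: out of file descriptors; release and retry\n");
        errno = 0;
        if (!release_lru())         /* nothing left to close: give up */
        {
            errno = EMFILE;
            return -1;
        }
    }
}

static bool no_files_to_release(void) { return false; }

int
main(void)
{
    int fd = open_with_retry("/etc/hostname", O_RDONLY, 0, no_files_to_release);

    printf("fd = %d\n", fd);
    return 0;
}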
diff --git a/src/backend/storage/freespace/freespace.c b/src/backend/storage/freespace/freespace.c
index c7f72aafd62..5d381aa7f3e 100644
--- a/src/backend/storage/freespace/freespace.c
+++ b/src/backend/storage/freespace/freespace.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.18 2003/07/24 22:04:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.19 2003/08/04 00:43:24 momjian Exp $
*
*
* NOTES:
@@ -39,7 +39,7 @@
* Actually, our space allocation is done in "chunks" of CHUNKPAGES pages,
* with each relation guaranteed at least one chunk. This reduces thrashing
* of the storage allocations when there are small changes in the RRFS page
- * counts from one VACUUM to the next. (XXX it might also be worthwhile to
+ * counts from one VACUUM to the next. (XXX it might also be worthwhile to
* impose some kind of moving-average smoothing on the RRFS page counts?)
*
* So the actual arithmetic is: for each relation compute myRequest as the
@@ -72,10 +72,10 @@
/* Initial value for average-request moving average */
-#define INITIAL_AVERAGE ((Size) (BLCKSZ / 32))
+#define INITIAL_AVERAGE ((Size) (BLCKSZ / 32))
/*
- * Number of pages and bytes per allocation chunk. Indexes can squeeze 50%
+ * Number of pages and bytes per allocation chunk. Indexes can squeeze 50%
* more pages into the same space because they don't need to remember how much
* free space on each page. The nominal number of pages, CHUNKPAGES, is for
* regular rels, and INDEXCHUNKPAGES is for indexes. CHUNKPAGES should be
@@ -83,7 +83,7 @@
*/
#define CHUNKPAGES 16
#define CHUNKBYTES (CHUNKPAGES * sizeof(FSMPageData))
-#define INDEXCHUNKPAGES ((int) (CHUNKBYTES / sizeof(IndexFSMPageData)))
+#define INDEXCHUNKPAGES ((int) (CHUNKBYTES / sizeof(IndexFSMPageData)))
/*
@@ -104,9 +104,9 @@ typedef BlockIdData IndexFSMPageData;
BlockIdSet(&(ptr)->ip_blkid, pg)
#define FSMPageSetSpace(ptr, sz) \
((ptr)->ip_posid = (OffsetNumber) (sz))
-#define IndexFSMPageGetPageNum(ptr) \
+#define IndexFSMPageGetPageNum(ptr) \
BlockIdGetBlockNumber(ptr)
-#define IndexFSMPageSetPageNum(ptr, pg) \
+#define IndexFSMPageSetPageNum(ptr, pg) \
BlockIdSet(ptr, pg)
/*----------
@@ -144,7 +144,7 @@ typedef struct FsmCacheFileHeader
uint32 endian;
uint32 version;
int32 numRels;
-} FsmCacheFileHeader;
+} FsmCacheFileHeader;
/* Per-relation header */
typedef struct FsmCacheRelHeader
@@ -154,7 +154,7 @@ typedef struct FsmCacheRelHeader
uint32 avgRequest; /* moving average of space requests */
int32 lastPageCount; /* pages passed to RecordRelationFreeSpace */
int32 storedPages; /* # of pages stored in arena */
-} FsmCacheRelHeader;
+} FsmCacheRelHeader;
/*
@@ -167,7 +167,7 @@ typedef struct FsmCacheRelHeader
*
* Each relation owns one or more chunks of per-page storage in the "arena".
* The chunks for each relation are always consecutive, so that it can treat
- * its page storage as a simple array. We further insist that its page data
+ * its page storage as a simple array. We further insist that its page data
* be ordered by block number, so that binary search is possible.
*
* Note: we handle pointers to these items as pointers, not as SHMEM_OFFSETs.
@@ -182,7 +182,7 @@ struct FSMHeader
{
HTAB *relHash; /* hashtable of FSMRelation entries */
FSMRelation *usageList; /* FSMRelations in usage-recency order */
- FSMRelation *usageListTail; /* tail of usage-recency list */
+ FSMRelation *usageListTail; /* tail of usage-recency list */
FSMRelation *firstRel; /* FSMRelations in arena storage order */
FSMRelation *lastRel; /* tail of storage-order list */
int numRels; /* number of FSMRelations now in use */
@@ -204,7 +204,7 @@ struct FSMRelation
FSMRelation *nextUsage; /* next rel in usage-recency order */
FSMRelation *priorUsage; /* prior rel in usage-recency order */
FSMRelation *nextPhysical; /* next rel in arena-storage order */
- FSMRelation *priorPhysical; /* prior rel in arena-storage order */
+ FSMRelation *priorPhysical; /* prior rel in arena-storage order */
bool isIndex; /* if true, we store only page numbers */
Size avgRequest; /* moving average of space requests */
int lastPageCount; /* pages passed to RecordRelationFreeSpace */
@@ -233,13 +233,13 @@ static BlockNumber find_index_free_space(FSMRelation *fsmrel);
static void fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page,
Size spaceAvail);
static bool lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
- int *outPageIndex);
+ int *outPageIndex);
static void compact_fsm_storage(void);
static void push_fsm_rels_after(FSMRelation *afterRel);
-static void pack_incoming_pages(FSMPageData *newLocation, int newPages,
- PageFreeSpaceInfo *pageSpaces, int nPages);
-static void pack_existing_pages(FSMPageData *newLocation, int newPages,
- FSMPageData *oldLocation, int oldPages);
+static void pack_incoming_pages(FSMPageData * newLocation, int newPages,
+ PageFreeSpaceInfo * pageSpaces, int nPages);
+static void pack_existing_pages(FSMPageData * newLocation, int newPages,
+ FSMPageData * oldLocation, int oldPages);
static int fsm_calc_request(FSMRelation *fsmrel);
static int fsm_calc_target_allocation(int myRequest);
static int fsm_current_chunks(FSMRelation *fsmrel);
@@ -271,7 +271,7 @@ InitFreeSpaceMap(void)
if (FreeSpaceMap == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
MemSet(FreeSpaceMap, 0, sizeof(FSMHeader));
/* Create hashtable for FSMRelations */
@@ -288,7 +288,7 @@ InitFreeSpaceMap(void)
if (!FreeSpaceMap->relHash)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
/* Allocate page-storage arena */
nchunks = (MaxFSMPages - 1) / CHUNKPAGES + 1;
@@ -296,14 +296,14 @@ InitFreeSpaceMap(void)
if (nchunks <= MaxFSMRelations)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
- CHUNKPAGES)));
+ errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
+ CHUNKPAGES)));
FreeSpaceMap->arena = (char *) ShmemAlloc(nchunks * CHUNKBYTES);
if (FreeSpaceMap->arena == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("insufficient shared memory for free space map")));
+ errmsg("insufficient shared memory for free space map")));
FreeSpaceMap->totalChunks = nchunks;
FreeSpaceMap->usedChunks = 0;
@@ -348,7 +348,7 @@ FreeSpaceShmemSize(void)
* will turn out to have too little space available by the time the caller
* gets a lock on it. In that case, the caller should report the actual
* amount of free space available on that page and then try again (see
- * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
+ * RecordAndGetPageWithFreeSpace). If InvalidBlockNumber is returned,
* extend the relation.
*/
BlockNumber
@@ -365,10 +365,10 @@ GetPageWithFreeSpace(RelFileNode *rel, Size spaceNeeded)
fsmrel = create_fsm_rel(rel);
/*
- * Update the moving average of space requests. This code implements an
- * exponential moving average with an equivalent period of about 63
- * requests. Ignore silly requests, however, to ensure that the average
- * stays sane.
+ * Update the moving average of space requests. This code implements
+ * an exponential moving average with an equivalent period of about 63
+ * requests. Ignore silly requests, however, to ensure that the
+ * average stays sane.
*/
if (spaceNeeded > 0 && spaceNeeded < BLCKSZ)
{
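The hunk above only rewraps the comment about the moving average of space requests, but the arithmetic behind it is worth spelling out: an exponential moving average avg += (x - avg) * alpha has an equivalent period of roughly 2/alpha - 1, so a step of 1/32 gives the "about 63 requests" the comment mentions. A tiny sketch of such an update rule; the 1/32 step and the integer arithmetic are assumptions chosen to match the stated period, not a quote of the surrounding code:

#include <stdio.h>

/* Exponential moving average with integer step avg += (x - avg) / 32. */
static int
ema_update(int avg, int request)
{
    return avg + (request - avg) / 32;
}

int
main(void)
{
    int avg = 256;                  /* e.g. BLCKSZ / 32 as an initial value */
    int i;

    /* Feed 200 identical requests and watch the average approach them. */
    for (i = 0; i < 200; i++)
        avg = ema_update(avg, 1024);
    printf("average after 200 requests of 1024: %d\n", avg);
    return 0;
}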
@@ -409,6 +409,7 @@ RecordAndGetPageWithFreeSpace(RelFileNode *rel,
/* Do the Record */
fsm_record_free_space(fsmrel, oldPage, oldSpaceAvail);
+
/*
* Update the moving average of space requests, same as in
* GetPageWithFreeSpace.
@@ -458,7 +459,7 @@ GetAvgFSMRequestSize(RelFileNode *rel)
void
RecordRelationFreeSpace(RelFileNode *rel,
int nPages,
- PageFreeSpaceInfo *pageSpaces)
+ PageFreeSpaceInfo * pageSpaces)
{
FSMRelation *fsmrel;
@@ -469,11 +470,12 @@ RecordRelationFreeSpace(RelFileNode *rel,
nPages = MaxFSMPages;
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
+
/*
* Note we don't record info about a relation unless there's already
* an FSM entry for it, implying someone has done GetPageWithFreeSpace
- * for it. Inactive rels thus will not clutter the map simply by being
- * vacuumed.
+ * for it. Inactive rels thus will not clutter the map simply by
+ * being vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
if (fsmrel)
@@ -484,6 +486,7 @@ RecordRelationFreeSpace(RelFileNode *rel,
curAlloc = realloc_fsm_rel(fsmrel, nPages, false);
curAllocPages = curAlloc * CHUNKPAGES;
+
/*
* If the data fits in our current allocation, just copy it;
* otherwise must compress.
@@ -500,7 +503,7 @@ RecordRelationFreeSpace(RelFileNode *rel,
Size avail = pageSpaces[i].avail;
/* Check caller provides sorted data */
- if (i > 0 && page <= pageSpaces[i-1].blkno)
+ if (i > 0 && page <= pageSpaces[i - 1].blkno)
elog(ERROR, "free-space data is not in page order");
FSMPageSetPageNum(newLocation, page);
FSMPageSetSpace(newLocation, avail);
@@ -556,10 +559,11 @@ RecordIndexFreeSpace(RelFileNode *rel,
nPages = MaxFSMPages;
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
+
/*
* Note we don't record info about a relation unless there's already
- * an FSM entry for it, implying someone has done GetFreeIndexPage
- * for it. Inactive rels thus will not clutter the map simply by being
+ * an FSM entry for it, implying someone has done GetFreeIndexPage for
+ * it. Inactive rels thus will not clutter the map simply by being
* vacuumed.
*/
fsmrel = lookup_fsm_rel(rel);
@@ -572,6 +576,7 @@ RecordIndexFreeSpace(RelFileNode *rel,
curAlloc = realloc_fsm_rel(fsmrel, nPages, true);
curAllocPages = curAlloc * INDEXCHUNKPAGES;
+
/*
* If the data fits in our current allocation, just copy it;
* otherwise must compress. But compression is easy: we merely
@@ -587,7 +592,7 @@ RecordIndexFreeSpace(RelFileNode *rel,
BlockNumber page = pages[i];
/* Check caller provides sorted data */
- if (i > 0 && page <= pages[i-1])
+ if (i > 0 && page <= pages[i - 1])
elog(ERROR, "free-space data is not in page order");
IndexFSMPageSetPageNum(newLocation, page);
newLocation++;
@@ -660,7 +665,7 @@ FreeSpaceMapForgetDatabase(Oid dbid)
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
for (fsmrel = FreeSpaceMap->usageList; fsmrel; fsmrel = nextrel)
{
- nextrel = fsmrel->nextUsage; /* in case we delete it */
+ nextrel = fsmrel->nextUsage; /* in case we delete it */
if (fsmrel->key.tblNode == dbid)
delete_fsm_rel(fsmrel);
}
@@ -670,7 +675,7 @@ FreeSpaceMapForgetDatabase(Oid dbid)
/*
* PrintFreeSpaceMapStatistics - print statistics about FSM contents
*
- * The info is sent to ereport() with the specified message level. This is
+ * The info is sent to ereport() with the specified message level. This is
* intended for use during VACUUM.
*/
void
@@ -687,9 +692,7 @@ PrintFreeSpaceMapStatistics(int elevel)
for (fsmrel = FreeSpaceMap->firstRel;
fsmrel != NULL;
fsmrel = fsmrel->nextPhysical)
- {
storedPages += fsmrel->storedPages;
- }
/* Copy other stats before dropping lock */
numRels = FreeSpaceMap->numRels;
sumRequests = FreeSpaceMap->sumRequests;
@@ -808,7 +811,7 @@ write_failed:
* forma --- if anyone else is accessing FSM yet, there's a problem.
*
* Notes: no complaint is issued if no cache file is found. If the file is
- * found, it is deleted after reading. Thus, if we crash without a clean
+ * found, it is deleted after reading. Thus, if we crash without a clean
* shutdown, the next cycle of life starts with no FSM data. To do otherwise,
* we'd need to do significantly more validation in this routine, because of
* the likelihood that what is in the dump file would be out-of-date, eg
@@ -879,7 +882,7 @@ LoadFreeSpaceMap(void)
len = nPages * sizeof(IndexFSMPageData);
else
len = nPages * sizeof(FSMPageData);
- data = (char *) palloc(len + 1); /* +1 to avoid palloc(0) */
+ data = (char *) palloc(len + 1); /* +1 to avoid palloc(0) */
if (fread(data, 1, len, fp) != len)
{
elog(LOG, "premature EOF in \"%s\"", cachefilename);
@@ -888,7 +891,7 @@ LoadFreeSpaceMap(void)
}
/*
- * Okay, create the FSM entry and insert data into it. Since the
+ * Okay, create the FSM entry and insert data into it. Since the
* rels were stored in reverse usage order, at the end of the loop
* they will be correctly usage-ordered in memory; and if
* MaxFSMRelations is less than it used to be, we will correctly
@@ -904,10 +907,11 @@ LoadFreeSpaceMap(void)
IndexFSMPageData *newLocation;
curAllocPages = curAlloc * INDEXCHUNKPAGES;
+
/*
* If the data fits in our current allocation, just copy it;
- * otherwise must compress. But compression is easy: we merely
- * forget extra pages.
+ * otherwise must compress. But compression is easy: we
+ * merely forget extra pages.
*/
newLocation = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
@@ -921,6 +925,7 @@ LoadFreeSpaceMap(void)
FSMPageData *newLocation;
curAllocPages = curAlloc * CHUNKPAGES;
+
/*
* If the data fits in our current allocation, just copy it;
* otherwise must compress.
@@ -1005,7 +1010,7 @@ create_fsm_rel(RelFileNode *rel)
fsmrel->isIndex = false; /* until we learn different */
fsmrel->avgRequest = INITIAL_AVERAGE;
fsmrel->lastPageCount = 0;
- fsmrel->firstChunk = -1; /* no space allocated */
+ fsmrel->firstChunk = -1; /* no space allocated */
fsmrel->storedPages = 0;
fsmrel->nextPage = 0;
@@ -1015,7 +1020,7 @@ create_fsm_rel(RelFileNode *rel)
/* Add new entry at front of LRU list */
link_fsm_rel_usage(fsmrel);
- fsmrel->nextPhysical = NULL; /* not in physical-storage list */
+ fsmrel->nextPhysical = NULL; /* not in physical-storage list */
fsmrel->priorPhysical = NULL;
FreeSpaceMap->numRels++;
/* sumRequests is unchanged because request must be zero */
@@ -1076,14 +1081,15 @@ realloc_fsm_rel(FSMRelation *fsmrel, int nPages, bool isIndex)
myRequest = fsm_calc_request(fsmrel);
FreeSpaceMap->sumRequests += myRequest;
myAlloc = fsm_calc_target_allocation(myRequest);
+
/*
- * Need to reallocate space if (a) my target allocation is more
- * than my current allocation, AND (b) my actual immediate need
- * (myRequest+1 chunks) is more than my current allocation.
- * Otherwise just store the new data in-place.
+ * Need to reallocate space if (a) my target allocation is more than
+ * my current allocation, AND (b) my actual immediate need
+ * (myRequest+1 chunks) is more than my current allocation. Otherwise
+ * just store the new data in-place.
*/
curAlloc = fsm_current_allocation(fsmrel);
- if (myAlloc > curAlloc && (myRequest+1) > curAlloc && nPages > 0)
+ if (myAlloc > curAlloc && (myRequest + 1) > curAlloc && nPages > 0)
{
/* Remove entry from storage list, and compact */
unlink_fsm_rel_storage(fsmrel);
@@ -1133,6 +1139,7 @@ unlink_fsm_rel_usage(FSMRelation *fsmrel)
fsmrel->nextUsage->priorUsage = fsmrel->priorUsage;
else
FreeSpaceMap->usageListTail = fsmrel->priorUsage;
+
/*
* We don't bother resetting fsmrel's links, since it's about to be
* deleted or relinked at the head.
@@ -1212,7 +1219,8 @@ find_free_space(FSMRelation *fsmrel, Size spaceNeeded)
if (spaceAvail >= spaceNeeded)
{
/*
- * Found what we want --- adjust the entry, and update nextPage.
+ * Found what we want --- adjust the entry, and update
+ * nextPage.
*/
FSMPageSetSpace(page, spaceAvail - spaceNeeded);
fsmrel->nextPage = pageIndex + 1;
@@ -1233,12 +1241,12 @@ static BlockNumber
find_index_free_space(FSMRelation *fsmrel)
{
IndexFSMPageData *info;
- BlockNumber result;
+ BlockNumber result;
/*
* If isIndex isn't set, it could be that RecordIndexFreeSpace() has
- * never yet been called on this relation, and we're still looking
- * at the default setting from create_fsm_rel(). If so, just act as
+ * never yet been called on this relation, and we're still looking at
+ * the default setting from create_fsm_rel(). If so, just act as
* though there's no space.
*/
if (!fsmrel->isIndex)
@@ -1247,14 +1255,15 @@ find_index_free_space(FSMRelation *fsmrel)
return InvalidBlockNumber;
elog(ERROR, "find_index_free_space called for a non-index relation");
}
+
/*
- * For indexes, there's no need for the nextPage state variable; we just
- * remove and return the first available page. (We could save cycles here
- * by returning the last page, but it seems better to encourage re-use
- * of lower-numbered pages.)
+ * For indexes, there's no need for the nextPage state variable; we
+ * just remove and return the first available page. (We could save
+ * cycles here by returning the last page, but it seems better to
+ * encourage re-use of lower-numbered pages.)
*/
if (fsmrel->storedPages <= 0)
- return InvalidBlockNumber; /* no pages available */
+ return InvalidBlockNumber; /* no pages available */
info = (IndexFSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
result = IndexFSMPageGetPageNum(info);
@@ -1287,8 +1296,8 @@ fsm_record_free_space(FSMRelation *fsmrel, BlockNumber page, Size spaceAvail)
else
{
/*
- * No existing entry; ignore the call. We used to add the page
- * to the FSM --- but in practice, if the page hasn't got enough
+ * No existing entry; ignore the call. We used to add the page to
+ * the FSM --- but in practice, if the page hasn't got enough
* space to satisfy the caller who's kicking it back to us, then
* it's probably uninteresting to everyone else as well.
*/
@@ -1327,7 +1336,7 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
while (low <= high)
{
int middle;
- BlockNumber probe;
+ BlockNumber probe;
middle = low + (high - low) / 2;
probe = IndexFSMPageGetPageNum(info + middle);
@@ -1357,7 +1366,7 @@ lookup_fsm_page_entry(FSMRelation *fsmrel, BlockNumber page,
while (low <= high)
{
int middle;
- BlockNumber probe;
+ BlockNumber probe;
middle = low + (high - low) / 2;
probe = FSMPageGetPageNum(info + middle);
@@ -1391,13 +1400,13 @@ compact_fsm_storage(void)
fsmrel != NULL;
fsmrel = fsmrel->nextPhysical)
{
- int newAlloc;
- int newAllocPages;
- int newChunkIndex;
- int oldChunkIndex;
- int curChunks;
- char *newLocation;
- char *oldLocation;
+ int newAlloc;
+ int newAllocPages;
+ int newChunkIndex;
+ int oldChunkIndex;
+ int curChunks;
+ char *newLocation;
+ char *oldLocation;
/*
* Calculate target allocation, make sure we don't overrun due to
@@ -1412,6 +1421,7 @@ compact_fsm_storage(void)
newAllocPages = newAlloc * CHUNKPAGES;
newChunkIndex = nextChunkIndex;
nextChunkIndex += newAlloc;
+
/*
* Determine current size, current and new locations
*/
@@ -1419,18 +1429,19 @@ compact_fsm_storage(void)
oldChunkIndex = fsmrel->firstChunk;
newLocation = FreeSpaceMap->arena + newChunkIndex * CHUNKBYTES;
oldLocation = FreeSpaceMap->arena + oldChunkIndex * CHUNKBYTES;
+
/*
* It's possible that we have to move data down, not up, if the
* allocations of previous rels expanded. This should mean that
* our allocation expanded too (or at least got no worse), and
* ditto for later rels. So there should be room --- but we might
* have to push down following rels to make it. We don't want to
- * do the push more than once, so pack everything against the
- * end of the arena if so.
+ * do the push more than once, so pack everything against the end
+ * of the arena if so.
*/
if (newChunkIndex > oldChunkIndex)
{
- int limitChunkIndex;
+ int limitChunkIndex;
if (newAllocPages < fsmrel->storedPages)
elog(PANIC, "can't juggle and compress too");
@@ -1455,9 +1466,9 @@ compact_fsm_storage(void)
else if (newAllocPages < fsmrel->storedPages)
{
/*
- * Need to compress the page data. For an index, "compression"
- * just means dropping excess pages; otherwise we try to keep
- * the ones with the most space.
+ * Need to compress the page data. For an index,
+ * "compression" just means dropping excess pages; otherwise
+ * we try to keep the ones with the most space.
*/
if (fsmrel->isIndex)
{
@@ -1508,11 +1519,11 @@ push_fsm_rels_after(FSMRelation *afterRel)
fsmrel != NULL;
fsmrel = fsmrel->priorPhysical)
{
- int chunkCount;
- int newChunkIndex;
- int oldChunkIndex;
- char *newLocation;
- char *oldLocation;
+ int chunkCount;
+ int newChunkIndex;
+ int oldChunkIndex;
+ char *newLocation;
+ char *oldLocation;
if (fsmrel == afterRel)
break;
@@ -1549,8 +1560,8 @@ push_fsm_rels_after(FSMRelation *afterRel)
#define HISTOGRAM_BINS 64
static void
-pack_incoming_pages(FSMPageData *newLocation, int newPages,
- PageFreeSpaceInfo *pageSpaces, int nPages)
+pack_incoming_pages(FSMPageData * newLocation, int newPages,
+ PageFreeSpaceInfo * pageSpaces, int nPages)
{
int histogram[HISTOGRAM_BINS];
int above,
@@ -1564,35 +1575,35 @@ pack_incoming_pages(FSMPageData *newLocation, int newPages,
MemSet(histogram, 0, sizeof(histogram));
for (i = 0; i < nPages; i++)
{
- Size avail = pageSpaces[i].avail;
+ Size avail = pageSpaces[i].avail;
if (avail >= BLCKSZ)
elog(ERROR, "bogus freespace amount");
- avail /= (BLCKSZ/HISTOGRAM_BINS);
+ avail /= (BLCKSZ / HISTOGRAM_BINS);
histogram[avail]++;
}
/* Find the breakpoint bin */
above = 0;
- for (i = HISTOGRAM_BINS-1; i >= 0; i--)
+ for (i = HISTOGRAM_BINS - 1; i >= 0; i--)
{
- int sum = above + histogram[i];
+ int sum = above + histogram[i];
if (sum > newPages)
break;
above = sum;
}
Assert(i >= 0);
- thresholdL = i * BLCKSZ/HISTOGRAM_BINS; /* low bound of bp bin */
- thresholdU = (i+1) * BLCKSZ/HISTOGRAM_BINS; /* hi bound */
+ thresholdL = i * BLCKSZ / HISTOGRAM_BINS; /* low bound of bp bin */
+ thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS; /* hi bound */
binct = newPages - above; /* number to take from bp bin */
/* And copy the appropriate data */
for (i = 0; i < nPages; i++)
{
BlockNumber page = pageSpaces[i].blkno;
- Size avail = pageSpaces[i].avail;
+ Size avail = pageSpaces[i].avail;
/* Check caller provides sorted data */
- if (i > 0 && page <= pageSpaces[i-1].blkno)
+ if (i > 0 && page <= pageSpaces[i - 1].blkno)
elog(ERROR, "free-space data is not in page order");
/* Save this page? */
if (avail >= thresholdU ||
@@ -1619,8 +1630,8 @@ pack_incoming_pages(FSMPageData *newLocation, int newPages,
* so that we can copy data moving forward in the arrays without problem.
*/
static void
-pack_existing_pages(FSMPageData *newLocation, int newPages,
- FSMPageData *oldLocation, int oldPages)
+pack_existing_pages(FSMPageData * newLocation, int newPages,
+ FSMPageData * oldLocation, int oldPages)
{
int histogram[HISTOGRAM_BINS];
int above,
@@ -1634,33 +1645,33 @@ pack_existing_pages(FSMPageData *newLocation, int newPages,
MemSet(histogram, 0, sizeof(histogram));
for (i = 0; i < oldPages; i++)
{
- Size avail = FSMPageGetSpace(oldLocation + i);
+ Size avail = FSMPageGetSpace(oldLocation + i);
/* Shouldn't happen, but test to protect against stack clobber */
if (avail >= BLCKSZ)
elog(ERROR, "bogus freespace amount");
- avail /= (BLCKSZ/HISTOGRAM_BINS);
+ avail /= (BLCKSZ / HISTOGRAM_BINS);
histogram[avail]++;
}
/* Find the breakpoint bin */
above = 0;
- for (i = HISTOGRAM_BINS-1; i >= 0; i--)
+ for (i = HISTOGRAM_BINS - 1; i >= 0; i--)
{
- int sum = above + histogram[i];
+ int sum = above + histogram[i];
if (sum > newPages)
break;
above = sum;
}
Assert(i >= 0);
- thresholdL = i * BLCKSZ/HISTOGRAM_BINS; /* low bound of bp bin */
- thresholdU = (i+1) * BLCKSZ/HISTOGRAM_BINS; /* hi bound */
+ thresholdL = i * BLCKSZ / HISTOGRAM_BINS; /* low bound of bp bin */
+ thresholdU = (i + 1) * BLCKSZ / HISTOGRAM_BINS; /* hi bound */
binct = newPages - above; /* number to take from bp bin */
/* And copy the appropriate data */
for (i = 0; i < oldPages; i++)
{
BlockNumber page = FSMPageGetPageNum(oldLocation + i);
- Size avail = FSMPageGetSpace(oldLocation + i);
+ Size avail = FSMPageGetSpace(oldLocation + i);
/* Save this page? */
if (avail >= thresholdU ||
@@ -1755,13 +1766,9 @@ static int
fsm_current_allocation(FSMRelation *fsmrel)
{
if (fsmrel->nextPhysical != NULL)
- {
return fsmrel->nextPhysical->firstChunk - fsmrel->firstChunk;
- }
else if (fsmrel == FreeSpaceMap->lastRel)
- {
return FreeSpaceMap->usedChunks - fsmrel->firstChunk;
- }
else
{
/* it's not in the storage-order list */
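The pack_incoming_pages/pack_existing_pages hunks above both implement the same selection rule: bucket every page by its free space, walk the histogram from the top until taking one more bucket would exceed the quota, then keep all pages above that breakpoint bucket plus just enough pages from the breakpoint bucket itself. A minimal stand-alone sketch of that cut, with BLOCK_SIZE and NBINS standing in for BLCKSZ and HISTOGRAM_BINS and the FSM copying left out:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 8192         /* stand-in for BLCKSZ */
#define NBINS      64           /* stand-in for HISTOGRAM_BINS */

/* Return 1 if a page with "avail" bytes free should be kept. */
static int
keep_page(size_t avail, size_t thresholdL, size_t thresholdU, int *binct)
{
    if (avail >= thresholdU)
        return 1;               /* clearly above the breakpoint bin */
    if (avail >= thresholdL && *binct > 0)
    {
        (*binct)--;             /* use up one breakpoint-bin slot */
        return 1;
    }
    return 0;
}

int
main(void)
{
    size_t  avails[] = {100, 4000, 8000, 300, 6000, 7900, 50, 5000};
    int     npages = (int) (sizeof(avails) / sizeof(avails[0]));
    int     newpages = 4;       /* quota: entries we may keep */
    int     histogram[NBINS];
    int     above = 0, binct, i;
    size_t  thresholdL, thresholdU;

    memset(histogram, 0, sizeof(histogram));
    for (i = 0; i < npages; i++)
        histogram[avails[i] / (BLOCK_SIZE / NBINS)]++;

    /* walk bins from the top until one more bin would exceed the quota */
    for (i = NBINS - 1; i >= 0; i--)
    {
        int sum = above + histogram[i];

        if (sum > newpages)
            break;
        above = sum;
    }
    /* (the backend asserts i >= 0 at this point) */
    thresholdL = (size_t) i * (BLOCK_SIZE / NBINS);        /* low bound of bp bin */
    thresholdU = (size_t) (i + 1) * (BLOCK_SIZE / NBINS);  /* high bound */
    binct = newpages - above;   /* number to take from the bp bin */

    for (i = 0; i < npages; i++)
        if (keep_page(avails[i], thresholdL, thresholdU, &binct))
            printf("keep page %d (avail %zu)\n", i, avails[i]);
    return 0;
}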
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 5cc0f5f2cdb..154e39b2845 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.83 2003/07/24 22:04:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.84 2003/08/04 00:43:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -74,8 +74,8 @@ void
proc_exit(int code)
{
/*
- * Once we set this flag, we are committed to exit. Any ereport() will
- * NOT send control back to the main loop, but right back here.
+ * Once we set this flag, we are committed to exit. Any ereport()
+ * will NOT send control back to the main loop, but right back here.
*/
proc_exit_inprogress = true;
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 187b75ad55d..508149bc42c 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.54 2003/07/24 22:04:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.55 2003/08/04 00:43:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,4 +146,3 @@ AttachSharedMemoryAndSemaphores(void)
{
CLOGShmemInit();
}
-
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index 225436ffd93..653dbf89263 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.57 2003/07/24 22:04:09 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.58 2003/08/04 00:43:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -324,11 +324,10 @@ GetSnapshotData(Snapshot snapshot, bool serializable)
* lastBackend would be sufficient. But it seems better to do the
* malloc while not holding the lock, so we can't look at lastBackend.
*
- * if (snapshot->xip != NULL)
- * no need to free and reallocate xip;
+ * if (snapshot->xip != NULL) no need to free and reallocate xip;
*
- * We can reuse the old xip array, because MaxBackends does not change
- * at runtime.
+ * We can reuse the old xip array, because MaxBackends does not change at
+ * runtime.
*/
if (snapshot->xip == NULL)
{
diff --git a/src/backend/storage/lmgr/deadlock.c b/src/backend/storage/lmgr/deadlock.c
index 757f1df2f4f..1106a76c3df 100644
--- a/src/backend/storage/lmgr/deadlock.c
+++ b/src/backend/storage/lmgr/deadlock.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.21 2003/07/24 22:04:13 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.22 2003/08/04 00:43:24 momjian Exp $
*
* Interface:
*
@@ -49,7 +49,7 @@ typedef struct
} WAIT_ORDER;
/*
- * Information saved about each edge in a detected deadlock cycle. This
+ * Information saved about each edge in a detected deadlock cycle. This
* is used to print a diagnostic message upon failure.
*
* Note: because we want to examine this info after releasing the LockMgrLock,
@@ -61,7 +61,7 @@ typedef struct
LOCKTAG locktag; /* ID of awaited lock object */
LOCKMODE lockmode; /* type of lock we're waiting for */
int pid; /* PID of blocked backend */
-} DEADLOCK_INFO;
+} DEADLOCK_INFO;
static bool DeadLockCheckRecurse(PGPROC *proc);
@@ -147,7 +147,7 @@ InitDeadLockChecking(void)
* We need to consider rearranging at most MaxBackends/2 wait queues
* (since it takes at least two waiters in a queue to create a soft
* edge), and the expanded form of the wait queues can't involve more
- * than MaxBackends total waiters. (But avoid palloc(0) if
+ * than MaxBackends total waiters. (But avoid palloc(0) if
* MaxBackends = 1.)
*/
waitOrders = (WAIT_ORDER *)
@@ -221,7 +221,7 @@ DeadLockCheck(PGPROC *proc)
* Call FindLockCycle one more time, to record the correct
* deadlockDetails[] for the basic state with no rearrangements.
*/
- int nSoftEdges;
+ int nSoftEdges;
nWaitOrders = 0;
if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
@@ -486,7 +486,7 @@ FindLockCycleRecurse(PGPROC *checkProc,
lockHolders = &(lock->lockHolders);
proclock = (PROCLOCK *) SHMQueueNext(lockHolders, lockHolders,
- offsetof(PROCLOCK, lockLink));
+ offsetof(PROCLOCK, lockLink));
while (proclock)
{
@@ -501,11 +501,11 @@ FindLockCycleRecurse(PGPROC *checkProc,
((1 << lm) & conflictMask) != 0)
{
/* This proc hard-blocks checkProc */
- if (FindLockCycleRecurse(proc, depth+1,
+ if (FindLockCycleRecurse(proc, depth + 1,
softEdges, nSoftEdges))
{
/* fill deadlockDetails[] */
- DEADLOCK_INFO *info = &deadlockDetails[depth];
+ DEADLOCK_INFO *info = &deadlockDetails[depth];
info->locktag = lock->tag;
info->lockmode = checkProc->waitLockMode;
@@ -558,11 +558,11 @@ FindLockCycleRecurse(PGPROC *checkProc,
if (((1 << proc->waitLockMode) & conflictMask) != 0)
{
/* This proc soft-blocks checkProc */
- if (FindLockCycleRecurse(proc, depth+1,
+ if (FindLockCycleRecurse(proc, depth + 1,
softEdges, nSoftEdges))
{
/* fill deadlockDetails[] */
- DEADLOCK_INFO *info = &deadlockDetails[depth];
+ DEADLOCK_INFO *info = &deadlockDetails[depth];
info->locktag = lock->tag;
info->lockmode = checkProc->waitLockMode;
@@ -599,11 +599,11 @@ FindLockCycleRecurse(PGPROC *checkProc,
if (((1 << proc->waitLockMode) & conflictMask) != 0)
{
/* This proc soft-blocks checkProc */
- if (FindLockCycleRecurse(proc, depth+1,
+ if (FindLockCycleRecurse(proc, depth + 1,
softEdges, nSoftEdges))
{
/* fill deadlockDetails[] */
- DEADLOCK_INFO *info = &deadlockDetails[depth];
+ DEADLOCK_INFO *info = &deadlockDetails[depth];
info->locktag = lock->tag;
info->lockmode = checkProc->waitLockMode;
@@ -834,7 +834,6 @@ PrintLockQueue(LOCK *lock, const char *info)
printf("\n");
fflush(stdout);
}
-
#endif
/*
@@ -843,17 +842,17 @@ PrintLockQueue(LOCK *lock, const char *info)
void
DeadLockReport(void)
{
- StringInfoData buf;
- int i;
+ StringInfoData buf;
+ int i;
initStringInfo(&buf);
for (i = 0; i < nDeadlockDetails; i++)
{
- DEADLOCK_INFO *info = &deadlockDetails[i];
+ DEADLOCK_INFO *info = &deadlockDetails[i];
int nextpid;
/* The last proc waits for the first one... */
- if (i < nDeadlockDetails-1)
+ if (i < nDeadlockDetails - 1)
nextpid = info[1].pid;
else
nextpid = deadlockDetails[0].pid;
@@ -900,7 +899,7 @@ RememberSimpleDeadLock(PGPROC *proc1,
LOCK *lock,
PGPROC *proc2)
{
- DEADLOCK_INFO *info = &deadlockDetails[0];
+ DEADLOCK_INFO *info = &deadlockDetails[0];
info->locktag = lock->tag;
info->lockmode = lockmode;
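The deadlock.c hunks above are whitespace-only, but the routine they touch, FindLockCycleRecurse, is a depth-first search over the waits-for graph that fills one deadlockDetails[] entry per edge of the cycle it closes. A heavily simplified stand-alone sketch of that search, using a plain adjacency matrix rather than the shared-memory PROCLOCK queues (all names here are illustrative, not backend APIs):

#include <stdio.h>
#include <stdbool.h>

#define NPROCS 4

static bool blocks[NPROCS][NPROCS];     /* blocks[a][b]: proc a waits for proc b */
static int  path[NPROCS];               /* procs along the detected cycle, by depth */
static int  cyclelen;

static bool
find_cycle(int start, int cur, int depth)
{
    int next;

    path[depth] = cur;
    for (next = 0; next < NPROCS; next++)
    {
        if (!blocks[cur][next])
            continue;
        if (next == start)
        {
            cyclelen = depth + 1;       /* path[0..depth] closes the cycle */
            return true;
        }
        /* the depth bound keeps the search finite even without a visited set */
        if (depth + 1 < NPROCS && find_cycle(start, next, depth + 1))
            return true;
    }
    return false;
}

int
main(void)
{
    int i;

    /* proc 0 waits for 1, 1 waits for 2, 2 waits for 0: a deadlock */
    blocks[0][1] = blocks[1][2] = blocks[2][0] = true;

    if (find_cycle(0, 0, 0))
        for (i = 0; i < cyclelen; i++)
            printf("proc %d waits for proc %d\n",
                   path[i], (i + 1 < cyclelen) ? path[i + 1] : path[0]);
    return 0;
}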
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 2e71f40c09b..9d4c52f75fa 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.124 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.125 2003/08/04 00:43:24 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
@@ -127,9 +127,9 @@ inline static void
PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
{
if (
- (((PROCLOCK_LOCKMETHOD(*proclockP) == DEFAULT_LOCKMETHOD && Trace_locks)
- || (PROCLOCK_LOCKMETHOD(*proclockP) == USER_LOCKMETHOD && Trace_userlocks))
- && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
+ (((PROCLOCK_LOCKMETHOD(*proclockP) == DEFAULT_LOCKMETHOD && Trace_locks)
+ || (PROCLOCK_LOCKMETHOD(*proclockP) == USER_LOCKMETHOD && Trace_userlocks))
+ && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
|| (Trace_lock_table && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId == Trace_lock_table))
)
elog(LOG,
@@ -137,8 +137,8 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
where, MAKE_OFFSET(proclockP), proclockP->tag.lock,
PROCLOCK_LOCKMETHOD(*(proclockP)),
proclockP->tag.proc, proclockP->tag.xid,
- proclockP->holding[1], proclockP->holding[2], proclockP->holding[3],
- proclockP->holding[4], proclockP->holding[5], proclockP->holding[6],
+ proclockP->holding[1], proclockP->holding[2], proclockP->holding[3],
+ proclockP->holding[4], proclockP->holding[5], proclockP->holding[6],
proclockP->holding[7], proclockP->nHolding);
}
@@ -321,10 +321,10 @@ LockMethodTableInit(char *tabName,
sprintf(shmemName, "%s (proclock hash)", tabName);
lockMethodTable->proclockHash = ShmemInitHash(shmemName,
- init_table_size,
- max_table_size,
- &info,
- hash_flags);
+ init_table_size,
+ max_table_size,
+ &info,
+ hash_flags);
if (!lockMethodTable->proclockHash)
elog(FATAL, "could not initialize lock table \"%s\"", tabName);
@@ -509,8 +509,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding,
- * needed */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding,
+ * needed */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(MyProc);
TransactionIdStore(xid, &proclocktag.xid);
@@ -520,8 +520,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
*/
proclockTable = lockMethodTable->proclockHash;
proclock = (PROCLOCK *) hash_search(proclockTable,
- (void *) &proclocktag,
- HASH_ENTER, &found);
+ (void *) &proclocktag,
+ HASH_ENTER, &found);
if (!proclock)
{
LWLockRelease(masterLock);
@@ -604,8 +604,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
}
/*
- * If this process (under any XID) is a proclock of the lock, also grant
- * myself another one without blocking.
+ * If this process (under any XID) is a proclock of the lock, also
+ * grant myself another one without blocking.
*/
LockCountMyLocks(proclock->tag.lock, MyProc, myHolding);
if (myHolding[lockmode] > 0)
@@ -649,8 +649,8 @@ LockAcquire(LOCKMETHOD lockmethod, LOCKTAG *locktag,
SHMQueueDelete(&proclock->lockLink);
SHMQueueDelete(&proclock->procLink);
proclock = (PROCLOCK *) hash_search(proclockTable,
- (void *) proclock,
- HASH_REMOVE, NULL);
+ (void *) proclock,
+ HASH_REMOVE, NULL);
if (!proclock)
elog(WARNING, "proclock table corrupted");
}
@@ -818,7 +818,7 @@ LockCountMyLocks(SHMEM_OFFSET lockOffset, PGPROC *proc, int *myHolding)
MemSet(myHolding, 0, MAX_LOCKMODES * sizeof(int));
proclock = (PROCLOCK *) SHMQueueNext(procHolders, procHolders,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
while (proclock)
{
@@ -908,9 +908,10 @@ WaitOnLock(LOCKMETHOD lockmethod, LOCKMODE lockmode,
*/
LOCK_PRINT("WaitOnLock: aborting on lock", lock, lockmode);
LWLockRelease(lockMethodTable->masterLock);
+
/*
- * Now that we aren't holding the LockMgrLock, we can give an error
- * report including details about the detected deadlock.
+ * Now that we aren't holding the LockMgrLock, we can give an
+ * error report including details about the detected deadlock.
*/
DeadLockReport();
/* not reached */
@@ -1033,16 +1034,16 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
/*
* Find the proclock entry for this proclock.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding,
- * needed */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding,
+ * needed */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(MyProc);
TransactionIdStore(xid, &proclocktag.xid);
proclockTable = lockMethodTable->proclockHash;
proclock = (PROCLOCK *) hash_search(proclockTable,
- (void *) &proclocktag,
- HASH_FIND_SAVE, NULL);
+ (void *) &proclocktag,
+ HASH_FIND_SAVE, NULL);
if (!proclock)
{
LWLockRelease(masterLock);
@@ -1143,8 +1144,8 @@ LockRelease(LOCKMETHOD lockmethod, LOCKTAG *locktag,
SHMQueueDelete(&proclock->lockLink);
SHMQueueDelete(&proclock->procLink);
proclock = (PROCLOCK *) hash_search(proclockTable,
- (void *) &proclock,
- HASH_REMOVE_SAVED, NULL);
+ (void *) &proclock,
+ HASH_REMOVE_SAVED, NULL);
if (!proclock)
{
LWLockRelease(masterLock);
@@ -1207,7 +1208,7 @@ LockReleaseAll(LOCKMETHOD lockmethod, PGPROC *proc,
LWLockAcquire(masterLock, LW_EXCLUSIVE);
proclock = (PROCLOCK *) SHMQueueNext(procHolders, procHolders,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
while (proclock)
{
@@ -1295,9 +1296,9 @@ LockReleaseAll(LOCKMETHOD lockmethod, PGPROC *proc,
* remove the proclock entry from the hashtable
*/
proclock = (PROCLOCK *) hash_search(lockMethodTable->proclockHash,
- (void *) proclock,
- HASH_REMOVE,
- NULL);
+ (void *) proclock,
+ HASH_REMOVE,
+ NULL);
if (!proclock)
{
LWLockRelease(masterLock);
@@ -1466,7 +1467,7 @@ DumpLocks(void)
LOCK_PRINT("DumpLocks: waiting on", proc->waitLock, 0);
proclock = (PROCLOCK *) SHMQueueNext(procHolders, procHolders,
- offsetof(PROCLOCK, procLink));
+ offsetof(PROCLOCK, procLink));
while (proclock)
{
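The repeated MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)) before each hash_search() above carries the comment "must clear padding, needed": the tag is hashed and compared as raw bytes, so compiler-inserted padding has to be zeroed or two logically equal keys may fail to match. A small illustration with a hypothetical struct layout and memcmp standing in for the hash table's byte-wise comparison:

#include <stdio.h>
#include <string.h>

typedef struct
{
    char    kind;               /* usually followed by compiler padding */
    long    id;
} DemoTag;                      /* hypothetical layout, not PROCLOCKTAG */

int
main(void)
{
    DemoTag a, b;

    /* the safe way: clear the whole struct, then fill the fields */
    memset(&a, 0, sizeof(a));
    a.kind = 'L';
    a.id = 42;

    /* field-by-field only: padding keeps whatever was there before */
    memset(&b, 0xAA, sizeof(b));        /* simulate stale stack contents */
    b.kind = 'L';
    b.id = 42;

    /* typically prints "differ", even though the fields are equal */
    printf("fields equal, raw bytes %s\n",
           memcmp(&a, &b, sizeof(a)) == 0 ? "match" : "differ");
    return 0;
}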
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index eebd696c2c2..3bdda5924d7 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.131 2003/07/24 22:04:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.132 2003/08/04 00:43:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,6 +78,7 @@ static bool waitingForSignal = false;
/* Mark these volatile because they can be changed by signal handler */
static volatile bool statement_timeout_active = false;
static volatile bool deadlock_timeout_active = false;
+
/* statement_fin_time is valid only if statement_timeout_active is true */
static struct timeval statement_fin_time;
@@ -571,7 +572,8 @@ ProcSleep(LOCKMETHODTABLE *lockMethodTable,
* up correctly is to call RemoveFromWaitQueue(), but
* we can't do that until we are *on* the wait queue.
* So, set a flag to check below, and break out of
- * loop. Also, record deadlock info for later message.
+ * loop. Also, record deadlock info for later
+ * message.
*/
RememberSimpleDeadLock(MyProc, lockmode, lock, proc);
early_deadlock = true;
@@ -950,11 +952,13 @@ bool
enable_sig_alarm(int delayms, bool is_statement_timeout)
{
#ifdef WIN32
-# warning add Win32 timer
+#warning add Win32 timer
#else
struct timeval fin_time;
+
#ifndef __BEOS__
struct itimerval timeval;
+
#else
bigtime_t time_interval;
#endif
@@ -984,16 +988,16 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
/*
* Begin deadlock timeout with statement-level timeout active
*
- * Here, we want to interrupt at the closer of the two timeout
- * times. If fin_time >= statement_fin_time then we need not
- * touch the existing timer setting; else set up to interrupt
- * at the deadlock timeout time.
+ * Here, we want to interrupt at the closer of the two timeout times.
+ * If fin_time >= statement_fin_time then we need not touch the
+ * existing timer setting; else set up to interrupt at the
+ * deadlock timeout time.
*
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
- * because the signal handler will do only what it should do according
- * to the state variables. The deadlock checker may get run earlier
- * than normal, but that does no harm.
+ * because the signal handler will do only what it should do
+ * according to the state variables. The deadlock checker may get
+ * run earlier than normal, but that does no harm.
*/
deadlock_timeout_active = true;
if (fin_time.tv_sec > statement_fin_time.tv_sec ||
@@ -1037,6 +1041,7 @@ disable_sig_alarm(bool is_statement_timeout)
#ifdef WIN32
#warning add Win32 timer
#else
+
/*
* Always disable the interrupt if it is active; this avoids being
* interrupted by the signal handler and thereby possibly getting
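The enable_sig_alarm() hunk above re-wraps the comment about interrupting at the closer of the deadlock and statement deadlines; the decision itself is plain struct timeval arithmetic. A stand-alone sketch of computing a deadline from a delay in milliseconds and deciding whether an already-armed timer is earlier (function names are illustrative, POSIX calls only):

#include <stdio.h>
#include <stdbool.h>
#include <sys/time.h>

/* deadline that falls delay_ms milliseconds from now */
static struct timeval
deadline_after(int delay_ms)
{
    struct timeval t;

    gettimeofday(&t, NULL);
    t.tv_sec += delay_ms / 1000;
    t.tv_usec += (delay_ms % 1000) * 1000;
    if (t.tv_usec >= 1000000)
    {
        t.tv_sec += 1;
        t.tv_usec -= 1000000;
    }
    return t;
}

/* true if a falls strictly after b */
static bool
timeval_later(struct timeval a, struct timeval b)
{
    return a.tv_sec > b.tv_sec ||
        (a.tv_sec == b.tv_sec && a.tv_usec > b.tv_usec);
}

int
main(void)
{
    struct timeval statement_fin = deadline_after(30000);  /* 30 s statement timeout */
    struct timeval deadlock_fin = deadline_after(1000);    /* 1 s deadlock timeout */

    if (timeval_later(deadlock_fin, statement_fin))
        printf("keep the existing, earlier timer\n");
    else
        printf("re-arm the timer for the deadlock deadline\n");
    return 0;
}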
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index b8766ae6ab7..2441534b46c 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.53 2003/07/24 22:04:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.54 2003/08/04 00:43:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,7 +50,7 @@ PageInit(Page page, Size pageSize, Size specialSize)
* PageHeaderIsValid
* Check that the header fields of a page appear valid.
*
- * This is called when a page has just been read in from disk. The idea is
+ * This is called when a page has just been read in from disk. The idea is
* to cheaply detect trashed pages before we go nuts following bogus item
* pointers, testing invalid transaction identifiers, etc.
*
@@ -135,7 +135,7 @@ PageAddItem(Page page,
ereport(PANIC,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
/*
* Select offsetNumber to place the new item at
@@ -391,8 +391,8 @@ PageRepairFragmentation(Page page, OffsetNumber *unused)
if (totallen > (Size) (pd_special - pd_lower))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item lengths: total %u, available space %u",
- (unsigned int) totallen, pd_special - pd_lower)));
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/* sort itemIdSortData array into decreasing itemoff order */
qsort((char *) itemidbase, nused, sizeof(struct itemIdSortData),
@@ -472,7 +472,7 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
nline = PageGetMaxOffsetNumber(page);
if ((int) offnum <= 0 || (int) offnum > nline)
@@ -533,7 +533,8 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
*/
if (!PageIsEmpty(page))
{
- int i;
+ int i;
+
nline--; /* there's one less than when we started */
for (i = 1; i <= nline; i++)
{
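PageRepairFragmentation, touched above, sorts its itemIdSortData array into decreasing itemoff order before compacting the page, so tuple data can be moved from the highest offsets downward. A minimal qsort comparator doing the same descending sort over a hypothetical item struct:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned    offset;         /* where the tuple starts on the page */
    unsigned    len;
} DemoItem;                     /* hypothetical stand-in for itemIdSortData */

static int
cmp_offset_desc(const void *a, const void *b)
{
    const DemoItem *ia = (const DemoItem *) a;
    const DemoItem *ib = (const DemoItem *) b;

    if (ia->offset > ib->offset)
        return -1;
    if (ia->offset < ib->offset)
        return 1;
    return 0;
}

int
main(void)
{
    DemoItem    items[] = {{100, 32}, {7900, 64}, {4096, 16}};
    int         i;

    qsort(items, 3, sizeof(DemoItem), cmp_offset_desc);
    for (i = 0; i < 3; i++)
        printf("offset %u, len %u\n", items[i].offset, items[i].len);
    return 0;
}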
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index bd46d3dbd52..420413a510b 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.96 2003/07/28 00:09:15 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.97 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -571,12 +571,13 @@ mdnblocks(Relation reln)
segno = 0;
/*
- * Skip through any segments that aren't the last one, to avoid redundant
- * seeks on them. We have previously verified that these segments are
- * exactly RELSEG_SIZE long, and it's useless to recheck that each time.
- * (NOTE: this assumption could only be wrong if another backend has
- * truncated the relation. We rely on higher code levels to handle that
- * scenario by closing and re-opening the md fd.)
+ * Skip through any segments that aren't the last one, to avoid
+ * redundant seeks on them. We have previously verified that these
+ * segments are exactly RELSEG_SIZE long, and it's useless to recheck
+ * that each time. (NOTE: this assumption could only be wrong if
+ * another backend has truncated the relation. We rely on higher code
+ * levels to handle that scenario by closing and re-opening the md
+ * fd.)
*/
while (v->mdfd_chain != (MdfdVec *) NULL)
{
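The reflowed mdnblocks() comment rests on the md.c segmenting rule: every segment file except the last holds exactly RELSEG_SIZE blocks, so earlier segments never need to be re-measured. A tiny illustration of the resulting size computation (RELSEG_SIZE shown at its usual default of 131072 blocks, i.e. 1 GB of 8 kB blocks):

#include <stdio.h>

#define RELSEG_SIZE 131072      /* blocks per segment file: 1 GB at 8 kB blocks */

static unsigned long
relation_nblocks(int nsegments, unsigned long last_segment_blocks)
{
    /* every segment but the last is known to be exactly RELSEG_SIZE blocks */
    return (unsigned long) (nsegments - 1) * RELSEG_SIZE + last_segment_blocks;
}

int
main(void)
{
    /* e.g. three segment files, the last holding 1000 blocks */
    printf("%lu blocks\n", relation_nblocks(3, 1000));
    return 0;
}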
diff --git a/src/backend/tcop/dest.c b/src/backend/tcop/dest.c
index 76990d34f7c..7ea881e84a6 100644
--- a/src/backend/tcop/dest.c
+++ b/src/backend/tcop/dest.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/dest.c,v 1.59 2003/07/22 19:00:11 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/dest.c,v 1.60 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ BeginCommand(const char *commandTag, CommandDest dest)
* CreateDestReceiver - return appropriate receiver function set for dest
*
* Note: a Portal must be specified for destinations Remote, RemoteExecute,
- * and Tuplestore. It can be NULL for the others.
+ * and Tuplestore. It can be NULL for the others.
* ----------------
*/
DestReceiver *
@@ -173,8 +173,9 @@ NullCommand(CommandDest dest)
case RemoteExecute:
/*
- * tell the fe that we saw an empty query string. In protocols
- * before 3.0 this has a useless empty-string message body.
+ * tell the fe that we saw an empty query string. In
+ * protocols before 3.0 this has a useless empty-string
+ * message body.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) >= 3)
pq_putemptymessage('I');
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index b86d657ea25..7ccc3f80c6b 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/fastpath.c,v 1.66 2003/08/01 00:15:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/fastpath.c,v 1.67 2003/08/04 00:43:25 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@@ -53,10 +53,10 @@ struct fp_info
};
-static int16 parse_fcall_arguments(StringInfo msgBuf, struct fp_info *fip,
- FunctionCallInfo fcinfo);
-static int16 parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info *fip,
- FunctionCallInfo fcinfo);
+static int16 parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
+ FunctionCallInfo fcinfo);
+static int16 parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
+ FunctionCallInfo fcinfo);
/* ----------------
@@ -103,8 +103,8 @@ GetOldFunctionMessage(StringInfo buf)
/* FATAL here since no hope of regaining message sync */
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
}
/* and arg contents */
if (argsize > 0)
@@ -158,8 +158,8 @@ SendFunctionResult(Datum retval, bool isnull, Oid rettype, int16 format)
&typoutput, &typelem, &typisvarlena);
outputstr = DatumGetCString(OidFunctionCall3(typoutput,
retval,
- ObjectIdGetDatum(typelem),
- Int32GetDatum(-1)));
+ ObjectIdGetDatum(typelem),
+ Int32GetDatum(-1)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false);
pfree(outputstr);
}
@@ -174,7 +174,7 @@ SendFunctionResult(Datum retval, bool isnull, Oid rettype, int16 format)
&typsend, &typelem, &typisvarlena);
outputbytes = DatumGetByteaP(OidFunctionCall2(typsend,
retval,
- ObjectIdGetDatum(typelem)));
+ ObjectIdGetDatum(typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -255,11 +255,11 @@ fetch_fp_info(Oid func_id, struct fp_info * fip)
* and will pass it in msgBuf.
* In old protocol, the passed msgBuf is empty and we must read the
* message here.
- *
+ *
* RETURNS:
* 0 if successful completion, EOF if frontend connection lost.
*
- * Note: All ordinary errors result in ereport(ERROR,...). However,
+ * Note: All ordinary errors result in ereport(ERROR,...). However,
* if we lose the frontend connection there is no one to ereport to,
* and no use in proceeding...
*
@@ -303,15 +303,15 @@ HandleFunctionRequest(StringInfo msgBuf)
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "queries ignored until end of transaction block")));
+ "queries ignored until end of transaction block")));
/*
* Begin parsing the buffer contents.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
- (void) pq_getmsgstring(msgBuf); /* dummy string */
+ (void) pq_getmsgstring(msgBuf); /* dummy string */
- fid = (Oid) pq_getmsgint(msgBuf, 4); /* function oid */
+ fid = (Oid) pq_getmsgint(msgBuf, 4); /* function oid */
/*
* There used to be a lame attempt at caching lookup info here. Now we
@@ -359,7 +359,7 @@ HandleFunctionRequest(StringInfo msgBuf)
callit = true;
if (fip->flinfo.fn_strict)
{
- int i;
+ int i;
for (i = 0; i < fcinfo.nargs; i++)
{
@@ -394,7 +394,7 @@ HandleFunctionRequest(StringInfo msgBuf)
* is returned.
*/
static int16
-parse_fcall_arguments(StringInfo msgBuf, struct fp_info *fip,
+parse_fcall_arguments(StringInfo msgBuf, struct fp_info * fip,
FunctionCallInfo fcinfo)
{
int nargs;
@@ -447,8 +447,8 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info *fip,
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
@@ -473,11 +473,11 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info *fip,
char *pstring;
getTypeInputInfo(fip->argtypes[i], &typInput, &typElem);
+
/*
- * Since stringinfo.c keeps a trailing null in
- * place even for binary data, the contents of
- * abuf are a valid C string. We have to do
- * encoding conversion before calling the typinput
+ * Since stringinfo.c keeps a trailing null in place even for
+ * binary data, the contents of abuf are a valid C string. We
+ * have to do encoding conversion before calling the typinput
* routine, though.
*/
pstring = (char *)
@@ -528,7 +528,7 @@ parse_fcall_arguments(StringInfo msgBuf, struct fp_info *fip,
* is returned.
*/
static int16
-parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info *fip,
+parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
FunctionCallInfo fcinfo)
{
int nargs;
@@ -570,8 +570,8 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info *fip,
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
@@ -593,8 +593,8 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info *fip,
if (abuf.cursor != abuf.len)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in function argument %d",
- i + 1)));
+ errmsg("incorrect binary data format in function argument %d",
+ i + 1)));
}
/* Desired result format is always binary in protocol 2.0 */
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 80f2be70ed6..cb835469a0f 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.353 2003/07/29 00:03:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.354 2003/08/04 00:43:25 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -133,6 +133,7 @@ static bool EchoQuery = false; /* default don't echo */
#ifndef TCOP_DONTUSENEWLINE
static int UseNewLine = 1; /* Use newlines query delimiters (the
* default) */
+
#else
static int UseNewLine = 0; /* Use EOF as query delimiters */
#endif /* TCOP_DONTUSENEWLINE */
@@ -283,8 +284,8 @@ SocketBackend(StringInfo inBuf)
/*
* Validate message type code before trying to read body; if we have
- * lost sync, better to say "command unknown" than to run out of memory
- * because we used garbage as a length word.
+ * lost sync, better to say "command unknown" than to run out of
+ * memory because we used garbage as a length word.
*
* This also gives us a place to set the doing_extended_query_message
* flag as soon as possible.
@@ -300,7 +301,7 @@ SocketBackend(StringInfo inBuf)
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
return EOF;
}
}
@@ -327,7 +328,7 @@ SocketBackend(StringInfo inBuf)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
case 'S': /* sync */
@@ -339,7 +340,7 @@ SocketBackend(StringInfo inBuf)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
case 'd': /* copy data */
@@ -350,14 +351,15 @@ SocketBackend(StringInfo inBuf)
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid frontend message type %d", qtype)));
+ errmsg("invalid frontend message type %d", qtype)));
break;
default:
+
/*
- * Otherwise we got garbage from the frontend. We treat this
- * as fatal because we have probably lost message boundary sync,
- * and there's no good way to recover.
+ * Otherwise we got garbage from the frontend. We treat this
+ * as fatal because we have probably lost message boundary
+ * sync, and there's no good way to recover.
*/
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
@@ -409,9 +411,9 @@ ReadCommand(StringInfo inBuf)
* but it is still needed for parsing of SQL function bodies.
*/
List *
-pg_parse_and_rewrite(const char *query_string, /* string to execute */
+pg_parse_and_rewrite(const char *query_string, /* string to execute */
Oid *paramTypes, /* parameter types */
- int numParams) /* number of parameters */
+ int numParams) /* number of parameters */
{
List *raw_parsetree_list;
List *querytree_list;
@@ -518,8 +520,8 @@ pg_rewrite_queries(List *querytree_list)
ResetUsage();
/*
- * rewritten queries are collected in new_list. Note there may be more
- * or fewer than in the original list.
+ * rewritten queries are collected in new_list. Note there may be
+ * more or fewer than in the original list.
*/
foreach(list_item, querytree_list)
{
@@ -558,7 +560,7 @@ pg_rewrite_queries(List *querytree_list)
/* This checks both copyObject() and the equal() routines... */
if (!equal(new_list, querytree_list))
ereport(WARNING,
- (errmsg("copyObject failed to produce an equal parse tree")));
+ (errmsg("copyObject failed to produce an equal parse tree")));
else
querytree_list = new_list;
#endif
@@ -603,7 +605,7 @@ pg_plan_query(Query *querytree)
/* This checks both copyObject() and the equal() routines... */
if (!equal(new_plan, plan))
ereport(WARNING,
- (errmsg("copyObject failed to produce an equal plan tree")));
+ (errmsg("copyObject failed to produce an equal plan tree")));
else
#endif
plan = new_plan;
@@ -671,7 +673,7 @@ pg_plan_queries(List *querytrees, bool needSnapshot)
static void
exec_simple_query(const char *query_string)
{
- CommandDest dest = whereToSendOutput;
+ CommandDest dest = whereToSendOutput;
MemoryContext oldcontext;
List *parsetree_list,
*parsetree_item;
@@ -689,10 +691,10 @@ exec_simple_query(const char *query_string)
pgstat_report_activity(query_string);
/*
- * We use save_log_* so "SET log_duration = true" and
- * "SET log_min_duration_statement = true" don't report incorrect
- * time because gettimeofday() wasn't called.
- * Similarly, log_statement_stats has to be captured once.
+ * We use save_log_* so "SET log_duration = true" and "SET
+ * log_min_duration_statement = true" don't report incorrect time
+ * because gettimeofday() wasn't called. Similarly,
+ * log_statement_stats has to be captured once.
*/
if (save_log_duration || save_log_min_duration_statement > 0)
gettimeofday(&start_t, NULL);
@@ -710,10 +712,10 @@ exec_simple_query(const char *query_string)
start_xact_command();
/*
- * Zap any pre-existing unnamed statement. (While not strictly
- * necessary, it seems best to define simple-Query mode as if it
- * used the unnamed statement and portal; this ensures we recover
- * any storage used by prior unnamed operations.)
+ * Zap any pre-existing unnamed statement. (While not strictly
+ * necessary, it seems best to define simple-Query mode as if it used
+ * the unnamed statement and portal; this ensures we recover any
+ * storage used by prior unnamed operations.)
*/
unnamed_stmt_pstmt = NULL;
if (unnamed_stmt_context)
@@ -756,10 +758,10 @@ exec_simple_query(const char *query_string)
int16 format;
/*
- * Get the command name for use in status display (it also becomes the
- * default completion tag, down inside PortalRun). Set ps_status and
- * do any special start-of-SQL-command processing needed by the
- * destination.
+ * Get the command name for use in status display (it also becomes
+ * the default completion tag, down inside PortalRun). Set
+ * ps_status and do any special start-of-SQL-command processing
+ * needed by the destination.
*/
commandTag = CreateCommandTag(parsetree);
@@ -817,8 +819,8 @@ exec_simple_query(const char *query_string)
CHECK_FOR_INTERRUPTS();
/*
- * Create unnamed portal to run the query or queries in.
- * If there already is one, silently drop it.
+ * Create unnamed portal to run the query or queries in. If there
+ * already is one, silently drop it.
*/
portal = CreatePortal("", true, true);
@@ -836,9 +838,9 @@ exec_simple_query(const char *query_string)
/*
* Select the appropriate output format: text unless we are doing
- * a FETCH from a binary cursor. (Pretty grotty to have to do this
- * here --- but it avoids grottiness in other places. Ah, the joys
- * of backward compatibility...)
+ * a FETCH from a binary cursor. (Pretty grotty to have to do
+ * this here --- but it avoids grottiness in other places. Ah,
+ * the joys of backward compatibility...)
*/
format = 0; /* TEXT is default */
if (IsA(parsetree, FetchStmt))
@@ -851,7 +853,7 @@ exec_simple_query(const char *query_string)
if (PortalIsValid(fportal) &&
(fportal->cursorOptions & CURSOR_OPT_BINARY))
- format = 1; /* BINARY */
+ format = 1; /* BINARY */
}
}
PortalSetResultFormat(portal, 1, &format);
@@ -867,7 +869,8 @@ exec_simple_query(const char *query_string)
MemoryContextSwitchTo(oldcontext);
/*
- * Run the portal to completion, and then drop it (and the receiver).
+ * Run the portal to completion, and then drop it (and the
+ * receiver).
*/
(void) PortalRun(portal,
FETCH_ALL,
@@ -882,30 +885,32 @@ exec_simple_query(const char *query_string)
if (IsA(parsetree, TransactionStmt))
{
/*
- * If this was a transaction control statement, commit it.
- * We will start a new xact command for the next command (if any).
+ * If this was a transaction control statement, commit it. We
+ * will start a new xact command for the next command (if
+ * any).
*/
finish_xact_command();
}
else if (lnext(parsetree_item) == NIL)
{
/*
- * If this is the last parsetree of the query string, close down
- * transaction statement before reporting command-complete. This
- * is so that any end-of-transaction errors are reported before
- * the command-complete message is issued, to avoid confusing
- * clients who will expect either a command-complete message or an
- * error, not one and then the other. But for compatibility with
- * historical Postgres behavior, we do not force a transaction
- * boundary between queries appearing in a single query string.
+ * If this is the last parsetree of the query string, close
+ * down transaction statement before reporting
+ * command-complete. This is so that any end-of-transaction
+ * errors are reported before the command-complete message is
+ * issued, to avoid confusing clients who will expect either a
+ * command-complete message or an error, not one and then the
+ * other. But for compatibility with historical Postgres
+ * behavior, we do not force a transaction boundary between
+ * queries appearing in a single query string.
*/
finish_xact_command();
}
else
{
/*
- * We need a CommandCounterIncrement after every query,
- * except those that start or end a transaction block.
+ * We need a CommandCounterIncrement after every query, except
+ * those that start or end a transaction block.
*/
CommandCounterIncrement();
}
@@ -934,12 +939,13 @@ exec_simple_query(const char *query_string)
QueryContext = NULL;
/*
- * Combine processing here as we need to calculate the query
- * duration in both instances.
+ * Combine processing here as we need to calculate the query duration
+ * in both instances.
*/
if (save_log_duration || save_log_min_duration_statement > 0)
{
- long usecs;
+ long usecs;
+
gettimeofday(&stop_t, NULL);
if (stop_t.tv_usec < start_t.tv_usec)
{
@@ -948,9 +954,9 @@ exec_simple_query(const char *query_string)
}
usecs = (long) (stop_t.tv_sec - start_t.tv_sec) * 1000000 + (long) (stop_t.tv_usec - start_t.tv_usec);
- /*
- * Output a duration_query to the log if the query has exceeded the
- * min duration.
+ /*
+ * Output a duration_query to the log if the query has exceeded
+ * the min duration.
*/
if (usecs >= save_log_min_duration_statement * 1000)
ereport(LOG,
@@ -959,7 +965,7 @@ exec_simple_query(const char *query_string)
(long) (stop_t.tv_usec - start_t.tv_usec),
query_string)));
- /*
+ /*
* If the user is requesting logging of all durations, then log
* that as well.
*/
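The duration-logging hunk above subtracts two gettimeofday() samples, borrowing a second whenever the stop microseconds are smaller than the start microseconds. The same arithmetic as a stand-alone helper (names are illustrative):

#include <stdio.h>
#include <sys/time.h>

static long
elapsed_usecs(struct timeval start, struct timeval stop)
{
    if (stop.tv_usec < start.tv_usec)
    {
        stop.tv_sec--;                  /* borrow one second */
        stop.tv_usec += 1000000;
    }
    return (long) (stop.tv_sec - start.tv_sec) * 1000000 +
        (long) (stop.tv_usec - start.tv_usec);
}

int
main(void)
{
    struct timeval a = {10, 900000};    /* 10.9 s */
    struct timeval b = {12, 100000};    /* 12.1 s */

    printf("%ld usec\n", elapsed_usecs(a, b));  /* prints 1200000 */
    return 0;
}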
@@ -984,8 +990,8 @@ exec_simple_query(const char *query_string)
static void
exec_parse_message(const char *query_string, /* string to execute */
const char *stmt_name, /* name for prepared stmt */
- Oid *paramTypes, /* parameter types */
- int numParams) /* number of parameters */
+ Oid *paramTypes, /* parameter types */
+ int numParams) /* number of parameters */
{
MemoryContext oldcontext;
List *parsetree_list;
@@ -1018,16 +1024,16 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* Switch to appropriate context for constructing parsetrees.
*
- * We have two strategies depending on whether the prepared statement
- * is named or not. For a named prepared statement, we do parsing
- * in MessageContext and copy the finished trees into the prepared
- * statement's private context; then the reset of MessageContext releases
- * temporary space used by parsing and planning. For an unnamed prepared
- * statement, we assume the statement isn't going to hang around long,
- * so getting rid of temp space quickly is probably not worth the costs
- * of copying parse/plan trees. So in this case, we set up a special
- * context for the unnamed statement, and do all the parsing/planning
- * therein.
+ * We have two strategies depending on whether the prepared statement is
+ * named or not. For a named prepared statement, we do parsing in
+ * MessageContext and copy the finished trees into the prepared
+ * statement's private context; then the reset of MessageContext
+ * releases temporary space used by parsing and planning. For an
+ * unnamed prepared statement, we assume the statement isn't going to
+ * hang around long, so getting rid of temp space quickly is probably
+ * not worth the costs of copying parse/plan trees. So in this case,
+ * we set up a special context for the unnamed statement, and do all
+ * the parsing/planning therein.
*/
is_named = (stmt_name[0] != '\0');
if (is_named)
@@ -1064,9 +1070,9 @@ exec_parse_message(const char *query_string, /* string to execute */
parsetree_list = pg_parse_query(query_string);
/*
- * We only allow a single user statement in a prepared statement.
- * This is mainly to keep the protocol simple --- otherwise we'd need
- * to worry about multiple result tupdescs and things like that.
+ * We only allow a single user statement in a prepared statement. This
+ * is mainly to keep the protocol simple --- otherwise we'd need to
+ * worry about multiple result tupdescs and things like that.
*/
if (length(parsetree_list) > 1)
ereport(ERROR,
@@ -1075,8 +1081,8 @@ exec_parse_message(const char *query_string, /* string to execute */
if (parsetree_list != NIL)
{
- Node *parsetree = (Node *) lfirst(parsetree_list);
- int i;
+ Node *parsetree = (Node *) lfirst(parsetree_list);
+ int i;
/*
* Get the command name for possible use in status display.
@@ -1085,10 +1091,10 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* If we are in an aborted transaction, reject all commands except
- * COMMIT/ROLLBACK. It is important that this test occur before we
- * try to do parse analysis, rewrite, or planning, since all those
- * phases try to do database accesses, which may fail in abort
- * state. (It might be safe to allow some additional utility
+ * COMMIT/ROLLBACK. It is important that this test occur before
+ * we try to do parse analysis, rewrite, or planning, since all
+ * those phases try to do database accesses, which may fail in
+ * abort state. (It might be safe to allow some additional utility
* commands in this state, but not many...)
*/
if (IsAbortedTransactionBlockState())
@@ -1130,13 +1136,13 @@ exec_parse_message(const char *query_string, /* string to execute */
param_list = NIL;
for (i = 0; i < numParams; i++)
{
- Oid ptype = paramTypes[i];
+ Oid ptype = paramTypes[i];
if (ptype == InvalidOid || ptype == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_DATATYPE),
- errmsg("could not determine datatype of parameter $%d",
- i + 1)));
+ errmsg("could not determine datatype of parameter $%d",
+ i + 1)));
param_list = lappendo(param_list, ptype);
}
@@ -1149,7 +1155,7 @@ exec_parse_message(const char *query_string, /* string to execute */
}
else
{
- /* Empty input string. This is legal. */
+ /* Empty input string. This is legal. */
commandTag = NULL;
querytree_list = NIL;
plantree_list = NIL;
@@ -1193,9 +1199,10 @@ exec_parse_message(const char *query_string, /* string to execute */
QueryContext = NULL;
/*
- * We do NOT close the open transaction command here; that only happens
- * when the client sends Sync. Instead, do CommandCounterIncrement just
- * in case something happened during parse/plan.
+ * We do NOT close the open transaction command here; that only
+ * happens when the client sends Sync. Instead, do
+ * CommandCounterIncrement just in case something happened during
+ * parse/plan.
*/
CommandCounterIncrement();
@@ -1236,9 +1243,9 @@ exec_bind_message(StringInfo input_message)
set_ps_display("BIND");
/*
- * Start up a transaction command so we can call functions etc.
- * (Note that this will normally change current memory context.)
- * Nothing happens if we are already in one.
+ * Start up a transaction command so we can call functions etc. (Note
+ * that this will normally change current memory context.) Nothing
+ * happens if we are already in one.
*/
start_xact_command();
@@ -1264,8 +1271,8 @@ exec_bind_message(StringInfo input_message)
if (numPFormats > 1 && numPFormats != numParams)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("bind message has %d parameter formats but %d parameters",
- numPFormats, numParams)));
+ errmsg("bind message has %d parameter formats but %d parameters",
+ numPFormats, numParams)));
/* Find prepared statement */
if (stmt_name[0] != '\0')
@@ -1277,14 +1284,14 @@ exec_bind_message(StringInfo input_message)
if (!pstmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
- errmsg("unnamed prepared statement does not exist")));
+ errmsg("unnamed prepared statement does not exist")));
}
if (numParams != length(pstmt->argtype_list))
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
- numParams, stmt_name, length(pstmt->argtype_list))));
+ numParams, stmt_name, length(pstmt->argtype_list))));
/*
* Create the portal. Allow silent replacement of an existing portal
@@ -1305,13 +1312,14 @@ exec_bind_message(StringInfo input_message)
/*
* Fetch parameters, if any, and store in the portal's memory context.
*
- * In an aborted transaction, we can't risk calling user-defined functions,
- * but we can't fail to Bind either, so bind all parameters to null values.
+ * In an aborted transaction, we can't risk calling user-defined
+ * functions, but we can't fail to Bind either, so bind all parameters
+ * to null values.
*/
if (numParams > 0)
{
- bool isaborted = IsAbortedTransactionBlockState();
- List *l;
+ bool isaborted = IsAbortedTransactionBlockState();
+ List *l;
MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
@@ -1340,25 +1348,25 @@ exec_bind_message(StringInfo input_message)
}
else
{
- int16 pformat;
+ int16 pformat;
StringInfoData pbuf;
- char csave;
+ char csave;
if (numPFormats > 1)
pformat = pformats[i];
else if (numPFormats > 0)
pformat = pformats[0];
else
- pformat = 0; /* default = text */
+ pformat = 0; /* default = text */
/*
- * Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the
- * message buffer. We assume we can scribble on the
- * message buffer so as to maintain the convention that
- * StringInfos have a trailing null. This is grotty but
- * is a big win when dealing with very large parameter
- * strings.
+ * Rather than copying data around, we just set up a
+ * phony StringInfo pointing to the correct portion of
+ * the message buffer. We assume we can scribble on
+ * the message buffer so as to maintain the convention
+ * that StringInfos have a trailing null. This is
+ * grotty but is a big win when dealing with very
+ * large parameter strings.
*/
pbuf.data = (char *) pvalue;
pbuf.maxlen = plength + 1;
@@ -1375,9 +1383,10 @@ exec_bind_message(StringInfo input_message)
char *pstring;
getTypeInputInfo(ptype, &typInput, &typElem);
+
/*
- * We have to do encoding conversion before calling
- * the typinput routine.
+ * We have to do encoding conversion before
+ * calling the typinput routine.
*/
pstring = (char *)
pg_client_to_server((unsigned char *) pbuf.data,
@@ -1396,7 +1405,10 @@ exec_bind_message(StringInfo input_message)
Oid typReceive;
Oid typElem;
- /* Call the parameter type's binary input converter */
+ /*
+ * Call the parameter type's binary input
+ * converter
+ */
getTypeBinaryInputInfo(ptype, &typReceive, &typElem);
params[i].value =
@@ -1409,7 +1421,7 @@ exec_bind_message(StringInfo input_message)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("incorrect binary data format in bind parameter %d",
- i + 1)));
+ i + 1)));
}
else
{
@@ -1474,7 +1486,7 @@ exec_bind_message(StringInfo input_message)
static void
exec_execute_message(const char *portal_name, long max_rows)
{
- CommandDest dest;
+ CommandDest dest;
DestReceiver *receiver;
Portal portal;
bool is_trans_stmt = false;
@@ -1494,7 +1506,8 @@ exec_execute_message(const char *portal_name, long max_rows)
errmsg("portal \"%s\" does not exist", portal_name)));
/*
- * If the original query was a null string, just return EmptyQueryResponse.
+ * If the original query was a null string, just return
+ * EmptyQueryResponse.
*/
if (portal->commandTag == NULL)
{
@@ -1521,7 +1534,7 @@ exec_execute_message(const char *portal_name, long max_rows)
/* Check for transaction-control commands */
if (length(portal->parseTrees) == 1)
{
- Query *query = (Query *) lfirst(portal->parseTrees);
+ Query *query = (Query *) lfirst(portal->parseTrees);
if (query->commandType == CMD_UTILITY &&
query->utilityStmt != NULL &&
@@ -1537,14 +1550,15 @@ exec_execute_message(const char *portal_name, long max_rows)
}
/*
- * Create dest receiver in MessageContext (we don't want it in transaction
- * context, because that may get deleted if portal contains VACUUM).
+ * Create dest receiver in MessageContext (we don't want it in
+ * transaction context, because that may get deleted if portal
+ * contains VACUUM).
*/
receiver = CreateDestReceiver(dest, portal);
/*
- * Ensure we are in a transaction command (this should normally be
- * the case already due to prior BIND).
+ * Ensure we are in a transaction command (this should normally be the
+ * case already due to prior BIND).
*/
start_xact_command();
@@ -1558,7 +1572,7 @@ exec_execute_message(const char *portal_name, long max_rows)
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "queries ignored until end of transaction block")));
+ "queries ignored until end of transaction block")));
}
/* Check for cancel signal before we start execution */
@@ -1583,16 +1597,17 @@ exec_execute_message(const char *portal_name, long max_rows)
if (is_trans_stmt)
{
/*
- * If this was a transaction control statement, commit it. We will
- * start a new xact command for the next command (if any).
+ * If this was a transaction control statement, commit it. We
+ * will start a new xact command for the next command (if
+ * any).
*/
finish_xact_command();
}
else
{
/*
- * We need a CommandCounterIncrement after every query,
- * except those that start or end a transaction block.
+ * We need a CommandCounterIncrement after every query, except
+ * those that start or end a transaction block.
*/
CommandCounterIncrement();
}
@@ -1633,7 +1648,7 @@ exec_describe_statement_message(const char *stmt_name)
if (!pstmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_PSTATEMENT),
- errmsg("unnamed prepared statement does not exist")));
+ errmsg("unnamed prepared statement does not exist")));
}
if (whereToSendOutput != Remote)
@@ -1642,7 +1657,7 @@ exec_describe_statement_message(const char *stmt_name)
/*
* First describe the parameters...
*/
- pq_beginmessage(&buf, 't'); /* parameter description message type */
+ pq_beginmessage(&buf, 't'); /* parameter description message type */
pq_sendint(&buf, length(pstmt->argtype_list), 2);
foreach(l, pstmt->argtype_list)
@@ -1659,7 +1674,7 @@ exec_describe_statement_message(const char *stmt_name)
tupdesc = FetchPreparedStatementResultDesc(pstmt);
if (tupdesc)
{
- List *targetlist;
+ List *targetlist;
if (ChoosePortalStrategy(pstmt->query_list) == PORTAL_ONE_SELECT)
targetlist = ((Query *) lfirst(pstmt->query_list))->targetList;
@@ -1693,7 +1708,7 @@ exec_describe_portal_message(const char *portal_name)
if (portal->tupDesc)
{
- List *targetlist;
+ List *targetlist;
if (portal->strategy == PORTAL_ONE_SELECT)
targetlist = ((Query *) lfirst(portal->parseTrees))->targetList;
@@ -1768,19 +1783,21 @@ void
quickdie(SIGNAL_ARGS)
{
PG_SETMASK(&BlockSig);
+
/*
- * Ideally this should be ereport(FATAL), but then we'd not get control
- * back (perhaps could fix by doing local sigsetjmp?)
+ * Ideally this should be ereport(FATAL), but then we'd not get
+ * control back (perhaps could fix by doing local sigsetjmp?)
*/
ereport(WARNING,
(errcode(ERRCODE_CRASH_SHUTDOWN),
- errmsg("terminating connection due to crash of another backend"),
- errdetail("The postmaster has commanded this backend to roll back"
- " the current transaction and exit, because another"
- " backend exited abnormally and possibly corrupted"
- " shared memory."),
+ errmsg("terminating connection due to crash of another backend"),
+ errdetail("The postmaster has commanded this backend to roll back"
+ " the current transaction and exit, because another"
+ " backend exited abnormally and possibly corrupted"
+ " shared memory."),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your query.")));
+
/*
* DO NOT proc_exit() -- we're here because shared memory may be
* corrupted, so we don't want to try to clean up our transaction.
@@ -1894,9 +1911,9 @@ FloatExceptionHandler(SIGNAL_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FLOATING_POINT_EXCEPTION),
errmsg("floating-point exception"),
- errdetail("An invalid floating-point operation was signaled. "
- "This probably means an out-of-range result or an "
- "invalid operation, such as division by zero.")));
+ errdetail("An invalid floating-point operation was signaled. "
+ "This probably means an out-of-range result or an "
+ "invalid operation, such as division by zero.")));
}
/* SIGHUP: set flag to re-read config file at next convenient time */
@@ -1929,7 +1946,7 @@ ProcessInterrupts(void)
DisableNotifyInterrupt();
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
- errmsg("terminating connection due to administrator command")));
+ errmsg("terminating connection due to administrator command")));
}
if (QueryCancelPending)
{
@@ -1999,7 +2016,8 @@ PostgresMain(int argc, char *argv[], const char *username)
bool secure;
int errs = 0;
int debug_flag = 0;
- GucContext ctx, debug_context;
+ GucContext ctx,
+ debug_context;
GucSource gucsource;
char *tmp;
int firstchar;
@@ -2049,7 +2067,7 @@ PostgresMain(int argc, char *argv[], const char *username)
Noversion = false;
EchoQuery = false;
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
InitializeGUCOptions();
potential_DataDir = getenv("PGDATA");
@@ -2114,29 +2132,33 @@ PostgresMain(int argc, char *argv[], const char *username)
case 'd': /* debug level */
{
/*
- * Client option can't decrease debug level.
- * We have to do the test here because we group priv and client
- * set GUC calls below, after we know the final debug value.
- */
+ * Client option can't decrease debug level. We have
+ * to do the test here because we group priv and
+ * client set GUC calls below, after we know the final
+ * debug value.
+ */
if (ctx != PGC_BACKEND || atoi(optarg) > debug_flag)
{
debug_flag = atoi(optarg);
- debug_context = ctx; /* save context for use below */
+ debug_context = ctx; /* save context for use
+ * below */
/* Set server debugging level. */
if (debug_flag != 0)
{
char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
-
+
sprintf(debugstr, "debug%s", optarg);
SetConfigOption("log_min_messages", debugstr, ctx, gucsource);
pfree(debugstr);
-
+
}
else
+
/*
* -d0 allows user to prevent postmaster debug
- * from propagating to backend. It would be nice
- * to set it to the postgresql.conf value here.
+ * from propagating to backend. It would be
+ * nice to set it to the postgresql.conf value
+ * here.
*/
SetConfigOption("log_min_messages", "notice",
ctx, gucsource);
@@ -2245,17 +2267,19 @@ PostgresMain(int argc, char *argv[], const char *username)
if (secure)
{
#ifdef EXEC_BACKEND
- char *p;
- int i;
- int PMcanAcceptConnections; /* will eventually be global or static, when fork */
-
+ char *p;
+ int i;
+ int PMcanAcceptConnections; /* will eventually be
+ * global or static,
+ * when fork */
+
sscanf(optarg, "%d,%d,%d,%p,", &MyProcPort->sock, &PMcanAcceptConnections,
- &UsedShmemSegID, &UsedShmemSegAddr);
+ &UsedShmemSegID, &UsedShmemSegAddr);
/* Grab dbname as last param */
- for (i = 0, p = optarg-1; i < 4 && p; i++)
- p = strchr(p+1, ',');
+ for (i = 0, p = optarg - 1; i < 4 && p; i++)
+ p = strchr(p + 1, ',');
if (i == 4 && p)
- dbname = strdup(p+1);
+ dbname = strdup(p + 1);
#else
dbname = strdup(optarg);
#endif
@@ -2411,11 +2435,12 @@ PostgresMain(int argc, char *argv[], const char *username)
SetConfigOption("debug_print_rewritten", "true", debug_context, gucsource);
/*
- * Process any additional GUC variable settings passed in startup packet.
+ * Process any additional GUC variable settings passed in startup
+ * packet.
*/
if (MyProcPort != NULL)
{
- List *gucopts = MyProcPort->guc_options;
+ List *gucopts = MyProcPort->guc_options;
while (gucopts)
{
@@ -2481,7 +2506,7 @@ PostgresMain(int argc, char *argv[], const char *username)
pqsignal(SIGINT, StatementCancelHandler); /* cancel current query */
pqsignal(SIGTERM, die); /* cancel current query and exit */
pqsignal(SIGQUIT, quickdie); /* hard crash time */
- pqsignal(SIGALRM, handle_sig_alarm); /* timeout conditions */
+ pqsignal(SIGALRM, handle_sig_alarm); /* timeout conditions */
/*
* Ignore failure to write to frontend. Note: if frontend closes
@@ -2626,7 +2651,7 @@ PostgresMain(int argc, char *argv[], const char *username)
if (!IsUnderPostmaster)
{
puts("\nPOSTGRES backend interactive interface ");
- puts("$Revision: 1.353 $ $Date: 2003/07/29 00:03:18 $\n");
+ puts("$Revision: 1.354 $ $Date: 2003/08/04 00:43:25 $\n");
}
/*
@@ -2664,15 +2689,15 @@ PostgresMain(int argc, char *argv[], const char *username)
*
* Make sure we're not interrupted while cleaning up. Also forget
* any pending QueryCancel request, since we're aborting anyway.
- * Force InterruptHoldoffCount to a known state in case we ereport'd
- * from inside a holdoff section.
+ * Force InterruptHoldoffCount to a known state in case we
+ * ereport'd from inside a holdoff section.
*/
ImmediateInterruptOK = false;
QueryCancelPending = false;
InterruptHoldoffCount = 1;
CritSectionCount = 0; /* should be unnecessary, but... */
disable_sig_alarm(true);
- QueryCancelPending = false; /* again in case timeout occurred */
+ QueryCancelPending = false; /* again in case timeout occurred */
DisableNotifyInterrupt();
debug_query_string = NULL;
@@ -2706,8 +2731,8 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* If we were handling an extended-query-protocol message,
- * initiate skip till next Sync. This also causes us not
- * to issue ReadyForQuery (until we get Sync).
+ * initiate skip till next Sync. This also causes us not to issue
+ * ReadyForQuery (until we get Sync).
*/
if (doing_extended_query_message)
ignore_till_sync = true;
@@ -2732,8 +2757,8 @@ PostgresMain(int argc, char *argv[], const char *username)
for (;;)
{
/*
- * At top of loop, reset extended-query-message flag, so that
- * any errors encountered in "idle" state don't provoke skip.
+ * At top of loop, reset extended-query-message flag, so that any
+ * errors encountered in "idle" state don't provoke skip.
*/
doing_extended_query_message = false;
@@ -2815,7 +2840,8 @@ PostgresMain(int argc, char *argv[], const char *username)
}
/*
- * (6) process the command. But ignore it if we're skipping till Sync.
+ * (6) process the command. But ignore it if we're skipping till
+ * Sync.
*/
if (ignore_till_sync && firstchar != EOF)
continue;
@@ -2847,7 +2873,7 @@ PostgresMain(int argc, char *argv[], const char *username)
numParams = pq_getmsgint(input_message, 2);
if (numParams > 0)
{
- int i;
+ int i;
paramTypes = (Oid *) palloc(numParams * sizeof(Oid));
for (i = 0; i < numParams; i++)
@@ -2861,9 +2887,10 @@ PostgresMain(int argc, char *argv[], const char *username)
break;
case 'B': /* bind */
+
/*
- * this message is complex enough that it seems best to put
- * the field extraction out-of-line
+ * this message is complex enough that it seems best to
+ * put the field extraction out-of-line
*/
exec_bind_message(input_message);
break;
@@ -2871,7 +2898,7 @@ PostgresMain(int argc, char *argv[], const char *username)
case 'E': /* execute */
{
const char *portal_name;
- int max_rows;
+ int max_rows;
portal_name = pq_getmsgstring(input_message);
max_rows = pq_getmsgint(input_message, 4);
@@ -2911,9 +2938,9 @@ PostgresMain(int argc, char *argv[], const char *username)
send_rfq = true;
break;
- case 'C': /* close */
+ case 'C': /* close */
{
- int close_type;
+ int close_type;
const char *close_target;
close_type = pq_getmsgbyte(input_message);
@@ -2949,19 +2976,19 @@ PostgresMain(int argc, char *argv[], const char *username)
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid CLOSE message subtype %d",
- close_type)));
+ errmsg("invalid CLOSE message subtype %d",
+ close_type)));
break;
}
if (whereToSendOutput == Remote)
- pq_putemptymessage('3'); /* CloseComplete */
+ pq_putemptymessage('3'); /* CloseComplete */
}
break;
case 'D': /* describe */
{
- int describe_type;
+ int describe_type;
const char *describe_target;
describe_type = pq_getmsgbyte(input_message);
@@ -2979,20 +3006,20 @@ PostgresMain(int argc, char *argv[], const char *username)
default:
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid DESCRIBE message subtype %d",
- describe_type)));
+ errmsg("invalid DESCRIBE message subtype %d",
+ describe_type)));
break;
}
}
break;
- case 'H': /* flush */
+ case 'H': /* flush */
pq_getmsgend(input_message);
if (whereToSendOutput == Remote)
pq_flush();
break;
- case 'S': /* sync */
+ case 'S': /* sync */
pq_getmsgend(input_message);
finish_xact_command();
send_rfq = true;
@@ -3007,8 +3034,8 @@ PostgresMain(int argc, char *argv[], const char *username)
case EOF:
/*
- * Reset whereToSendOutput to prevent ereport from attempting
- * to send any more messages to client.
+ * Reset whereToSendOutput to prevent ereport from
+ * attempting to send any more messages to client.
*/
if (whereToSendOutput == Remote)
whereToSendOutput = None;
@@ -3022,12 +3049,13 @@ PostgresMain(int argc, char *argv[], const char *username)
*/
proc_exit(0);
- case 'd': /* copy data */
- case 'c': /* copy done */
- case 'f': /* copy fail */
+ case 'd': /* copy data */
+ case 'c': /* copy done */
+ case 'f': /* copy fail */
+
/*
- * Accept but ignore these messages, per protocol spec;
- * we probably got here because a COPY failed, and the
+ * Accept but ignore these messages, per protocol spec; we
+ * probably got here because a COPY failed, and the
* frontend is still sending data.
*/
break;
@@ -3119,9 +3147,9 @@ ShowUsage(const char *title)
(long) (elapse_t.tv_sec - Save_t.tv_sec),
(long) (elapse_t.tv_usec - Save_t.tv_usec),
(long) (r.ru_utime.tv_sec - Save_r.ru_utime.tv_sec),
- (long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
+ (long) (r.ru_utime.tv_usec - Save_r.ru_utime.tv_usec),
(long) (r.ru_stime.tv_sec - Save_r.ru_stime.tv_sec),
- (long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
+ (long) (r.ru_stime.tv_usec - Save_r.ru_stime.tv_usec));
appendStringInfo(&str,
"!\t[%ld.%06ld user %ld.%06ld sys total]\n",
(long) user.tv_sec,
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 2f6ed0eda1d..0626ac5e8f4 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.68 2003/08/01 17:57:42 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/pquery.c,v 1.69 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,18 +24,18 @@
static uint32 RunFromStore(Portal portal, ScanDirection direction, long count,
- DestReceiver *dest);
+ DestReceiver *dest);
static long PortalRunSelect(Portal portal, bool forward, long count,
- DestReceiver *dest);
+ DestReceiver *dest);
static void PortalRunUtility(Portal portal, Query *query,
- DestReceiver *dest, char *completionTag);
+ DestReceiver *dest, char *completionTag);
static void PortalRunMulti(Portal portal,
- DestReceiver *dest, DestReceiver *altdest,
- char *completionTag);
+ DestReceiver *dest, DestReceiver *altdest,
+ char *completionTag);
static long DoPortalRunFetch(Portal portal,
- FetchDirection fdirection,
- long count,
- DestReceiver *dest);
+ FetchDirection fdirection,
+ long count,
+ DestReceiver *dest);
static void DoPortalRewind(Portal portal);
@@ -56,7 +56,7 @@ CreateQueryDesc(Query *parsetree,
qd->plantree = plantree; /* plan */
qd->dest = dest; /* output dest */
qd->params = params; /* parameter values passed into query */
- qd->doInstrument = doInstrument; /* instrumentation wanted? */
+ qd->doInstrument = doInstrument; /* instrumentation wanted? */
/* null these fields until set by ExecutorStart */
qd->tupDesc = NULL;
@@ -156,7 +156,7 @@ ProcessQuery(Query *parsetree,
else
lastOid = InvalidOid;
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
- "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
+ "INSERT %u %u", lastOid, queryDesc->estate->es_processed);
break;
case CMD_UPDATE:
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
@@ -195,14 +195,12 @@ ChoosePortalStrategy(List *parseTrees)
if (length(parseTrees) == 1)
{
- Query *query = (Query *) lfirst(parseTrees);
+ Query *query = (Query *) lfirst(parseTrees);
if (query->commandType == CMD_SELECT &&
query->canSetTag &&
query->into == NULL)
- {
strategy = PORTAL_ONE_SELECT;
- }
else if (query->commandType == CMD_UTILITY &&
query->canSetTag &&
query->utilityStmt != NULL)
@@ -233,8 +231,8 @@ PortalStart(Portal portal, ParamListInfo params)
QueryDesc *queryDesc;
AssertArg(PortalIsValid(portal));
- AssertState(portal->queryContext != NULL); /* query defined? */
- AssertState(!portal->portalReady); /* else extra PortalStart */
+ AssertState(portal->queryContext != NULL); /* query defined? */
+ AssertState(!portal->portalReady); /* else extra PortalStart */
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
@@ -252,31 +250,37 @@ PortalStart(Portal portal, ParamListInfo params)
switch (portal->strategy)
{
case PORTAL_ONE_SELECT:
+
/*
* Must set query snapshot before starting executor.
*/
SetQuerySnapshot();
+
/*
* Create QueryDesc in portal's context; for the moment, set
* the destination to None.
*/
queryDesc = CreateQueryDesc((Query *) lfirst(portal->parseTrees),
- (Plan *) lfirst(portal->planTrees),
+ (Plan *) lfirst(portal->planTrees),
None_Receiver,
params,
false);
+
/*
* Call ExecStart to prepare the plan for execution
*/
ExecutorStart(queryDesc, false);
+
/*
* This tells PortalCleanup to shut down the executor
*/
portal->queryDesc = queryDesc;
+
/*
* Remember tuple descriptor (computed by ExecutorStart)
*/
portal->tupDesc = queryDesc->tupDesc;
+
/*
* Reset cursor position data to "start of query"
*/
@@ -287,12 +291,14 @@ PortalStart(Portal portal, ParamListInfo params)
break;
case PORTAL_UTIL_SELECT:
+
/*
* We don't set query snapshot here, because PortalRunUtility
* will take care of it.
*/
portal->tupDesc =
UtilityTupleDescriptor(((Query *) lfirst(portal->parseTrees))->utilityStmt);
+
/*
* Reset cursor position data to "start of query"
*/
@@ -346,7 +352,8 @@ PortalSetResultFormat(Portal portal, int nFormats, int16 *formats)
errmsg("bind message has %d result formats but query has %d columns",
nFormats, natts)));
memcpy(portal->formats, formats, natts * sizeof(int16));
- } else if (nFormats > 0)
+ }
+ else if (nFormats > 0)
{
/* single format specified, use for all columns */
int16 format1 = formats[0];
@@ -393,7 +400,7 @@ PortalRun(Portal portal, long count,
MemoryContext oldContext;
AssertArg(PortalIsValid(portal));
- AssertState(portal->portalReady); /* else no PortalStart */
+ AssertState(portal->portalReady); /* else no PortalStart */
/* Initialize completion tag to empty string */
if (completionTag)
@@ -405,7 +412,7 @@ PortalRun(Portal portal, long count,
if (portal->portalDone)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("portal \"%s\" cannot be run anymore", portal->name)));
+ errmsg("portal \"%s\" cannot be run anymore", portal->name)));
if (portal->portalActive)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -429,6 +436,7 @@ PortalRun(Portal portal, long count,
/* we know the query is supposed to set the tag */
if (completionTag && portal->commandTag)
strcpy(completionTag, portal->commandTag);
+
/*
* Since it's a forward fetch, say DONE iff atEnd is now true.
*/
@@ -436,6 +444,7 @@ PortalRun(Portal portal, long count,
break;
case PORTAL_UTIL_SELECT:
+
/*
* If we have not yet run the utility statement, do so,
* storing its results in the portal's tuplestore.
@@ -451,16 +460,19 @@ PortalRun(Portal portal, long count,
(*treceiver->destroy) (treceiver);
portal->portalUtilReady = true;
}
+
/*
* Now fetch desired portion of results.
*/
(void) PortalRunSelect(portal, true, count, dest);
+
/*
* We know the query is supposed to set the tag; we assume
* only the default tag is needed.
*/
if (completionTag && portal->commandTag)
strcpy(completionTag, portal->commandTag);
+
/*
* Since it's a forward fetch, say DONE iff atEnd is now true.
*/
@@ -518,8 +530,8 @@ PortalRunSelect(Portal portal,
uint32 nprocessed;
/*
- * NB: queryDesc will be NULL if we are fetching from a held cursor
- * or a completed utility query; can't use it in that path.
+ * NB: queryDesc will be NULL if we are fetching from a held cursor or
+ * a completed utility query; can't use it in that path.
*/
queryDesc = PortalGetQueryDesc(portal);
@@ -527,7 +539,7 @@ PortalRunSelect(Portal portal,
Assert(queryDesc || portal->holdStore);
/*
- * Force the queryDesc destination to the right thing. This supports
+ * Force the queryDesc destination to the right thing. This supports
* MOVE, for example, which will pass in dest = None. This is okay to
* change as long as we do it on every fetch. (The Executor must not
* assume that dest never changes.)
@@ -567,13 +579,13 @@ PortalRunSelect(Portal portal,
if (direction != NoMovementScanDirection)
{
- long oldPos;
+ long oldPos;
if (nprocessed > 0)
- portal->atStart = false; /* OK to go backward now */
+ portal->atStart = false; /* OK to go backward now */
if (count == 0 ||
(unsigned long) nprocessed < (unsigned long) count)
- portal->atEnd = true; /* we retrieved 'em all */
+ portal->atEnd = true; /* we retrieved 'em all */
oldPos = portal->portalPos;
portal->portalPos += nprocessed;
/* portalPos doesn't advance when we fall off the end */
@@ -610,19 +622,19 @@ PortalRunSelect(Portal portal,
{
if (nprocessed > 0 && portal->atEnd)
{
- portal->atEnd = false; /* OK to go forward now */
- portal->portalPos++; /* adjust for endpoint case */
+ portal->atEnd = false; /* OK to go forward now */
+ portal->portalPos++; /* adjust for endpoint case */
}
if (count == 0 ||
(unsigned long) nprocessed < (unsigned long) count)
{
- portal->atStart = true; /* we retrieved 'em all */
+ portal->atStart = true; /* we retrieved 'em all */
portal->portalPos = 0;
portal->posOverflow = false;
}
else
{
- long oldPos;
+ long oldPos;
oldPos = portal->portalPos;
portal->portalPos -= nprocessed;
@@ -662,13 +674,13 @@ RunFromStore(Portal portal, ScanDirection direction, long count,
}
else
{
- bool forward = (direction == ForwardScanDirection);
+ bool forward = (direction == ForwardScanDirection);
for (;;)
{
MemoryContext oldcontext;
- HeapTuple tup;
- bool should_free;
+ HeapTuple tup;
+ bool should_free;
oldcontext = MemoryContextSwitchTo(portal->holdContext);
@@ -686,9 +698,9 @@ RunFromStore(Portal portal, ScanDirection direction, long count,
pfree(tup);
/*
- * check our tuple count.. if we've processed the proper number
- * then quit, else loop again and process more tuples. Zero
- * count means no limit.
+ * check our tuple count.. if we've processed the proper
+ * number then quit, else loop again and process more tuples.
+ * Zero count means no limit.
*/
current_tuple_count++;
if (count && count == current_tuple_count)
@@ -709,35 +721,34 @@ static void
PortalRunUtility(Portal portal, Query *query,
DestReceiver *dest, char *completionTag)
{
- Node *utilityStmt = query->utilityStmt;
+ Node *utilityStmt = query->utilityStmt;
elog(DEBUG3, "ProcessUtility");
/*
- * Set snapshot if utility stmt needs one. Most reliable
- * way to do this seems to be to enumerate those that do not
- * need one; this is a short list. Transaction control,
- * LOCK, and SET must *not* set a snapshot since they need
- * to be executable at the start of a serializable transaction
- * without freezing a snapshot. By extension we allow SHOW
- * not to set a snapshot. The other stmts listed are just
- * efficiency hacks. Beware of listing anything that can
- * modify the database --- if, say, it has to update an
- * index with expressions that invoke user-defined functions,
- * then it had better have a snapshot.
+ * Set snapshot if utility stmt needs one. Most reliable way to do
+ * this seems to be to enumerate those that do not need one; this is a
+ * short list. Transaction control, LOCK, and SET must *not* set a
+ * snapshot since they need to be executable at the start of a
+ * serializable transaction without freezing a snapshot. By extension
+ * we allow SHOW not to set a snapshot. The other stmts listed are
+ * just efficiency hacks. Beware of listing anything that can modify
+ * the database --- if, say, it has to update an index with
+ * expressions that invoke user-defined functions, then it had better
+ * have a snapshot.
*/
- if (! (IsA(utilityStmt, TransactionStmt) ||
- IsA(utilityStmt, LockStmt) ||
- IsA(utilityStmt, VariableSetStmt) ||
- IsA(utilityStmt, VariableShowStmt) ||
- IsA(utilityStmt, VariableResetStmt) ||
- IsA(utilityStmt, ConstraintsSetStmt) ||
- /* efficiency hacks from here down */
- IsA(utilityStmt, FetchStmt) ||
- IsA(utilityStmt, ListenStmt) ||
- IsA(utilityStmt, NotifyStmt) ||
- IsA(utilityStmt, UnlistenStmt) ||
- IsA(utilityStmt, CheckPointStmt)))
+ if (!(IsA(utilityStmt, TransactionStmt) ||
+ IsA(utilityStmt, LockStmt) ||
+ IsA(utilityStmt, VariableSetStmt) ||
+ IsA(utilityStmt, VariableShowStmt) ||
+ IsA(utilityStmt, VariableResetStmt) ||
+ IsA(utilityStmt, ConstraintsSetStmt) ||
+ /* efficiency hacks from here down */
+ IsA(utilityStmt, FetchStmt) ||
+ IsA(utilityStmt, ListenStmt) ||
+ IsA(utilityStmt, NotifyStmt) ||
+ IsA(utilityStmt, UnlistenStmt) ||
+ IsA(utilityStmt, CheckPointStmt)))
SetQuerySnapshot();
if (query->canSetTag)
@@ -745,7 +756,7 @@ PortalRunUtility(Portal portal, Query *query,
/* utility statement can override default tag string */
ProcessUtility(utilityStmt, dest, completionTag);
if (completionTag && completionTag[0] == '\0' && portal->commandTag)
- strcpy(completionTag, portal->commandTag); /* use the default */
+ strcpy(completionTag, portal->commandTag); /* use the default */
}
else
{
@@ -770,14 +781,14 @@ PortalRunMulti(Portal portal,
List *querylist_item;
/*
- * If the destination is RemoteExecute, change to None. The reason
- * is that the client won't be expecting any tuples, and indeed has no
+ * If the destination is RemoteExecute, change to None. The reason is
+ * that the client won't be expecting any tuples, and indeed has no
* way to know what they are, since there is no provision for Describe
- * to send a RowDescription message when this portal execution strategy
- * is in effect. This presently will only affect SELECT commands added
- * to non-SELECT queries by rewrite rules: such commands will be executed,
- * but the results will be discarded unless you use "simple Query"
- * protocol.
+ * to send a RowDescription message when this portal execution
+ * strategy is in effect. This presently will only affect SELECT
+ * commands added to non-SELECT queries by rewrite rules: such
+ * commands will be executed, but the results will be discarded unless
+ * you use "simple Query" protocol.
*/
if (dest->mydest == RemoteExecute)
dest = None_Receiver;
@@ -785,8 +796,8 @@ PortalRunMulti(Portal portal,
altdest = None_Receiver;
/*
- * Loop to handle the individual queries generated from a
- * single parsetree by analysis and rewrite.
+ * Loop to handle the individual queries generated from a single
+ * parsetree by analysis and rewrite.
*/
foreach(querylist_item, portal->parseTrees)
{
@@ -862,12 +873,12 @@ PortalRunMulti(Portal portal,
}
/*
- * If a command completion tag was supplied, use it. Otherwise
- * use the portal's commandTag as the default completion tag.
+ * If a command completion tag was supplied, use it. Otherwise use
+ * the portal's commandTag as the default completion tag.
*
- * Exception: clients will expect INSERT/UPDATE/DELETE tags to
- * have counts, so fake something up if necessary. (This could
- * happen if the original query was replaced by a DO INSTEAD rule.)
+ * Exception: clients will expect INSERT/UPDATE/DELETE tags to have
+ * counts, so fake something up if necessary. (This could happen if
+ * the original query was replaced by a DO INSTEAD rule.)
*/
if (completionTag && completionTag[0] == '\0')
{
@@ -903,7 +914,7 @@ PortalRunFetch(Portal portal,
MemoryContext oldContext;
AssertArg(PortalIsValid(portal));
- AssertState(portal->portalReady); /* else no PortalStart */
+ AssertState(portal->portalReady); /* else no PortalStart */
/*
* Check for improper portal use, and mark portal active.
@@ -911,7 +922,7 @@ PortalRunFetch(Portal portal,
if (portal->portalDone)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("portal \"%s\" cannot be run anymore", portal->name)));
+ errmsg("portal \"%s\" cannot be run anymore", portal->name)));
if (portal->portalActive)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -989,18 +1000,18 @@ DoPortalRunFetch(Portal portal,
if (count > 0)
{
/*
- * Definition: Rewind to start, advance count-1 rows, return
- * next row (if any). In practice, if the goal is less than
- * halfway back to the start, it's better to scan from where
- * we are. In any case, we arrange to fetch the target row
- * going forwards.
+ * Definition: Rewind to start, advance count-1 rows,
+ * return next row (if any). In practice, if the goal is
+ * less than halfway back to the start, it's better to
+ * scan from where we are. In any case, we arrange to
+ * fetch the target row going forwards.
*/
if (portal->posOverflow || portal->portalPos == LONG_MAX ||
- count-1 <= portal->portalPos / 2)
+ count - 1 <= portal->portalPos / 2)
{
DoPortalRewind(portal);
if (count > 1)
- PortalRunSelect(portal, true, count-1,
+ PortalRunSelect(portal, true, count - 1,
None_Receiver);
}
else
@@ -1010,10 +1021,10 @@ DoPortalRunFetch(Portal portal,
if (portal->atEnd)
pos++; /* need one extra fetch if off end */
if (count <= pos)
- PortalRunSelect(portal, false, pos-count+1,
+ PortalRunSelect(portal, false, pos - count + 1,
None_Receiver);
- else if (count > pos+1)
- PortalRunSelect(portal, true, count-pos-1,
+ else if (count > pos + 1)
+ PortalRunSelect(portal, true, count - pos - 1,
None_Receiver);
}
return PortalRunSelect(portal, true, 1L, dest);
@@ -1022,17 +1033,19 @@ DoPortalRunFetch(Portal portal,
{
/*
* Definition: Advance to end, back up abs(count)-1 rows,
- * return prior row (if any). We could optimize this if we
- * knew in advance where the end was, but typically we won't.
- * (Is it worth considering case where count > half of size
- * of query? We could rewind once we know the size ...)
+ * return prior row (if any). We could optimize this if
+ * we knew in advance where the end was, but typically we
+ * won't. (Is it worth considering case where count > half
+ * of size of query? We could rewind once we know the
+ * size ...)
*/
PortalRunSelect(portal, true, FETCH_ALL, None_Receiver);
if (count < -1)
- PortalRunSelect(portal, false, -count-1, None_Receiver);
+ PortalRunSelect(portal, false, -count - 1, None_Receiver);
return PortalRunSelect(portal, false, 1L, dest);
}
- else /* count == 0 */
+ else
+/* count == 0 */
{
/* Rewind to start, return zero rows */
DoPortalRewind(portal);
@@ -1043,10 +1056,11 @@ DoPortalRunFetch(Portal portal,
if (count > 0)
{
/*
- * Definition: advance count-1 rows, return next row (if any).
+ * Definition: advance count-1 rows, return next row (if
+ * any).
*/
if (count > 1)
- PortalRunSelect(portal, true, count-1, None_Receiver);
+ PortalRunSelect(portal, true, count - 1, None_Receiver);
return PortalRunSelect(portal, true, 1L, dest);
}
else if (count < 0)
@@ -1056,10 +1070,11 @@ DoPortalRunFetch(Portal portal,
* (if any).
*/
if (count < -1)
- PortalRunSelect(portal, false, -count-1, None_Receiver);
+ PortalRunSelect(portal, false, -count - 1, None_Receiver);
return PortalRunSelect(portal, false, 1L, dest);
}
- else /* count == 0 */
+ else
+/* count == 0 */
{
/* Same as FETCH FORWARD 0, so fall out of switch */
fdirection = FETCH_FORWARD;
@@ -1071,8 +1086,8 @@ DoPortalRunFetch(Portal portal,
}
/*
- * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD,
- * and count >= 0.
+ * Get here with fdirection == FETCH_FORWARD or FETCH_BACKWARD, and
+ * count >= 0.
*/
forward = (fdirection == FETCH_FORWARD);
@@ -1081,7 +1096,7 @@ DoPortalRunFetch(Portal portal,
*/
if (count == 0)
{
- bool on_row;
+ bool on_row;
/* Are we sitting on a row? */
on_row = (!portal->atStart && !portal->atEnd);
@@ -1094,11 +1109,11 @@ DoPortalRunFetch(Portal portal,
else
{
/*
- * If we are sitting on a row, back up one so we can re-fetch it.
- * If we are not sitting on a row, we still have to start up and
- * shut down the executor so that the destination is initialized
- * and shut down correctly; so keep going. To PortalRunSelect,
- * count == 0 means we will retrieve no row.
+ * If we are sitting on a row, back up one so we can re-fetch
+ * it. If we are not sitting on a row, we still have to start
+ * up and shut down the executor so that the destination is
+ * initialized and shut down correctly; so keep going. To
+ * PortalRunSelect, count == 0 means we will retrieve no row.
*/
if (on_row)
{
@@ -1115,7 +1130,7 @@ DoPortalRunFetch(Portal portal,
*/
if (!forward && count == FETCH_ALL && dest->mydest == None)
{
- long result = portal->portalPos;
+ long result = portal->portalPos;
if (result > 0 && !portal->atEnd)
result--;
@@ -1142,9 +1157,7 @@ DoPortalRewind(Portal portal)
MemoryContextSwitchTo(oldcontext);
}
if (PortalGetQueryDesc(portal))
- {
ExecutorRewind(PortalGetQueryDesc(portal));
- }
portal->atStart = true;
portal->atEnd = false;
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index ec9e7a4cecb..f0206bd14f7 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/utility.c,v 1.203 2003/08/01 00:15:23 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/utility.c,v 1.204 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,32 +70,32 @@ struct msgstrings
};
static const struct msgstrings msgstringarray[] = {
- { RELKIND_RELATION,
- ERRCODE_UNDEFINED_TABLE,
- gettext_noop("table \"%s\" does not exist"),
- gettext_noop("\"%s\" is not a table"),
- gettext_noop("Use DROP TABLE to remove a table.") },
- { RELKIND_SEQUENCE,
- ERRCODE_UNDEFINED_TABLE,
- gettext_noop("sequence \"%s\" does not exist"),
- gettext_noop("\"%s\" is not a sequence"),
- gettext_noop("Use DROP SEQUENCE to remove a sequence.") },
- { RELKIND_VIEW,
- ERRCODE_UNDEFINED_TABLE,
- gettext_noop("view \"%s\" does not exist"),
- gettext_noop("\"%s\" is not a view"),
- gettext_noop("Use DROP VIEW to remove a view.") },
- { RELKIND_INDEX,
- ERRCODE_UNDEFINED_OBJECT,
- gettext_noop("index \"%s\" does not exist"),
- gettext_noop("\"%s\" is not an index"),
- gettext_noop("Use DROP INDEX to remove an index.") },
- { RELKIND_COMPOSITE_TYPE,
- ERRCODE_UNDEFINED_OBJECT,
- gettext_noop("type \"%s\" does not exist"),
- gettext_noop("\"%s\" is not a type"),
- gettext_noop("Use DROP TYPE to remove a type.") },
- { '\0', 0, NULL, NULL, NULL }
+ {RELKIND_RELATION,
+ ERRCODE_UNDEFINED_TABLE,
+ gettext_noop("table \"%s\" does not exist"),
+ gettext_noop("\"%s\" is not a table"),
+ gettext_noop("Use DROP TABLE to remove a table.")},
+ {RELKIND_SEQUENCE,
+ ERRCODE_UNDEFINED_TABLE,
+ gettext_noop("sequence \"%s\" does not exist"),
+ gettext_noop("\"%s\" is not a sequence"),
+ gettext_noop("Use DROP SEQUENCE to remove a sequence.")},
+ {RELKIND_VIEW,
+ ERRCODE_UNDEFINED_TABLE,
+ gettext_noop("view \"%s\" does not exist"),
+ gettext_noop("\"%s\" is not a view"),
+ gettext_noop("Use DROP VIEW to remove a view.")},
+ {RELKIND_INDEX,
+ ERRCODE_UNDEFINED_OBJECT,
+ gettext_noop("index \"%s\" does not exist"),
+ gettext_noop("\"%s\" is not an index"),
+ gettext_noop("Use DROP INDEX to remove an index.")},
+ {RELKIND_COMPOSITE_TYPE,
+ ERRCODE_UNDEFINED_OBJECT,
+ gettext_noop("type \"%s\" does not exist"),
+ gettext_noop("\"%s\" is not a type"),
+ gettext_noop("Use DROP TYPE to remove a type.")},
+ {'\0', 0, NULL, NULL, NULL}
};
@@ -181,7 +181,7 @@ CheckRelationOwnership(RangeVar *rel, bool noCatalogs)
tuple = SearchSysCache(RELOID,
ObjectIdGetDatum(relOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* should not happen */
+ if (!HeapTupleIsValid(tuple)) /* should not happen */
elog(ERROR, "cache lookup failed for relation %u", relOid);
if (!pg_class_ownercheck(relOid, GetUserId()))
@@ -194,8 +194,8 @@ CheckRelationOwnership(RangeVar *rel, bool noCatalogs)
IsSystemClass((Form_pg_class) GETSTRUCT(tuple)))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied: \"%s\" is a system catalog",
- rel->relname)));
+ errmsg("permission denied: \"%s\" is a system catalog",
+ rel->relname)));
}
ReleaseSysCache(tuple);
@@ -316,18 +316,18 @@ ProcessUtility(Node *parsetree,
if (stmt->options)
{
- List *head;
+ List *head;
foreach(head, stmt->options)
{
- DefElem *item = (DefElem *) lfirst(head);
+ DefElem *item = (DefElem *) lfirst(head);
- if (strcmp(item->defname, "transaction_isolation")==0)
+ if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("transaction_isolation",
- makeList1(item->arg), false);
- else if (strcmp(item->defname, "transaction_read_only")==0)
+ makeList1(item->arg), false);
+ else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("transaction_read_only",
- makeList1(item->arg), false);
+ makeList1(item->arg), false);
}
}
}
@@ -429,7 +429,11 @@ ProcessUtility(Node *parsetree,
break;
case OBJECT_DOMAIN:
- /* RemoveDomain does its own permissions checks */
+
+ /*
+ * RemoveDomain does its own permissions
+ * checks
+ */
RemoveDomain(names, stmt->behavior);
break;
@@ -438,7 +442,11 @@ ProcessUtility(Node *parsetree,
break;
case OBJECT_SCHEMA:
- /* RemoveSchema does its own permissions checks */
+
+ /*
+ * RemoveSchema does its own permissions
+ * checks
+ */
RemoveSchema(names, stmt->behavior);
break;
@@ -590,8 +598,8 @@ ProcessUtility(Node *parsetree,
/* check that we are the superuser */
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to alter owner")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to alter owner")));
/* get_usesysid raises an error if no such user */
AlterTableOwner(relid,
get_usesysid(stmt->name));
@@ -599,9 +607,9 @@ ProcessUtility(Node *parsetree,
case 'L': /* CLUSTER ON */
AlterTableClusterOn(relid, stmt->name);
break;
- case 'o': /* ADD OIDS */
+ case 'o': /* ADD OIDS */
AlterTableAlterOids(relid,
- interpretInhOption(stmt->relation->inhOpt),
+ interpretInhOption(stmt->relation->inhOpt),
false);
break;
default: /* oops */
@@ -652,8 +660,8 @@ ProcessUtility(Node *parsetree,
/* check that we are the superuser */
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to alter owner")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to alter owner")));
/* get_usesysid raises an error if no such user */
AlterTypeOwner(stmt->typename,
get_usesysid(stmt->name));
@@ -828,40 +836,39 @@ ProcessUtility(Node *parsetree,
VariableSetStmt *n = (VariableSetStmt *) parsetree;
/*
- * Special cases for special SQL syntax that
- * effectively sets more than one variable per
- * statement.
+ * Special cases for special SQL syntax that effectively
+ * sets more than one variable per statement.
*/
- if (strcmp(n->name, "TRANSACTION")==0)
+ if (strcmp(n->name, "TRANSACTION") == 0)
{
- List *head;
+ List *head;
foreach(head, n->args)
{
- DefElem *item = (DefElem *) lfirst(head);
+ DefElem *item = (DefElem *) lfirst(head);
- if (strcmp(item->defname, "transaction_isolation")==0)
+ if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("transaction_isolation",
- makeList1(item->arg), n->is_local);
- else if (strcmp(item->defname, "transaction_read_only")==0)
+ makeList1(item->arg), n->is_local);
+ else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("transaction_read_only",
- makeList1(item->arg), n->is_local);
+ makeList1(item->arg), n->is_local);
}
}
- else if (strcmp(n->name, "SESSION CHARACTERISTICS")==0)
+ else if (strcmp(n->name, "SESSION CHARACTERISTICS") == 0)
{
- List *head;
+ List *head;
foreach(head, n->args)
{
- DefElem *item = (DefElem *) lfirst(head);
+ DefElem *item = (DefElem *) lfirst(head);
- if (strcmp(item->defname, "transaction_isolation")==0)
+ if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("default_transaction_isolation",
- makeList1(item->arg), n->is_local);
- else if (strcmp(item->defname, "transaction_read_only")==0)
+ makeList1(item->arg), n->is_local);
+ else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("default_transaction_read_only",
- makeList1(item->arg), n->is_local);
+ makeList1(item->arg), n->is_local);
}
}
else
@@ -1046,14 +1053,14 @@ UtilityReturnsTuples(Node *parsetree)
{
case T_FetchStmt:
{
- FetchStmt *stmt = (FetchStmt *) parsetree;
- Portal portal;
+ FetchStmt *stmt = (FetchStmt *) parsetree;
+ Portal portal;
if (stmt->ismove)
return false;
portal = GetPortalByName(stmt->portalname);
if (!PortalIsValid(portal))
- return false; /* not our business to raise error */
+ return false; /* not our business to raise error */
return portal->tupDesc ? true : false;
}
@@ -1066,7 +1073,7 @@ UtilityReturnsTuples(Node *parsetree)
return false;
entry = FetchPreparedStatement(stmt->name, false);
if (!entry)
- return false; /* not our business to raise error */
+ return false; /* not our business to raise error */
switch (ChoosePortalStrategy(entry->query_list))
{
case PORTAL_ONE_SELECT:
@@ -1106,14 +1113,14 @@ UtilityTupleDescriptor(Node *parsetree)
{
case T_FetchStmt:
{
- FetchStmt *stmt = (FetchStmt *) parsetree;
- Portal portal;
+ FetchStmt *stmt = (FetchStmt *) parsetree;
+ Portal portal;
if (stmt->ismove)
return NULL;
portal = GetPortalByName(stmt->portalname);
if (!PortalIsValid(portal))
- return NULL; /* not our business to raise error */
+ return NULL; /* not our business to raise error */
return CreateTupleDescCopy(portal->tupDesc);
}
@@ -1126,7 +1133,7 @@ UtilityTupleDescriptor(Node *parsetree)
return NULL;
entry = FetchPreparedStatement(stmt->name, false);
if (!entry)
- return NULL; /* not our business to raise error */
+ return NULL; /* not our business to raise error */
return FetchPreparedStatementResultDesc(entry);
}
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 8c5f64a8ed2..57a3db1dd2a 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.92 2003/07/27 04:53:02 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.93 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@ static Acl *allocacl(int n);
static const char *aclparse(const char *s, AclItem *aip);
static bool aclitem_match(const AclItem *a1, const AclItem *a2);
static Acl *recursive_revoke(Acl *acl, AclId grantee,
- AclMode revoke_privs, DropBehavior behavior);
+ AclMode revoke_privs, DropBehavior behavior);
static AclMode convert_priv_string(text *priv_type_text);
@@ -77,24 +77,22 @@ getid(const char *s, char *n)
/* This test had better match what putid() does, below */
for (;
*s != '\0' &&
- (isalnum((unsigned char) *s) ||
- *s == '_' ||
- *s == '"' ||
- in_quotes);
+ (isalnum((unsigned char) *s) ||
+ *s == '_' ||
+ *s == '"' ||
+ in_quotes);
s++)
{
if (*s == '"')
- {
in_quotes = !in_quotes;
- }
else
{
- if (len >= NAMEDATALEN-1)
+ if (len >= NAMEDATALEN - 1)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("identifier too long"),
- errdetail("Identifier must be less than %d characters.",
- NAMEDATALEN)));
+ errdetail("Identifier must be less than %d characters.",
+ NAMEDATALEN)));
n[len++] = *s;
}
@@ -107,13 +105,13 @@ getid(const char *s, char *n)
/*
* Write a user or group Name at *p, surrounding it with double quotes if
- * needed. There must be at least NAMEDATALEN+2 bytes available at *p.
+ * needed. There must be at least NAMEDATALEN+2 bytes available at *p.
*/
static void
putid(char *p, const char *s)
{
const char *src;
- bool safe = true;
+ bool safe = true;
for (src = s; *src; src++)
{
@@ -153,7 +151,9 @@ putid(char *p, const char *s)
static const char *
aclparse(const char *s, AclItem *aip)
{
- AclMode privs, goption, read;
+ AclMode privs,
+ goption,
+ read;
uint32 idtype;
char name[NAMEDATALEN];
char name2[NAMEDATALEN];
@@ -174,13 +174,13 @@ aclparse(const char *s, AclItem *aip)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("unrecognized keyword: \"%s\"", name),
- errhint("ACL keyword must be \"group\" or \"user\".")));
+ errhint("ACL keyword must be \"group\" or \"user\".")));
s = getid(s, name); /* move s to the name beyond the keyword */
if (name[0] == '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("missing name"),
- errhint("A name must follow the [group|user] keyword.")));
+ errhint("A name must follow the [group|user] keyword.")));
}
if (name[0] == '\0')
idtype = ACL_IDTYPE_WORLD;
@@ -192,7 +192,7 @@ aclparse(const char *s, AclItem *aip)
privs = goption = ACL_NO_RIGHTS;
- for (++s, read=0; isalpha((unsigned char) *s) || *s == '*'; s++)
+ for (++s, read = 0; isalpha((unsigned char) *s) || *s == '*'; s++)
{
switch (*s)
{
@@ -235,8 +235,8 @@ aclparse(const char *s, AclItem *aip)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid mode character: must be one of \"%s\"",
- ACL_ALL_RIGHTS_STR)));
+ errmsg("invalid mode character: must be one of \"%s\"",
+ ACL_ALL_RIGHTS_STR)));
}
privs |= read;
@@ -255,8 +255,10 @@ aclparse(const char *s, AclItem *aip)
break;
}
- /* XXX Allow a degree of backward compatibility by defaulting the
- * grantor to the superuser. */
+ /*
+ * XXX Allow a degree of backward compatibility by defaulting the
+ * grantor to the superuser.
+ */
if (*s == '/')
{
s = getid(s + 1, name2);
@@ -331,7 +333,7 @@ aclitemin(PG_FUNCTION_ARGS)
if (*s)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("extra garbage at the end of the ACL specification")));
+ errmsg("extra garbage at the end of the ACL specification")));
PG_RETURN_ACLITEM_P(aip);
}
@@ -356,7 +358,7 @@ aclitemout(PG_FUNCTION_ARGS)
out = palloc(strlen("group =/") +
2 * N_ACL_RIGHTS +
- 2 * (NAMEDATALEN+2) +
+ 2 * (NAMEDATALEN + 2) +
1);
p = out;
@@ -454,8 +456,8 @@ aclitem_match(const AclItem *a1, const AclItem *a2)
Datum
aclitem_eq(PG_FUNCTION_ARGS)
{
- AclItem *a1 = PG_GETARG_ACLITEM_P(0);
- AclItem *a2 = PG_GETARG_ACLITEM_P(1);
+ AclItem *a1 = PG_GETARG_ACLITEM_P(0);
+ AclItem *a2 = PG_GETARG_ACLITEM_P(1);
bool result;
result = a1->ai_privs == a2->ai_privs &&
@@ -510,7 +512,7 @@ acldefault(GrantObjectType objtype, AclId ownerid)
}
acl = allocacl((world_default != ACL_NO_RIGHTS ? 1 : 0)
- + (ownerid ? 1 : 0));
+ + (ownerid ? 1 : 0));
aip = ACL_DAT(acl);
if (world_default != ACL_NO_RIGHTS)
@@ -522,7 +524,7 @@ acldefault(GrantObjectType objtype, AclId ownerid)
if (ownerid)
{
- int index = (world_default != ACL_NO_RIGHTS ? 1: 0);
+ int index = (world_default != ACL_NO_RIGHTS ? 1 : 0);
aip[index].ai_grantee = ownerid;
aip[index].ai_grantor = ownerid;
@@ -563,10 +565,10 @@ aclinsert3(const Acl *old_acl, const AclItem *mod_aip, unsigned modechg, DropBeh
old_aip = ACL_DAT(old_acl);
/*
- * Search the ACL for an existing entry for this grantee and
- * grantor. If one exists, just modify the entry in-place (well,
- * in the same position, since we actually return a copy);
- * otherwise, insert the new entry at the end.
+ * Search the ACL for an existing entry for this grantee and grantor.
+ * If one exists, just modify the entry in-place (well, in the same
+ * position, since we actually return a copy); otherwise, insert the
+ * new entry at the end.
*/
for (dst = 0; dst < num; ++dst)
@@ -652,17 +654,17 @@ recursive_revoke(Acl *acl,
AclMode revoke_privs,
DropBehavior behavior)
{
- int i;
+ int i;
restart:
for (i = 0; i < ACL_NUM(acl); i++)
{
- AclItem *aip = ACL_DAT(acl);
+ AclItem *aip = ACL_DAT(acl);
if (aip[i].ai_grantor == grantee
&& (ACLITEM_GET_PRIVS(aip[i]) & revoke_privs) != 0)
{
- AclItem mod_acl;
+ AclItem mod_acl;
if (behavior == DROP_RESTRICT)
ereport(ERROR,
@@ -727,7 +729,7 @@ aclremove(PG_FUNCTION_ARGS)
for (dst = 0;
dst < old_num && !aclitem_match(mod_aip, old_aip + dst);
++dst)
- /* continue */ ;
+ /* continue */ ;
if (dst >= old_num)
{
@@ -797,15 +799,17 @@ makeaclitem(PG_FUNCTION_ARGS)
int32 grantor = PG_GETARG_INT32(2);
text *privtext = PG_GETARG_TEXT_P(3);
bool goption = PG_GETARG_BOOL(4);
- AclItem *aclitem;
+ AclItem *aclitem;
AclMode priv;
priv = convert_priv_string(privtext);
aclitem = (AclItem *) palloc(sizeof(*aclitem));
+
if (u_grantee == 0 && g_grantee == 0)
{
- aclitem->ai_grantee = 0;
+ aclitem ->ai_grantee = 0;
+
ACLITEM_SET_IDTYPE(*aclitem, ACL_IDTYPE_WORLD);
}
else if (u_grantee != 0 && g_grantee != 0)
@@ -816,16 +820,19 @@ makeaclitem(PG_FUNCTION_ARGS)
}
else if (u_grantee != 0)
{
- aclitem->ai_grantee = u_grantee;
+ aclitem ->ai_grantee = u_grantee;
+
ACLITEM_SET_IDTYPE(*aclitem, ACL_IDTYPE_UID);
}
else if (g_grantee != 0)
{
- aclitem->ai_grantee = g_grantee;
+ aclitem ->ai_grantee = g_grantee;
+
ACLITEM_SET_IDTYPE(*aclitem, ACL_IDTYPE_GID);
}
- aclitem->ai_grantor = grantor;
+ aclitem ->ai_grantor = grantor;
+
ACLITEM_SET_PRIVS(*aclitem, priv);
if (goption)
ACLITEM_SET_GOPTIONS(*aclitem, priv);
@@ -841,7 +848,7 @@ convert_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
if (strcasecmp(priv_type, "SELECT") == 0)
return ACL_SELECT;
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index 7a9e89088a1..a32ea871b58 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.6 2003/07/27 04:53:02 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.7 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -81,7 +81,8 @@ array_push(PG_FUNCTION_ARGS)
if (arg0_elemid != InvalidOid)
{
/* append newelem */
- int ub = dimv[0] + lb[0] - 1;
+ int ub = dimv[0] + lb[0] - 1;
+
indx = ub + 1;
}
else
@@ -105,7 +106,7 @@ array_push(PG_FUNCTION_ARGS)
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -138,11 +139,23 @@ array_push(PG_FUNCTION_ARGS)
Datum
array_cat(PG_FUNCTION_ARGS)
{
- ArrayType *v1, *v2;
- int *dims, *lbs, ndims, ndatabytes, nbytes;
- int *dims1, *lbs1, ndims1, ndatabytes1;
- int *dims2, *lbs2, ndims2, ndatabytes2;
- char *dat1, *dat2;
+ ArrayType *v1,
+ *v2;
+ int *dims,
+ *lbs,
+ ndims,
+ ndatabytes,
+ nbytes;
+ int *dims1,
+ *lbs1,
+ ndims1,
+ ndatabytes1;
+ int *dims2,
+ *lbs2,
+ ndims2,
+ ndatabytes2;
+ char *dat1,
+ *dat2;
Oid element_type;
Oid element_type1;
Oid element_type2;
@@ -152,12 +165,10 @@ array_cat(PG_FUNCTION_ARGS)
v2 = PG_GETARG_ARRAYTYPE_P(1);
/*
- * We must have one of the following combinations of inputs:
- * 1) one empty array, and one non-empty array
- * 2) both arrays empty
- * 3) two arrays with ndims1 == ndims2
- * 4) ndims1 == ndims2 - 1
- * 5) ndims1 == ndims2 + 1
+ * We must have one of the following combinations of inputs: 1) one
+ * empty array, and one non-empty array 2) both arrays empty 3) two
+ * arrays with ndims1 == ndims2 4) ndims1 == ndims2 - 1 5) ndims1 ==
+ * ndims2 + 1
*/
ndims1 = ARR_NDIM(v1);
ndims2 = ARR_NDIM(v2);
@@ -180,8 +191,8 @@ array_cat(PG_FUNCTION_ARGS)
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
errdetail("Arrays of %d and %d dimensions are not "
- "compatible for concatenation.",
- ndims1, ndims2)));
+ "compatible for concatenation.",
+ ndims1, ndims2)));
element_type1 = ARR_ELEMTYPE(v1);
element_type2 = ARR_ELEMTYPE(v2);
@@ -192,7 +203,7 @@ array_cat(PG_FUNCTION_ARGS)
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot concatenate incompatible arrays"),
errdetail("Arrays with element types %s and %s are not "
- "compatible for concatenation.",
+ "compatible for concatenation.",
format_type_be(element_type1),
format_type_be(element_type2))));
@@ -215,14 +226,14 @@ array_cat(PG_FUNCTION_ARGS)
* resulting array has two element outer array made up of input
* argument arrays
*/
- int i;
+ int i;
ndims = ndims1 + 1;
dims = (int *) palloc(ndims * sizeof(int));
lbs = (int *) palloc(ndims * sizeof(int));
- dims[0] = 2; /* outer array made up of two input arrays */
- lbs[0] = 1; /* start lower bound at 1 */
+ dims[0] = 2; /* outer array made up of two input arrays */
+ lbs[0] = 1; /* start lower bound at 1 */
for (i = 0; i < ndims1; i++)
{
@@ -230,8 +241,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
dims[i + 1] = dims1[i];
lbs[i + 1] = lbs1[i];
@@ -244,7 +255,7 @@ array_cat(PG_FUNCTION_ARGS)
* with the first argument appended to the front of the outer
* dimension
*/
- int i;
+ int i;
ndims = ndims2;
dims = dims2;
@@ -260,18 +271,18 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
- else /* (ndims1 == ndims2 + 1) */
+ else
+/* (ndims1 == ndims2 + 1) */
{
/*
- * resulting array has the first argument as the outer array,
- * with the second argument appended to the end of the outer
- * dimension
+ * resulting array has the first argument as the outer array, with
+ * the second argument appended to the end of the outer dimension
*/
- int i;
+ int i;
ndims = ndims1;
dims = dims1;
@@ -287,8 +298,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
@@ -320,13 +331,13 @@ create_singleton_array(FunctionCallInfo fcinfo,
Datum element,
int ndims)
{
- Datum dvalues[1];
- int16 typlen;
- bool typbyval;
- char typalign;
- int dims[MAXDIM];
- int lbs[MAXDIM];
- int i;
+ Datum dvalues[1];
+ int16 typlen;
+ bool typbyval;
+ char typalign;
+ int dims[MAXDIM];
+ int lbs[MAXDIM];
+ int i;
ArrayMetaState *my_extra;
if (element_type == 0)
@@ -359,7 +370,7 @@ create_singleton_array(FunctionCallInfo fcinfo,
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 04e7f0f2515..128a69f9d39 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.94 2003/07/27 04:53:02 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.95 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,9 +79,9 @@ static Datum *ReadArrayStr(char *arrayStr, int nitems, int ndim, int *dim,
int typlen, bool typbyval, char typalign,
int *nbytes);
static Datum *ReadArrayBinary(StringInfo buf, int nitems,
- FmgrInfo *receiveproc, Oid typelem,
- int typlen, bool typbyval, char typalign,
- int *nbytes);
+ FmgrInfo *receiveproc, Oid typelem,
+ int typlen, bool typbyval, char typalign,
+ int *nbytes);
static void CopyArrayEls(char *p, Datum *values, int nitems,
int typlen, bool typbyval, char typalign,
bool freedata);
@@ -107,7 +107,7 @@ static void array_insert_slice(int ndim, int *dim, int *lb,
char *destPtr,
int *st, int *endp, char *srcPtr,
int typlen, bool typbyval, char typalign);
-static int array_cmp(FunctionCallInfo fcinfo);
+static int array_cmp(FunctionCallInfo fcinfo);
/*---------------------------------------------------------------------
* array_in :
@@ -144,21 +144,24 @@ array_in(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its input
- * conversion proc, only once per series of calls, assuming the element
- * type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the
+ * element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
if (my_extra->element_type != element_type)
{
- /* Get info about element type, including its input conversion proc */
+ /*
+ * Get info about element type, including its input conversion
+ * proc
+ */
get_type_io_data(element_type, IOFunc_input,
&my_extra->typlen, &my_extra->typbyval,
&my_extra->typalign, &my_extra->typdelim,
@@ -242,7 +245,7 @@ array_in(PG_FUNCTION_ARGS)
if (ub < lBound[ndim])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
dim[ndim] = ub - lBound[ndim] + 1;
ndim++;
@@ -351,7 +354,7 @@ ArrayCount(char *str, int *dim, char typdelim)
/* Signal a premature end of the string */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\\':
/* skip the escaped character */
@@ -359,8 +362,8 @@ ArrayCount(char *str, int *dim, char typdelim)
ptr++;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\"':
scanning_string = !scanning_string;
@@ -370,9 +373,9 @@ ArrayCount(char *str, int *dim, char typdelim)
{
if (nest_level >= MAXDIM)
ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions exceeds the maximum allowed, %d",
- MAXDIM)));
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of array dimensions exceeds the maximum allowed, %d",
+ MAXDIM)));
temp[nest_level] = 0;
nest_level++;
if (ndim < nest_level)
@@ -384,8 +387,8 @@ ArrayCount(char *str, int *dim, char typdelim)
{
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
nest_level--;
if (nest_level == 0)
eoArray = itemdone = true;
@@ -479,7 +482,7 @@ ReadArrayStr(char *arrayStr,
/* Signal a premature end of the string */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", arrayStr)));
+ errmsg("malformed array literal: \"%s\"", arrayStr)));
break;
case '\\':
{
@@ -490,8 +493,8 @@ ReadArrayStr(char *arrayStr,
*cptr = *(cptr + 1);
if (*ptr == '\0')
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", arrayStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", arrayStr)));
break;
}
case '\"':
@@ -511,8 +514,8 @@ ReadArrayStr(char *arrayStr,
{
if (nest_level >= ndim)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", arrayStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", arrayStr)));
nest_level++;
indx[nest_level - 1] = 0;
/* skip leading whitespace */
@@ -526,8 +529,8 @@ ReadArrayStr(char *arrayStr,
{
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", arrayStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", arrayStr)));
if (i == -1)
i = ArrayGetOffset0(ndim, indx, prod);
indx[nest_level - 1] = 0;
@@ -565,7 +568,7 @@ ReadArrayStr(char *arrayStr,
if (i < 0 || i >= nitems)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", arrayStr)));
+ errmsg("malformed array literal: \"%s\"", arrayStr)));
values[i] = FunctionCall3(inputproc,
CStringGetDatum(itemstart),
@@ -693,21 +696,24 @@ array_out(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the element
- * type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the
+ * element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
if (my_extra->element_type != element_type)
{
- /* Get info about element type, including its output conversion proc */
+ /*
+ * Get info about element type, including its output conversion
+ * proc
+ */
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
&my_extra->typalign, &my_extra->typdelim,
@@ -922,15 +928,15 @@ array_recv(PG_FUNCTION_ARGS)
}
/*
- * We arrange to look up info about element type, including its receive
- * conversion proc, only once per series of calls, assuming the element
- * type doesn't change underneath us.
+ * We arrange to look up info about element type, including its
+ * receive conversion proc, only once per series of calls, assuming
+ * the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -945,8 +951,8 @@ array_recv(PG_FUNCTION_ARGS)
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary input function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary input function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
@@ -1004,9 +1010,9 @@ ReadArrayBinary(StringInfo buf,
for (i = 0; i < nitems; i++)
{
- int itemlen;
+ int itemlen;
StringInfoData elem_buf;
- char csave;
+ char csave;
/* Get and check the item length */
itemlen = pq_getmsgint(buf, 4);
@@ -1017,10 +1023,9 @@ ReadArrayBinary(StringInfo buf,
/*
* Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input
- * buffer. We assume we can scribble on the input buffer
- * so as to maintain the convention that StringInfos have
- * a trailing null.
+ * StringInfo pointing to the correct portion of the input buffer.
+ * We assume we can scribble on the input buffer so as to maintain
+ * the convention that StringInfos have a trailing null.
*/
elem_buf.data = &buf->data[buf->cursor];
elem_buf.maxlen = itemlen + 1;
@@ -1042,7 +1047,7 @@ ReadArrayBinary(StringInfo buf,
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("improper binary format in array element %d",
- i + 1)));
+ i + 1)));
buf->data[buf->cursor] = csave;
}
@@ -1051,9 +1056,7 @@ ReadArrayBinary(StringInfo buf,
* Compute total data space needed
*/
if (typlen > 0)
- {
*nbytes = nitems * att_align(typlen, typalign);
- }
else
{
Assert(!typbyval);
@@ -1100,14 +1103,14 @@ array_send(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its send
- * conversion proc, only once per series of calls, assuming the element
- * type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the
+ * element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -1122,8 +1125,8 @@ array_send(PG_FUNCTION_ARGS)
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary output function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary output function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
@@ -1160,7 +1163,7 @@ array_send(PG_FUNCTION_ARGS)
outputbytes = DatumGetByteaP(FunctionCall2(&my_extra->proc,
itemvalue,
- ObjectIdGetDatum(typelem)));
+ ObjectIdGetDatum(typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -1187,10 +1190,11 @@ array_length_coerce(PG_FUNCTION_ARGS)
int32 len = PG_GETARG_INT32(1);
bool isExplicit = PG_GETARG_BOOL(2);
FmgrInfo *fmgr_info = fcinfo->flinfo;
- typedef struct {
+ typedef struct
+ {
Oid elemtype;
FmgrInfo coerce_finfo;
- } alc_extra;
+ } alc_extra;
alc_extra *my_extra;
FunctionCallInfoData locfcinfo;
@@ -1471,7 +1475,7 @@ array_get_slice(ArrayType *array,
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("slices of fixed-length arrays not implemented")));
+ errmsg("slices of fixed-length arrays not implemented")));
/*
* fixed-length arrays -- these are assumed to be 1-d, 0-based XXX
@@ -1634,12 +1638,12 @@ array_set(ArrayType *array,
/*
* if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the lower bounds to the supplied
- * subscripts
+ * with nSubscripts dimensions, and set the lower bounds to the
+ * supplied subscripts
*/
if (ndim == 0)
{
- Oid elmtype = ARR_ELEMTYPE(array);
+ Oid elmtype = ARR_ELEMTYPE(array);
for (i = 0; i < nSubscripts; i++)
{
@@ -1648,7 +1652,7 @@ array_set(ArrayType *array,
}
return construct_md_array(&dataValue, nSubscripts, dim, lb, elmtype,
- elmlen, elmbyval, elmalign);
+ elmlen, elmbyval, elmalign);
}
if (ndim != nSubscripts || ndim <= 0 || ndim > MAXDIM)
@@ -1818,17 +1822,17 @@ array_set_slice(ArrayType *array,
/*
* if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the upper and lower bounds
- * to the supplied subscripts
+ * with nSubscripts dimensions, and set the upper and lower bounds to
+ * the supplied subscripts
*/
if (ndim == 0)
{
- Datum *dvalues;
- int nelems;
- Oid elmtype = ARR_ELEMTYPE(array);
+ Datum *dvalues;
+ int nelems;
+ Oid elmtype = ARR_ELEMTYPE(array);
deconstruct_array(srcArray, elmtype, elmlen, elmbyval, elmalign,
- &dvalues, &nelems);
+ &dvalues, &nelems);
for (i = 0; i < nSubscripts; i++)
{
@@ -1837,7 +1841,7 @@ array_set_slice(ArrayType *array,
}
return construct_md_array(dvalues, nSubscripts, dim, lb, elmtype,
- elmlen, elmbyval, elmalign);
+ elmlen, elmbyval, elmalign);
}
if (ndim < nSubscripts || ndim <= 0 || ndim > MAXDIM)
@@ -2028,11 +2032,12 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType)
bool typbyval;
char typalign;
char *s;
- typedef struct {
+ typedef struct
+ {
ArrayMetaState inp_extra;
ArrayMetaState ret_extra;
- } am_extra;
- am_extra *my_extra;
+ } am_extra;
+ am_extra *my_extra;
ArrayMetaState *inp_extra;
ArrayMetaState *ret_extra;
@@ -2054,9 +2059,9 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType)
PG_RETURN_ARRAYTYPE_P(v);
/*
- * We arrange to look up info about input and return element types only
- * once per series of calls, assuming the element type doesn't change
- * underneath us.
+ * We arrange to look up info about input and return element types
+ * only once per series of calls, assuming the element type doesn't
+ * change underneath us.
*/
my_extra = (am_extra *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
@@ -2179,8 +2184,8 @@ construct_array(Datum *elems, int nelems,
Oid elmtype,
int elmlen, bool elmbyval, char elmalign)
{
- int dims[1];
- int lbs[1];
+ int dims[1];
+ int lbs[1];
dims[0] = nelems;
lbs[0] = 1;
@@ -2364,7 +2369,7 @@ array_eq(PG_FUNCTION_ARGS)
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/* fast path if the arrays do not have the same number of elements */
if (nitems1 != nitems2)
@@ -2372,21 +2377,22 @@ array_eq(PG_FUNCTION_ARGS)
else
{
/*
- * We arrange to look up the equality function only once per series of
- * calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up the equality function only once per
+ * series of calls, assuming the element type doesn't change
+ * underneath us.
*/
my_extra = (ArrayMetaState *) ae_fmgr_info->fn_extra;
if (my_extra == NULL)
{
ae_fmgr_info->fn_extra = MemoryContextAlloc(ae_fmgr_info->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) ae_fmgr_info->fn_extra;
my_extra->element_type = InvalidOid;
}
if (my_extra->element_type != element_type)
{
- Oid opfuncid = equality_oper_funcid(element_type);
+ Oid opfuncid = equality_oper_funcid(element_type);
get_typlenbyvalalign(element_type,
&my_extra->typlen,
@@ -2410,9 +2416,9 @@ array_eq(PG_FUNCTION_ARGS)
/* Loop over source data */
for (i = 0; i < nitems1; i++)
{
- Datum elt1;
- Datum elt2;
- bool oprresult;
+ Datum elt1;
+ Datum elt2;
+ bool oprresult;
/* Get element pair */
elt1 = fetch_att(p1, typbyval, typlen);
@@ -2519,20 +2525,20 @@ array_cmp(FunctionCallInfo fcinfo)
int i;
typedef struct
{
- Oid element_type;
- int16 typlen;
- bool typbyval;
- char typalign;
- FmgrInfo eqproc;
- FmgrInfo ordproc;
- } ac_extra;
- ac_extra *my_extra;
+ Oid element_type;
+ int16 typlen;
+ bool typbyval;
+ char typalign;
+ FmgrInfo eqproc;
+ FmgrInfo ordproc;
+ } ac_extra;
+ ac_extra *my_extra;
element_type = ARR_ELEMTYPE(array1);
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/*
* We arrange to look up the element type info and related functions
@@ -2550,8 +2556,8 @@ array_cmp(FunctionCallInfo fcinfo)
if (my_extra->element_type != element_type)
{
- Oid eqfuncid = equality_oper_funcid(element_type);
- Oid ordfuncid = ordering_oper_funcid(element_type);
+ Oid eqfuncid = equality_oper_funcid(element_type);
+ Oid ordfuncid = ordering_oper_funcid(element_type);
get_typlenbyvalalign(element_type,
&my_extra->typlen,
@@ -2569,10 +2575,10 @@ array_cmp(FunctionCallInfo fcinfo)
/* extract a C array of arg array datums */
deconstruct_array(array1, element_type, typlen, typbyval, typalign,
- &dvalues1, &nelems1);
+ &dvalues1, &nelems1);
deconstruct_array(array2, element_type, typlen, typbyval, typalign,
- &dvalues2, &nelems2);
+ &dvalues2, &nelems2);
min_nelems = Min(nelems1, nelems2);
for (i = 0; i < min_nelems; i++)
@@ -2875,7 +2881,7 @@ array_insert_slice(int ndim,
/*
* array_type_coerce -- allow explicit or assignment coercion from
* one array type to another.
- *
+ *
* Caller should have already verified that the source element type can be
* coerced into the target element type.
*/
@@ -2885,11 +2891,12 @@ array_type_coerce(PG_FUNCTION_ARGS)
ArrayType *src = PG_GETARG_ARRAYTYPE_P(0);
Oid src_elem_type = ARR_ELEMTYPE(src);
FmgrInfo *fmgr_info = fcinfo->flinfo;
- typedef struct {
+ typedef struct
+ {
Oid srctype;
Oid desttype;
FmgrInfo coerce_finfo;
- } atc_extra;
+ } atc_extra;
atc_extra *my_extra;
FunctionCallInfoData locfcinfo;
@@ -2925,12 +2932,11 @@ array_type_coerce(PG_FUNCTION_ARGS)
errmsg("target type is not an array")));
/*
- * We don't deal with domain constraints yet, so bail out.
- * This isn't currently a problem, because we also don't
- * support arrays of domain type elements either. But in the
- * future we might. At that point consideration should be given
- * to removing the check below and adding a domain constraints
- * check to the coercion.
+ * We don't deal with domain constraints yet, so bail out. This
+ * isn't currently a problem, because we also don't support arrays
+ * of domain type elements either. But in the future we might. At
+ * that point consideration should be given to removing the check
+ * below and adding a domain constraints check to the coercion.
*/
if (getBaseType(tgt_elem_type) != tgt_elem_type)
ereport(ERROR,
@@ -2943,7 +2949,7 @@ array_type_coerce(PG_FUNCTION_ARGS)
{
/* should never happen, but check anyway */
elog(ERROR, "no conversion function from %s to %s",
- format_type_be(src_elem_type), format_type_be(tgt_elem_type));
+ format_type_be(src_elem_type), format_type_be(tgt_elem_type));
}
if (OidIsValid(funcId))
fmgr_info_cxt(funcId, &my_extra->coerce_finfo, fmgr_info->fn_mcxt);
@@ -2954,13 +2960,13 @@ array_type_coerce(PG_FUNCTION_ARGS)
}
/*
- * If it's binary-compatible, modify the element type in the array header,
- * but otherwise leave the array as we received it.
+ * If it's binary-compatible, modify the element type in the array
+ * header, but otherwise leave the array as we received it.
*/
if (my_extra->coerce_finfo.fn_oid == InvalidOid)
{
ArrayType *result = DatumGetArrayTypePCopy(PG_GETARG_DATUM(0));
-
+
ARR_ELEMTYPE(result) = my_extra->desttype;
PG_RETURN_ARRAYTYPE_P(result);
}
@@ -2983,13 +2989,13 @@ array_type_coerce(PG_FUNCTION_ARGS)
* rcontext is where to keep working state
*/
ArrayBuildState *
-accumArrayResult(ArrayBuildState *astate,
+accumArrayResult(ArrayBuildState * astate,
Datum dvalue, bool disnull,
Oid element_type,
MemoryContext rcontext)
{
MemoryContext arr_context,
- oldcontext;
+ oldcontext;
if (astate == NULL)
{
@@ -3021,7 +3027,7 @@ accumArrayResult(ArrayBuildState *astate,
if ((astate->nelems % ARRAY_ELEMS_CHUNKSIZE) == 0)
astate->dvalues = (Datum *)
repalloc(astate->dvalues,
- (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
+ (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
}
if (disnull)
@@ -3045,7 +3051,7 @@ accumArrayResult(ArrayBuildState *astate,
* rcontext is where to construct result
*/
Datum
-makeArrayResult(ArrayBuildState *astate,
+makeArrayResult(ArrayBuildState * astate,
MemoryContext rcontext)
{
int dims[1];
@@ -3067,7 +3073,7 @@ makeArrayResult(ArrayBuildState *astate,
* rcontext is where to construct result
*/
Datum
-makeMdArrayResult(ArrayBuildState *astate,
+makeMdArrayResult(ArrayBuildState * astate,
int ndims,
int *dims,
int *lbs,
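The array I/O and comparison functions touched above (array_out, array_recv, array_send, array_eq, array_cmp, array_map, array_type_coerce) all share the caching idiom the reflowed comments describe: per-element-type lookup results are kept in fcinfo->flinfo->fn_extra, allocated in fn_mcxt so they survive across calls within one query, and redone only when the element type changes. A minimal sketch of that pattern follows; the struct and function names are illustrative, and only MemoryContextAlloc, fmgr_info_cxt/get_type_io_data and the fn_extra/fn_mcxt fields are taken from the code above.

typedef struct MyArrayCacheState
{
	Oid			element_type;	/* element type the cached info is for */
	FmgrInfo	proc;			/* cached support-function lookup */
} MyArrayCacheState;

Datum
my_array_func(PG_FUNCTION_ARGS)
{
	Oid			element_type = ARR_ELEMTYPE(PG_GETARG_ARRAYTYPE_P(0));
	MyArrayCacheState *my_extra;

	my_extra = (MyArrayCacheState *) fcinfo->flinfo->fn_extra;
	if (my_extra == NULL)
	{
		/*
		 * First call in this query: allocate the cache in the long-lived
		 * per-function memory context and mark it as not yet filled.
		 */
		fcinfo->flinfo->fn_extra =
			MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
							   sizeof(MyArrayCacheState));
		my_extra = (MyArrayCacheState *) fcinfo->flinfo->fn_extra;
		my_extra->element_type = InvalidOid;
	}
	if (my_extra->element_type != element_type)
	{
		/*
		 * Element type changed (or cache is empty): redo the catalog
		 * lookups here, e.g. get_type_io_data() + fmgr_info_cxt(), and
		 * remember which type they were done for.
		 */
		my_extra->element_type = element_type;
	}

	/* ... use my_extra->proc for every element ... */
	PG_RETURN_NULL();			/* placeholder result for the sketch */
}

The extra element_type comparison is what lets the same FmgrInfo be reused when the function is later called on an array of a different element type.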
diff --git a/src/backend/utils/adt/ascii.c b/src/backend/utils/adt/ascii.c
index fe7d6ba74ff..adb03d3d0ed 100644
--- a/src/backend/utils/adt/ascii.c
+++ b/src/backend/utils/adt/ascii.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1999-2002, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ascii.c,v 1.16 2003/07/27 04:53:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ascii.c,v 1.17 2003/08/04 00:43:25 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -16,7 +16,7 @@
#include "utils/ascii.h"
static void pg_to_ascii(unsigned char *src, unsigned char *src_end,
- unsigned char *dest, int enc);
+ unsigned char *dest, int enc);
static text *encode_to_ascii(text *data, int enc);
@@ -65,8 +65,8 @@ pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *dest, int
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unsupported encoding conversion from %s to ASCII",
- pg_encoding_to_char(enc))));
+ errmsg("unsupported encoding conversion from %s to ASCII",
+ pg_encoding_to_char(enc))));
return; /* keep compiler quiet */
}
@@ -95,7 +95,7 @@ static text *
encode_to_ascii(text *data, int enc)
{
pg_to_ascii((unsigned char *) VARDATA(data), /* src */
- (unsigned char *) (data) + VARSIZE(data), /* src end */
+ (unsigned char *) (data) + VARSIZE(data), /* src end */
(unsigned char *) VARDATA(data), /* dest */
enc); /* encoding */
@@ -109,8 +109,8 @@ encode_to_ascii(text *data, int enc)
Datum
to_ascii_encname(PG_FUNCTION_ARGS)
{
- text *data = PG_GETARG_TEXT_P_COPY(0);
- int enc = pg_char_to_encoding(NameStr(*PG_GETARG_NAME(1)));
+ text *data = PG_GETARG_TEXT_P_COPY(0);
+ int enc = pg_char_to_encoding(NameStr(*PG_GETARG_NAME(1)));
PG_RETURN_TEXT_P(encode_to_ascii(data, enc));
}
@@ -122,8 +122,8 @@ to_ascii_encname(PG_FUNCTION_ARGS)
Datum
to_ascii_enc(PG_FUNCTION_ARGS)
{
- text *data = PG_GETARG_TEXT_P_COPY(0);
- int enc = PG_GETARG_INT32(1);
+ text *data = PG_GETARG_TEXT_P_COPY(0);
+ int enc = PG_GETARG_INT32(1);
PG_RETURN_TEXT_P(encode_to_ascii(data, enc));
}
@@ -135,8 +135,8 @@ to_ascii_enc(PG_FUNCTION_ARGS)
Datum
to_ascii_default(PG_FUNCTION_ARGS)
{
- text *data = PG_GETARG_TEXT_P_COPY(0);
- int enc = GetDatabaseEncoding();
+ text *data = PG_GETARG_TEXT_P_COPY(0);
+ int enc = GetDatabaseEncoding();
PG_RETURN_TEXT_P(encode_to_ascii(data, enc));
}
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 5bf3a025d1c..78a702a13ec 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/char.c,v 1.36 2003/07/27 04:53:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/char.c,v 1.37 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index ed449e524dd..b6d1b83d791 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/date.c,v 1.87 2003/07/27 04:53:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/date.c,v 1.88 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,7 +97,7 @@ date_in(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for date: \"%s\"", str)));
+ errmsg("invalid input syntax for date: \"%s\"", str)));
}
date = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE;
@@ -465,7 +465,7 @@ abstime_date(PG_FUNCTION_ARGS)
case NOEND_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reserved abstime value to date")));
+ errmsg("cannot convert reserved abstime value to date")));
/*
* pretend to drop through to make compiler think that result
@@ -527,7 +527,7 @@ text_date(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for date: \"%s\"",
- VARDATA(str))));
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
@@ -570,7 +570,7 @@ time_in(PG_FUNCTION_ARGS)
errmsg("invalid input syntax for time: \"%s\"", str)));
if ((ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf) != 0)
- || (DecodeTimeOnly(field, ftype, nf, &dtype, tm, &fsec, &tz) != 0))
+ || (DecodeTimeOnly(field, ftype, nf, &dtype, tm, &fsec, &tz) != 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for time: \"%s\"", str)));
@@ -749,9 +749,9 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement round-to-nearest-even,
- * but the integer code always rounds up (away from zero). Is it
- * worth trying to be consistent?
+ * values. On most platforms, rint() will implement
+ * round-to-nearest-even, but the integer code always rounds up
+ * (away from zero). Is it worth trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
@@ -761,8 +761,8 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
}
else
{
- *time = - ((((- *time) + TimeOffsets[typmod]) / TimeScales[typmod])
- * TimeScales[typmod]);
+ *time = -((((-*time) + TimeOffsets[typmod]) / TimeScales[typmod])
+ * TimeScales[typmod]);
}
#else
*time = (rint(((double) *time) * TimeScales[typmod])
@@ -1093,7 +1093,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field. It is not real clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -1114,7 +1114,7 @@ interval_time(PG_FUNCTION_ARGS)
}
else if (result < 0)
{
- days = (-result + INT64CONST(86400000000-1)) / INT64CONST(86400000000);
+ days = (-result + INT64CONST(86400000000 - 1)) / INT64CONST(86400000000);
result += days * INT64CONST(86400000000);
}
#else
@@ -1256,7 +1256,7 @@ text_time(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for time: \"%s\"",
- VARDATA(str))));
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
@@ -1290,8 +1290,8 @@ time_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIME units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
up = VARDATA(units);
lp = lowunits;
@@ -1360,8 +1360,8 @@ time_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIME units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -1379,8 +1379,8 @@ time_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIME units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -1432,15 +1432,15 @@ timetz_in(PG_FUNCTION_ARGS)
if (strlen(str) >= sizeof(lowstr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for time with time zone: \"%s\"",
- str)));
+ errmsg("invalid input syntax for time with time zone: \"%s\"",
+ str)));
if ((ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf) != 0)
|| (DecodeTimeOnly(field, ftype, nf, &dtype, tm, &fsec, &tz) != 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for time with time zone: \"%s\"",
- str)));
+ errmsg("invalid input syntax for time with time zone: \"%s\"",
+ str)));
result = (TimeTzADT *) palloc(sizeof(TimeTzADT));
tm2timetz(tm, fsec, tz, result);
@@ -2019,8 +2019,8 @@ text_timetz(PG_FUNCTION_ARGS)
if (VARSIZE(str) - VARHDRSZ > MAXDATELEN)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for time with time zone: \"%s\"",
- VARDATA(str))));
+ errmsg("invalid input syntax for time with time zone: \"%s\"",
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
@@ -2054,8 +2054,8 @@ timetz_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIMETZ units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
up = VARDATA(units);
lp = lowunits;
@@ -2138,8 +2138,8 @@ timetz_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIMETZ units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -2157,8 +2157,8 @@ timetz_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("TIMETZ units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -2187,8 +2187,8 @@ timetz_zone(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("time zone \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(zone))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(zone))))));
up = VARDATA(zone);
lp = lowzone;
@@ -2246,8 +2246,8 @@ timetz_izone(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("INTERVAL time zone \"%s\" not legal",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / INT64CONST(1000000));
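The reflowed comment in AdjustTimeForTypmod above points out that the integer-timestamp branch rounds halfway cases away from zero, while rint() on most platforms rounds them to the nearest even value. A standalone illustration of that difference; the scale of 1000 is arbitrary for the example, and taking the offset as scale / 2 (the role TimeOffsets[typmod] plays in the code above) is this sketch's assumption.

#include <stdio.h>
#include <stdint.h>
#include <math.h>

/*
 * Round v to the nearest multiple of scale, halves away from zero --
 * the same shape as the integer branch of AdjustTimeForTypmod above.
 */
static int64_t
round_to_scale(int64_t v, int64_t scale)
{
	int64_t		offset = scale / 2;

	if (v >= 0)
		return ((v + offset) / scale) * scale;
	else
		return -(((-v + offset) / scale) * scale);
}

int
main(void)
{
	/* 1500 is exactly halfway between 1000 and 2000 */
	printf("%lld\n", (long long) round_to_scale(1500, 1000));	/* 2000: away from zero */
	printf("%lld\n", (long long) round_to_scale(-1500, 1000));	/* -2000: away from zero */
	printf("%g\n", rint(1.5));	/* 2: round-half-to-even */
	printf("%g\n", rint(2.5));	/* 2: round-half-to-even */
	return 0;
}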
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index f053c9ebb2c..1b1fd98a1ff 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/datetime.c,v 1.108 2003/07/29 00:03:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/datetime.c,v 1.109 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -575,9 +575,9 @@ static datetkn deltatktbl[] = {
static unsigned int szdeltatktbl = sizeof deltatktbl / sizeof deltatktbl[0];
-static datetkn *datecache[MAXDATEFIELDS] = {NULL};
+static datetkn *datecache[MAXDATEFIELDS] = {NULL};
-static datetkn *deltacache[MAXDATEFIELDS] = {NULL};
+static datetkn *deltacache[MAXDATEFIELDS] = {NULL};
/*
@@ -593,7 +593,7 @@ static datetkn *deltacache[MAXDATEFIELDS] = {NULL};
*
* Rewritten to eliminate overflow problems. This now allows the
* routines to work correctly for all Julian day counts from
- * 0 to 2147483647 (Nov 24, -4713 to Jun 3, 5874898) assuming
+ * 0 to 2147483647 (Nov 24, -4713 to Jun 3, 5874898) assuming
* a 32-bit integer. Longer types should also work to the limits
* of their precision.
*/
@@ -604,18 +604,21 @@ date2j(int y, int m, int d)
int julian;
int century;
- if (m > 2) {
+ if (m > 2)
+ {
m += 1;
y += 4800;
- } else {
+ }
+ else
+ {
m += 13;
y += 4799;
}
- century = y/100;
- julian = y*365 - 32167;
- julian += y/4 - century + century/4;
- julian += 7834*m/256 + d;
+ century = y / 100;
+ julian = y * 365 - 32167;
+ julian += y / 4 - century + century / 4;
+ julian += 7834 * m / 256 + d;
return julian;
} /* date2j() */
@@ -623,25 +626,25 @@ date2j(int y, int m, int d)
void
j2date(int jd, int *year, int *month, int *day)
{
- unsigned int julian;
- unsigned int quad;
- unsigned int extra;
+ unsigned int julian;
+ unsigned int quad;
+ unsigned int extra;
int y;
julian = jd;
julian += 32044;
- quad = julian/146097;
- extra = (julian - quad*146097)*4 + 3;
- julian += 60 + quad*3 + extra/146097;
- quad = julian/1461;
- julian -= quad*1461;
+ quad = julian / 146097;
+ extra = (julian - quad * 146097) * 4 + 3;
+ julian += 60 + quad * 3 + extra / 146097;
+ quad = julian / 1461;
+ julian -= quad * 1461;
y = julian * 4 / 1461;
julian = ((y != 0) ? ((julian + 305) % 365) : ((julian + 306) % 366))
+ 123;
- y += quad*4;
+ y += quad * 4;
*year = y - 4800;
quad = julian * 2141 / 65536;
- *day = julian - 7834*quad/256;
+ *day = julian - 7834 * quad / 256;
*month = (quad + 10) % 12 + 1;
return;
@@ -652,7 +655,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -1252,8 +1255,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("\"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("\"current\" is no longer supported")));
return -1;
break;
@@ -1269,7 +1272,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date((date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - 1),
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -1289,7 +1292,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date((date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + 1),
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -1435,8 +1438,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("inconsistent use of year %04d and \"BC\"",
- tm->tm_year)));
+ errmsg("inconsistent use of year %04d and \"BC\"",
+ tm->tm_year)));
}
else if (is2digits)
{
@@ -1994,8 +1997,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("\"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("\"current\" is no longer supported")));
return -1;
break;
@@ -2423,6 +2426,7 @@ DecodeNumber(int flen, char *str, int fmask,
switch (fmask & DTK_DATE_M)
{
case 0:
+
/*
* Nothing so far; make a decision about what we think the
* input is. There used to be lots of heuristics here, but
@@ -2487,9 +2491,7 @@ DecodeNumber(int flen, char *str, int fmask,
* exactly two digits.
*/
if (*tmask == DTK_M(YEAR))
- {
*is2digits = (flen == 2);
- }
return 0;
}
@@ -3300,8 +3302,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here should
- * be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here
+ * should be at least equal to MAX_TIMESTAMP_PRECISION.
*
* In float mode, don't print fractional seconds before 1 AD,
* since it's unlikely there's any precision left ...
@@ -3350,8 +3352,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here should
- * be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here
+ * should be at least equal to MAX_TIMESTAMP_PRECISION.
*
* In float mode, don't print fractional seconds before 1 AD,
* since it's unlikely there's any precision left ...
@@ -3396,8 +3398,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here should
- * be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here
+ * should be at least equal to MAX_TIMESTAMP_PRECISION.
*
* In float mode, don't print fractional seconds before 1 AD,
* since it's unlikely there's any precision left ...
@@ -3450,8 +3452,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
sprintf((str + 10), " %02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here should
- * be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here
+ * should be at least equal to MAX_TIMESTAMP_PRECISION.
*
* In float mode, don't print fractional seconds before 1 AD,
* since it's unlikely there's any precision left ...
@@ -3746,7 +3748,7 @@ ClearDateCache(bool newval, bool doit, bool interactive)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, datetkn *base, unsigned int nel)
@@ -3756,11 +3758,11 @@ CheckDateTokenTable(const char *tablename, datetkn *base, unsigned int nel)
for (i = 1; i < nel; i++)
{
- if (strncmp(base[i-1].token, base[i].token, TOKMAXLEN) >= 0)
+ if (strncmp(base[i - 1].token, base[i].token, TOKMAXLEN) >= 0)
{
elog(LOG, "ordering error in %s table: \"%.*s\" >= \"%.*s\"",
tablename,
- TOKMAXLEN, base[i-1].token,
+ TOKMAXLEN, base[i - 1].token,
TOKMAXLEN, base[i].token);
ok = false;
}
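Since date2j() and j2date() appear in full in the hunks above, the reindented arithmetic is easy to exercise on its own. The test program below copies both routines and checks the lower endpoint quoted in the comment (Julian day 0 is Nov 24, -4713) plus a round trip; the expected value 2451545 for 2000-01-01 follows directly from the formulas.

#include <stdio.h>

/* Copies of the (reindented) date2j()/j2date() from datetime.c above. */
static int
date2j(int y, int m, int d)
{
	int			julian;
	int			century;

	if (m > 2)
	{
		m += 1;
		y += 4800;
	}
	else
	{
		m += 13;
		y += 4799;
	}

	century = y / 100;
	julian = y * 365 - 32167;
	julian += y / 4 - century + century / 4;
	julian += 7834 * m / 256 + d;

	return julian;
}

static void
j2date(int jd, int *year, int *month, int *day)
{
	unsigned int julian;
	unsigned int quad;
	unsigned int extra;
	int			y;

	julian = jd;
	julian += 32044;
	quad = julian / 146097;
	extra = (julian - quad * 146097) * 4 + 3;
	julian += 60 + quad * 3 + extra / 146097;
	quad = julian / 1461;
	julian -= quad * 1461;
	y = julian * 4 / 1461;
	julian = ((y != 0) ? ((julian + 305) % 365) : ((julian + 306) % 366)) + 123;
	y += quad * 4;
	*year = y - 4800;
	quad = julian * 2141 / 65536;
	*day = julian - 7834 * quad / 256;
	*month = (quad + 10) % 12 + 1;
}

int
main(void)
{
	int			y, m, d;

	printf("date2j(2000,1,1)    = %d\n", date2j(2000, 1, 1));		/* 2451545 */
	printf("date2j(-4713,11,24) = %d\n", date2j(-4713, 11, 24));	/* 0, the documented lower limit */

	j2date(date2j(2003, 8, 4), &y, &m, &d);
	printf("round trip          = %04d-%02d-%02d\n", y, m, d);		/* 2003-08-04 */
	return 0;
}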
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 9ef2e80da76..2bcd2ac68a5 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/float.c,v 1.91 2003/07/30 19:48:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/float.c,v 1.92 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,13 +104,13 @@ static double cbrt(double x);
/* Configurable GUC parameter */
-int extra_float_digits = 0; /* Added to DBL_DIG or FLT_DIG */
+int extra_float_digits = 0; /* Added to DBL_DIG or FLT_DIG */
static void CheckFloat4Val(double val);
static void CheckFloat8Val(double val);
-static int float4_cmp_internal(float4 a, float4 b);
-static int float8_cmp_internal(float8 a, float8 b);
+static int float4_cmp_internal(float4 a, float4 b);
+static int float8_cmp_internal(float8 a, float8 b);
/*
@@ -198,7 +198,7 @@ float4in(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for float4: \"%s\"",
- num)));
+ num)));
}
else
{
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 1df3593ee96..717b350bebe 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/formatting.c,v 1.64 2003/07/27 04:53:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/formatting.c,v 1.65 2003/08/04 00:43:25 momjian Exp $
*
*
* Portions Copyright (c) 1999-2002, PostgreSQL Global Development Group
@@ -279,15 +279,15 @@ typedef struct
#define NUM_F_DECIMAL (1 << 1)
#define NUM_F_LDECIMAL (1 << 2)
#define NUM_F_ZERO (1 << 3)
-#define NUM_F_BLANK (1 << 4)
+#define NUM_F_BLANK (1 << 4)
#define NUM_F_FILLMODE (1 << 5)
-#define NUM_F_LSIGN (1 << 6)
+#define NUM_F_LSIGN (1 << 6)
#define NUM_F_BRACKET (1 << 7)
-#define NUM_F_MINUS (1 << 8)
+#define NUM_F_MINUS (1 << 8)
#define NUM_F_PLUS (1 << 9)
-#define NUM_F_ROMAN (1 << 10)
+#define NUM_F_ROMAN (1 << 10)
#define NUM_F_MULTI (1 << 11)
-#define NUM_F_PLUS_POST (1 << 12)
+#define NUM_F_PLUS_POST (1 << 12)
#define NUM_F_MINUS_POST (1 << 13)
#define NUM_LSIGN_PRE -1
@@ -1018,7 +1018,7 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_DECIMAL;
break;
@@ -1123,7 +1123,7 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_MULTI;
break;
@@ -3072,7 +3072,7 @@ to_timestamp(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("inconsistent use of year %04d and \"BC\"",
- tm.tm_year)));
+ tm.tm_year)));
}
if (tmfc.j)
@@ -3106,7 +3106,7 @@ to_timestamp(PG_FUNCTION_ARGS)
if (!tm.tm_year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("cannot convert yday without year information")));
+ errmsg("cannot convert yday without year information")));
y = ysum[isleap(tm.tm_year)];
@@ -3600,7 +3600,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
* simple + - < >
*/
if (*Np->inout_p == '-' || (IS_BRACKET(Np->Num) &&
- *Np->inout_p == '<'))
+ *Np->inout_p == '<'))
{
*Np->number = '-'; /* set - */
@@ -3678,7 +3678,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
(IS_ZERO((_n)->Num)==FALSE && \
(_n)->number == (_n)->number_p && \
*(_n)->number == '0' && \
- (_n)->Num->post != 0)
+ (_n)->Num->post != 0)
/* ----------
* Add digit or sign to number-string
@@ -3687,8 +3687,8 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
static void
NUM_numpart_to_char(NUMProc *Np, int id)
{
- int end;
-
+ int end;
+
if (IS_ROMAN(Np->Num))
return;
@@ -3710,13 +3710,13 @@ NUM_numpart_to_char(NUMProc *Np, int id)
Np->num_in = FALSE;
/*
- * Write sign if real number will write to output
- * Note: IS_PREDEC_SPACE() handle "9.9" --> " .1"
+ * Write sign if real number will write to output Note:
+ * IS_PREDEC_SPACE() handle "9.9" --> " .1"
*/
- if (Np->sign_wrote == FALSE &&
- (Np->num_curr >= Np->num_pre || (IS_ZERO(Np->Num) && Np->Num->zero_start == Np->num_curr )) &&
- (IS_PREDEC_SPACE(Np)==FALSE || (Np->last_relevant && *Np->last_relevant == '.')))
- {
+ if (Np->sign_wrote == FALSE &&
+ (Np->num_curr >= Np->num_pre || (IS_ZERO(Np->Num) && Np->Num->zero_start == Np->num_curr)) &&
+ (IS_PREDEC_SPACE(Np) == FALSE || (Np->last_relevant && *Np->last_relevant == '.')))
+ {
if (IS_LSIGN(Np->Num))
{
if (Np->Num->lsign == NUM_LSIGN_PRE)
@@ -3739,7 +3739,7 @@ NUM_numpart_to_char(NUMProc *Np, int id)
{
if (!IS_FILLMODE(Np->Num))
{
- *Np->inout_p = ' '; /* Write + */
+ *Np->inout_p = ' '; /* Write + */
++Np->inout_p;
}
Np->sign_wrote = TRUE;
@@ -3751,8 +3751,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
Np->sign_wrote = TRUE;
}
}
-
-
+
+
/*
* digits / FM / Zero / Dec. point
*/
@@ -3796,10 +3796,11 @@ NUM_numpart_to_char(NUMProc *Np, int id)
strcpy(Np->inout_p, Np->decimal); /* Write DEC/D */
Np->inout_p += strlen(Np->inout_p);
}
+
/*
* Ora 'n' -- FM9.9 --> 'n.'
*/
- else if (IS_FILLMODE(Np->Num) &&
+ else if (IS_FILLMODE(Np->Num) &&
Np->last_relevant && *Np->last_relevant == '.')
{
@@ -3816,6 +3817,7 @@ NUM_numpart_to_char(NUMProc *Np, int id)
if (Np->last_relevant && Np->number_p > Np->last_relevant &&
id != NUM_0)
;
+
/*
* '0.1' -- 9.9 --> ' .1'
*/
@@ -3826,6 +3828,7 @@ NUM_numpart_to_char(NUMProc *Np, int id)
*Np->inout_p = ' ';
++Np->inout_p;
}
+
/*
* '0' -- FM9.9 --> '0.'
*/
@@ -3846,11 +3849,11 @@ NUM_numpart_to_char(NUMProc *Np, int id)
}
end = Np->num_count + (Np->num_pre ? 1 : 0) + (IS_DECIMAL(Np->Num) ? 1 : 0);
-
+
if (Np->last_relevant && Np->last_relevant == Np->number_p)
end = Np->num_curr;
-
- if (Np->num_curr+1 == end)
+
+ if (Np->num_curr + 1 == end)
{
if (Np->sign_wrote == TRUE && IS_BRACKET(Np->Num))
{
@@ -3895,7 +3898,7 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
if (Np->Num->zero_start)
--Np->Num->zero_start;
-
+
/*
* Roman correction
*/
@@ -3923,20 +3926,18 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
* Sign
*/
if (type == FROM_CHAR)
- {
Np->sign = FALSE;
- }
else
{
Np->sign = sign;
-
+
/* MI/PL/SG - write sign itself and not in number */
if (IS_PLUS(Np->Num) || IS_MINUS(Np->Num))
{
- if (IS_PLUS(Np->Num) && IS_MINUS(Np->Num)==FALSE)
- Np->sign_wrote = FALSE; /* need sign */
+ if (IS_PLUS(Np->Num) && IS_MINUS(Np->Num) == FALSE)
+ Np->sign_wrote = FALSE; /* need sign */
else
- Np->sign_wrote = TRUE; /* needn't sign */
+ Np->sign_wrote = TRUE; /* needn't sign */
}
else
{
@@ -3950,10 +3951,10 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
else if (Np->sign != '+' && IS_PLUS(Np->Num))
Np->Num->flag &= ~NUM_F_PLUS;
- if (Np->sign == '+' && IS_FILLMODE(Np->Num) && IS_LSIGN(Np->Num)==FALSE)
- Np->sign_wrote = TRUE; /* needn't sign */
+ if (Np->sign == '+' && IS_FILLMODE(Np->Num) && IS_LSIGN(Np->Num) == FALSE)
+ Np->sign_wrote = TRUE; /* needn't sign */
else
- Np->sign_wrote = FALSE; /* need sign */
+ Np->sign_wrote = FALSE; /* need sign */
if (Np->Num->lsign == NUM_LSIGN_PRE && Np->Num->pre == Np->Num->pre_lsign_num)
Np->Num->lsign = NUM_LSIGN_POST;
@@ -3973,12 +3974,12 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
{
if (IS_DECIMAL(Np->Num))
Np->last_relevant = get_last_relevant_decnum(
- Np->number +
+ Np->number +
((Np->Num->zero_end - Np->num_pre > 0) ?
Np->Num->zero_end - Np->num_pre : 0));
}
- if (Np->sign_wrote==FALSE && Np->num_pre == 0)
+ if (Np->sign_wrote == FALSE && Np->num_pre == 0)
++Np->num_count;
}
else
@@ -4010,7 +4011,7 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
IS_MINUS(Np->Num) ? "Yes" : "No",
IS_FILLMODE(Np->Num) ? "Yes" : "No",
IS_ROMAN(Np->Num) ? "Yes" : "No"
- );
+ );
#endif
/*
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 9934d6a3e0d..6567dafc955 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/geo_ops.c,v 1.78 2003/07/27 04:53:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/geo_ops.c,v 1.79 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -140,7 +140,7 @@ single_decode(char *str, float8 *x, char **s)
static int
single_encode(float8 x, char *str)
{
- int ndig = DBL_DIG + extra_float_digits;
+ int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
@@ -196,7 +196,7 @@ pair_decode(char *str, float8 *x, float8 *y, char **s)
static int
pair_encode(float8 x, float8 y, char *str)
{
- int ndig = DBL_DIG + extra_float_digits;
+ int ndig = DBL_DIG + extra_float_digits;
if (ndig < 1)
ndig = 1;
@@ -3363,7 +3363,7 @@ poly_in(PG_FUNCTION_ARGS)
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for polygon: \"%s\"", str)));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
@@ -3375,7 +3375,7 @@ poly_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for polygon: \"%s\"", str)));
make_bound_box(poly);
@@ -3406,7 +3406,7 @@ Datum
poly_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- POLYGON *poly;
+ POLYGON *poly;
int32 npts;
int32 i;
int size;
@@ -3440,7 +3440,7 @@ poly_recv(PG_FUNCTION_ARGS)
Datum
poly_send(PG_FUNCTION_ARGS)
{
- POLYGON *poly = PG_GETARG_POLYGON_P(0);
+ POLYGON *poly = PG_GETARG_POLYGON_P(0);
StringInfoData buf;
int32 i;
@@ -4246,7 +4246,7 @@ circle_in(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for circle: \"%s\"", str)));
+ errmsg("invalid input syntax for circle: \"%s\"", str)));
}
if (*s != '\0')
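single_encode() and pair_encode() above compute the digit count as DBL_DIG + extra_float_digits, clamped to at least one. A standalone sketch of the effect on output precision follows; the %.*g format string and the sample value are assumptions of the sketch, not shown in this hunk.

#include <stdio.h>
#include <float.h>

int
main(void)
{
	double		x = 0.1;
	int			extra_float_digits;

	for (extra_float_digits = -2; extra_float_digits <= 2; extra_float_digits++)
	{
		/* same clamp as single_encode()/pair_encode() above */
		int			ndig = DBL_DIG + extra_float_digits;

		if (ndig < 1)
			ndig = 1;
		printf("extra_float_digits=%2d -> %.*g\n", extra_float_digits, ndig, x);
	}
	return 0;
}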
diff --git a/src/backend/utils/adt/inet_net_ntop.c b/src/backend/utils/adt/inet_net_ntop.c
index 0287fec27f0..7a38c535569 100644
--- a/src/backend/utils/adt/inet_net_ntop.c
+++ b/src/backend/utils/adt/inet_net_ntop.c
@@ -16,7 +16,7 @@
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static const char rcsid[] = "$Id: inet_net_ntop.c,v 1.15 2003/06/25 01:08:13 momjian Exp $";
+static const char rcsid[] = "$Id: inet_net_ntop.c,v 1.16 2003/08/04 00:43:25 momjian Exp $";
#endif
#include "postgres.h"
@@ -40,13 +40,13 @@ static const char rcsid[] = "$Id: inet_net_ntop.c,v 1.15 2003/06/25 01:08:13 mom
#endif
static char *inet_net_ntop_ipv4(const u_char *src, int bits,
- char *dst, size_t size);
+ char *dst, size_t size);
static char *inet_cidr_ntop_ipv4(const u_char *src, int bits,
- char *dst, size_t size);
+ char *dst, size_t size);
static char *inet_net_ntop_ipv6(const u_char *src, int bits,
- char *dst, size_t size);
+ char *dst, size_t size);
static char *inet_cidr_ntop_ipv6(const u_char *src, int bits,
- char *dst, size_t size);
+ char *dst, size_t size);
/*
* char *
@@ -160,26 +160,30 @@ emsgsize:
* 0x11110000 in its fourth octet.
* author:
* Vadim Kogan (UCB), June 2001
- * Original version (IPv4) by Paul Vixie (ISC), July 1996
+ * Original version (IPv4) by Paul Vixie (ISC), July 1996
*/
static char *
inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
{
- u_int m;
- int b;
- int p;
- int zero_s, zero_l, tmp_zero_s, tmp_zero_l;
- int i;
- int is_ipv4 = 0;
- int double_colon = 0;
+ u_int m;
+ int b;
+ int p;
+ int zero_s,
+ zero_l,
+ tmp_zero_s,
+ tmp_zero_l;
+ int i;
+ int is_ipv4 = 0;
+ int double_colon = 0;
unsigned char inbuf[16];
- char outbuf[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128")];
- char *cp;
- int words;
- u_char *s;
+ char outbuf[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128")];
+ char *cp;
+ int words;
+ u_char *s;
- if (bits < 0 || bits > 128) {
+ if (bits < 0 || bits > 128)
+ {
errno = EINVAL;
return (NULL);
}
@@ -187,20 +191,24 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
cp = outbuf;
double_colon = 0;
- if (bits == 0) {
+ if (bits == 0)
+ {
*cp++ = ':';
*cp++ = ':';
*cp = '\0';
double_colon = 1;
- } else {
- /* Copy src to private buffer. Zero host part. */
+ }
+ else
+ {
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
b = bits % 8;
- if (b != 0) {
+ if (b != 0)
+ {
m = ~0 << (8 - b);
- inbuf[p-1] &= m;
+ inbuf[p - 1] &= m;
}
s = inbuf;
@@ -212,13 +220,18 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
/* Find the longest substring of zero's */
zero_s = zero_l = tmp_zero_s = tmp_zero_l = 0;
- for (i = 0; i < (words * 2); i += 2) {
- if ((s[i] | s[i+1]) == 0) {
+ for (i = 0; i < (words * 2); i += 2)
+ {
+ if ((s[i] | s[i + 1]) == 0)
+ {
if (tmp_zero_l == 0)
tmp_zero_s = i / 2;
tmp_zero_l++;
- } else {
- if (tmp_zero_l && zero_l < tmp_zero_l) {
+ }
+ else
+ {
+ if (tmp_zero_l && zero_l < tmp_zero_l)
+ {
zero_s = tmp_zero_s;
zero_l = tmp_zero_l;
tmp_zero_l = 0;
@@ -226,23 +239,27 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
}
- if (tmp_zero_l && zero_l < tmp_zero_l) {
+ if (tmp_zero_l && zero_l < tmp_zero_l)
+ {
zero_s = tmp_zero_s;
zero_l = tmp_zero_l;
}
if (zero_l != words && zero_s == 0 && ((zero_l == 6) ||
- ((zero_l == 5 && s[10] == 0xff && s[11] == 0xff) ||
- ((zero_l == 7 && s[14] != 0 && s[15] != 1)))))
+ ((zero_l == 5 && s[10] == 0xff && s[11] == 0xff) ||
+ ((zero_l == 7 && s[14] != 0 && s[15] != 1)))))
is_ipv4 = 1;
/* Format whole words. */
- for (p = 0; p < words; p++) {
- if (zero_l != 0 && p >= zero_s && p < zero_s + zero_l) {
+ for (p = 0; p < words; p++)
+ {
+ if (zero_l != 0 && p >= zero_s && p < zero_s + zero_l)
+ {
/* Time to skip some zeros */
if (p == zero_s)
*cp++ = ':';
- if (p == words - 1) {
+ if (p == words - 1)
+ {
*cp++ = ':';
double_colon = 1;
}
@@ -251,15 +268,19 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
continue;
}
- if (is_ipv4 && p > 5 ) {
+ if (is_ipv4 && p > 5)
+ {
*cp++ = (p == 6) ? ':' : '.';
cp += SPRINTF((cp, "%u", *s++));
/* we can potentially drop the last octet */
- if (p != 7 || bits > 120) {
+ if (p != 7 || bits > 120)
+ {
*cp++ = '.';
cp += SPRINTF((cp, "%u", *s++));
}
- } else {
+ }
+ else
+ {
if (cp != outbuf)
*cp++ = ':';
cp += SPRINTF((cp, "%x", *s * 256 + s[1]));
@@ -268,7 +289,8 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
}
- if (!double_colon) {
+ if (!double_colon)
+ {
if (bits < 128 - 32)
cp += SPRINTF((cp, "::"));
else if (bits < 128 - 16)
@@ -281,7 +303,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
if (strlen(outbuf) + 1 > size)
goto emsgsize;
strcpy(dst, outbuf);
-
+
return (dst);
emsgsize:
@@ -373,21 +395,24 @@ emsgsize:
}
static int
-decoct(const u_char *src, int bytes, char *dst, size_t size) {
- char *odst = dst;
- char *t;
- int b;
+decoct(const u_char *src, int bytes, char *dst, size_t size)
+{
+ char *odst = dst;
+ char *t;
+ int b;
- for (b = 1; b <= bytes; b++) {
+ for (b = 1; b <= bytes; b++)
+ {
if (size < sizeof "255.")
return (0);
t = dst;
dst += SPRINTF((dst, "%u", *src++));
- if (b != bytes) {
+ if (b != bytes)
+ {
*dst++ = '.';
*dst = '\0';
}
- size -= (size_t)(dst - t);
+ size -= (size_t) (dst - t);
}
return (dst - odst);
}
@@ -402,42 +427,52 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
* Keep this in mind if you think this function should have been coded
* to use pointer overlays. All the world's not a VAX.
*/
- char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255/128"];
- char *tp;
- struct { int base, len; } best, cur;
- u_int words[NS_IN6ADDRSZ / NS_INT16SZ];
- int i;
+ char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255/128"];
+ char *tp;
+ struct
+ {
+ int base,
+ len;
+ } best, cur;
+ u_int words[NS_IN6ADDRSZ / NS_INT16SZ];
+ int i;
- if ((bits < -1) || (bits > 128)) {
+ if ((bits < -1) || (bits > 128))
+ {
errno = EINVAL;
return (NULL);
}
/*
- * Preprocess:
- * Copy the input (bytewise) array into a wordwise array.
- * Find the longest run of 0x00's in src[] for :: shorthanding.
+ * Preprocess: Copy the input (bytewise) array into a wordwise array.
+ * Find the longest run of 0x00's in src[] for :: shorthanding.
*/
memset(words, '\0', sizeof words);
for (i = 0; i < NS_IN6ADDRSZ; i++)
words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3));
best.base = -1;
cur.base = -1;
- for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) {
- if (words[i] == 0) {
+ for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++)
+ {
+ if (words[i] == 0)
+ {
if (cur.base == -1)
cur.base = i, cur.len = 1;
else
cur.len++;
- } else {
- if (cur.base != -1) {
+ }
+ else
+ {
+ if (cur.base != -1)
+ {
if (best.base == -1 || cur.len > best.len)
best = cur;
cur.base = -1;
}
}
}
- if (cur.base != -1) {
+ if (cur.base != -1)
+ {
if (best.base == -1 || cur.len > best.len)
best = cur;
}
@@ -448,10 +483,12 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
* Format the result.
*/
tp = tmp;
- for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) {
+ for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++)
+ {
/* Are we inside the best run of 0x00's? */
if (best.base != -1 && i >= best.base &&
- i < (best.base + best.len)) {
+ i < (best.base + best.len))
+ {
if (i == best.base)
*tp++ = ':';
continue;
@@ -461,12 +498,14 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
*tp++ = ':';
/* Is this address an encapsulated IPv4? */
if (i == 6 && best.base == 0 && (best.len == 6 ||
- (best.len == 7 && words[7] != 0x0001) ||
- (best.len == 5 && words[5] == 0xffff))) {
- int n;
-
- n = decoct(src+12, 4, tp, sizeof tmp - (tp - tmp));
- if (n == 0) {
+ (best.len == 7 && words[7] != 0x0001) ||
+ (best.len == 5 && words[5] == 0xffff)))
+ {
+ int n;
+
+ n = decoct(src + 12, 4, tp, sizeof tmp - (tp - tmp));
+ if (n == 0)
+ {
errno = EMSGSIZE;
return (NULL);
}
@@ -477,8 +516,8 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
/* Was it a trailing run of 0x00's? */
- if (best.base != -1 && (best.base + best.len) ==
- (NS_IN6ADDRSZ / NS_INT16SZ))
+ if (best.base != -1 && (best.base + best.len) ==
+ (NS_IN6ADDRSZ / NS_INT16SZ))
*tp++ = ':';
*tp = '\0';
@@ -488,7 +527,8 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
/*
* Check for overflow, copy, and we're done.
*/
- if ((size_t)(tp - tmp) > size) {
+ if ((size_t) (tp - tmp) > size)
+ {
errno = EMSGSIZE;
return (NULL);
}
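The reindented loop in inet_net_ntop_ipv6() scans the eight 16-bit words for the longest run of zeros (the best/cur pair) so that run can be abbreviated as "::". The standalone sketch below replays just that scan; the sample address is illustrative.

#include <stdio.h>

int
main(void)
{
	/* 2001:db8:0:0:0:0:0:1 as eight 16-bit words */
	unsigned int words[8] = {0x2001, 0x0db8, 0, 0, 0, 0, 0, 0x0001};
	struct
	{
		int			base, len;
	}			best = {-1, 0}, cur = {-1, 0};
	int			i;

	for (i = 0; i < 8; i++)
	{
		if (words[i] == 0)
		{
			/* extend (or start) the current run of zero words */
			if (cur.base == -1)
			{
				cur.base = i;
				cur.len = 1;
			}
			else
				cur.len++;
		}
		else if (cur.base != -1)
		{
			/* run ended: keep it if it is the longest so far */
			if (best.base == -1 || cur.len > best.len)
				best = cur;
			cur.base = -1;
		}
	}
	if (cur.base != -1 && (best.base == -1 || cur.len > best.len))
		best = cur;

	printf("best run: base=%d len=%d\n", best.base, best.len);	/* base=2 len=5 */
	return 0;
}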
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index 4c7ca9d618a..b1b4bb69876 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -16,7 +16,7 @@
*/
#if defined(LIBC_SCCS) && !defined(lint)
-static const char rcsid[] = "$Id: inet_net_pton.c,v 1.15 2003/06/24 22:21:22 momjian Exp $";
+static const char rcsid[] = "$Id: inet_net_pton.c,v 1.16 2003/08/04 00:43:25 momjian Exp $";
#endif
#include "postgres.h"
@@ -35,8 +35,8 @@ static const char rcsid[] = "$Id: inet_net_pton.c,v 1.15 2003/06/24 22:21:22 mom
static int inet_net_pton_ipv4(const char *src, u_char *dst);
static int inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size);
-static int inet_net_pton_ipv6(const char *src, u_char *dst);
-static int inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size);
+static int inet_net_pton_ipv6(const char *src, u_char *dst);
+static int inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size);
/*
* static int
@@ -339,24 +339,27 @@ emsgsize:
}
static int
-getbits(const char *src, int *bitsp) {
+getbits(const char *src, int *bitsp)
+{
static const char digits[] = "0123456789";
- int n;
- int val;
- char ch;
+ int n;
+ int val;
+ char ch;
val = 0;
n = 0;
- while ((ch = *src++) != '\0') {
+ while ((ch = *src++) != '\0')
+ {
const char *pch;
pch = strchr(digits, ch);
- if (pch != NULL) {
+ if (pch != NULL)
+ {
if (n++ != 0 && val == 0) /* no leading zeros */
return (0);
val *= 10;
val += (pch - digits);
- if (val > 128) /* range */
+ if (val > 128) /* range */
return (0);
continue;
}
@@ -369,30 +372,34 @@ getbits(const char *src, int *bitsp) {
}
static int
-getv4(const char *src, u_char *dst, int *bitsp) {
+getv4(const char *src, u_char *dst, int *bitsp)
+{
static const char digits[] = "0123456789";
- u_char *odst = dst;
- int n;
- u_int val;
- char ch;
+ u_char *odst = dst;
+ int n;
+ u_int val;
+ char ch;
val = 0;
n = 0;
- while ((ch = *src++) != '\0') {
+ while ((ch = *src++) != '\0')
+ {
const char *pch;
pch = strchr(digits, ch);
- if (pch != NULL) {
+ if (pch != NULL)
+ {
if (n++ != 0 && val == 0) /* no leading zeros */
return (0);
val *= 10;
val += (pch - digits);
- if (val > 255) /* range */
+ if (val > 255) /* range */
return (0);
continue;
}
- if (ch == '.' || ch == '/') {
- if (dst - odst > 3) /* too many octets? */
+ if (ch == '.' || ch == '/')
+ {
+ if (dst - odst > 3) /* too many octets? */
return (0);
*dst++ = val;
if (ch == '/')
@@ -405,7 +412,7 @@ getv4(const char *src, u_char *dst, int *bitsp) {
}
if (n == 0)
return (0);
- if (dst - odst > 3) /* too many octets? */
+ if (dst - odst > 3) /* too many octets? */
return (0);
*dst++ = val;
return (1);
@@ -422,15 +429,21 @@ inet_net_pton_ipv6(const char *src, u_char *dst)
#define NS_INADDRSZ 4
static int
-inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size) {
+inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size)
+{
static const char xdigits_l[] = "0123456789abcdef",
- xdigits_u[] = "0123456789ABCDEF";
- u_char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp;
- const char *xdigits, *curtok;
- int ch, saw_xdigit;
- u_int val;
- int digits;
- int bits;
+ xdigits_u[] = "0123456789ABCDEF";
+ u_char tmp[NS_IN6ADDRSZ],
+ *tp,
+ *endp,
+ *colonp;
+ const char *xdigits,
+ *curtok;
+ int ch,
+ saw_xdigit;
+ u_int val;
+ int digits;
+ int bits;
if (size < NS_IN6ADDRSZ)
goto emsgsize;
@@ -447,12 +460,14 @@ inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size) {
val = 0;
digits = 0;
bits = -1;
- while ((ch = *src++) != '\0') {
+ while ((ch = *src++) != '\0')
+ {
const char *pch;
if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL)
pch = strchr((xdigits = xdigits_u), ch);
- if (pch != NULL) {
+ if (pch != NULL)
+ {
val <<= 4;
val |= (pch - xdigits);
if (++digits > 4)
@@ -460,14 +475,17 @@ inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size) {
saw_xdigit = 1;
continue;
}
- if (ch == ':') {
+ if (ch == ':')
+ {
curtok = src;
- if (!saw_xdigit) {
+ if (!saw_xdigit)
+ {
if (colonp)
goto enoent;
colonp = tp;
continue;
- } else if (*src == '\0')
+ }
+ else if (*src == '\0')
goto enoent;
if (tp + NS_INT16SZ > endp)
return (0);
@@ -479,16 +497,18 @@ inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size) {
continue;
}
if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) &&
- getv4(curtok, tp, &bits) > 0) {
+ getv4(curtok, tp, &bits) > 0)
+ {
tp += NS_INADDRSZ;
saw_xdigit = 0;
- break; /* '\0' was seen by inet_pton4(). */
+ break; /* '\0' was seen by inet_pton4(). */
}
if (ch == '/' && getbits(src, &bits) > 0)
break;
goto enoent;
}
- if (saw_xdigit) {
+ if (saw_xdigit)
+ {
if (tp + NS_INT16SZ > endp)
goto enoent;
*tp++ = (u_char) (val >> 8) & 0xff;
@@ -497,20 +517,22 @@ inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size) {
if (bits == -1)
bits = 128;
- endp = tmp + 16;
+ endp = tmp + 16;
- if (colonp != NULL) {
+ if (colonp != NULL)
+ {
/*
- * Since some memmove()'s erroneously fail to handle
- * overlapping regions, we'll do the shift by hand.
+ * Since some memmove()'s erroneously fail to handle overlapping
+ * regions, we'll do the shift by hand.
*/
- const int n = tp - colonp;
- int i;
+ const int n = tp - colonp;
+ int i;
if (tp == endp)
goto enoent;
- for (i = 1; i <= n; i++) {
- endp[- i] = colonp[n - i];
+ for (i = 1; i <= n; i++)
+ {
+ endp[-i] = colonp[n - i];
colonp[n - i] = 0;
}
tp = endp;
@@ -525,11 +547,11 @@ inet_cidr_pton_ipv6(const char *src, u_char *dst, size_t size) {
return (bits);
- enoent:
+enoent:
errno = ENOENT;
return (-1);
- emsgsize:
+emsgsize:
errno = EMSGSIZE;
return (-1);
}
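getbits() above accepts a prefix length written as plain decimal digits, rejects leading zeros, and caps the value at 128. A simplified standalone version of those checks follows; the real function reports success through an out parameter and a 0/1 return, which this sketch collapses into a single return value.

#include <stdio.h>
#include <string.h>

/* Parse an IPv6 prefix length the way getbits() above does; -1 on failure. */
static int
parse_bits(const char *src)
{
	static const char digits[] = "0123456789";
	int			n = 0;
	int			val = 0;
	char		ch;

	while ((ch = *src++) != '\0')
	{
		const char *pch = strchr(digits, ch);

		if (pch == NULL)
			return -1;			/* non-digit */
		if (n++ != 0 && val == 0)
			return -1;			/* leading zero, e.g. "064" */
		val = val * 10 + (int) (pch - digits);
		if (val > 128)
			return -1;			/* out of range */
	}
	if (n == 0)
		return -1;				/* empty string */
	return val;
}

int
main(void)
{
	printf("%d %d %d %d\n",
		   parse_bits("64"),	/* 64 */
		   parse_bits("128"),	/* 128 */
		   parse_bits("064"),	/* -1 */
		   parse_bits("129"));	/* -1 */
	return 0;
}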
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 1c25df09562..c7e31ebcada 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/int.c,v 1.55 2003/07/27 04:53:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/int.c,v 1.56 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -173,9 +173,7 @@ int2vectorrecv(PG_FUNCTION_ARGS)
int slot;
for (slot = 0; slot < INDEX_MAX_KEYS; slot++)
- {
result[slot] = (int16) pq_getmsgint(buf, sizeof(int16));
- }
PG_RETURN_POINTER(result);
}
@@ -191,9 +189,7 @@ int2vectorsend(PG_FUNCTION_ARGS)
pq_begintypsend(&buf);
for (slot = 0; slot < INDEX_MAX_KEYS; slot++)
- {
pq_sendint(&buf, int2Array[slot], sizeof(int16));
- }
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index 123c5e72257..3be22ba5279 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/int8.c,v 1.45 2003/07/27 04:53:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/int8.c,v 1.46 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,7 +85,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for int8: \"%s\"", str)));
+ errmsg("invalid input syntax for int8: \"%s\"", str)));
}
/* process digits */
@@ -113,7 +113,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for int8: \"%s\"", str)));
+ errmsg("invalid input syntax for int8: \"%s\"", str)));
}
*result = (sign < 0) ? -tmp : tmp;
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 0f832aa6e13..4cffc0d018d 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/like.c,v 1.54 2003/07/27 04:53:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/like.c,v 1.55 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -451,7 +451,7 @@ like_escape_bytea(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
diff --git a/src/backend/utils/adt/like_match.c b/src/backend/utils/adt/like_match.c
index 9530873ae3f..ebe2ce75f34 100644
--- a/src/backend/utils/adt/like_match.c
+++ b/src/backend/utils/adt/like_match.c
@@ -19,7 +19,7 @@
* Copyright (c) 1996-2002, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/like_match.c,v 1.5 2003/07/27 04:53:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/like_match.c,v 1.6 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -289,7 +289,7 @@ do_like_escape(text *pat, text *esc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 36ae41f9c71..4bef89a71b3 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for MAC addresses.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/mac.c,v 1.29 2003/07/27 04:53:06 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/mac.c,v 1.30 2003/08/04 00:43:25 momjian Exp $
*/
#include "postgres.h"
@@ -62,7 +62,7 @@ macaddr_in(PG_FUNCTION_ARGS)
if (count != 6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for macaddr: \"%s\"", str)));
+ errmsg("invalid input syntax for macaddr: \"%s\"", str)));
if ((a < 0) || (a > 255) || (b < 0) || (b > 255) ||
(c < 0) || (c > 255) || (d < 0) || (d > 255) ||
@@ -110,7 +110,7 @@ Datum
macaddr_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- macaddr *addr;
+ macaddr *addr;
addr = (macaddr *) palloc(sizeof(macaddr));
@@ -130,7 +130,7 @@ macaddr_recv(PG_FUNCTION_ARGS)
Datum
macaddr_send(PG_FUNCTION_ARGS)
{
- macaddr *addr = PG_GETARG_MACADDR_P(0);
+ macaddr *addr = PG_GETARG_MACADDR_P(0);
StringInfoData buf;
pq_begintypsend(&buf);
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 4f1234e35a0..0e84ad652fd 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/nabstime.c,v 1.111 2003/07/28 00:09:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/nabstime.c,v 1.112 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,7 +83,7 @@ static int istinterval(char *i_string,
AbsoluteTime *i_end);
-/*
+/*
* GetCurrentAbsoluteTime()
*
* Get the current system time (relative to Unix epoch).
@@ -152,7 +152,7 @@ GetCurrentDateTime(struct tm * tm)
abstime2tm(GetCurrentTransactionStartTime(), &tz, tm, NULL);
}
-/*
+/*
* GetCurrentTimeUsec()
*
* Get the transaction start time ("now()") broken down as a struct tm,
@@ -373,13 +373,13 @@ abstimein(PG_FUNCTION_ARGS)
if (strlen(str) >= sizeof(lowstr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for abstime: \"%s\"", str)));
+ errmsg("invalid input syntax for abstime: \"%s\"", str)));
if ((ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf) != 0)
|| (DecodeDateTime(field, ftype, nf, &dtype, tm, &fsec, &tz) != 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for abstime: \"%s\"", str)));
+ errmsg("invalid input syntax for abstime: \"%s\"", str)));
switch (dtype)
{
@@ -654,7 +654,7 @@ abstime_timestamp(PG_FUNCTION_ARGS)
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert \"invalid\" abstime to timestamp")));
+ errmsg("cannot convert \"invalid\" abstime to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
@@ -727,7 +727,7 @@ abstime_timestamptz(PG_FUNCTION_ARGS)
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert \"invalid\" abstime to timestamp")));
+ errmsg("cannot convert \"invalid\" abstime to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
@@ -776,13 +776,13 @@ reltimein(PG_FUNCTION_ARGS)
if (strlen(str) >= sizeof(lowstr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for reltime: \"%s\"", str)));
+ errmsg("invalid input syntax for reltime: \"%s\"", str)));
if ((ParseDateTime(str, lowstr, field, ftype, MAXDATEFIELDS, &nf) != 0)
|| (DecodeInterval(field, ftype, nf, &dtype, tm, &fsec) != 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for reltime: \"%s\"", str)));
+ errmsg("invalid input syntax for reltime: \"%s\"", str)));
switch (dtype)
{
@@ -849,7 +849,7 @@ reltimesend(PG_FUNCTION_ARGS)
static void
reltime2tm(RelativeTime time, struct tm * tm)
{
- double dtime = time;
+ double dtime = time;
FMODULO(dtime, tm->tm_year, 31557600);
FMODULO(dtime, tm->tm_mon, 2592000);
@@ -1032,7 +1032,7 @@ reltime_interval(PG_FUNCTION_ARGS)
case INVALID_RELTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert \"invalid\" reltime to interval")));
+ errmsg("cannot convert \"invalid\" reltime to interval")));
result->time = 0;
result->month = 0;
break;
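reltime2tm(), touched above, breaks a relative time in seconds into fields with FMODULO using fixed unit sizes (31557600 s per year, 2592000 s per month). FMODULO itself is not shown in this hunk, so the sketch below only approximates the idea for nonnegative inputs: take the quotient as the field and continue with the remainder.

#include <stdio.h>

int
main(void)
{
	double		t = 40000000.0;	/* some relative time in seconds */
	int			years,
				months,
				days;

	years = (int) (t / 31557600);
	t -= (double) years * 31557600;
	months = (int) (t / 2592000);
	t -= (double) months * 2592000;
	days = (int) (t / 86400);
	t -= (double) days * 86400;

	/* prints: 1 years 3 months 7 days 61600 seconds */
	printf("%d years %d months %d days %.0f seconds\n",
		   years, months, days, t);
	return 0;
}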
diff --git a/src/backend/utils/adt/name.c b/src/backend/utils/adt/name.c
index 47af778f3df..3c6c4049fcb 100644
--- a/src/backend/utils/adt/name.c
+++ b/src/backend/utils/adt/name.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/name.c,v 1.47 2003/07/27 04:53:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/name.c,v 1.48 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,7 +89,7 @@ namerecv(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("identifier too long"),
errdetail("Identifier must be less than %d characters.",
- NAMEDATALEN)));
+ NAMEDATALEN)));
result = (NameData *) palloc0(NAMEDATALEN);
memcpy(result, str, nbytes);
pfree(str);
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index a9683116e94..e3e7d185df6 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/network.c,v 1.44 2003/08/01 23:22:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/network.c,v 1.45 2003/08/04 00:43:25 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
@@ -21,9 +21,9 @@
static Datum text_network(text *src, int type);
static int32 network_cmp_internal(inet *a1, inet *a2);
-static int bitncmp(void *l, void *r, int n);
+static int bitncmp(void *l, void *r, int n);
static bool addressOK(unsigned char *a, int bits, int family);
-static int ip_addrsize(inet *inetptr);
+static int ip_addrsize(inet *inetptr);
/*
* Access macros.
@@ -50,13 +50,14 @@ static int ip_addrsize(inet *inetptr);
static int
ip_addrsize(inet *inetptr)
{
- switch (ip_family(inetptr)) {
- case PGSQL_AF_INET:
- return 4;
- case PGSQL_AF_INET6:
- return 16;
- default:
- return -1;
+ switch (ip_family(inetptr))
+ {
+ case PGSQL_AF_INET:
+ return 4;
+ case PGSQL_AF_INET6:
+ return 16;
+ default:
+ return -1;
}
}
@@ -64,34 +65,34 @@ ip_addrsize(inet *inetptr)
static inet *
network_in(char *src, int type)
{
- int bits;
+ int bits;
inet *dst;
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6
- * addresses will have a : somewhere in them (several, in fact) so
- * if there is one present, assume it's V6, otherwise assume it's V4.
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6
+ * addresses will have a : somewhere in them (several, in fact) so if
+ * there is one present, assume it's V6, otherwise assume it's V4.
*/
if (strchr(src, ':') != NULL)
ip_family(dst) = PGSQL_AF_INET6;
else
ip_family(dst) = PGSQL_AF_INET;
-
+
bits = inet_net_pton(ip_family(dst), src, ip_addr(dst),
- type ? ip_addrsize(dst) : -1);
+ type ? ip_addrsize(dst) : -1);
if ((bits < 0) || (bits > ip_maxbits(dst)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- /* translator: first %s is inet or cidr */
+ /* translator: first %s is inet or cidr */
errmsg("invalid input syntax for %s: \"%s\"",
- type ? "cidr" : "inet", src)));
+ type ? "cidr" : "inet", src)));
/*
- * Error check: CIDR values must not have any bits set beyond
- * the masklen.
+ * Error check: CIDR values must not have any bits set beyond the
+ * masklen.
*/
if (type)
{
@@ -141,7 +142,7 @@ inet_out(PG_FUNCTION_ARGS)
int len;
dst = inet_net_ntop(ip_family(src), ip_addr(src), ip_bits(src),
- tmp, sizeof(tmp));
+ tmp, sizeof(tmp));
if (dst == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
@@ -208,10 +209,10 @@ inet_recv(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("invalid length in external inet")));
VARATT_SIZEP(addr) = VARHDRSZ
- + ((char *)ip_addr(addr) - (char *) VARDATA(addr))
+ + ((char *) ip_addr(addr) - (char *) VARDATA(addr))
+ ip_addrsize(addr);
- addrptr = (char *)ip_addr(addr);
+ addrptr = (char *) ip_addr(addr);
for (i = 0; i < nb; i++)
addrptr[i] = pq_getmsgbyte(buf);
@@ -258,7 +259,7 @@ inet_send(PG_FUNCTION_ARGS)
if (nb < 0)
nb = 0;
pq_sendbyte(&buf, nb);
- addrptr = (char *)ip_addr(addr);
+ addrptr = (char *) ip_addr(addr);
for (i = 0; i < nb; i++)
pq_sendbyte(&buf, addrptr[i]);
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
@@ -305,8 +306,8 @@ inet_set_masklen(PG_FUNCTION_ARGS)
int bits = PG_GETARG_INT32(1);
inet *dst;
- if ( bits == -1 )
- bits = ip_maxbits(src);
+ if (bits == -1)
+ bits = ip_maxbits(src);
if ((bits < 0) || (bits > ip_maxbits(src)))
ereport(ERROR,
@@ -341,7 +342,7 @@ network_cmp_internal(inet *a1, inet *a2)
int order;
order = bitncmp(ip_addr(a1), ip_addr(a2),
- Min(ip_bits(a1), ip_bits(a2)));
+ Min(ip_bits(a1), ip_bits(a2)));
if (order != 0)
return order;
order = ((int) ip_bits(a1)) - ((int) ip_bits(a2));
@@ -431,7 +432,7 @@ network_sub(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) > ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
@@ -446,7 +447,7 @@ network_subeq(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) >= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
@@ -461,7 +462,7 @@ network_sup(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) < ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
@@ -476,7 +477,7 @@ network_supeq(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) <= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
@@ -496,7 +497,7 @@ network_host(PG_FUNCTION_ARGS)
/* force display of max bits, regardless of masklen... */
if (inet_net_ntop(ip_family(ip), ip_addr(ip), ip_maxbits(ip),
- tmp, sizeof(tmp)) == NULL)
+ tmp, sizeof(tmp)) == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("could not format inet value: %m")));
@@ -522,7 +523,7 @@ network_show(PG_FUNCTION_ARGS)
char tmp[sizeof("xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:255.255.255.255/128")];
if (inet_net_ntop(ip_family(ip), ip_addr(ip), ip_maxbits(ip),
- tmp, sizeof(tmp)) == NULL)
+ tmp, sizeof(tmp)) == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
errmsg("could not format inet value: %m")));
@@ -553,10 +554,10 @@ network_abbrev(PG_FUNCTION_ARGS)
if (ip_type(ip))
dst = inet_cidr_ntop(ip_family(ip), ip_addr(ip),
- ip_bits(ip), tmp, sizeof(tmp));
+ ip_bits(ip), tmp, sizeof(tmp));
else
dst = inet_net_ntop(ip_family(ip), ip_addr(ip),
- ip_bits(ip), tmp, sizeof(tmp));
+ ip_bits(ip), tmp, sizeof(tmp));
if (dst == NULL)
ereport(ERROR,
@@ -582,18 +583,19 @@ network_masklen(PG_FUNCTION_ARGS)
Datum
network_family(PG_FUNCTION_ARGS)
{
- inet *ip = PG_GETARG_INET_P(0);
-
- switch (ip_family(ip)) {
- case PGSQL_AF_INET:
- PG_RETURN_INT32(4);
- break;
- case PGSQL_AF_INET6:
- PG_RETURN_INT32(6);
- break;
- default:
- PG_RETURN_INT32(0);
- break;
+ inet *ip = PG_GETARG_INET_P(0);
+
+ switch (ip_family(ip))
+ {
+ case PGSQL_AF_INET:
+ PG_RETURN_INT32(4);
+ break;
+ case PGSQL_AF_INET6:
+ PG_RETURN_INT32(6);
+ break;
+ default:
+ PG_RETURN_INT32(0);
+ break;
}
}
@@ -602,38 +604,42 @@ network_broadcast(PG_FUNCTION_ARGS)
{
inet *ip = PG_GETARG_INET_P(0);
inet *dst;
- int byte;
- int bits;
- int maxbytes;
+ int byte;
+ int bits;
+ int maxbytes;
unsigned char mask;
- unsigned char *a, *b;
+ unsigned char *a,
+ *b;
/* make sure any unused bits are zeroed */
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
- if (ip_family(ip) == PGSQL_AF_INET) {
+ if (ip_family(ip) == PGSQL_AF_INET)
maxbytes = 4;
- } else {
+ else
maxbytes = 16;
- }
bits = ip_bits(ip);
a = ip_addr(ip);
b = ip_addr(dst);
- for (byte = 0 ; byte < maxbytes ; byte++) {
- if (bits >= 8) {
+ for (byte = 0; byte < maxbytes; byte++)
+ {
+ if (bits >= 8)
+ {
mask = 0x00;
bits -= 8;
- } else if (bits == 0) {
+ }
+ else if (bits == 0)
mask = 0xff;
- } else {
+ else
+ {
mask = 0xff >> bits;
bits = 0;
}
b[byte] = a[byte] | mask;
- }
+ }
ip_family(dst) = ip_family(ip);
ip_bits(dst) = ip_bits(ip);
@@ -650,38 +656,42 @@ network_network(PG_FUNCTION_ARGS)
{
inet *ip = PG_GETARG_INET_P(0);
inet *dst;
- int byte;
- int bits;
- int maxbytes;
+ int byte;
+ int bits;
+ int maxbytes;
unsigned char mask;
- unsigned char *a, *b;
+ unsigned char *a,
+ *b;
/* make sure any unused bits are zeroed */
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
- if (ip_family(ip) == PGSQL_AF_INET) {
+ if (ip_family(ip) == PGSQL_AF_INET)
maxbytes = 4;
- } else {
+ else
maxbytes = 16;
- }
bits = ip_bits(ip);
a = ip_addr(ip);
b = ip_addr(dst);
byte = 0;
- while (bits) {
- if (bits >= 8) {
+ while (bits)
+ {
+ if (bits >= 8)
+ {
mask = 0xff;
bits -= 8;
- } else {
+ }
+ else
+ {
mask = 0xff << (8 - bits);
bits = 0;
}
b[byte] = a[byte] & mask;
byte++;
- }
+ }
ip_family(dst) = ip_family(ip);
ip_bits(dst) = ip_bits(ip);
@@ -698,43 +708,46 @@ network_netmask(PG_FUNCTION_ARGS)
{
inet *ip = PG_GETARG_INET_P(0);
inet *dst;
- int byte;
- int bits;
- int maxbytes;
+ int byte;
+ int bits;
+ int maxbytes;
unsigned char mask;
unsigned char *b;
/* make sure any unused bits are zeroed */
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
- if (ip_family(ip) == PGSQL_AF_INET) {
+ if (ip_family(ip) == PGSQL_AF_INET)
maxbytes = 4;
- } else {
+ else
maxbytes = 16;
- }
bits = ip_bits(ip);
b = ip_addr(dst);
byte = 0;
- while (bits) {
- if (bits >= 8) {
+ while (bits)
+ {
+ if (bits >= 8)
+ {
mask = 0xff;
bits -= 8;
- } else {
+ }
+ else
+ {
mask = 0xff << (8 - bits);
bits = 0;
}
b[byte] = mask;
byte++;
- }
+ }
ip_family(dst) = ip_family(ip);
ip_bits(dst) = ip_bits(ip);
ip_type(dst) = 0;
VARATT_SIZEP(dst) = VARHDRSZ
- + ((char *)ip_addr(dst) - (char *) VARDATA(dst))
+ + ((char *) ip_addr(dst) - (char *) VARDATA(dst))
+ ip_addrsize(dst);
PG_RETURN_INET_P(dst);
@@ -745,43 +758,46 @@ network_hostmask(PG_FUNCTION_ARGS)
{
inet *ip = PG_GETARG_INET_P(0);
inet *dst;
- int byte;
- int bits;
- int maxbytes;
+ int byte;
+ int bits;
+ int maxbytes;
unsigned char mask;
unsigned char *b;
/* make sure any unused bits are zeroed */
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
- if (ip_family(ip) == PGSQL_AF_INET) {
+ if (ip_family(ip) == PGSQL_AF_INET)
maxbytes = 4;
- } else {
+ else
maxbytes = 16;
- }
bits = ip_maxbits(ip) - ip_bits(ip);
b = ip_addr(dst);
byte = maxbytes - 1;
- while (bits) {
- if (bits >= 8) {
+ while (bits)
+ {
+ if (bits >= 8)
+ {
mask = 0xff;
bits -= 8;
- } else {
+ }
+ else
+ {
mask = 0xff >> (8 - bits);
bits = 0;
}
b[byte] = mask;
byte--;
- }
+ }
ip_family(dst) = ip_family(ip);
ip_bits(dst) = ip_bits(ip);
ip_type(dst) = 0;
VARATT_SIZEP(dst) = VARHDRSZ
- + ((char *)ip_addr(dst) - (char *) VARDATA(dst))
+ + ((char *) ip_addr(dst) - (char *) VARDATA(dst))
+ ip_addrsize(dst);
PG_RETURN_INET_P(dst);
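network_netmask() and network_hostmask(), reindented above, both build a mask byte by byte from a prefix length: whole 0xff bytes while at least 8 bits remain, then one partial byte. A self-contained sketch of the netmask variant:

#include <stdio.h>

int
main(void)
{
	unsigned char mask[4] = {0, 0, 0, 0};
	int			bits = 20;		/* e.g. a /20 prefix */
	int			byte = 0;

	while (bits > 0)
	{
		if (bits >= 8)
		{
			mask[byte] = 0xff;	/* full byte of the prefix */
			bits -= 8;
		}
		else
		{
			mask[byte] = 0xff << (8 - bits);	/* partial byte */
			bits = 0;
		}
		byte++;
	}
	/* prints 255.255.240.0 */
	printf("%d.%d.%d.%d\n", mask[0], mask[1], mask[2], mask[3]);
	return 0;
}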
@@ -806,13 +822,12 @@ convert_network_to_scalar(Datum value, Oid typid)
case CIDROID:
{
inet *ip = DatumGetInetP(value);
- int len;
- double res;
- int i;
+ int len;
+ double res;
+ int i;
/*
- * Note that we don't use the full address
- * here.
+ * Note that we don't use the full address here.
*/
if (ip_family(ip) == PGSQL_AF_INET)
len = 4;
@@ -820,7 +835,8 @@ convert_network_to_scalar(Datum value, Oid typid)
len = 5;
res = ip_family(ip);
- for (i = 0 ; i < len ; i++) {
+ for (i = 0; i < len; i++)
+ {
res *= 256;
res += ip_addr(ip)[i];
}
@@ -851,30 +867,34 @@ convert_network_to_scalar(Datum value, Oid typid)
/*
* int
* bitncmp(l, r, n)
- * compare bit masks l and r, for n bits.
+ * compare bit masks l and r, for n bits.
* return:
- * -1, 1, or 0 in the libc tradition.
+ * -1, 1, or 0 in the libc tradition.
* note:
- * network byte order assumed. this means 192.5.5.240/28 has
- * 0x11110000 in its fourth octet.
+ * network byte order assumed. this means 192.5.5.240/28 has
+ * 0x11110000 in its fourth octet.
* author:
- * Paul Vixie (ISC), June 1996
+ * Paul Vixie (ISC), June 1996
*/
static int
bitncmp(void *l, void *r, int n)
{
- u_int lb, rb;
- int x, b;
+ u_int lb,
+ rb;
+ int x,
+ b;
b = n / 8;
x = memcmp(l, r, b);
if (x)
return (x);
- lb = ((const u_char *)l)[b];
- rb = ((const u_char *)r)[b];
- for (b = n % 8; b > 0; b--) {
- if ((lb & 0x80) != (rb & 0x80)) {
+ lb = ((const u_char *) l)[b];
+ rb = ((const u_char *) r)[b];
+ for (b = n % 8; b > 0; b--)
+ {
+ if ((lb & 0x80) != (rb & 0x80))
+ {
if (lb & 0x80)
return (1);
return (-1);
@@ -888,16 +908,19 @@ bitncmp(void *l, void *r, int n)
static bool
addressOK(unsigned char *a, int bits, int family)
{
- int byte;
- int nbits;
- int maxbits;
- int maxbytes;
+ int byte;
+ int nbits;
+ int maxbits;
+ int maxbytes;
unsigned char mask;
- if (family == PGSQL_AF_INET) {
+ if (family == PGSQL_AF_INET)
+ {
maxbits = 32;
maxbytes = 4;
- } else {
+ }
+ else
+ {
maxbits = 128;
maxbytes = 16;
}
@@ -912,7 +935,8 @@ addressOK(unsigned char *a, int bits, int family)
if (bits != 0)
mask >>= nbits;
- while (byte < maxbytes) {
+ while (byte < maxbytes)
+ {
if ((a[byte] & mask) != 0)
return false;
mask = 0xff;
@@ -948,5 +972,5 @@ network_scan_last(Datum in)
{
return DirectFunctionCall2(inet_set_masklen,
DirectFunctionCall1(network_broadcast, in),
- Int32GetDatum(-1));
+ Int32GetDatum(-1));
}
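The bitncmp() comment above spells out the convention used by the network_sub/network_sup family: compare only the first n bits of two addresses stored in network byte order and return -1, 0, or 1 in the libc tradition. A standalone version of the same logic (bitncmp_demo is an illustrative name):

#include <stdio.h>
#include <string.h>

static int
bitncmp_demo(const unsigned char *l, const unsigned char *r, int n)
{
	unsigned int lb,
				rb;
	int			x,
				b;

	b = n / 8;
	x = memcmp(l, r, b);		/* compare the whole bytes first */
	if (x != 0 || (n % 8) == 0)
		return x;
	lb = l[b];
	rb = r[b];
	for (b = n % 8; b > 0; b--)	/* then the leftover high-order bits */
	{
		if ((lb & 0x80) != (rb & 0x80))
			return (lb & 0x80) ? 1 : -1;
		lb <<= 1;
		rb <<= 1;
	}
	return 0;
}

int
main(void)
{
	/* 192.168.31.5 lies inside 192.168.16.0/20: the first 20 bits match */
	unsigned char net[4] = {192, 168, 16, 0};
	unsigned char addr[4] = {192, 168, 31, 5};

	printf("%d\n", bitncmp_demo(addr, net, 20));	/* prints 0 */
	return 0;
}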
diff --git a/src/backend/utils/adt/not_in.c b/src/backend/utils/adt/not_in.c
index c3f0aee5cb9..5deeea2f1d1 100644
--- a/src/backend/utils/adt/not_in.c
+++ b/src/backend/utils/adt/not_in.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/not_in.c,v 1.33 2003/07/27 04:53:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/Attic/not_in.c,v 1.34 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +62,7 @@ int4notin(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("invalid name syntax"),
- errhint("Must provide \"relationname.attributename\".")));
+ errhint("Must provide \"relationname.attributename\".")));
attribute = strVal(nth(nnames - 1, names));
names = ltruncate(nnames - 1, names);
relrv = makeRangeVarFromNameList(names);
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 4f5029a26c1..5b0fe291107 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/numeric.c,v 1.64 2003/07/30 19:48:41 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/numeric.c,v 1.65 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -101,7 +101,7 @@ typedef int16 NumericDigit;
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
@@ -166,8 +166,10 @@ static NumericVar const_two =
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_five_data[1] = {5000};
+
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_five_data[1] = {50};
+
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_five_data[1] = {5};
#endif
@@ -176,8 +178,10 @@ static NumericVar const_zero_point_five =
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_nine_data[1] = {9000};
+
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_nine_data[1] = {90};
+
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_nine_data[1] = {9};
#endif
@@ -188,10 +192,12 @@ static NumericVar const_zero_point_nine =
static NumericDigit const_zero_point_01_data[1] = {100};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
+
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
+
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
@@ -200,8 +206,10 @@ static NumericVar const_zero_point_01 =
#if DEC_DIGITS == 4
static NumericDigit const_one_point_one_data[2] = {1, 1000};
+
#elif DEC_DIGITS == 2
static NumericDigit const_one_point_one_data[2] = {1, 10};
+
#elif DEC_DIGITS == 1
static NumericDigit const_one_point_one_data[2] = {1, 1};
#endif
@@ -212,7 +220,7 @@ static NumericVar const_nan =
{0, 0, NUMERIC_NAN, 0, NULL, NULL};
#if DEC_DIGITS == 4
-static const int round_powers[4] = { 0, 1000, 100, 10 };
+static const int round_powers[4] = {0, 1000, 100, 10};
#endif
@@ -263,9 +271,9 @@ static int cmp_var(NumericVar *var1, NumericVar *var2);
static void add_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
- int rscale);
+ int rscale);
static void div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
- int rscale);
+ int rscale);
static int select_div_scale(NumericVar *var1, NumericVar *var2);
static void mod_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void ceil_var(NumericVar *var, NumericVar *result);
@@ -278,7 +286,7 @@ static void ln_var(NumericVar *arg, NumericVar *result, int rscale);
static void log_var(NumericVar *base, NumericVar *num, NumericVar *result);
static void power_var(NumericVar *base, NumericVar *exp, NumericVar *result);
static void power_var_int(NumericVar *base, int exp, NumericVar *result,
- int rscale);
+ int rscale);
static int cmp_abs(NumericVar *var1, NumericVar *var2);
static void add_abs(NumericVar *var1, NumericVar *var2, NumericVar *result);
@@ -408,7 +416,7 @@ numeric_recv(PG_FUNCTION_ARGS)
value.dscale = (uint16) pq_getmsgint(buf, sizeof(uint16));
for (i = 0; i < len; i++)
{
- NumericDigit d = pq_getmsgint(buf, sizeof(NumericDigit));
+ NumericDigit d = pq_getmsgint(buf, sizeof(NumericDigit));
if (d < 0 || d >= NBASE)
ereport(ERROR,
@@ -1081,8 +1089,8 @@ numeric_mul(PG_FUNCTION_ARGS)
/*
* Unpack the values, let mul_var() compute the result and return it.
- * Unlike add_var() and sub_var(), mul_var() will round its result.
- * In the case of numeric_mul(), which is invoked for the * operator on
+ * Unlike add_var() and sub_var(), mul_var() will round its result. In
+ * the case of numeric_mul(), which is invoked for the * operator on
* numerics, we request exact representation for the product (rscale =
* sum(dscale of arg1, dscale of arg2)).
*/
@@ -1303,7 +1311,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
+ * Unpack the argument and determine the result scale. We choose a
* scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
* but in any case not less than the input's dscale.
*/
@@ -1356,7 +1364,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
+ * Unpack the argument and determine the result scale. We choose a
* scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
* but in any case not less than the input's dscale.
*/
@@ -1369,8 +1377,8 @@ numeric_exp(PG_FUNCTION_ARGS)
val = numericvar_to_double_no_overflow(&arg);
/*
- * log10(result) = num * log10(e), so this is approximately the decimal
- * weight of the result:
+ * log10(result) = num * log10(e), so this is approximately the
+ * decimal weight of the result:
*/
val *= 0.434294481903252;
@@ -2055,7 +2063,7 @@ numeric_variance(PG_FUNCTION_ARGS)
}
else
{
- mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
+ mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
div_var(&vsumX2, &vNminus1, &vsumX, rscale); /* variance */
@@ -2131,7 +2139,7 @@ numeric_stddev(PG_FUNCTION_ARGS)
}
else
{
- mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
+ mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
div_var(&vsumX2, &vNminus1, &vsumX, rscale); /* variance */
sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
@@ -2409,7 +2417,6 @@ dump_var(const char *str, NumericVar *var)
printf("\n");
}
-
#endif /* NUMERIC_DEBUG */
@@ -2434,7 +2441,7 @@ alloc_var(NumericVar *var, int ndigits)
{
digitbuf_free(var->buf);
var->buf = digitbuf_alloc(ndigits + 1);
- var->buf[0] = 0; /* spare digit for rounding */
+ var->buf[0] = 0; /* spare digit for rounding */
var->digits = var->buf + 1;
var->ndigits = ndigits;
}
@@ -2495,8 +2502,8 @@ set_var_from_str(const char *str, NumericVar *dest)
NumericDigit *digits;
/*
- * We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * We first parse the string to extract decimal digits and determine
+ * the correct decimal weight. Then convert to NBASE representation.
*/
/* skip leading spaces */
@@ -2529,9 +2536,9 @@ set_var_from_str(const char *str, NumericVar *dest)
if (!isdigit((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for numeric: \"%s\"", str)));
+ errmsg("invalid input syntax for numeric: \"%s\"", str)));
- decdigits = (unsigned char *) palloc(strlen(cp) + DEC_DIGITS*2);
+ decdigits = (unsigned char *) palloc(strlen(cp) + DEC_DIGITS * 2);
/* leading padding for digit alignment later */
memset(decdigits, 0, DEC_DIGITS);
@@ -2552,8 +2559,8 @@ set_var_from_str(const char *str, NumericVar *dest)
if (have_dp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for numeric: \"%s\"",
+ str)));
have_dp = TRUE;
cp++;
}
@@ -2563,7 +2570,7 @@ set_var_from_str(const char *str, NumericVar *dest)
ddigits = i - DEC_DIGITS;
/* trailing padding for digit alignment later */
- memset(decdigits + i, 0, DEC_DIGITS-1);
+ memset(decdigits + i, 0, DEC_DIGITS - 1);
/* Handle exponent, if any */
if (*cp == 'e' || *cp == 'E')
@@ -2604,16 +2611,16 @@ set_var_from_str(const char *str, NumericVar *dest)
/*
* Okay, convert pure-decimal representation to base NBASE. First we
- * need to determine the converted weight and ndigits. offset is the
+ * need to determine the converted weight and ndigits. offset is the
* number of decimal zeroes to insert before the first given digit to
* have a correctly aligned first NBASE digit.
*/
if (dweight >= 0)
- weight = (dweight + 1 + DEC_DIGITS-1) / DEC_DIGITS - 1;
+ weight = (dweight + 1 + DEC_DIGITS - 1) / DEC_DIGITS - 1;
else
- weight = - ((-dweight - 1) / DEC_DIGITS + 1);
+ weight = -((-dweight - 1) / DEC_DIGITS + 1);
offset = (weight + 1) * DEC_DIGITS - (dweight + 1);
- ndigits = (ddigits + offset + DEC_DIGITS-1) / DEC_DIGITS;
+ ndigits = (ddigits + offset + DEC_DIGITS - 1) / DEC_DIGITS;
alloc_var(dest, ndigits);
dest->sign = sign;
@@ -2626,10 +2633,10 @@ set_var_from_str(const char *str, NumericVar *dest)
while (ndigits-- > 0)
{
#if DEC_DIGITS == 4
- *digits++ = ((decdigits[i] * 10 + decdigits[i+1]) * 10 +
- decdigits[i+2]) * 10 + decdigits[i+3];
+ *digits++ = ((decdigits[i] * 10 + decdigits[i + 1]) * 10 +
+ decdigits[i + 2]) * 10 + decdigits[i + 3];
#elif DEC_DIGITS == 2
- *digits++ = decdigits[i] * 10 + decdigits[i+1];
+ *digits++ = decdigits[i] * 10 + decdigits[i + 1];
#elif DEC_DIGITS == 1
*digits++ = decdigits[i];
#else
@@ -2704,9 +2711,10 @@ get_str_from_var(NumericVar *var, int dscale)
char *endcp;
int i;
int d;
- NumericDigit dig;
+ NumericDigit dig;
+
#if DEC_DIGITS > 1
- NumericDigit d1;
+ NumericDigit d1;
#endif
if (dscale < 0)
@@ -2720,10 +2728,10 @@ get_str_from_var(NumericVar *var, int dscale)
/*
* Allocate space for the result.
*
- * i is set to to # of decimal digits before decimal point.
- * dscale is the # of decimal digits we will print after decimal point.
- * We may generate as many as DEC_DIGITS-1 excess digits at the end,
- * and in addition we need room for sign, decimal point, null terminator.
+ * i is set to to # of decimal digits before decimal point. dscale is the
+ * # of decimal digits we will print after decimal point. We may
+ * generate as many as DEC_DIGITS-1 excess digits at the end, and in
+ * addition we need room for sign, decimal point, null terminator.
*/
i = (var->weight + 1) * DEC_DIGITS;
if (i <= 0)
@@ -2754,7 +2762,7 @@ get_str_from_var(NumericVar *var, int dscale)
/* In the first digit, suppress extra leading decimal zeroes */
#if DEC_DIGITS == 4
{
- bool putit = (d > 0);
+ bool putit = (d > 0);
d1 = dig / 1000;
dig -= d1 * 1000;
@@ -2789,7 +2797,7 @@ get_str_from_var(NumericVar *var, int dscale)
/*
* If requested, output a decimal point and all the digits that follow
- * it. We initially put out a multiple of DEC_DIGITS digits, then
+ * it. We initially put out a multiple of DEC_DIGITS digits, then
* truncate if needed.
*/
if (dscale > 0)
@@ -2966,7 +2974,7 @@ apply_typmod(NumericVar *var, int32 typmod)
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("numeric field overflow"),
errdetail("ABS(value) >= 10^%d for field with precision %d, scale %d.",
- ddigits-1, precision, scale)));
+ ddigits - 1, precision, scale)));
break;
}
ddigits -= DEC_DIGITS;
@@ -2977,7 +2985,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*
* CAUTION: var's contents may be modified by rounding!
*/
@@ -3006,10 +3014,11 @@ numericvar_to_int8(NumericVar *var, int64 *result)
/*
* For input like 10000000000, we must treat stripped digits as real.
- * So the loop assumes there are weight+1 digits before the decimal point.
+ * So the loop assumes there are weight+1 digits before the decimal
+ * point.
*/
weight = var->weight;
- Assert(weight >= 0 && ndigits <= weight+1);
+ Assert(weight >= 0 && ndigits <= weight + 1);
/* Construct the result */
digits = var->digits;
@@ -3021,6 +3030,7 @@ numericvar_to_int8(NumericVar *var, int64 *result)
val *= NBASE;
if (i < ndigits)
val += digits[i];
+
/*
* The overflow check is a bit tricky because we want to accept
* INT64_MIN, which will overflow the positive accumulator. We
@@ -3051,7 +3061,7 @@ int8_to_numericvar(int64 val, NumericVar *var)
int ndigits;
/* int8 can require at most 19 decimal digits; add one for safety */
- alloc_var(var, 20/DEC_DIGITS);
+ alloc_var(var, 20 / DEC_DIGITS);
if (val < 0)
{
var->sign = NUMERIC_NEG;
@@ -3071,7 +3081,8 @@ int8_to_numericvar(int64 val, NumericVar *var)
}
ptr = var->digits + var->ndigits;
ndigits = 0;
- do {
+ do
+ {
ptr--;
ndigits++;
newuval = uval / NBASE;
@@ -3420,7 +3431,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -3439,6 +3450,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
ri,
i1,
i2;
+
/* copy these values into local vars for speed in inner loop */
int var1ndigits = var1->ndigits;
int var2ndigits = var2->ndigits;
@@ -3462,9 +3474,10 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
- * would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that
- * one or both inputs have fewer digits than they really do.
+ * would have more than rscale fractional digits, truncate the
+ * computation with MUL_GUARD_DIGITS guard digits. We do that by
+ * pretending that one or both inputs have fewer digits than they
+ * really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
maxdigits = res_weight + 1 + (rscale * DEC_DIGITS) + MUL_GUARD_DIGITS;
@@ -3498,13 +3511,13 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* We do the arithmetic in an array "dig[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
- * to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
+ * headroom to avoid normalizing carries immediately.
*
- * maxdig tracks the maximum possible value of any dig[] entry;
- * when this threatens to exceed INT_MAX, we take the time to propagate
- * carries. To avoid overflow in maxdig itself, it actually represents
- * the max possible value divided by NBASE-1.
+ * maxdig tracks the maximum possible value of any dig[] entry; when this
+ * threatens to exceed INT_MAX, we take the time to propagate carries.
+ * To avoid overflow in maxdig itself, it actually represents the max
+ * possible value divided by NBASE-1.
*/
dig = (int *) palloc0(res_ndigits * sizeof(int));
maxdig = 0;
@@ -3512,24 +3525,24 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
ri = res_ndigits - 1;
for (i1 = var1ndigits - 1; i1 >= 0; ri--, i1--)
{
- int var1digit = var1digits[i1];
+ int var1digit = var1digits[i1];
if (var1digit == 0)
continue;
/* Time to normalize? */
maxdig += var1digit;
- if (maxdig > INT_MAX/(NBASE-1))
+ if (maxdig > INT_MAX / (NBASE - 1))
{
/* Yes, do it */
carry = 0;
- for (i = res_ndigits-1; i >= 0; i--)
+ for (i = res_ndigits - 1; i >= 0; i--)
{
newdig = dig[i] + carry;
if (newdig >= NBASE)
{
- carry = newdig/NBASE;
- newdig -= carry*NBASE;
+ carry = newdig / NBASE;
+ newdig -= carry * NBASE;
}
else
carry = 0;
@@ -3543,9 +3556,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/* Add appropriate multiple of var2 into the accumulator */
i = ri;
for (i2 = var2ndigits - 1; i2 >= 0; i2--)
- {
dig[i--] += var1digit * var2digits[i2];
- }
}
/*
@@ -3556,13 +3567,13 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
alloc_var(result, res_ndigits);
res_digits = result->digits;
carry = 0;
- for (i = res_ndigits-1; i >= 0; i--)
+ for (i = res_ndigits - 1; i >= 0; i--)
{
newdig = dig[i] + carry;
if (newdig >= NBASE)
{
- carry = newdig/NBASE;
- newdig -= carry*NBASE;
+ carry = newdig / NBASE;
+ newdig -= carry * NBASE;
}
else
carry = 0;
@@ -3590,7 +3601,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
* div_var() -
*
* Division on variable level. Quotient of var1 / var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -3611,6 +3622,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
fquotient;
int qi;
int i;
+
/* copy these values into local vars for speed in inner loop */
int var1ndigits = var1->ndigits;
int var2ndigits = var2->ndigits;
@@ -3645,7 +3657,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
res_sign = NUMERIC_NEG;
res_weight = var1->weight - var2->weight + 1;
/* The number of accurate result digits we need to produce: */
- div_ndigits = res_weight + 1 + (rscale + DEC_DIGITS-1)/DEC_DIGITS;
+ div_ndigits = res_weight + 1 + (rscale + DEC_DIGITS - 1) / DEC_DIGITS;
/* Add guard digits for roundoff error */
div_ndigits += DIV_GUARD_DIGITS;
if (div_ndigits < DIV_GUARD_DIGITS)
@@ -3656,8 +3668,8 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* We do the arithmetic in an array "div[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
- * to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
+ * headroom to avoid normalizing carries immediately.
*
* We start with div[] containing one zero digit followed by the
* dividend's digits (plus appended zeroes to reach the desired
@@ -3668,7 +3680,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
for (i = 0; i < var1ndigits; i++)
- div[i+1] = var1digits[i];
+ div[i + 1] = var1digits[i];
/*
* We estimate each quotient digit using floating-point arithmetic,
@@ -3685,10 +3697,10 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
fdivisorinverse = 1.0 / fdivisor;
/*
- * maxdiv tracks the maximum possible absolute value of any div[] entry;
- * when this threatens to exceed INT_MAX, we take the time to propagate
- * carries. To avoid overflow in maxdiv itself, it actually represents
- * the max possible abs. value divided by NBASE-1.
+ * maxdiv tracks the maximum possible absolute value of any div[]
+ * entry; when this threatens to exceed INT_MAX, we take the time to
+ * propagate carries. To avoid overflow in maxdiv itself, it actually
+ * represents the max possible abs. value divided by NBASE-1.
*/
maxdiv = 1;
@@ -3702,19 +3714,19 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
for (i = 1; i < 4; i++)
{
fdividend *= NBASE;
- if (qi+i <= div_ndigits)
- fdividend += (double) div[qi+i];
+ if (qi + i <= div_ndigits)
+ fdividend += (double) div[qi + i];
}
/* Compute the (approximate) quotient digit */
fquotient = fdividend * fdivisorinverse;
qdigit = (fquotient >= 0.0) ? ((int) fquotient) :
- (((int) fquotient) - 1); /* truncate towards -infinity */
+ (((int) fquotient) - 1); /* truncate towards -infinity */
if (qdigit != 0)
{
/* Do we need to normalize now? */
maxdiv += Abs(qdigit);
- if (maxdiv > INT_MAX/(NBASE-1))
+ if (maxdiv > INT_MAX / (NBASE - 1))
{
/* Yes, do it */
carry = 0;
@@ -3723,13 +3735,13 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
newdig = div[i] + carry;
if (newdig < 0)
{
- carry = -((-newdig-1)/NBASE) - 1;
- newdig -= carry*NBASE;
+ carry = -((-newdig - 1) / NBASE) - 1;
+ newdig -= carry * NBASE;
}
else if (newdig >= NBASE)
{
- carry = newdig/NBASE;
- newdig -= carry*NBASE;
+ carry = newdig / NBASE;
+ newdig -= carry * NBASE;
}
else
carry = 0;
@@ -3737,12 +3749,14 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
}
newdig = div[qi] + carry;
div[qi] = newdig;
+
/*
- * All the div[] digits except possibly div[qi] are now
- * in the range 0..NBASE-1.
+ * All the div[] digits except possibly div[qi] are now in
+ * the range 0..NBASE-1.
*/
- maxdiv = Abs(newdig) / (NBASE-1);
+ maxdiv = Abs(newdig) / (NBASE - 1);
maxdiv = Max(maxdiv, 1);
+
/*
* Recompute the quotient digit since new info may have
* propagated into the top four dividend digits
@@ -3751,33 +3765,34 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
for (i = 1; i < 4; i++)
{
fdividend *= NBASE;
- if (qi+i <= div_ndigits)
- fdividend += (double) div[qi+i];
+ if (qi + i <= div_ndigits)
+ fdividend += (double) div[qi + i];
}
/* Compute the (approximate) quotient digit */
fquotient = fdividend * fdivisorinverse;
qdigit = (fquotient >= 0.0) ? ((int) fquotient) :
- (((int) fquotient) - 1); /* truncate towards -infinity */
+ (((int) fquotient) - 1); /* truncate towards
+ * -infinity */
maxdiv += Abs(qdigit);
}
/* Subtract off the appropriate multiple of the divisor */
if (qdigit != 0)
{
- int istop = Min(var2ndigits, div_ndigits-qi+1);
+ int istop = Min(var2ndigits, div_ndigits - qi + 1);
for (i = 0; i < istop; i++)
- div[qi+i] -= qdigit * var2digits[i];
+ div[qi + i] -= qdigit * var2digits[i];
}
}
/*
- * The dividend digit we are about to replace might still be nonzero.
- * Fold it into the next digit position. We don't need to worry about
- * overflow here since this should nearly cancel with the subtraction
- * of the divisor.
+ * The dividend digit we are about to replace might still be
+ * nonzero. Fold it into the next digit position. We don't need
+ * to worry about overflow here since this should nearly cancel
+ * with the subtraction of the divisor.
*/
- div[qi+1] += div[qi] * NBASE;
+ div[qi + 1] += div[qi] * NBASE;
div[qi] = qdigit;
}
@@ -3787,12 +3802,10 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*/
fdividend = (double) div[qi];
for (i = 1; i < 4; i++)
- {
fdividend *= NBASE;
- }
fquotient = fdividend * fdivisorinverse;
qdigit = (fquotient >= 0.0) ? ((int) fquotient) :
- (((int) fquotient) - 1); /* truncate towards -infinity */
+ (((int) fquotient) - 1); /* truncate towards -infinity */
div[qi] = qdigit;
/*
@@ -3800,7 +3813,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
* which we combine with storing the result digits into the output.
* Note that this is still done at full precision w/guard digits.
*/
- alloc_var(result, div_ndigits+1);
+ alloc_var(result, div_ndigits + 1);
res_digits = result->digits;
carry = 0;
for (i = div_ndigits; i >= 0; i--)
@@ -3808,13 +3821,13 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
newdig = div[i] + carry;
if (newdig < 0)
{
- carry = -((-newdig-1)/NBASE) - 1;
- newdig -= carry*NBASE;
+ carry = -((-newdig - 1) / NBASE) - 1;
+ newdig -= carry * NBASE;
}
else if (newdig >= NBASE)
{
- carry = newdig/NBASE;
- newdig -= carry*NBASE;
+ carry = newdig / NBASE;
+ newdig -= carry * NBASE;
}
else
carry = 0;
@@ -3889,8 +3902,8 @@ select_div_scale(NumericVar *var1, NumericVar *var2)
}
/*
- * Estimate weight of quotient. If the two first digits are equal,
- * we can't be sure, but assume that var1 is less than var2.
+ * Estimate weight of quotient. If the two first digits are equal, we
+ * can't be sure, but assume that var1 is less than var2.
*/
qweight = weight1 - weight2;
if (firstdigit1 <= firstdigit2)
@@ -4176,16 +4189,17 @@ exp_var_internal(NumericVar *arg, NumericVar *result, int rscale)
{
ndiv2++;
local_rscale++;
- mul_var(&x, &const_zero_point_five, &x, x.dscale+1);
+ mul_var(&x, &const_zero_point_five, &x, x.dscale + 1);
}
/*
* Use the Taylor series
*
- * exp(x) = 1 + x + x^2/2! + x^3/3! + ...
+ * exp(x) = 1 + x + x^2/2! + x^3/3! + ...
*
* Given the limited range of x, this should converge reasonably quickly.
- * We run the series until the terms fall below the local_rscale limit.
+ * We run the series until the terms fall below the local_rscale
+ * limit.
*/
add_var(&const_one, &x, result);
set_var_from_var(&x, &xpow);
@@ -4265,7 +4279,7 @@ ln_var(NumericVar *arg, NumericVar *result, int rscale)
/*
* We use the Taylor series for 0.5 * ln((1+z)/(1-z)),
*
- * z + z^3/3 + z^5/5 + ...
+ * z + z^3/3 + z^5/5 + ...
*
* where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048
* due to the above range-reduction of x.
@@ -4292,7 +4306,7 @@ ln_var(NumericVar *arg, NumericVar *result, int rscale)
add_var(result, &elem, result);
- if (elem.weight < (result->weight - local_rscale * 2/DEC_DIGITS))
+ if (elem.weight < (result->weight - local_rscale * 2 / DEC_DIGITS))
break;
}
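The ln_var() hunk above documents the series it sums after range reduction: 0.5 * ln((1+z)/(1-z)) = z + z^3/3 + z^5/5 + ... with z = (x-1)/(x+1), so ln(x) is twice that sum. A double-precision sketch of just the series, without the range reduction (it therefore only converges quickly for x near 1):

#include <math.h>
#include <stdio.h>

static double
ln_series(double x)
{
	double		z = (x - 1.0) / (x + 1.0);
	double		zz = z * z;
	double		term = z;
	double		sum = z;
	int			k;

	for (k = 3; k < 200; k += 2)
	{
		term *= zz;				/* next odd power of z */
		sum += term / k;
		if (fabs(term / k) < 1e-17)
			break;
	}
	return 2.0 * sum;			/* ln(x) = 2 * (z + z^3/3 + z^5/5 + ...) */
}

int
main(void)
{
	printf("%.15f vs %.15f\n", ln_series(1.05), log(1.05));
	return 0;
}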
@@ -4391,7 +4405,7 @@ power_var(NumericVar *base, NumericVar *exp, NumericVar *result)
set_var_from_var(exp, &x);
if (numericvar_to_int8(&x, &expval64))
{
- int expval = (int) expval64;
+ int expval = (int) expval64;
/* Test for overflow by reverse-conversion. */
if ((int64) expval == expval64)
@@ -4420,11 +4434,11 @@ power_var(NumericVar *base, NumericVar *exp, NumericVar *result)
dec_digits = (base->weight + 1) * DEC_DIGITS;
if (dec_digits > 1)
- rscale = NUMERIC_MIN_SIG_DIGITS*2 - (int) log10(dec_digits - 1);
+ rscale = NUMERIC_MIN_SIG_DIGITS * 2 - (int) log10(dec_digits - 1);
else if (dec_digits < 1)
- rscale = NUMERIC_MIN_SIG_DIGITS*2 - (int) log10(1 - dec_digits);
+ rscale = NUMERIC_MIN_SIG_DIGITS * 2 - (int) log10(1 - dec_digits);
else
- rscale = NUMERIC_MIN_SIG_DIGITS*2;
+ rscale = NUMERIC_MIN_SIG_DIGITS * 2;
rscale = Max(rscale, base->dscale * 2);
rscale = Max(rscale, exp->dscale * 2);
@@ -4442,7 +4456,10 @@ power_var(NumericVar *base, NumericVar *exp, NumericVar *result)
/* convert input to float8, ignoring overflow */
val = numericvar_to_double_no_overflow(&ln_num);
- /* log10(result) = num * log10(e), so this is approximately the weight: */
+ /*
+ * log10(result) = num * log10(e), so this is approximately the
+ * weight:
+ */
val *= 0.434294481903252;
/* limit to something that won't cause integer overflow */
@@ -4483,7 +4500,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
(errcode(ERRCODE_FLOATING_POINT_EXCEPTION),
errmsg("zero raised to zero is undefined")));
set_var_from_var(&const_one, result);
- result->dscale = rscale; /* no need to round */
+ result->dscale = rscale; /* no need to round */
return;
case 1:
set_var_from_var(base, result);
@@ -4500,8 +4517,8 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
}
/*
- * The general case repeatedly multiplies base according to the
- * bit pattern of exp. We do the multiplications with some extra
+ * The general case repeatedly multiplies base according to the bit
+ * pattern of exp. We do the multiplications with some extra
* precision.
*/
neg = (exp < 0);
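power_var_int()'s comment above says the general case "repeatedly multiplies base according to the bit pattern of exp". That is square-and-multiply exponentiation; a plain integer sketch (ipow is an illustrative name, using ordinary unsigned arithmetic rather than NumericVar):

#include <stdio.h>

static unsigned long long
ipow(unsigned long long base, unsigned int exp)
{
	unsigned long long result = 1;

	while (exp > 0)
	{
		if (exp & 1)			/* low bit set: multiply this power in */
			result *= base;
		base *= base;			/* square for the next bit */
		exp >>= 1;
	}
	return result;
}

int
main(void)
{
	printf("%llu\n", ipow(3, 13));	/* prints 1594323 */
	return 0;
}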
@@ -4595,8 +4612,8 @@ cmp_abs(NumericVar *var1, NumericVar *var2)
}
/*
- * At this point, we've run out of digits on one side or the other;
- * so any remaining nonzero digits imply that side is larger
+ * At this point, we've run out of digits on one side or the other; so
+ * any remaining nonzero digits imply that side is larger
*/
while (i1 < var1->ndigits)
{
@@ -4789,7 +4806,7 @@ sub_abs(NumericVar *var1, NumericVar *var2, NumericVar *result)
static void
round_var(NumericVar *var, int rscale)
{
- NumericDigit *digits = var->digits;
+ NumericDigit *digits = var->digits;
int di;
int ndigits;
int carry;
@@ -4800,8 +4817,8 @@ round_var(NumericVar *var, int rscale)
di = (var->weight + 1) * DEC_DIGITS + rscale;
/*
- * If di = 0, the value loses all digits, but could round up to 1
- * if its first extra digit is >= 5. If di < 0 the result must be 0.
+ * If di = 0, the value loses all digits, but could round up to 1 if
+ * its first extra digit is >= 5. If di < 0 the result must be 0.
*/
if (di < 0)
{
@@ -4812,7 +4829,7 @@ round_var(NumericVar *var, int rscale)
else
{
/* NBASE digits wanted */
- ndigits = (di + DEC_DIGITS-1) / DEC_DIGITS;
+ ndigits = (di + DEC_DIGITS - 1) / DEC_DIGITS;
/* 0, or number of decimal digits to keep in last NBASE digit */
di %= DEC_DIGITS;
@@ -4827,14 +4844,12 @@ round_var(NumericVar *var, int rscale)
carry = (digits[ndigits] >= HALF_NBASE) ? 1 : 0;
#else
if (di == 0)
- {
carry = (digits[ndigits] >= HALF_NBASE) ? 1 : 0;
- }
else
{
/* Must round within last NBASE digit */
- int extra,
- pow10;
+ int extra,
+ pow10;
#if DEC_DIGITS == 4
pow10 = round_powers[di];
@@ -4846,7 +4861,7 @@ round_var(NumericVar *var, int rscale)
extra = digits[--ndigits] % pow10;
digits[ndigits] -= extra;
carry = 0;
- if (extra >= pow10/2)
+ if (extra >= pow10 / 2)
{
pow10 += digits[ndigits];
if (pow10 >= NBASE)
@@ -4917,7 +4932,7 @@ trunc_var(NumericVar *var, int rscale)
else
{
/* NBASE digits wanted */
- ndigits = (di + DEC_DIGITS-1) / DEC_DIGITS;
+ ndigits = (di + DEC_DIGITS - 1) / DEC_DIGITS;
if (ndigits <= var->ndigits)
{
@@ -4932,9 +4947,9 @@ trunc_var(NumericVar *var, int rscale)
if (di > 0)
{
/* Must truncate within last NBASE digit */
- NumericDigit *digits = var->digits;
- int extra,
- pow10;
+ NumericDigit *digits = var->digits;
+ int extra,
+ pow10;
#if DEC_DIGITS == 4
pow10 = round_powers[di];
@@ -4959,7 +4974,7 @@ trunc_var(NumericVar *var, int rscale)
static void
strip_var(NumericVar *var)
{
- NumericDigit *digits = var->digits;
+ NumericDigit *digits = var->digits;
int ndigits = var->ndigits;
/* Strip leading zeroes */
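Many of the numeric.c hunks above rely on the digit layout described in the file's header comments: values are stored as base-NBASE digits, and the first digit is multiplied by NBASE ** weight, i.e. weight+1 NBASE digits sit before the decimal point. A tiny sketch of reading such a digit array back as a double, assuming the NBASE = 10000 setting implied by the DEC_DIGITS == 4 constants shown earlier:

#include <math.h>
#include <stdio.h>

#define NBASE 10000

int
main(void)
{
	/* 12345678.9 stored as 1234 5678 . 9000 in base 10000, weight = 1 */
	short		digits[] = {1234, 5678, 9000};
	int			ndigits = 3;
	int			weight = 1;
	double		val = 0.0;
	int			i;

	for (i = 0; i < ndigits; i++)
		val += digits[i] * pow((double) NBASE, (double) (weight - i));
	printf("%.1f\n", val);		/* prints 12345678.9 */
	return 0;
}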
diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c
index a73842785e5..8b6b43ac894 100644
--- a/src/backend/utils/adt/numutils.c
+++ b/src/backend/utils/adt/numutils.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/numutils.c,v 1.55 2003/07/27 04:53:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/numutils.c,v 1.56 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,7 +96,7 @@ pg_atoi(char *s, int size, int c)
case sizeof(int32):
if (errno == ERANGE
#if defined(HAVE_LONG_INT_64)
- /* won't get ERANGE on these with 64-bit longs... */
+ /* won't get ERANGE on these with 64-bit longs... */
|| l < INT_MIN || l > INT_MAX
#endif
)
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index aa070c2694c..d0802593bf0 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/oid.c,v 1.49 2003/07/27 04:53:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/oid.c,v 1.50 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -222,9 +222,7 @@ oidvectorrecv(PG_FUNCTION_ARGS)
int slot;
for (slot = 0; slot < INDEX_MAX_KEYS; slot++)
- {
result[slot] = (Oid) pq_getmsgint(buf, sizeof(Oid));
- }
PG_RETURN_POINTER(result);
}
@@ -240,9 +238,7 @@ oidvectorsend(PG_FUNCTION_ARGS)
pq_begintypsend(&buf);
for (slot = 0; slot < INDEX_MAX_KEYS; slot++)
- {
pq_sendint(&buf, oidArray[slot], sizeof(Oid));
- }
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));
}
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index 8fd63164f0e..4e0c14be164 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/oracle_compat.c,v 1.46 2003/07/27 04:53:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/oracle_compat.c,v 1.47 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,8 +22,8 @@
static text *dotrim(const char *string, int stringlen,
- const char *set, int setlen,
- bool doltrim, bool dortrim);
+ const char *set, int setlen,
+ bool doltrim, bool dortrim);
/********************************************************************
@@ -403,8 +403,8 @@ dotrim(const char *string, int stringlen,
{
/*
* In the multibyte-encoding case, build arrays of pointers to
- * character starts, so that we can avoid inefficient checks in
- * the inner loops.
+ * character starts, so that we can avoid inefficient checks
+ * in the inner loops.
*/
const char **stringchars;
const char **setchars;
@@ -499,13 +499,14 @@ dotrim(const char *string, int stringlen,
else
{
/*
- * In the single-byte-encoding case, we don't need such overhead.
+ * In the single-byte-encoding case, we don't need such
+ * overhead.
*/
if (doltrim)
{
while (stringlen > 0)
{
- char str_ch = *string;
+ char str_ch = *string;
for (i = 0; i < setlen; i++)
{
@@ -523,7 +524,7 @@ dotrim(const char *string, int stringlen,
{
while (stringlen > 0)
{
- char str_ch = string[stringlen - 1];
+ char str_ch = string[stringlen - 1];
for (i = 0; i < setlen; i++)
{
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 26bde944fa8..d6a0d978896 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -4,7 +4,7 @@
*
* Portions Copyright (c) 2002, PostgreSQL Global Development Group
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.21 2003/07/27 04:53:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.22 2003/08/04 00:43:25 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -25,7 +25,7 @@
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent. (Actually, LC_TIME doesn't do anything at all right
* now.)
*
@@ -40,7 +40,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*----------
*/
@@ -134,9 +134,7 @@ locale_messages_assign(const char *value, bool doit, bool interactive)
return NULL;
}
else
- {
value = locale_xxx_assign(LC_MESSAGES, value, false, interactive);
- }
#endif
return value;
}
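The pg_locale.c comment above warns that the naive save = setlocale(category, NULL); ... setlocale(category, save) pattern is unreliable because the returned string can be clobbered by the next setlocale() call, and that the value must be copied first (pstrdup in the backend). A minimal sketch of the safe pattern, with plain strdup standing in for pstrdup:

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	char	   *save = setlocale(LC_NUMERIC, NULL);	/* current locale name */

	if (save == NULL)
		return 1;
	save = strdup(save);		/* copy before it can be clobbered */

	if (setlocale(LC_NUMERIC, "C") != NULL)
	{
		struct lconv *lc = localeconv();

		printf("decimal_point in \"C\": %s\n", lc->decimal_point);
	}

	setlocale(LC_NUMERIC, save);	/* restore the original locale */
	free(save);
	return 0;
}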
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index df8c8e92cc5..440783764ae 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -308,11 +308,11 @@ pg_stat_get_backend_activity(PG_FUNCTION_ARGS)
Datum
pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
{
- PgStat_StatBeEntry *beentry;
- int32 beid;
- AbsoluteTime sec;
- int usec;
- TimestampTz result;
+ PgStat_StatBeEntry *beentry;
+ int32 beid;
+ AbsoluteTime sec;
+ int usec;
+ TimestampTz result;
beid = PG_GETARG_INT32(0);
@@ -326,8 +326,8 @@ pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
usec = beentry->activity_start_usec;
/*
- * No time recorded for start of current query -- this is the case
- * if the user hasn't enabled query-level stats collection.
+ * No time recorded for start of current query -- this is the case if
+ * the user hasn't enabled query-level stats collection.
*/
if (sec == 0 && usec == 0)
PG_RETURN_NULL();
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index 78b132c5167..96f9903ce48 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/pseudotypes.c,v 1.9 2003/07/28 00:09:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/pseudotypes.c,v 1.10 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -191,7 +191,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 88533086632..8ecffd16c95 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/regexp.c,v 1.46 2003/07/27 04:53:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/regexp.c,v 1.47 2003/08/04 00:43:25 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -73,10 +73,10 @@ typedef struct cached_re_str
text *cre_pat; /* original RE (untoasted TEXT form) */
int cre_flags; /* compile flags: extended,icase etc */
regex_t cre_re; /* the compiled regular expression */
-} cached_re_str;
+} cached_re_str;
static int num_res = 0; /* # of cached re's */
-static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */
+static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */
/*
@@ -88,7 +88,7 @@ static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */
* dat --- the data to match against (need not be null-terminated)
* dat_len --- the length of the data string
* cflags --- compile options for the pattern
- * nmatch, pmatch --- optional return area for match details
+ * nmatch, pmatch --- optional return area for match details
*
* Both pattern and data are given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
@@ -105,14 +105,14 @@ RE_compile_and_execute(text *text_re, unsigned char *dat, int dat_len,
int i;
int regcomp_result;
int regexec_result;
- cached_re_str re_temp;
+ cached_re_str re_temp;
/* Convert data string to wide characters */
data = (pg_wchar *) palloc((dat_len + 1) * sizeof(pg_wchar));
data_len = pg_mb2wchar_with_len(dat, data, dat_len);
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front,
* our search strategy can just be to scan from the front.
*/
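
The cache this comment describes is kept in most-recently-used order: lookups scan re_array[] from slot 0, and a hit is rotated to the front so frequently used patterns stay cheap to find. A self-contained sketch of that move-to-front discipline, using plain strings rather than compiled regexes (all names invented for the example):

#include <stdio.h>
#include <string.h>

#define MAX_CACHED 8

static char cache[MAX_CACHED][64];
static int	num_cached = 0;

/*
 * Look up key in the cache, scanning from the front.  On a hit, rotate the
 * entry to slot 0; on a miss, insert it at slot 0, pushing older entries
 * back and dropping the last one if the cache is full.
 */
static int
cache_lookup(const char *key)
{
	int			i;
	char		tmp[64];

	for (i = 0; i < num_cached; i++)
	{
		if (strcmp(cache[i], key) == 0)
		{
			/* hit: move entry i to the front */
			strcpy(tmp, cache[i]);
			memmove(cache[1], cache[0], i * sizeof(cache[0]));
			strcpy(cache[0], tmp);
			return 1;
		}
	}

	/* miss: make room at the front and insert */
	if (num_cached < MAX_CACHED)
		num_cached++;
	memmove(cache[1], cache[0], (num_cached - 1) * sizeof(cache[0]));
	strncpy(cache[0], key, sizeof(cache[0]) - 1);
	cache[0][sizeof(cache[0]) - 1] = '\0';
	return 0;
}

int
main(void)
{
	printf("%d\n", cache_lookup("^foo"));	/* 0: miss, inserted */
	printf("%d\n", cache_lookup("bar$"));	/* 0: miss, inserted */
	printf("%d\n", cache_lookup("^foo"));	/* 1: hit, moved to front */
	return 0;
}
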
@@ -135,7 +135,7 @@ RE_compile_and_execute(text *text_re, unsigned char *dat, int dat_len,
regexec_result = pg_regexec(&re_array[0].cre_re,
data,
data_len,
- NULL, /* no details */
+ NULL, /* no details */
nmatch,
pmatch,
0);
@@ -213,7 +213,7 @@ RE_compile_and_execute(text *text_re, unsigned char *dat, int dat_len,
regexec_result = pg_regexec(&re_array[0].cre_re,
data,
data_len,
- NULL, /* no details */
+ NULL, /* no details */
nmatch,
pmatch,
0);
@@ -383,8 +383,8 @@ textregexsubstr(PG_FUNCTION_ARGS)
/*
* We pass two regmatch_t structs to get info about the overall match
* and the match for the first parenthesized subexpression (if any).
- * If there is a parenthesized subexpression, we return what it matched;
- * else return what the whole regexp matched.
+ * If there is a parenthesized subexpression, we return what it
+ * matched; else return what the whole regexp matched.
*/
match = RE_compile_and_execute(p,
(unsigned char *) VARDATA(s),
@@ -395,8 +395,8 @@ textregexsubstr(PG_FUNCTION_ARGS)
/* match? then return the substring matching the pattern */
if (match)
{
- int so,
- eo;
+ int so,
+ eo;
so = pmatch[1].rm_so;
eo = pmatch[1].rm_eo;
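
The rm_so/rm_eo offsets read here come from the regmatch_t array the regex engine fills in: index 0 covers the whole match, index 1 the first parenthesized subexpression, with rm_so set to -1 when a subexpression did not participate in the match. The same convention is visible through the plain POSIX <regex.h> interface (a standalone illustration, not the backend's pg_regexec wrapper):

#include <regex.h>
#include <stdio.h>

int
main(void)
{
	regex_t		re;
	regmatch_t	pmatch[2];
	const char *data = "order id 4711 received";

	if (regcomp(&re, "id ([0-9]+)", REG_EXTENDED) != 0)
		return 1;

	if (regexec(&re, data, 2, pmatch, 0) == 0)
	{
		/* prefer the parenthesized subexpression if it matched */
		int			so = (pmatch[1].rm_so != -1) ? pmatch[1].rm_so : pmatch[0].rm_so;
		int			eo = (pmatch[1].rm_so != -1) ? pmatch[1].rm_eo : pmatch[0].rm_eo;

		printf("%.*s\n", eo - so, data + so);	/* prints "4711" */
	}

	regfree(&re);
	return 0;
}
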
@@ -457,7 +457,7 @@ similar_escape(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
}
/* We need room for ^, $, and up to 2 output bytes per input byte */
@@ -492,9 +492,7 @@ similar_escape(PG_FUNCTION_ARGS)
*r++ = '*';
}
else if (pchar == '_')
- {
*r++ = '.';
- }
else if (pchar == '\\' || pchar == '.' || pchar == '?' ||
pchar == '{')
{
@@ -502,14 +500,12 @@ similar_escape(PG_FUNCTION_ARGS)
*r++ = pchar;
}
else
- {
*r++ = pchar;
- }
p++, plen--;
}
*r++ = '$';
-
+
VARATT_SIZEP(result) = r - ((unsigned char *) result);
PG_RETURN_TEXT_P(result);
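
similar_escape(), whose body the hunks above reindent, rewrites a SQL SIMILAR TO pattern into POSIX regexp syntax: % becomes .*, _ becomes ., regexp metacharacters receive a leading backslash, and the result is anchored with ^ and $. A reduced sketch of that translation for plain C strings, leaving out the user-supplied escape character the real function also handles (illustrative names):

#include <stdio.h>
#include <string.h>

/*
 * Translate a SIMILAR TO style pattern into an anchored POSIX regexp.
 * "out" must be large enough: 2 bytes per input byte plus ^, $ and NUL.
 */
static void
similar_to_regex(const char *pat, char *out)
{
	char	   *r = out;

	*r++ = '^';
	for (; *pat; pat++)
	{
		char		pchar = *pat;

		if (pchar == '%')
		{
			*r++ = '.';
			*r++ = '*';
		}
		else if (pchar == '_')
			*r++ = '.';
		else if (pchar == '\\' || pchar == '.' || pchar == '?' ||
				 pchar == '{')
		{
			*r++ = '\\';
			*r++ = pchar;
		}
		else
			*r++ = pchar;
	}
	*r++ = '$';
	*r = '\0';
}

int
main(void)
{
	char		buf[64];

	similar_to_regex("ab%c_d.e", buf);
	printf("%s\n", buf);		/* prints "^ab.*c.d\.e$" */
	return 0;
}
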
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 584e3e5ae82..c4059a21207 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/regproc.c,v 1.79 2003/07/28 00:09:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/regproc.c,v 1.80 2003/08/04 00:43:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,13 +114,13 @@ regprocin(PG_FUNCTION_ARGS)
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no procedure with name %s", pro_name_or_oid)));
+ errmsg("no procedure with name %s", pro_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
errmsg("more than one procedure named %s",
- pro_name_or_oid)));
+ pro_name_or_oid)));
PG_RETURN_OID(result);
}
@@ -140,7 +140,7 @@ regprocin(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
errmsg("more than one procedure named %s",
- pro_name_or_oid)));
+ pro_name_or_oid)));
result = clist->oid;
@@ -464,12 +464,12 @@ regoperin(PG_FUNCTION_ARGS)
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no operator with name %s", opr_name_or_oid)));
+ errmsg("no operator with name %s", opr_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
errmsg("more than one operator named %s",
- opr_name_or_oid)));
+ opr_name_or_oid)));
PG_RETURN_OID(result);
}
@@ -489,7 +489,7 @@ regoperin(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
errmsg("more than one operator named %s",
- opr_name_or_oid)));
+ opr_name_or_oid)));
result = clist->oid;
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index ed95d8ed6c4..9222bf1c177 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.52 2003/07/22 22:14:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/ri_triggers.c,v 1.53 2003/08/04 00:43:25 momjian Exp $
*
* ----------
*/
@@ -150,29 +150,29 @@ static bool ri_OneKeyEqual(Relation rel, int column, HeapTuple oldtup,
HeapTuple newtup, RI_QueryKey *key, int pairidx);
static bool ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue);
static bool ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
- HeapTuple old_row,
- Oid tgoid, int match_type,
- int tgnargs, char **tgargs);
+ HeapTuple old_row,
+ Oid tgoid, int match_type,
+ int tgnargs, char **tgargs);
static void ri_InitHashTables(void);
static void *ri_FetchPreparedPlan(RI_QueryKey *key);
static void ri_HashPreparedPlan(RI_QueryKey *key, void *plan);
static void ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname,
- int tgkind);
+ int tgkind);
static void *ri_PlanCheck(const char *querystr, int nargs, Oid *argtypes,
- RI_QueryKey *qkey, Relation fk_rel, Relation pk_rel,
- bool cache_plan);
+ RI_QueryKey *qkey, Relation fk_rel, Relation pk_rel,
+ bool cache_plan);
static bool ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
- Relation fk_rel, Relation pk_rel,
- HeapTuple old_tuple, HeapTuple new_tuple,
- int expect_OK, const char *constrname);
+ Relation fk_rel, Relation pk_rel,
+ HeapTuple old_tuple, HeapTuple new_tuple,
+ int expect_OK, const char *constrname);
static void ri_ExtractValues(RI_QueryKey *qkey, int key_idx,
- Relation rel, HeapTuple tuple,
- Datum *vals, char *nulls);
+ Relation rel, HeapTuple tuple,
+ Datum *vals, char *nulls);
static void ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
- Relation pk_rel, Relation fk_rel,
- HeapTuple violator, bool spi_err);
+ Relation pk_rel, Relation fk_rel,
+ HeapTuple violator, bool spi_err);
/* ----------
@@ -341,7 +341,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on \"%s\" violates foreign key constraint \"%s\"",
- RelationGetRelationName(trigdata->tg_relation),
+ RelationGetRelationName(trigdata->tg_relation),
tgargs[RI_CONSTRAINT_NAME_ARGNO]),
errdetail("MATCH FULL does not allow mixing of NULL and non-NULL key values.")));
heap_close(pk_rel, RowShareLock);
@@ -366,7 +366,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
}
@@ -381,8 +381,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
}
/*
- * No need to check anything if old and new references are the
- * same on UPDATE.
+ * No need to check anything if old and new references are the same on
+ * UPDATE.
*/
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
{
@@ -542,7 +542,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
break;
}
@@ -759,7 +759,8 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing references.
+ * We have a plan now. Run it to check for existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -897,8 +898,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
match_type, tgnargs, tgargs))
{
/*
- * There's either another row, or no row could match this one. In
- * either case, we don't need to do the check.
+ * There's either another row, or no row could match this
+ * one. In either case, we don't need to do the check.
*/
heap_close(fk_rel, RowShareLock);
return PointerGetDatum(NULL);
@@ -950,7 +951,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing references.
+ * We have a plan now. Run it to check for existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1110,9 +1112,9 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Build up the arguments
- * from the key values in the deleted PK tuple and delete the
- * referencing rows
+ * We have a plan now. Build up the arguments from the key
+ * values in the deleted PK tuple and delete the referencing
+ * rows
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1296,7 +1298,8 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing references.
+ * We have a plan now. Run it to update the existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1465,7 +1468,8 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing references.
+ * We have a plan now. Run it to check for existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1646,7 +1650,8 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing references.
+ * We have a plan now. Run it to check for existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1816,7 +1821,8 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing references.
+ * We have a plan now. Run it to check for existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2034,7 +2040,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing references.
+ * We have a plan now. Run it to update the existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2209,14 +2216,14 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* appropriate column defaults, if any (if not, they stay
* NULL).
*
- * XXX This is really ugly; it'd be better to use "UPDATE
+ * XXX This is really ugly; it'd be better to use "UPDATE
* SET foo = DEFAULT", if we had it.
*/
spi_plan = (Plan *) lfirst(((_SPI_plan *) qplan)->ptlist);
foreach(l, spi_plan->targetlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
- Node *dfl;
+ Node *dfl;
/* Ignore any junk columns or Var=Var columns */
if (tle->resdom->resjunk)
@@ -2234,7 +2241,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing references.
+ * We have a plan now. Run it to update the existing
+ * references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2444,14 +2452,14 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* appropriate column defaults, if any (if not, they stay
* NULL).
*
- * XXX This is really ugly; it'd be better to use "UPDATE
+ * XXX This is really ugly; it'd be better to use "UPDATE
* SET foo = DEFAULT", if we had it.
*/
spi_plan = (Plan *) lfirst(((_SPI_plan *) qplan)->ptlist);
foreach(l, spi_plan->targetlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
- Node *dfl;
+ Node *dfl;
/* Ignore any junk columns or Var=Var columns */
if (tle->resdom->resjunk)
@@ -2469,7 +2477,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing references.
+ * We have a plan now. Run it to update the existing
+ * references.
*/