Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/common/heaptuple.c | 12
-rw-r--r--  src/backend/access/common/indextuple.c | 28
-rw-r--r--  src/backend/access/common/printtup.c | 27
-rw-r--r--  src/backend/access/common/tupdesc.c | 18
-rw-r--r--  src/backend/access/gist/gist.c | 698
-rw-r--r--  src/backend/access/gist/gistget.c | 4
-rw-r--r--  src/backend/access/gist/gistscan.c | 14
-rw-r--r--  src/backend/access/hash/hash.c | 57
-rw-r--r--  src/backend/access/hash/hashfunc.c | 14
-rw-r--r--  src/backend/access/heap/heapam.c | 384
-rw-r--r--  src/backend/access/heap/hio.c | 12
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 472
-rw-r--r--  src/backend/access/index/istrat.c | 16
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 11
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 606
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 60
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 271
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 93
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 103
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 49
-rw-r--r--  src/backend/access/rtree/rtget.c | 6
-rw-r--r--  src/backend/access/rtree/rtproc.c | 19
-rw-r--r--  src/backend/access/rtree/rtree.c | 111
-rw-r--r--  src/backend/access/rtree/rtscan.c | 14
-rw-r--r--  src/backend/access/transam/rmgr.c | 34
-rw-r--r--  src/backend/access/transam/transam.c | 6
-rw-r--r--  src/backend/access/transam/transsup.c | 6
-rw-r--r--  src/backend/access/transam/varsup.c | 31
-rw-r--r--  src/backend/access/transam/xact.c | 98
-rw-r--r--  src/backend/access/transam/xid.c | 7
-rw-r--r--  src/backend/access/transam/xlog.c | 643
-rw-r--r--  src/backend/access/transam/xlogutils.c | 143
-rw-r--r--  src/backend/catalog/aclchk.c | 18
-rw-r--r--  src/backend/catalog/catalog.c | 14
-rw-r--r--  src/backend/catalog/heap.c | 64
-rw-r--r--  src/backend/catalog/index.c | 198
-rw-r--r--  src/backend/catalog/indexing.c | 4
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 22
-rw-r--r--  src/backend/catalog/pg_largeobject.c | 16
-rw-r--r--  src/backend/catalog/pg_operator.c | 10
-rw-r--r--  src/backend/catalog/pg_proc.c | 35
-rw-r--r--  src/backend/catalog/pg_type.c | 54
-rw-r--r--  src/backend/commands/analyze.c | 113
-rw-r--r--  src/backend/commands/async.c | 10
-rw-r--r--  src/backend/commands/cluster.c | 43
-rw-r--r--  src/backend/commands/command.c | 661
-rw-r--r--  src/backend/commands/comment.c | 4
-rw-r--r--  src/backend/commands/copy.c | 84
-rw-r--r--  src/backend/commands/creatinh.c | 59
-rw-r--r--  src/backend/commands/dbcommands.c | 73
-rw-r--r--  src/backend/commands/define.c | 63
-rw-r--r--  src/backend/commands/explain.c | 4
-rw-r--r--  src/backend/commands/indexcmds.c | 86
-rw-r--r--  src/backend/commands/proclang.c | 2
-rw-r--r--  src/backend/commands/remove.c | 6
-rw-r--r--  src/backend/commands/rename.c | 19
-rw-r--r--  src/backend/commands/sequence.c | 87
-rw-r--r--  src/backend/commands/trigger.c | 78
-rw-r--r--  src/backend/commands/user.c | 17
-rw-r--r--  src/backend/commands/vacuum.c | 243
-rw-r--r--  src/backend/commands/variable.c | 123
-rw-r--r--  src/backend/commands/view.c | 26
-rw-r--r--  src/backend/executor/execAmi.c | 10
-rw-r--r--  src/backend/executor/execJunk.c | 3
-rw-r--r--  src/backend/executor/execMain.c | 116
-rw-r--r--  src/backend/executor/execQual.c | 162
-rw-r--r--  src/backend/executor/execScan.c | 6
-rw-r--r--  src/backend/executor/execTuples.c | 18
-rw-r--r--  src/backend/executor/execUtils.c | 20
-rw-r--r--  src/backend/executor/functions.c | 29
-rw-r--r--  src/backend/executor/nodeAgg.c | 151
-rw-r--r--  src/backend/executor/nodeGroup.c | 14
-rw-r--r--  src/backend/executor/nodeHash.c | 4
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 12
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 29
-rw-r--r--  src/backend/executor/nodeLimit.c | 18
-rw-r--r--  src/backend/executor/nodeMaterial.c | 18
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 128
-rw-r--r--  src/backend/executor/nodeNestloop.c | 20
-rw-r--r--  src/backend/executor/nodeResult.c | 4
-rw-r--r--  src/backend/executor/nodeSeqscan.c | 8
-rw-r--r--  src/backend/executor/nodeSetOp.c | 29
-rw-r--r--  src/backend/executor/nodeSubplan.c | 10
-rw-r--r--  src/backend/executor/nodeSubqueryscan.c | 7
-rw-r--r--  src/backend/executor/nodeTidscan.c | 3
-rw-r--r--  src/backend/executor/spi.c | 39
-rw-r--r--  src/backend/lib/bit.c | 8
-rw-r--r--  src/backend/libpq/auth.c | 45
-rw-r--r--  src/backend/libpq/be-fsstubs.c | 14
-rw-r--r--  src/backend/libpq/crypt.c | 4
-rw-r--r--  src/backend/libpq/password.c | 10
-rw-r--r--  src/backend/libpq/pqcomm.c | 55
-rw-r--r--  src/backend/libpq/pqpacket.c | 6
-rw-r--r--  src/backend/libpq/pqsignal.c | 9
-rw-r--r--  src/backend/main/main.c | 59
-rw-r--r--  src/backend/nodes/copyfuncs.c | 127
-rw-r--r--  src/backend/nodes/equalfuncs.c | 13
-rw-r--r--  src/backend/nodes/list.c | 15
-rw-r--r--  src/backend/nodes/makefuncs.c | 9
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 4
-rw-r--r--  src/backend/nodes/outfuncs.c | 15
-rw-r--r--  src/backend/nodes/print.c | 4
-rw-r--r--  src/backend/nodes/read.c | 27
-rw-r--r--  src/backend/nodes/readfuncs.c | 759
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 6
-rw-r--r--  src/backend/optimizer/geqo/geqo_main.c | 48
-rw-r--r--  src/backend/optimizer/path/_deadcode/predmig.c | 4
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 127
-rw-r--r--  src/backend/optimizer/path/clausesel.c | 30
-rw-r--r--  src/backend/optimizer/path/costsize.c | 40
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 65
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 133
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 94
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 80
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 96
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 134
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 16
-rw-r--r--  src/backend/optimizer/plan/planner.c | 215
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 34
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 46
-rw-r--r--  src/backend/optimizer/prep/prepkeyset.c | 5
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 22
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 39
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 196
-rw-r--r--  src/backend/optimizer/util/clauses.c | 85
-rw-r--r--  src/backend/optimizer/util/joininfo.c | 4
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 11
-rw-r--r--  src/backend/optimizer/util/plancat.c | 38
-rw-r--r--  src/backend/optimizer/util/tlist.c | 3
-rw-r--r--  src/backend/optimizer/util/var.c | 13
-rw-r--r--  src/backend/parser/analyze.c | 499
-rw-r--r--  src/backend/parser/keywords.c | 16
-rw-r--r--  src/backend/parser/parse_clause.c | 158
-rw-r--r--  src/backend/parser/parse_coerce.c | 48
-rw-r--r--  src/backend/parser/parse_expr.c | 25
-rw-r--r--  src/backend/parser/parse_func.c | 82
-rw-r--r--  src/backend/parser/parse_node.c | 8
-rw-r--r--  src/backend/parser/parse_oper.c | 56
-rw-r--r--  src/backend/parser/parse_relation.c | 82
-rw-r--r--  src/backend/parser/parse_target.c | 8
-rw-r--r--  src/backend/parser/parse_type.c | 10
-rw-r--r--  src/backend/parser/parser.c | 5
-rw-r--r--  src/backend/port/beos/sem.c | 266
-rw-r--r--  src/backend/port/beos/shm.c | 124
-rw-r--r--  src/backend/port/beos/support.c | 277
-rw-r--r--  src/backend/port/darwin/sem.c | 104
-rw-r--r--  src/backend/port/dynloader/aix.c | 2
-rw-r--r--  src/backend/port/dynloader/aix.h | 10
-rw-r--r--  src/backend/port/dynloader/beos.c | 53
-rw-r--r--  src/backend/port/dynloader/darwin.c | 24
-rw-r--r--  src/backend/port/dynloader/darwin.h | 6
-rw-r--r--  src/backend/port/dynloader/hpux.c | 5
-rw-r--r--  src/backend/port/dynloader/solaris.h | 4
-rw-r--r--  src/backend/port/qnx4/shm.c | 8
-rw-r--r--  src/backend/port/strtol.c | 2
-rw-r--r--  src/backend/postmaster/postmaster.c | 270
-rw-r--r--  src/backend/regex/engine.c | 53
-rw-r--r--  src/backend/regex/regcomp.c | 77
-rw-r--r--  src/backend/regex/regexec.c | 2
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 60
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 123
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 91
-rw-r--r--  src/backend/rewrite/rewriteRemove.c | 19
-rw-r--r--  src/backend/rewrite/rewriteSupport.c | 5
-rw-r--r--  src/backend/storage/buffer/buf_init.c | 15
-rw-r--r--  src/backend/storage/buffer/buf_table.c | 6
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 116
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 16
-rw-r--r--  src/backend/storage/buffer/s_lock.c | 47
-rw-r--r--  src/backend/storage/file/buffile.c | 5
-rw-r--r--  src/backend/storage/file/fd.c | 18
-rw-r--r--  src/backend/storage/ipc/ipc.c | 153
-rw-r--r--  src/backend/storage/ipc/ipci.c | 4
-rw-r--r--  src/backend/storage/ipc/shmem.c | 17
-rw-r--r--  src/backend/storage/ipc/shmqueue.c | 5
-rw-r--r--  src/backend/storage/ipc/sinval.c | 5
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 10
-rw-r--r--  src/backend/storage/ipc/spin.c | 76
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 161
-rw-r--r--  src/backend/storage/lmgr/deadlock.c | 188
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 5
-rw-r--r--  src/backend/storage/lmgr/lock.c | 203
-rw-r--r--  src/backend/storage/lmgr/proc.c | 86
-rw-r--r--  src/backend/storage/page/bufpage.c | 35
-rw-r--r--  src/backend/storage/smgr/md.c | 56
-rw-r--r--  src/backend/storage/smgr/smgr.c | 53
-rw-r--r--  src/backend/tcop/dest.c | 4
-rw-r--r--  src/backend/tcop/fastpath.c | 50
-rw-r--r--  src/backend/tcop/postgres.c | 300
-rw-r--r--  src/backend/tcop/pquery.c | 9
-rw-r--r--  src/backend/tcop/utility.c | 67
-rw-r--r--  src/backend/tioga/tgRecipe.c | 14
-rw-r--r--  src/backend/tioga/tgRecipe.h | 3
-rw-r--r--  src/backend/utils/adt/acl.c | 24
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 145
-rw-r--r--  src/backend/utils/adt/ascii.c | 106
-rw-r--r--  src/backend/utils/adt/bool.c | 10
-rw-r--r--  src/backend/utils/adt/cash.c | 19
-rw-r--r--  src/backend/utils/adt/date.c | 136
-rw-r--r--  src/backend/utils/adt/datetime.c | 142
-rw-r--r--  src/backend/utils/adt/datum.c | 5
-rw-r--r--  src/backend/utils/adt/float.c | 26
-rw-r--r--  src/backend/utils/adt/format_type.c | 20
-rw-r--r--  src/backend/utils/adt/formatting.c | 813
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 202
-rw-r--r--  src/backend/utils/adt/inet_net_ntop.c | 6
-rw-r--r--  src/backend/utils/adt/int.c | 14
-rw-r--r--  src/backend/utils/adt/int8.c | 17
-rw-r--r--  src/backend/utils/adt/like.c | 224
-rw-r--r--  src/backend/utils/adt/mac.c | 46
-rw-r--r--  src/backend/utils/adt/misc.c | 5
-rw-r--r--  src/backend/utils/adt/nabstime.c | 196
-rw-r--r--  src/backend/utils/adt/network.c | 49
-rw-r--r--  src/backend/utils/adt/not_in.c | 6
-rw-r--r--  src/backend/utils/adt/numeric.c | 28
-rw-r--r--  src/backend/utils/adt/numutils.c | 10
-rw-r--r--  src/backend/utils/adt/oid.c | 55
-rw-r--r--  src/backend/utils/adt/oracle_compat.c | 22
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 6
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 16
-rw-r--r--  src/backend/utils/adt/quote.c | 146
-rw-r--r--  src/backend/utils/adt/regexp.c | 37
-rw-r--r--  src/backend/utils/adt/regproc.c | 4
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 10
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 110
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 350
-rw-r--r--  src/backend/utils/adt/sets.c | 20
-rw-r--r--  src/backend/utils/adt/tid.c | 39
-rw-r--r--  src/backend/utils/adt/timestamp.c | 183
-rw-r--r--  src/backend/utils/adt/varbit.c | 122
-rw-r--r--  src/backend/utils/adt/varchar.c | 84
-rw-r--r--  src/backend/utils/adt/varlena.c | 13
-rw-r--r--  src/backend/utils/cache/catcache.c | 177
-rw-r--r--  src/backend/utils/cache/fcache.c | 7
-rw-r--r--  src/backend/utils/cache/inval.c | 87
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 41
-rw-r--r--  src/backend/utils/cache/relcache.c | 328
-rw-r--r--  src/backend/utils/cache/syscache.c | 131
-rw-r--r--  src/backend/utils/cache/temprel.c | 39
-rw-r--r--  src/backend/utils/error/elog.c | 126
-rw-r--r--  src/backend/utils/error/exc.c | 3
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 16
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 288
-rw-r--r--  src/backend/utils/hash/dynahash.c | 4
-rw-r--r--  src/backend/utils/hash/pg_crc.c | 6
-rw-r--r--  src/backend/utils/init/globals.c | 4
-rw-r--r--  src/backend/utils/init/miscinit.c | 77
-rw-r--r--  src/backend/utils/init/postinit.c | 44
-rw-r--r--  src/backend/utils/mb/conv.c | 189
-rw-r--r--  src/backend/utils/mb/liketest.c | 143
-rw-r--r--  src/backend/utils/mb/palloc.c | 2
-rw-r--r--  src/backend/utils/mb/utftest.c | 4
-rw-r--r--  src/backend/utils/mb/wchar.c | 48
-rw-r--r--  src/backend/utils/misc/database.c | 17
-rw-r--r--  src/backend/utils/misc/guc.c | 625
-rw-r--r--  src/backend/utils/misc/ps_status.c | 170
-rw-r--r--  src/backend/utils/mmgr/aset.c | 161
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 99
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 6
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 4
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 50
261 files changed, 10924 insertions, 9675 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 03d180e36fe..9bb08054943 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.69 2001/01/24 19:42:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.70 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -306,8 +306,8 @@ nocachegetattr(HeapTuple tuple,
int j;
/*
- * In for(), we test <= and not < because we want to see
- * if we can go past it in initializing offsets.
+ * In for(), we test <= and not < because we want to see if we
+ * can go past it in initializing offsets.
*/
for (j = 0; j <= attnum; j++)
{
@@ -321,9 +321,9 @@ nocachegetattr(HeapTuple tuple,
}
/*
- * If slow is false, and we got here, we know that we have a tuple with
- * no nulls or varlenas before the target attribute. If possible, we
- * also want to initialize the remainder of the attribute cached
+ * If slow is false, and we got here, we know that we have a tuple
+ * with no nulls or varlenas before the target attribute. If possible,
+ * we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index e503d9b888d..da8129f307f 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.52 2001/02/22 21:48:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.53 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,9 +45,11 @@ index_formtuple(TupleDesc tupleDescriptor,
bool hasnull = false;
uint16 tupmask = 0;
int numberOfAttributes = tupleDescriptor->natts;
+
#ifdef TOAST_INDEX_HACK
Datum untoasted_value[INDEX_MAX_KEYS];
bool untoasted_free[INDEX_MAX_KEYS];
+
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
@@ -57,7 +59,7 @@ index_formtuple(TupleDesc tupleDescriptor,
#ifdef TOAST_INDEX_HACK
for (i = 0; i < numberOfAttributes; i++)
{
- Form_pg_attribute att = tupleDescriptor->attrs[i];
+ Form_pg_attribute att = tupleDescriptor->attrs[i];
untoasted_value[i] = value[i];
untoasted_free[i] = false;
@@ -73,20 +75,20 @@ index_formtuple(TupleDesc tupleDescriptor,
if (VARATT_IS_EXTERNAL(value[i]))
{
untoasted_value[i] = PointerGetDatum(
- heap_tuple_fetch_attr(
- (varattrib *) DatumGetPointer(value[i])));
+ heap_tuple_fetch_attr(
+ (varattrib *) DatumGetPointer(value[i])));
untoasted_free[i] = true;
}
/*
- * If value is above size target, and is of a compressible datatype,
- * try to compress it in-line.
+ * If value is above size target, and is of a compressible
+ * datatype, try to compress it in-line.
*/
if (VARATT_SIZE(untoasted_value[i]) > TOAST_INDEX_TARGET &&
!VARATT_IS_EXTENDED(untoasted_value[i]) &&
(att->attstorage == 'x' || att->attstorage == 'm'))
{
- Datum cvalue = toast_compress_datum(untoasted_value[i]);
+ Datum cvalue = toast_compress_datum(untoasted_value[i]);
if (DatumGetPointer(cvalue) != NULL)
{
@@ -146,8 +148,8 @@ index_formtuple(TupleDesc tupleDescriptor,
/*
* We do this because DataFill wants to initialize a "tupmask" which
* is used for HeapTuples, but we want an indextuple infomask. The
- * only relevant info is the "has variable attributes" field.
- * We have already set the hasnull bit above.
+ * only relevant info is the "has variable attributes" field. We have
+ * already set the hasnull bit above.
*/
if (tupmask & HEAP_HASVARLENA)
@@ -315,9 +317,9 @@ nocache_index_getattr(IndexTuple tup,
}
/*
- * If slow is false, and we got here, we know that we have a tuple with
- * no nulls or varlenas before the target attribute. If possible, we
- * also want to initialize the remainder of the attribute cached
+ * If slow is false, and we got here, we know that we have a tuple
+ * with no nulls or varlenas before the target attribute. If possible,
+ * we also want to initialize the remainder of the attribute cached
* offset values.
*/
if (!slow)
@@ -391,9 +393,7 @@ nocache_index_getattr(IndexTuple tup,
usecache = false;
}
else
- {
off += att[i]->attlen;
- }
}
off = att_align(off, att[attnum]->attlen, att[attnum]->attalign);
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 4f47ef0d451..d44bfe973e0 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.57 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.58 2001/03/22 03:59:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ getTypeOutputInfo(Oid type, Oid *typOutput, Oid *typElem,
*typOutput = pt->typoutput;
*typElem = pt->typelem;
- *typIsVarlena = (! pt->typbyval) && (pt->typlen == -1);
+ *typIsVarlena = (!pt->typbyval) && (pt->typlen == -1);
ReleaseSysCache(typeTuple);
return OidIsValid(*typOutput);
}
@@ -200,9 +200,10 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
if (OidIsValid(thisState->typoutput))
{
+
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to
+ * avoid memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -210,9 +211,9 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
attr = origattr;
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
- attr,
- ObjectIdGetDatum(thisState->typelem),
- Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
+ attr,
+ ObjectIdGetDatum(thisState->typelem),
+ Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr));
@@ -308,9 +309,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
if (getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena))
{
+
/*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to
+ * avoid memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -318,9 +320,9 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
attr = origattr;
value = DatumGetCString(OidFunctionCall3(typoutput,
- attr,
- ObjectIdGetDatum(typelem),
- Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
+ attr,
+ ObjectIdGetDatum(typelem),
+ Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -405,6 +407,7 @@ printtup_internal(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
/* send # of bytes, and opaque data */
if (thisState->typisvarlena)
{
+
/*
* If we have a toasted datum, must detoast before sending.
*/
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 86bc1a56f82..e07c6296d15 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.71 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.72 2001/03/22 03:59:11 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -242,9 +242,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We do not need to check every single field here, and in fact
* some fields such as attdispersion probably shouldn't be
- * compared. We can also disregard attnum (it was used to
- * place the row in the attrs array) and everything derived
- * from the column datatype.
+ * compared. We can also disregard attnum (it was used to place
+ * the row in the attrs array) and everything derived from the
+ * column datatype.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@@ -276,8 +276,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
/*
* We can't assume that the items are always read from the
- * system catalogs in the same order; so use the adnum field to
- * identify the matching item to compare.
+ * system catalogs in the same order; so use the adnum field
+ * to identify the matching item to compare.
*/
for (j = 0; j < n; defval2++, j++)
{
@@ -298,9 +298,9 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
ConstrCheck *check2 = constr2->check;
/*
- * Similarly, don't assume that the checks are always read
- * in the same order; match them up by name and contents.
- * (The name *should* be unique, but...)
+ * Similarly, don't assume that the checks are always read in
+ * the same order; match them up by name and contents. (The
+ * name *should* be unique, but...)
*/
for (j = 0; j < n; check2++, j++)
{
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 9e3f935bd67..1c5577b88a0 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.71 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.72 2001/03/22 03:59:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,61 +25,62 @@
#include "access/xlogutils.h"
-/* result's status */
+/* result's status */
#define INSERTED 0x01
#define SPLITED 0x02
/* non-export function prototypes */
-static void gistdoinsert(Relation r,
- IndexTuple itup,
- InsertIndexResult *res,
- GISTSTATE *GISTstate);
-static int gistlayerinsert( Relation r, BlockNumber blkno,
- IndexTuple **itup,
- int *len,
- InsertIndexResult *res,
- GISTSTATE *giststate );
-static OffsetNumber gistwritebuffer( Relation r,
- Page page,
- IndexTuple *itup,
- int len,
- OffsetNumber off,
- GISTSTATE *giststate );
-static int gistnospace( Page page,
- IndexTuple *itvec, int len );
-static IndexTuple * gistreadbuffer( Relation r,
- Buffer buffer, int *len );
-static IndexTuple * gistjoinvector(
- IndexTuple *itvec, int *len,
- IndexTuple *additvec, int addlen );
-static IndexTuple gistunion( Relation r, IndexTuple *itvec,
- int len, GISTSTATE *giststate );
-static IndexTuple gistgetadjusted( Relation r,
- IndexTuple oldtup,
- IndexTuple addtup,
- GISTSTATE *giststate );
-static IndexTuple * gistSplit(Relation r,
- Buffer buffer,
- IndexTuple *itup,
- int *len,
- GISTSTATE *giststate,
- InsertIndexResult *res);
-static void gistnewroot(GISTSTATE *giststate, Relation r,
+static void gistdoinsert(Relation r,
+ IndexTuple itup,
+ InsertIndexResult *res,
+ GISTSTATE *GISTstate);
+static int gistlayerinsert(Relation r, BlockNumber blkno,
+ IndexTuple **itup,
+ int *len,
+ InsertIndexResult *res,
+ GISTSTATE *giststate);
+static OffsetNumber gistwritebuffer(Relation r,
+ Page page,
+ IndexTuple *itup,
+ int len,
+ OffsetNumber off,
+ GISTSTATE *giststate);
+static int gistnospace(Page page,
+ IndexTuple *itvec, int len);
+static IndexTuple *gistreadbuffer(Relation r,
+ Buffer buffer, int *len);
+static IndexTuple *gistjoinvector(
+ IndexTuple *itvec, int *len,
+ IndexTuple *additvec, int addlen);
+static IndexTuple gistunion(Relation r, IndexTuple *itvec,
+ int len, GISTSTATE *giststate);
+static IndexTuple gistgetadjusted(Relation r,
+ IndexTuple oldtup,
+ IndexTuple addtup,
+ GISTSTATE *giststate);
+static IndexTuple *gistSplit(Relation r,
+ Buffer buffer,
+ IndexTuple *itup,
+ int *len,
+ GISTSTATE *giststate,
+ InsertIndexResult *res);
+static void gistnewroot(GISTSTATE *giststate, Relation r,
IndexTuple *itup, int len);
static void GISTInitBuffer(Buffer b, uint32 f);
-static OffsetNumber gistchoose(Relation r, Page p,
- IndexTuple it,
- GISTSTATE *giststate);
-static IndexTuple gist_tuple_replacekey(Relation r,
- GISTENTRY entry, IndexTuple t);
-static void gistcentryinit(GISTSTATE *giststate,
- GISTENTRY *e, char *pr,
- Relation r, Page pg,
- OffsetNumber o, int b, bool l);
+static OffsetNumber gistchoose(Relation r, Page p,
+ IndexTuple it,
+ GISTSTATE *giststate);
+static IndexTuple gist_tuple_replacekey(Relation r,
+ GISTENTRY entry, IndexTuple t);
+static void gistcentryinit(GISTSTATE *giststate,
+ GISTENTRY *e, char *pr,
+ Relation r, Page pg,
+ OffsetNumber o, int b, bool l);
#undef GISTDEBUG
#ifdef GISTDEBUG
static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff);
+
#endif
/*
@@ -88,12 +89,14 @@ static void gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber c
Datum
gistbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -105,9 +108,11 @@ gistbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
GISTSTATE giststate;
@@ -181,6 +186,7 @@ gistbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -262,9 +268,7 @@ gistbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -297,7 +301,7 @@ gistbuild(PG_FUNCTION_ARGS)
}
#ifdef GISTDEBUG
-gist_dumptree(index, 0, GISTP_ROOT, 0);
+ gist_dumptree(index, 0, GISTP_ROOT, 0);
#endif
PG_RETURN_VOID();
@@ -312,12 +316,14 @@ gist_dumptree(index, 0, GISTP_ROOT, 0);
Datum
gistinsert(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -380,7 +386,7 @@ gistPageAddItem(GISTSTATE *giststate,
{
GISTENTRY tmpcentry;
IndexTuple itup = (IndexTuple) item;
- OffsetNumber retval;
+ OffsetNumber retval;
/*
* recompress the item given that we now know the exact page and
@@ -394,7 +400,7 @@ gistPageAddItem(GISTSTATE *giststate,
offsetNumber, dentry->bytes, FALSE);
*newtup = gist_tuple_replacekey(r, tmpcentry, itup);
retval = PageAddItem(page, (Item) *newtup, IndexTupleSize(*newtup),
- offsetNumber, flags);
+ offsetNumber, flags);
if (retval == InvalidOffsetNumber)
elog(ERROR, "gist: failed to add index item to %s",
RelationGetRelationName(r));
@@ -405,189 +411,213 @@ gistPageAddItem(GISTSTATE *giststate,
return (retval);
}
-static void
-gistdoinsert( Relation r,
- IndexTuple itup,
- InsertIndexResult *res,
- GISTSTATE *giststate ) {
+static void
+gistdoinsert(Relation r,
+ IndexTuple itup,
+ InsertIndexResult *res,
+ GISTSTATE *giststate)
+{
IndexTuple *instup;
- int i,ret,len = 1;
+ int i,
+ ret,
+ len = 1;
+
+ instup = (IndexTuple *) palloc(sizeof(IndexTuple));
+ instup[0] = (IndexTuple) palloc(IndexTupleSize(itup));
+ memcpy(instup[0], itup, IndexTupleSize(itup));
- instup = ( IndexTuple* ) palloc( sizeof(IndexTuple) );
- instup[0] = ( IndexTuple ) palloc( IndexTupleSize( itup ) );
- memcpy( instup[0], itup, IndexTupleSize( itup ) );
-
ret = gistlayerinsert(r, GISTP_ROOT, &instup, &len, res, giststate);
- if ( ret & SPLITED )
- gistnewroot( giststate, r, instup, len );
+ if (ret & SPLITED)
+ gistnewroot(giststate, r, instup, len);
- for(i=0;i<len;i++)
- pfree( instup[i] );
- pfree( instup );
+ for (i = 0; i < len; i++)
+ pfree(instup[i]);
+ pfree(instup);
}
static int
-gistlayerinsert( Relation r, BlockNumber blkno,
- IndexTuple **itup, /* in - out, has compressed entry */
- int *len , /* in - out */
- InsertIndexResult *res, /* out */
- GISTSTATE *giststate ) {
- Buffer buffer;
- Page page;
- OffsetNumber child;
- int ret;
+gistlayerinsert(Relation r, BlockNumber blkno,
+ IndexTuple **itup, /* in - out, has compressed entry */
+ int *len, /* in - out */
+ InsertIndexResult *res, /* out */
+ GISTSTATE *giststate)
+{
+ Buffer buffer;
+ Page page;
+ OffsetNumber child;
+ int ret;
GISTPageOpaque opaque;
buffer = ReadBuffer(r, blkno);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
- if (!(opaque->flags & F_LEAF)) {
+ if (!(opaque->flags & F_LEAF))
+ {
/* internal page, so we must walk on tree */
/* len IS equial 1 */
- ItemId iid;
+ ItemId iid;
BlockNumber nblkno;
ItemPointerData oldtid;
- IndexTuple oldtup;
-
- child = gistchoose( r, page, *(*itup), giststate );
+ IndexTuple oldtup;
+
+ child = gistchoose(r, page, *(*itup), giststate);
iid = PageGetItemId(page, child);
oldtup = (IndexTuple) PageGetItem(page, iid);
nblkno = ItemPointerGetBlockNumber(&(oldtup->t_tid));
- /*
- * After this call:
- * 1. if child page was splited, then itup contains
- * keys for each page
- * 2. if child page wasn't splited, then itup contains
- * additional for adjustement of current key
+ /*
+ * After this call: 1. if child page was splited, then itup
+ * contains keys for each page 2. if child page wasn't splited,
+ * then itup contains additional for adjustement of current key
*/
- ret = gistlayerinsert( r, nblkno, itup, len, res, giststate );
+ ret = gistlayerinsert(r, nblkno, itup, len, res, giststate);
/* nothing inserted in child */
- if ( ! (ret & INSERTED) ) {
+ if (!(ret & INSERTED))
+ {
ReleaseBuffer(buffer);
- return 0x00;
+ return 0x00;
}
- /* child does not splited */
- if ( ! (ret & SPLITED) ) {
- IndexTuple newtup = gistgetadjusted( r, oldtup, (*itup)[0], giststate );
- if ( ! newtup ) {
+ /* child does not splited */
+ if (!(ret & SPLITED))
+ {
+ IndexTuple newtup = gistgetadjusted(r, oldtup, (*itup)[0], giststate);
+
+ if (!newtup)
+ {
/* not need to update key */
ReleaseBuffer(buffer);
return 0x00;
}
- pfree( (*itup)[0] ); /* !!! */
+ pfree((*itup)[0]); /* !!! */
(*itup)[0] = newtup;
}
- /* key is modified, so old version must be deleted */
+ /* key is modified, so old version must be deleted */
ItemPointerSet(&oldtid, blkno, child);
DirectFunctionCall2(gistdelete,
- PointerGetDatum(r),
- PointerGetDatum(&oldtid));
+ PointerGetDatum(r),
+ PointerGetDatum(&oldtid));
}
- ret = INSERTED;
+ ret = INSERTED;
- if ( gistnospace(page, (*itup), *len) ) {
+ if (gistnospace(page, (*itup), *len))
+ {
/* no space for insertion */
IndexTuple *itvec;
- int tlen;
+ int tlen;
ret |= SPLITED;
- itvec = gistreadbuffer( r, buffer, &tlen );
- itvec = gistjoinvector( itvec, &tlen, (*itup), *len );
- pfree( (*itup) );
- (*itup) = gistSplit( r, buffer, itvec, &tlen, giststate,
- (opaque->flags & F_LEAF) ? res : NULL ); /*res only for inserting in leaf*/
- ReleaseBuffer( buffer );
- pfree( itvec );
- *len = tlen; /* now tlen >= 2 */
- } else {
+ itvec = gistreadbuffer(r, buffer, &tlen);
+ itvec = gistjoinvector(itvec, &tlen, (*itup), *len);
+ pfree((*itup));
+ (*itup) = gistSplit(r, buffer, itvec, &tlen, giststate,
+ (opaque->flags & F_LEAF) ? res : NULL); /* res only for
+ * inserting in leaf */
+ ReleaseBuffer(buffer);
+ pfree(itvec);
+ *len = tlen; /* now tlen >= 2 */
+ }
+ else
+ {
/* enogth space */
- OffsetNumber off, l;
+ OffsetNumber off,
+ l;
- off = ( PageIsEmpty(page) ) ?
- FirstOffsetNumber
+ off = (PageIsEmpty(page)) ?
+ FirstOffsetNumber
:
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
- l = gistwritebuffer( r, page, (*itup), *len, off, giststate );
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ l = gistwritebuffer(r, page, (*itup), *len, off, giststate);
WriteBuffer(buffer);
- /* set res if insert into leaf page, in
- this case, len = 1 always */
- if ( res && (opaque->flags & F_LEAF) )
+ /*
+ * set res if insert into leaf page, in this case, len = 1 always
+ */
+ if (res && (opaque->flags & F_LEAF))
ItemPointerSet(&((*res)->pointerData), blkno, l);
- if ( *len > 1 ) { /* previos insert ret & SPLITED != 0 */
- int i;
- /* child was splited, so we must form union
- * for insertion in parent */
- IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
- for(i=0; i<*len; i++)
- pfree( (*itup)[i] );
+ if (*len > 1)
+ { /* previos insert ret & SPLITED != 0 */
+ int i;
+
+ /*
+ * child was splited, so we must form union for insertion in
+ * parent
+ */
+ IndexTuple newtup = gistunion(r, (*itup), *len, giststate);
+
+ for (i = 0; i < *len; i++)
+ pfree((*itup)[i]);
(*itup)[0] = newtup;
*len = 1;
}
}
-
- return ret;
-}
-/*
+ return ret;
+}
+
+/*
* Write itup vector to page, has no control of free space
*/
static OffsetNumber
-gistwritebuffer( Relation r, Page page, IndexTuple *itup,
- int len, OffsetNumber off, GISTSTATE *giststate) {
+gistwritebuffer(Relation r, Page page, IndexTuple *itup,
+ int len, OffsetNumber off, GISTSTATE *giststate)
+{
OffsetNumber l = InvalidOffsetNumber;
- int i;
- GISTENTRY tmpdentry;
- IndexTuple newtup;
-
- for(i=0; i<len; i++) {
- l = gistPageAddItem(giststate, r, page,
- (Item) itup[i], IndexTupleSize(itup[i]),
- off, LP_USED, &tmpdentry, &newtup);
- off = OffsetNumberNext( off );
+ int i;
+ GISTENTRY tmpdentry;
+ IndexTuple newtup;
+
+ for (i = 0; i < len; i++)
+ {
+ l = gistPageAddItem(giststate, r, page,
+ (Item) itup[i], IndexTupleSize(itup[i]),
+ off, LP_USED, &tmpdentry, &newtup);
+ off = OffsetNumberNext(off);
if (tmpdentry.pred != (((char *) itup[i]) + sizeof(IndexTupleData)) && tmpdentry.pred)
pfree(tmpdentry.pred);
if (itup[i] != newtup)
pfree(newtup);
}
- return l;
+ return l;
}
/*
* Check space for itup vector on page
*/
-static int
-gistnospace( Page page, IndexTuple *itvec, int len ) {
- int size = 0;
- int i;
- for(i=0; i<len; i++)
- size += IndexTupleSize( itvec[i] )+4; /* ??? */
+static int
+gistnospace(Page page, IndexTuple *itvec, int len)
+{
+ int size = 0;
+ int i;
- return (PageGetFreeSpace(page) < size);
-}
+ for (i = 0; i < len; i++)
+ size += IndexTupleSize(itvec[i]) + 4; /* ??? */
+
+ return (PageGetFreeSpace(page) < size);
+}
/*
* Read buffer into itup vector
*/
static IndexTuple *
-gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
- OffsetNumber i, maxoff;
- IndexTuple *itvec;
- Page p = (Page) BufferGetPage(buffer);
+gistreadbuffer(Relation r, Buffer buffer, int *len /* out */ )
+{
+ OffsetNumber i,
+ maxoff;
+ IndexTuple *itvec;
+ Page p = (Page) BufferGetPage(buffer);
- *len=0;
+ *len = 0;
maxoff = PageGetMaxOffsetNumber(p);
- itvec = palloc( sizeof(IndexTuple) * maxoff );
- for(i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
- itvec[ (*len)++ ] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
+ itvec = palloc(sizeof(IndexTuple) * maxoff);
+ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
+ itvec[(*len)++] = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));
return itvec;
}
@@ -596,9 +626,10 @@ gistreadbuffer( Relation r, Buffer buffer, int *len /*out*/) {
* join two vectors into one
*/
static IndexTuple *
-gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen ) {
- itvec = (IndexTuple*) repalloc( (void*)itvec, sizeof(IndexTuple) * ( (*len) + addlen ) );
- memmove( &itvec[*len], additvec, sizeof(IndexTuple) * addlen );
+gistjoinvector(IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen)
+{
+ itvec = (IndexTuple *) repalloc((void *) itvec, sizeof(IndexTuple) * ((*len) + addlen));
+ memmove(&itvec[*len], additvec, sizeof(IndexTuple) * addlen);
*len += addlen;
return itvec;
}
@@ -607,115 +638,124 @@ gistjoinvector( IndexTuple *itvec, int *len, IndexTuple *additvec, int addlen )
* return union of itup vector
*/
static IndexTuple
-gistunion( Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate ) {
- bytea *evec;
- char *datum;
- int datumsize, i;
- GISTENTRY centry;
- char isnull;
- IndexTuple newtup;
+gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate)
+{
+ bytea *evec;
+ char *datum;
+ int datumsize,
+ i;
+ GISTENTRY centry;
+ char isnull;
+ IndexTuple newtup;
evec = (bytea *) palloc(len * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = len * sizeof(GISTENTRY) + VARHDRSZ;
- for ( i = 0 ; i< len ; i++ )
+ for (i = 0; i < len; i++)
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[i],
- (char*) itvec[i] + sizeof(IndexTupleData),
- (Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
- IndexTupleSize((IndexTuple)itvec[i]) - sizeof(IndexTupleData), FALSE);
+ (char *) itvec[i] + sizeof(IndexTupleData),
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ IndexTupleSize((IndexTuple) itvec[i]) - sizeof(IndexTupleData), FALSE);
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize)));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize)));
+
+ for (i = 0; i < len; i++)
+ if (((GISTENTRY *) VARDATA(evec))[i].pred &&
+ ((GISTENTRY *) VARDATA(evec))[i].pred !=
+ ((char *) (itvec[i]) + sizeof(IndexTupleData)))
+ pfree(((GISTENTRY *) VARDATA(evec))[i].pred);
- for ( i = 0 ; i< len ; i++ )
- if ( ((GISTENTRY *) VARDATA(evec))[i].pred &&
- ((GISTENTRY *) VARDATA(evec))[i].pred !=
- ((char*)( itvec[i] )+ sizeof(IndexTupleData)) )
- pfree( ((GISTENTRY *) VARDATA(evec))[i].pred );
-
- pfree( evec );
+ pfree(evec);
- gistcentryinit(giststate, &centry, datum,
- (Relation)NULL, (Page)NULL, (OffsetNumber)NULL,
- datumsize, FALSE);
+ gistcentryinit(giststate, &centry, datum,
+ (Relation) NULL, (Page) NULL, (OffsetNumber) NULL,
+ datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
- newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
+ newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
if (centry.pred != datum)
- pfree( datum );
+ pfree(datum);
return newtup;
-}
+}
/*
* Forms union of oldtup and addtup, if union == oldtup then return NULL
*/
static IndexTuple
-gistgetadjusted( Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate ) {
- bytea *evec;
- char *datum;
- int datumsize;
- bool result;
- char isnull;
- GISTENTRY centry, *ev0p, *ev1p;
- IndexTuple newtup = NULL;
-
+gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
+{
+ bytea *evec;
+ char *datum;
+ int datumsize;
+ bool result;
+ char isnull;
+ GISTENTRY centry,
+ *ev0p,
+ *ev1p;
+ IndexTuple newtup = NULL;
+
evec = (bytea *) palloc(2 * sizeof(GISTENTRY) + VARHDRSZ);
VARATT_SIZEP(evec) = 2 * sizeof(GISTENTRY) + VARHDRSZ;
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[0],
- (char*) oldtup + sizeof(IndexTupleData), (Relation) NULL,
- (Page) NULL, (OffsetNumber) 0,
- IndexTupleSize((IndexTuple)oldtup) - sizeof(IndexTupleData), FALSE);
+ (char *) oldtup + sizeof(IndexTupleData), (Relation) NULL,
+ (Page) NULL, (OffsetNumber) 0,
+ IndexTupleSize((IndexTuple) oldtup) - sizeof(IndexTupleData), FALSE);
ev0p = &((GISTENTRY *) VARDATA(evec))[0];
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(evec))[1],
- (char*) addtup + sizeof(IndexTupleData), (Relation) NULL,
- (Page) NULL, (OffsetNumber) 0,
- IndexTupleSize((IndexTuple)addtup) - sizeof(IndexTupleData), FALSE);
+ (char *) addtup + sizeof(IndexTupleData), (Relation) NULL,
+ (Page) NULL, (OffsetNumber) 0,
+ IndexTupleSize((IndexTuple) addtup) - sizeof(IndexTupleData), FALSE);
ev1p = &((GISTENTRY *) VARDATA(evec))[1];
datum = (char *)
DatumGetPointer(FunctionCall2(&giststate->unionFn,
- PointerGetDatum(evec),
- PointerGetDatum(&datumsize)));
+ PointerGetDatum(evec),
+ PointerGetDatum(&datumsize)));
- if ( ! ( ev0p->pred && ev1p->pred ) ) {
- result = ( ev0p->pred == NULL && ev1p->pred == NULL );
- } else {
+ if (!(ev0p->pred && ev1p->pred))
+ result = (ev0p->pred == NULL && ev1p->pred == NULL);
+ else
+ {
FunctionCall3(&giststate->equalFn,
- PointerGetDatum(ev0p->pred),
- PointerGetDatum(datum),
- PointerGetDatum(&result));
+ PointerGetDatum(ev0p->pred),
+ PointerGetDatum(datum),
+ PointerGetDatum(&result));
}
- if ( result ) {
+ if (result)
+ {
/* not need to update key */
- pfree( datum );
- } else {
+ pfree(datum);
+ }
+ else
+ {
gistcentryinit(giststate, &centry, datum, ev0p->rel, ev0p->page,
- ev0p->offset, datumsize, FALSE);
+ ev0p->offset, datumsize, FALSE);
isnull = (centry.pred) ? ' ' : 'n';
- newtup = (IndexTuple) index_formtuple( r->rd_att, (Datum *) &centry.pred, &isnull );
- newtup->t_tid = oldtup->t_tid;
+ newtup = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &centry.pred, &isnull);
+ newtup->t_tid = oldtup->t_tid;
if (centry.pred != datum)
- pfree( datum );
+ pfree(datum);
}
- if ( ev0p->pred &&
- ev0p->pred != (char*) oldtup + sizeof(IndexTupleData) )
- pfree( ev0p->pred );
- if ( ev1p->pred &&
- ev1p->pred != (char*) addtup + sizeof(IndexTupleData) )
- pfree( ev1p->pred );
- pfree( evec );
+ if (ev0p->pred &&
+ ev0p->pred != (char *) oldtup + sizeof(IndexTupleData))
+ pfree(ev0p->pred);
+ if (ev1p->pred &&
+ ev1p->pred != (char *) addtup + sizeof(IndexTupleData))
+ pfree(ev1p->pred);
+ pfree(evec);
- return newtup;
+ return newtup;
}
-
+
/*
* gistSplit -- split a page in the tree.
*/
@@ -728,19 +768,27 @@ gistSplit(Relation r,
InsertIndexResult *res)
{
Page p;
- Buffer leftbuf, rightbuf;
- Page left, right;
- OffsetNumber *spl_left, *spl_right;
- IndexTuple *lvectup, *rvectup, *newtup;
- int leftoff, rightoff;
- BlockNumber lbknum, rbknum;
+ Buffer leftbuf,
+ rightbuf;
+ Page left,
+ right;
+ OffsetNumber *spl_left,
+ *spl_right;
+ IndexTuple *lvectup,
+ *rvectup,
+ *newtup;
+ int leftoff,
+ rightoff;
+ BlockNumber lbknum,
+ rbknum;
GISTPageOpaque opaque;
- char isnull;
+ char isnull;
GIST_SPLITVEC v;
bytea *entryvec;
bool *decompvec;
GISTENTRY tmpentry;
- int i, nlen;
+ int i,
+ nlen;
p = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(p);
@@ -773,17 +821,17 @@ gistSplit(Relation r,
right = (Page) BufferGetPage(rightbuf);
/* generate the item array */
- entryvec = (bytea *) palloc(VARHDRSZ + (*len+1) * sizeof(GISTENTRY));
- decompvec = (bool *) palloc(VARHDRSZ + (*len+1) * sizeof(bool));
- VARATT_SIZEP(entryvec) = (*len+1) * sizeof(GISTENTRY) + VARHDRSZ;
+ entryvec = (bytea *) palloc(VARHDRSZ + (*len + 1) * sizeof(GISTENTRY));
+ decompvec = (bool *) palloc(VARHDRSZ + (*len + 1) * sizeof(bool));
+ VARATT_SIZEP(entryvec) = (*len + 1) * sizeof(GISTENTRY) + VARHDRSZ;
for (i = 1; i <= *len; i++)
{
gistdentryinit(giststate, &((GISTENTRY *) VARDATA(entryvec))[i],
- (((char *) itup[i-1]) + sizeof(IndexTupleData)),
+ (((char *) itup[i - 1]) + sizeof(IndexTupleData)),
r, p, i,
- IndexTupleSize(itup[i-1]) - sizeof(IndexTupleData), FALSE);
+ IndexTupleSize(itup[i - 1]) - sizeof(IndexTupleData), FALSE);
if ((char *) (((GISTENTRY *) VARDATA(entryvec))[i].pred)
- == (((char *) itup[i-1]) + sizeof(IndexTupleData)))
+ == (((char *) itup[i - 1]) + sizeof(IndexTupleData)))
decompvec[i] = FALSE;
else
decompvec[i] = TRUE;
@@ -791,8 +839,8 @@ gistSplit(Relation r,
/* now let the user-defined picksplit function set up the split vector */
FunctionCall2(&giststate->picksplitFn,
- PointerGetDatum(entryvec),
- PointerGetDatum(&v));
+ PointerGetDatum(entryvec),
+ PointerGetDatum(&v));
/* clean up the entry vector: its preds need to be deleted, too */
for (i = 1; i <= *len; i++)
@@ -801,35 +849,43 @@ gistSplit(Relation r,
pfree(entryvec);
pfree(decompvec);
- spl_left = v.spl_left; spl_right = v.spl_right;
-
+ spl_left = v.spl_left;
+ spl_right = v.spl_right;
+
/* form left and right vector */
- lvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nleft );
- rvectup = (IndexTuple*) palloc( sizeof( IndexTuple )*v.spl_nright );
+ lvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nleft);
+ rvectup = (IndexTuple *) palloc(sizeof(IndexTuple) * v.spl_nright);
leftoff = rightoff = 0;
- for( i=1; i <= *len; i++ ) {
- if (i == *(spl_left) || ( i==*len && *(spl_left) != FirstOffsetNumber ) ) {
- lvectup[ leftoff++ ] = itup[ i-1 ];
+ for (i = 1; i <= *len; i++)
+ {
+ if (i == *(spl_left) || (i == *len && *(spl_left) != FirstOffsetNumber))
+ {
+ lvectup[leftoff++] = itup[i - 1];
spl_left++;
- } else {
- rvectup[ rightoff++ ] = itup[ i-1 ];
+ }
+ else
+ {
+ rvectup[rightoff++] = itup[i - 1];
spl_right++;
}
}
/* write on disk (may be need another split) */
- if ( gistnospace(right, rvectup, v.spl_nright) ) {
+ if (gistnospace(right, rvectup, v.spl_nright))
+ {
nlen = v.spl_nright;
- newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
- ( res && rvectup[ nlen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
- ReleaseBuffer( rightbuf );
- } else {
+ newtup = gistSplit(r, rightbuf, rvectup, &nlen, giststate,
+ (res && rvectup[nlen - 1] == itup[*len - 1]) ? res : NULL);
+ ReleaseBuffer(rightbuf);
+ }
+ else
+ {
OffsetNumber l;
-
- l = gistwritebuffer( r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate );
+
+ l = gistwritebuffer(r, right, rvectup, v.spl_nright, FirstOffsetNumber, giststate);
WriteBuffer(rightbuf);
- if ( res )
+ if (res)
ItemPointerSet(&((*res)->pointerData), rbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_rdatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@@ -839,32 +895,35 @@ gistSplit(Relation r,
v.spl_rdatum = tmpentry.pred;
nlen = 1;
- newtup = (IndexTuple*) palloc( sizeof(IndexTuple) * 1);
- isnull = ( v.spl_rdatum ) ? ' ' : 'n';
+ newtup = (IndexTuple *) palloc(sizeof(IndexTuple) * 1);
+ isnull = (v.spl_rdatum) ? ' ' : 'n';
newtup[0] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_rdatum), &isnull);
ItemPointerSet(&(newtup[0]->t_tid), rbknum, 1);
}
- if ( gistnospace(left, lvectup, v.spl_nleft) ) {
- int llen = v.spl_nleft;
+ if (gistnospace(left, lvectup, v.spl_nleft))
+ {
+ int llen = v.spl_nleft;
IndexTuple *lntup;
- lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
- ( res && lvectup[ llen-1 ] == itup[ *len - 1 ] ) ? res : NULL );
- ReleaseBuffer( leftbuf );
+ lntup = gistSplit(r, leftbuf, lvectup, &llen, giststate,
+ (res && lvectup[llen - 1] == itup[*len - 1]) ? res : NULL);
+ ReleaseBuffer(leftbuf);
- newtup = gistjoinvector( newtup, &nlen, lntup, llen );
- pfree( lntup );
- } else {
+ newtup = gistjoinvector(newtup, &nlen, lntup, llen);
+ pfree(lntup);
+ }
+ else
+ {
OffsetNumber l;
-
- l = gistwritebuffer( r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate );
- if ( BufferGetBlockNumber(buffer) != GISTP_ROOT)
+
+ l = gistwritebuffer(r, left, lvectup, v.spl_nleft, FirstOffsetNumber, giststate);
+ if (BufferGetBlockNumber(buffer) != GISTP_ROOT)
PageRestoreTempPage(left, p);
WriteBuffer(leftbuf);
- if ( res )
+ if (res)
ItemPointerSet(&((*res)->pointerData), lbknum, l);
gistcentryinit(giststate, &tmpentry, v.spl_ldatum, (Relation) NULL,
(Page) NULL, (OffsetNumber) 0,
@@ -874,10 +933,10 @@ gistSplit(Relation r,
v.spl_ldatum = tmpentry.pred;
nlen += 1;
- newtup = (IndexTuple*) repalloc( (void*)newtup, sizeof(IndexTuple) * nlen);
- isnull = ( v.spl_ldatum ) ? ' ' : 'n';
- newtup[nlen-1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
- ItemPointerSet(&(newtup[nlen-1]->t_tid), lbknum, 1);
+ newtup = (IndexTuple *) repalloc((void *) newtup, sizeof(IndexTuple) * nlen);
+ isnull = (v.spl_ldatum) ? ' ' : 'n';
+ newtup[nlen - 1] = (IndexTuple) index_formtuple(r->rd_att, (Datum *) &(v.spl_ldatum), &isnull);
+ ItemPointerSet(&(newtup[nlen - 1]->t_tid), lbknum, 1);
}
@@ -885,10 +944,10 @@ gistSplit(Relation r,
gistadjscans(r, GISTOP_SPLIT, BufferGetBlockNumber(buffer), FirstOffsetNumber);
/* !!! pfree */
- pfree( rvectup );
- pfree( lvectup );
- pfree( v.spl_left );
- pfree( v.spl_right );
+ pfree(rvectup);
+ pfree(lvectup);
+ pfree(v.spl_left);
+ pfree(v.spl_right);
*len = nlen;
return newtup;
@@ -903,8 +962,8 @@ gistnewroot(GISTSTATE *giststate, Relation r, IndexTuple *itup, int len)
b = ReadBuffer(r, GISTP_ROOT);
GISTInitBuffer(b, 0);
p = BufferGetPage(b);
-
- gistwritebuffer( r, p, itup, len, FirstOffsetNumber, giststate );
+
+ gistwritebuffer(r, p, itup, len, FirstOffsetNumber, giststate);
WriteBuffer(b);
}
@@ -1000,8 +1059,8 @@ gistfreestack(GISTSTACK *s)
Datum
gistdelete(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
@@ -1101,7 +1160,7 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
char *datum = (((char *) t) + sizeof(IndexTupleData));
/* if new entry fits in index tuple, copy it in */
- if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0 )
+ if ((Size) entry.bytes < IndexTupleSize(t) - sizeof(IndexTupleData) || (Size) entry.bytes == 0)
{
memcpy(datum, entry.pred, entry.bytes);
/* clear out old size */
@@ -1116,9 +1175,9 @@ gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t)
/* generate a new index tuple for the compressed entry */
TupleDesc tupDesc = r->rd_att;
IndexTuple newtup;
- char isnull;
+ char isnull;
- isnull = ( entry.pred ) ? ' ' : 'n';
+ isnull = (entry.pred) ? ' ' : 'n';
newtup = (IndexTuple) index_formtuple(tupDesc,
(Datum *) &(entry.pred),
&isnull);
@@ -1181,38 +1240,40 @@ gist_dumptree(Relation r, int level, BlockNumber blk, OffsetNumber coff)
Page page;
GISTPageOpaque opaque;
IndexTuple which;
- ItemId iid;
- OffsetNumber i,maxoff;
- BlockNumber cblk;
- char *pred;
+ ItemId iid;
+ OffsetNumber i,
+ maxoff;
+ BlockNumber cblk;
+ char *pred;
- pred = (char*) palloc( sizeof(char)*level+1 );
+ pred = (char *) palloc(sizeof(char) * level + 1);
MemSet(pred, '\t', level);
- pred[level]='\0';
+ pred[level] = '\0';
buffer = ReadBuffer(r, blk);
page = (Page) BufferGetPage(buffer);
opaque = (GISTPageOpaque) PageGetSpecialPointer(page);
-
- maxoff = PageGetMaxOffsetNumber( page );
-
- elog(NOTICE,"%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, ( opaque->flags & F_LEAF ) ? "LEAF" : "INTE", (int)blk, (int)maxoff, PageGetFreeSpace(page));
-
- for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) {
+
+ maxoff = PageGetMaxOffsetNumber(page);
+
+ elog(NOTICE, "%sPage: %d %s blk: %d maxoff: %d free: %d", pred, coff, (opaque->flags & F_LEAF) ? "LEAF" : "INTE", (int) blk, (int) maxoff, PageGetFreeSpace(page));
+
+ for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
+ {
iid = PageGetItemId(page, i);
which = (IndexTuple) PageGetItem(page, iid);
cblk = ItemPointerGetBlockNumber(&(which->t_tid));
-#ifdef PRINTTUPLE
- elog(NOTICE,"%s Tuple. blk: %d size: %d", pred, (int)cblk, IndexTupleSize( which ) );
-#endif
-
- if ( ! ( opaque->flags & F_LEAF ) ) {
- gist_dumptree( r, level+1, cblk, i );
- }
+#ifdef PRINTTUPLE
+ elog(NOTICE, "%s Tuple. blk: %d size: %d", pred, (int) cblk, IndexTupleSize(which));
+#endif
+
+ if (!(opaque->flags & F_LEAF))
+ gist_dumptree(r, level + 1, cblk, i);
}
ReleaseBuffer(buffer);
pfree(pred);
}
+
#endif /* defined GISTDEBUG */
void
@@ -1220,15 +1281,14 @@ gist_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_redo: unimplemented");
}
-
+
void
gist_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "gist_undo: unimplemented");
}
-
+
void
-gist_desc(char *buf, uint8 xl_info, char* rec)
+gist_desc(char *buf, uint8 xl_info, char *rec)
{
}
-
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index f7b49430d07..8f3b5dd475c 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -32,8 +32,8 @@ static bool gistindex_keytest(IndexTuple tuple, TupleDesc tupdesc,
Datum
gistgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/* if we have it cached in the scan desc, just return the value */
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index d37a8c07763..ba21fee3c33 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -72,9 +72,9 @@ gistbeginscan(PG_FUNCTION_ARGS)
Datum
gistrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- bool fromEnd = PG_GETARG_BOOL(1);
- ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ bool fromEnd = PG_GETARG_BOOL(1);
+ ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
GISTScanOpaque p;
int i;
@@ -160,7 +160,7 @@ gistrescan(PG_FUNCTION_ARGS)
Datum
gistmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
GISTScanOpaque p;
GISTSTACK *o,
*n,
@@ -196,7 +196,7 @@ gistmarkpos(PG_FUNCTION_ARGS)
Datum
gistrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
GISTScanOpaque p;
GISTSTACK *o,
*n,
@@ -232,8 +232,8 @@ gistrestrpos(PG_FUNCTION_ARGS)
Datum
gistendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- GISTScanOpaque p;
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ GISTScanOpaque p;
p = (GISTScanOpaque) s->opaque;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 44a8b225e8f..aa76ba232a0 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.49 2001/02/22 21:48:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.50 2001/03/22 03:59:12 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -41,12 +41,14 @@ bool BuildingHash = false;
Datum
hashbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -59,9 +61,11 @@ hashbuild(PG_FUNCTION_ARGS)
nitups;
HashItem hitem;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -117,6 +121,7 @@ hashbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -191,9 +196,7 @@ hashbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -241,12 +244,14 @@ hashbuild(PG_FUNCTION_ARGS)
Datum
hashinsert(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
HashItem hitem;
@@ -276,8 +281,8 @@ hashinsert(PG_FUNCTION_ARGS)
Datum
hashgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/*
@@ -326,11 +331,13 @@ hashbeginscan(PG_FUNCTION_ARGS)
Datum
hashrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
- bool fromEnd = PG_GETARG_BOOL(1);
+ bool fromEnd = PG_GETARG_BOOL(1);
+
#endif
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
HashScanOpaque so;
@@ -367,7 +374,7 @@ hashrescan(PG_FUNCTION_ARGS)
Datum
hashendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -405,7 +412,7 @@ hashendscan(PG_FUNCTION_ARGS)
Datum
hashmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -437,7 +444,7 @@ hashmarkpos(PG_FUNCTION_ARGS)
Datum
hashrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
HashScanOpaque so;
@@ -468,8 +475,8 @@ hashrestrpos(PG_FUNCTION_ARGS)
Datum
hashdelete(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
/* adjust any active scans that will be affected by this deletion */
_hash_adjscans(rel, tid);
@@ -491,8 +498,8 @@ hash_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "hash_undo: unimplemented");
}
-
+
void
-hash_desc(char *buf, uint8 xl_info, char* rec)
+hash_desc(char *buf, uint8 xl_info, char *rec)
{
}
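
Every hash.c entry point touched above follows the fmgr "version 1" calling convention: arguments arrive through PG_GETARG_* macros and results leave through PG_RETURN_* macros, which is why the reindentation falls so heavily on those declaration blocks. As a minimal, hypothetical illustration of that convention (not part of this patch; it assumes compilation against the backend headers of this era, e.g. as a loadable function):

	#include "postgres.h"
	#include "fmgr.h"

	PG_FUNCTION_INFO_V1(add_one);

	Datum
	add_one(PG_FUNCTION_ARGS)
	{
		int32		arg = PG_GETARG_INT32(0);	/* fetch first argument as int32 */

		PG_RETURN_INT32(arg + 1);	/* wrap the result back up as a Datum */
	}
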
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 30defc1a57b..4cb157c702c 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.29 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.30 2001/03/22 03:59:13 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -25,32 +25,32 @@
Datum
hashchar(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_CHAR(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_CHAR(0)));
}
Datum
hashint2(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT16(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_INT16(0)));
}
Datum
hashint4(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ PG_GETARG_UINT32(0));
+ PG_RETURN_UINT32(~PG_GETARG_UINT32(0));
}
Datum
hashint8(PG_FUNCTION_ARGS)
{
/* we just use the low 32 bits... */
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_INT64(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_INT64(0)));
}
Datum
hashoid(PG_FUNCTION_ARGS)
{
- PG_RETURN_UINT32(~ ((uint32) PG_GETARG_OID(0)));
+ PG_RETURN_UINT32(~((uint32) PG_GETARG_OID(0)));
}
Datum
@@ -93,7 +93,7 @@ hashint2vector(PG_FUNCTION_ARGS)
Datum
hashname(PG_FUNCTION_ARGS)
{
- char *key = NameStr(* PG_GETARG_NAME(0));
+ char *key = NameStr(*PG_GETARG_NAME(0));
return hash_any((char *) key, NAMEDATALEN);
}
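
The hashfunc.c hunks only drop the space after the one's-complement operator; the hash of these fixed-width types remains the bitwise complement of the key widened to uint32. A standalone sketch of that behaviour, in plain C so it compiles without the backend headers:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Same idea as hashchar/hashint2/hashint4 above: widen the key to
	 * 32 bits and return its one's complement.  The mapping is a
	 * bijection on uint32_t, so it adds no collisions of its own.
	 */
	static uint32_t
	hash_int32(int32_t key)
	{
		return ~((uint32_t) key);
	}

	int
	main(void)
	{
		printf("%u\n", (unsigned) hash_int32(0));	/* prints 4294967295 */
		printf("%u\n", (unsigned) hash_int32(42));	/* prints 4294967253 */
		return 0;
	}
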
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9748daa194d..b55717744c1 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,14 +8,14 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.110 2001/01/24 19:42:47 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.111 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
* heapgettup - fetch next heap tuple from a scan
* heap_open - open a heap relation by relationId
* heap_openr - open a heap relation by name
- * heap_open[r]_nofail - same, but return NULL on failure instead of elog
+ * heap_open[r]_nofail - same, but return NULL on failure instead of elog
* heap_close - close a heap relation
* heap_beginscan - begin relation scan
* heap_rescan - restart a relation scan
@@ -88,16 +88,16 @@
#include "access/xlogutils.h"
-XLogRecPtr log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup);
-XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
- char *unused, int unlen);
+XLogRecPtr log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup);
+XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
+ char *unused, int unlen);
/* comments are in heap_update */
-static xl_heaptid _locked_tuple_;
+static xl_heaptid _locked_tuple_;
static void _heap_unlock_tuple(void *data);
-static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
- ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
+static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf,
+ ItemPointerData from, Buffer newbuf, HeapTuple newtup, bool move);
/* ----------------------------------------------------------------
@@ -249,7 +249,7 @@ heapgettup(Relation relation,
OffsetNumber lineoff;
int linesleft;
ItemPointer tid = (tuple->t_data == NULL) ?
- (ItemPointer) NULL : &(tuple->t_self);
+ (ItemPointer) NULL : &(tuple->t_self);
/* ----------------
* increment access statistics
@@ -286,7 +286,7 @@ heapgettup(Relation relation,
if (!ItemPointerIsValid(tid))
Assert(!PointerIsValid(tid));
-
+
tuple->t_tableOid = relation->rd_id;
/* ----------------
@@ -538,9 +538,9 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
(
(tupleDesc)->attrs[(attnum) - 1]->attcacheoff >= 0 ?
(
- fetchatt((tupleDesc)->attrs[(attnum) - 1],
- (char *) (tup)->t_data + (tup)->t_data->t_hoff +
- (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
+ fetchatt((tupleDesc)->attrs[(attnum) - 1],
+ (char *) (tup)->t_data + (tup)->t_data->t_hoff +
+ (tupleDesc)->attrs[(attnum) - 1]->attcacheoff)
)
:
nocachegetattr((tup), (attnum), (tupleDesc), (isnull))
@@ -564,7 +564,8 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
)
);
}
-#endif /* defined(DISABLE_COMPLEX_MACRO)*/
+
+#endif /* defined(DISABLE_COMPLEX_MACRO) */
/* ----------------------------------------------------------------
@@ -791,8 +792,8 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys;
/*
- * we do this here instead of in initscan() because heap_rescan
- * also calls initscan() and we don't want to allocate memory again
+ * we do this here instead of in initscan() because heap_rescan also
+ * calls initscan() and we don't want to allocate memory again
*/
if (nkeys)
scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
@@ -1316,7 +1317,7 @@ heap_get_latest_tid(Relation relation,
Oid
heap_insert(Relation relation, HeapTuple tup)
{
- Buffer buffer;
+ Buffer buffer;
/* increment access statistics */
IncrHeapAccessStat(local_insert);
@@ -1350,7 +1351,7 @@ heap_insert(Relation relation, HeapTuple tup)
* toasted attributes from some other relation, invoke the toaster.
* ----------
*/
- if (HeapTupleHasExtended(tup) ||
+ if (HeapTupleHasExtended(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
heap_tuple_toast_attrs(relation, tup, NULL);
#endif
@@ -1364,17 +1365,17 @@ heap_insert(Relation relation, HeapTuple tup)
/* XLOG stuff */
{
- xl_heap_insert xlrec;
- xl_heap_header xlhdr;
- XLogRecPtr recptr;
- XLogRecData rdata[3];
- Page page = BufferGetPage(buffer);
- uint8 info = XLOG_HEAP_INSERT;
+ xl_heap_insert xlrec;
+ xl_heap_header xlhdr;
+ XLogRecPtr recptr;
+ XLogRecData rdata[3];
+ Page page = BufferGetPage(buffer);
+ uint8 info = XLOG_HEAP_INSERT;
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tup->t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapInsert;
rdata[0].next = &(rdata[1]);
@@ -1383,12 +1384,12 @@ heap_insert(Relation relation, HeapTuple tup)
xlhdr.t_hoff = tup->t_data->t_hoff;
xlhdr.mask = tup->t_data->t_infomask;
rdata[1].buffer = buffer;
- rdata[1].data = (char*)&xlhdr;
+ rdata[1].data = (char *) &xlhdr;
rdata[1].len = SizeOfHeapHeader;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = buffer;
- rdata[2].data = (char*) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
+ rdata[2].data = (char *) tup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[2].len = tup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[2].next = NULL;
@@ -1411,10 +1412,10 @@ heap_insert(Relation relation, HeapTuple tup)
WriteBuffer(buffer);
/*
- * If tuple is cachable, mark it for rollback from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "tup" data structure is all
- * in local memory, not in the shared buffer.
+ * If tuple is cachable, mark it for rollback from the caches in case
+ * we abort. Note it is OK to do this after WriteBuffer releases the
+ * buffer, because the "tup" data structure is all in local memory,
+ * not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, tup);
@@ -1513,14 +1514,14 @@ l1:
HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
/* XLOG stuff */
{
- xl_heap_delete xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_heap_delete xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.target.node = relation->rd_node;
xlrec.target.tid = tp.t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapDelete;
rdata[0].next = &(rdata[1]);
@@ -1551,9 +1552,10 @@ l1:
#endif
/*
- * Mark tuple for invalidation from system caches at next command boundary.
- * We have to do this before WriteBuffer because we need to look at the
- * contents of the tuple, so we need to hold our refcount on the buffer.
+ * Mark tuple for invalidation from system caches at next command
+ * boundary. We have to do this before WriteBuffer because we need to
+ * look at the contents of the tuple, so we need to hold our refcount
+ * on the buffer.
*/
RelationInvalidateHeapTuple(relation, &tp);
@@ -1567,7 +1569,7 @@ l1:
*
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via elog().
*/
void
@@ -1636,6 +1638,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
oldtup.t_data = (HeapTupleHeader) PageGetItem(dp, lp);
oldtup.t_len = ItemIdGetLength(lp);
oldtup.t_self = *otid;
+
/*
* Note: beyond this point, use oldtup not otid to refer to old tuple.
* otid may very well point at newtup->t_self, which we will overwrite
@@ -1701,23 +1704,24 @@ l2:
/*
* If the toaster needs to be activated, OR if the new tuple will not
- * fit on the same page as the old, then we need to release the context
- * lock (but not the pin!) on the old tuple's buffer while we are off
- * doing TOAST and/or table-file-extension work. We must mark the old
- * tuple to show that it's already being updated, else other processes
- * may try to update it themselves. To avoid second XLOG log record,
- * we use xact mgr hook to unlock old tuple without reading log if xact
- * will abort before update is logged. In the event of crash prio logging,
- * TQUAL routines will see HEAP_XMAX_UNLOGGED flag...
+ * fit on the same page as the old, then we need to release the
+ * context lock (but not the pin!) on the old tuple's buffer while we
+ * are off doing TOAST and/or table-file-extension work. We must mark
+ * the old tuple to show that it's already being updated, else other
+ * processes may try to update it themselves. To avoid second XLOG log
+ * record, we use xact mgr hook to unlock old tuple without reading
+ * log if xact will abort before update is logged. In the event of
+ * crash prio logging, TQUAL routines will see HEAP_XMAX_UNLOGGED
+ * flag...
*
- * NOTE: this trick is useless currently but saved for future
- * when we'll implement UNDO and will re-use transaction IDs
- * after postmaster startup.
+ * NOTE: this trick is useless currently but saved for future when we'll
+ * implement UNDO and will re-use transaction IDs after postmaster
+ * startup.
*
* We need to invoke the toaster if there are already any toasted values
* present, or if the new tuple is over-threshold.
*/
- need_toast = (HeapTupleHasExtended(&oldtup) ||
+ need_toast = (HeapTupleHasExtended(&oldtup) ||
HeapTupleHasExtended(newtup) ||
(MAXALIGN(newtup->t_len) > TOAST_TUPLE_THRESHOLD));
@@ -1726,7 +1730,7 @@ l2:
{
_locked_tuple_.node = relation->rd_node;
_locked_tuple_.tid = oldtup.t_self;
- XactPushRollback(_heap_unlock_tuple, (void*) &_locked_tuple_);
+ XactPushRollback(_heap_unlock_tuple, (void *) &_locked_tuple_);
TransactionIdStore(GetCurrentTransactionId(),
&(oldtup.t_data->t_xmax));
@@ -1762,7 +1766,7 @@ l2:
/* NO ELOG(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
if (already_marked)
{
@@ -1784,7 +1788,7 @@ l2:
/* XLOG stuff */
{
- XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
+ XLogRecPtr recptr = log_heap_update(relation, buffer, oldtup.t_self,
newbuf, newtup, false);
if (newbuf != buffer)
@@ -1814,10 +1818,10 @@ l2:
WriteBuffer(buffer);
/*
- * If new tuple is cachable, mark it for rollback from the caches
- * in case we abort. Note it is OK to do this after WriteBuffer
- * releases the buffer, because the "newtup" data structure is all
- * in local memory, not in the shared buffer.
+ * If new tuple is cachable, mark it for rollback from the caches in
+ * case we abort. Note it is OK to do this after WriteBuffer releases
+ * the buffer, because the "newtup" data structure is all in local
+ * memory, not in the shared buffer.
*/
RelationMark4RollbackHeapTuple(relation, newtup);
@@ -1829,7 +1833,7 @@ l2:
*
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
- * on the relation associated with the tuple). Any failure is reported
+ * on the relation associated with the tuple). Any failure is reported
* via elog().
*/
void
@@ -2129,14 +2133,14 @@ heap_restrpos(HeapScanDesc scan)
XLogRecPtr
log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
{
- xl_heap_clean xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[3];
+ xl_heap_clean xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[3];
xlrec.node = reln->rd_node;
xlrec.block = BufferGetBlockNumber(buffer);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapClean;
rdata[0].next = &(rdata[1]);
@@ -2157,27 +2161,27 @@ log_heap_clean(Relation reln, Buffer buffer, char *unused, int unlen)
recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CLEAN, rdata);
- return(recptr);
+ return (recptr);
}
static XLogRecPtr
-log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
+log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
Buffer newbuf, HeapTuple newtup, bool move)
{
- char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
- xl_heap_update xlrec;
- xl_heap_header *xlhdr = (xl_heap_header*) tbuf;
- int hsize = SizeOfHeapHeader;
- XLogRecPtr recptr;
- XLogRecData rdata[4];
- Page page = BufferGetPage(newbuf);
- uint8 info = (move) ? XLOG_HEAP_MOVE : XLOG_HEAP_UPDATE;
+ char tbuf[MAXALIGN(sizeof(xl_heap_header)) + 2 * sizeof(TransactionId)];
+ xl_heap_update xlrec;
+ xl_heap_header *xlhdr = (xl_heap_header *) tbuf;
+ int hsize = SizeOfHeapHeader;
+ XLogRecPtr recptr;
+ XLogRecData rdata[4];
+ Page page = BufferGetPage(newbuf);
+ uint8 info = (move) ? XLOG_HEAP_MOVE : XLOG_HEAP_UPDATE;
xlrec.target.node = reln->rd_node;
xlrec.target.tid = from;
xlrec.newtid = newtup->t_self;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfHeapUpdate;
rdata[0].next = &(rdata[1]);
@@ -2190,9 +2194,9 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
xlhdr->t_natts = newtup->t_data->t_natts;
xlhdr->t_hoff = newtup->t_data->t_hoff;
xlhdr->mask = newtup->t_data->t_infomask;
- if (move) /* remember xmin & xmax */
+ if (move) /* remember xmin & xmax */
{
- TransactionId xmax;
+ TransactionId xmax;
if (newtup->t_data->t_infomask & HEAP_XMAX_INVALID ||
newtup->t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
@@ -2200,17 +2204,17 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
else
xmax = newtup->t_data->t_xmax;
memcpy(tbuf + hsize, &xmax, sizeof(TransactionId));
- memcpy(tbuf + hsize + sizeof(TransactionId),
- &(newtup->t_data->t_xmin), sizeof(TransactionId));
+ memcpy(tbuf + hsize + sizeof(TransactionId),
+ &(newtup->t_data->t_xmin), sizeof(TransactionId));
hsize += (2 * sizeof(TransactionId));
}
rdata[2].buffer = newbuf;
- rdata[2].data = (char*)xlhdr;
+ rdata[2].data = (char *) xlhdr;
rdata[2].len = hsize;
rdata[2].next = &(rdata[3]);
rdata[3].buffer = newbuf;
- rdata[3].data = (char*) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
+ rdata[3].data = (char *) newtup->t_data + offsetof(HeapTupleHeaderData, t_bits);
rdata[3].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits);
rdata[3].next = NULL;
@@ -2224,23 +2228,23 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
recptr = XLogInsert(RM_HEAP_ID, info, rdata);
- return(recptr);
+ return (recptr);
}
XLogRecPtr
-log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup)
+log_heap_move(Relation reln, Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup)
{
- return(log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
+ return (log_heap_update(reln, oldbuf, from, newbuf, newtup, true));
}
static void
heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_clean *xlrec = (xl_heap_clean*) XLogRecGetData(record);
- Relation reln;
- Buffer buffer;
- Page page;
+ xl_heap_clean *xlrec = (xl_heap_clean *) XLogRecGetData(record);
+ Relation reln;
+ Buffer buffer;
+ Page page;
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2266,15 +2270,15 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfHeapClean)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- char *unend;
- ItemId lp;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ char *unend;
+ ItemId lp;
- memcpy(unbuf, (char*)xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
+ memcpy(unbuf, (char *) xlrec + SizeOfHeapClean, record->xl_len - SizeOfHeapClean);
unend = unbuf + (record->xl_len - SizeOfHeapClean);
- while((char*)unused < unend)
+ while ((char *) unused < unend)
{
lp = ((PageHeader) page)->pd_linp + *unused;
lp->lp_flags &= ~LP_USED;
@@ -2289,13 +2293,13 @@ heap_xlog_clean(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_delete *xlrec = (xl_heap_delete*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId lp = NULL;
- HeapTupleHeader htup;
+ xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2303,7 +2307,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
+ buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
@@ -2320,7 +2324,8 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
return;
}
}
- else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@@ -2337,7 +2342,7 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_xmax = record->xl_xid;
htup->t_cmax = FirstCommandId;
htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
- HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+ HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@@ -2350,12 +2355,12 @@ heap_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_heap_insert *xlrec = (xl_heap_insert*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- HeapTupleHeader htup;
+ xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ HeapTupleHeader htup;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
@@ -2363,7 +2368,7 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer((redo) ? true : false, reln,
+ buffer = XLogReadBuffer((redo) ? true : false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
return;
@@ -2375,9 +2380,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
- char tbuf[MaxTupleSize];
- xl_heap_header xlhdr;
- uint32 newlen;
+ char tbuf[MaxTupleSize];
+ xl_heap_header xlhdr;
+ uint32 newlen;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
@@ -2396,9 +2401,9 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_insert_redo: invalid max offset number");
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
- memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
- memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
- (char*)xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
+ memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapInsert, SizeOfHeapHeader);
+ memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
+ (char *) xlrec + SizeOfHeapInsert + SizeOfHeapHeader, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@@ -2408,19 +2413,20 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
htup->t_cmin = FirstCommandId;
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
-
- offnum = PageAddItem(page, (Item)htup, newlen, offnum,
- LP_USED | OverwritePageMode);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum,
+ LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_insert_redo: failed to add tuple");
PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID); /* prev sui */
+ PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
return;
}
/* undo insert */
- if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_insert_undo: bad page LSN");
elog(STOP, "heap_insert_undo: unimplemented");
@@ -2432,16 +2438,16 @@ heap_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
{
- xl_heap_update *xlrec = (xl_heap_update*) XLogRecGetData(record);
- Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
- Buffer buffer;
- bool samepage =
- (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
- Page page;
- OffsetNumber offnum;
- ItemId lp = NULL;
- HeapTupleHeader htup;
+ xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
+ Relation reln = XLogOpenRelation(redo, RM_HEAP_ID, xlrec->target.node);
+ Buffer buffer;
+ bool samepage =
+ (ItemPointerGetBlockNumber(&(xlrec->newtid)) ==
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp = NULL;
+ HeapTupleHeader htup;
if (!RelationIsValid(reln))
return;
@@ -2451,7 +2457,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
/* Deal with old tuple version */
- buffer = XLogReadBuffer(false, reln,
+ buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "heap_update_%sdo: no block", (redo) ? "re" : "un");
@@ -2470,7 +2476,8 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
goto newt;
}
}
- else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
@@ -2487,7 +2494,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
if (move)
{
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
- htup->t_infomask &=
+ htup->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
htup->t_infomask |= HEAP_MOVED_OFF;
}
@@ -2496,7 +2503,7 @@ heap_xlog_update(bool redo, XLogRecPtr lsn, XLogRecord *record, bool move)
htup->t_xmax = record->xl_xid;
htup->t_cmax = FirstCommandId;
htup->t_infomask &= ~(HEAP_XMAX_COMMITTED |
- HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
+ HEAP_XMAX_INVALID | HEAP_MARKED_FOR_UPDATE);
}
if (samepage)
goto newsame;
@@ -2514,11 +2521,11 @@ newt:;
if (redo &&
((record->xl_info & XLR_BKP_BLOCK_2) ||
- ((record->xl_info & XLR_BKP_BLOCK_1) && samepage)))
+ ((record->xl_info & XLR_BKP_BLOCK_1) && samepage)))
return;
- buffer = XLogReadBuffer((redo) ? true : false, reln,
- ItemPointerGetBlockNumber(&(xlrec->newtid)));
+ buffer = XLogReadBuffer((redo) ? true : false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->newtid)));
if (!BufferIsValid(buffer))
return;
@@ -2531,10 +2538,10 @@ newsame:;
if (redo)
{
- char tbuf[MaxTupleSize];
- xl_heap_header xlhdr;
- int hsize;
- uint32 newlen;
+ char tbuf[MaxTupleSize];
+ xl_heap_header xlhdr;
+ int hsize;
+ uint32 newlen;
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
@@ -2557,9 +2564,9 @@ newsame:;
hsize += (2 * sizeof(TransactionId));
newlen = record->xl_len - hsize;
- memcpy((char*)&xlhdr, (char*)xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
- memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
- (char*)xlrec + hsize, newlen);
+ memcpy((char *) &xlhdr, (char *) xlrec + SizeOfHeapUpdate, SizeOfHeapHeader);
+ memcpy(tbuf + offsetof(HeapTupleHeaderData, t_bits),
+ (char *) xlrec + hsize, newlen);
newlen += offsetof(HeapTupleHeaderData, t_bits);
htup = (HeapTupleHeader) tbuf;
htup->t_oid = xlhdr.t_oid;
@@ -2568,13 +2575,13 @@ newsame:;
if (move)
{
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
- memcpy(&(htup->t_xmax), (char*)xlrec + hsize, sizeof(TransactionId));
- memcpy(&(htup->t_xmin),
- (char*)xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
+ memcpy(&(htup->t_xmax), (char *) xlrec + hsize, sizeof(TransactionId));
+ memcpy(&(htup->t_xmin),
+ (char *) xlrec + hsize + sizeof(TransactionId), sizeof(TransactionId));
TransactionIdStore(record->xl_xid, (TransactionId *) &(htup->t_cmin));
htup->t_infomask = xlhdr.mask;
- htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
+ htup->t_infomask &= ~(HEAP_XMIN_COMMITTED |
+ HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
htup->t_infomask |= HEAP_MOVED_IN;
}
else
@@ -2584,19 +2591,20 @@ newsame:;
htup->t_xmax = htup->t_cmax = 0;
htup->t_infomask = HEAP_XMAX_INVALID | xlhdr.mask;
}
-
- offnum = PageAddItem(page, (Item)htup, newlen, offnum,
- LP_USED | OverwritePageMode);
+
+ offnum = PageAddItem(page, (Item) htup, newlen, offnum,
+ LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
elog(STOP, "heap_update_redo: failed to add tuple");
PageSetLSN(page, lsn);
- PageSetSUI(page, ThisStartUpID); /* prev sui */
+ PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
return;
}
/* undo */
- if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied ?! */
+ if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
+ * ?! */
elog(STOP, "heap_update_undo: bad new tuple page LSN");
elog(STOP, "heap_update_undo: unimplemented");
@@ -2606,19 +2614,19 @@ newsame:;
static void
_heap_unlock_tuple(void *data)
{
- xl_heaptid *xltid = (xl_heaptid*) data;
- Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId lp;
- HeapTupleHeader htup;
+ xl_heaptid *xltid = (xl_heaptid *) data;
+ Relation reln = XLogOpenRelation(false, RM_HEAP_ID, xltid->node);
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId lp;
+ HeapTupleHeader htup;
if (!RelationIsValid(reln))
elog(STOP, "_heap_unlock_tuple: can't open relation");
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xltid->tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xltid->tid)));
if (!BufferIsValid(buffer))
elog(STOP, "_heap_unlock_tuple: can't read buffer");
@@ -2636,8 +2644,8 @@ _heap_unlock_tuple(void *data)
htup = (HeapTupleHeader) PageGetItem(page, lp);
- if (htup->t_xmax != GetCurrentTransactionId() ||
- htup->t_cmax != GetCurrentCommandId())
+ if (htup->t_xmax != GetCurrentTransactionId() ||
+ htup->t_cmax != GetCurrentCommandId())
elog(STOP, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
htup->t_infomask &= ~HEAP_XMAX_UNLOGGED;
htup->t_infomask |= HEAP_XMAX_INVALID;
@@ -2645,9 +2653,10 @@ _heap_unlock_tuple(void *data)
return;
}
-void heap_redo(XLogRecPtr lsn, XLogRecord *record)
+void
+heap_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
@@ -2664,9 +2673,10 @@ void heap_redo(XLogRecPtr lsn, XLogRecord *record)
elog(STOP, "heap_redo: unknown op code %u", info);
}
-void heap_undo(XLogRecPtr lsn, XLogRecord *record)
+void
+heap_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
@@ -2687,46 +2697,50 @@ static void
out_target(char *buf, xl_heaptid *target)
{
sprintf(buf + strlen(buf), "node %u/%u; tid %u/%u",
- target->node.tblNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.tblNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
-
+
void
-heap_desc(char *buf, uint8 xl_info, char* rec)
+heap_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
info &= XLOG_HEAP_OPMASK;
if (info == XLOG_HEAP_INSERT)
{
- xl_heap_insert *xlrec = (xl_heap_insert*) rec;
+ xl_heap_insert *xlrec = (xl_heap_insert *) rec;
+
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_DELETE)
{
- xl_heap_delete *xlrec = (xl_heap_delete*) rec;
+ xl_heap_delete *xlrec = (xl_heap_delete *) rec;
+
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_HEAP_UPDATE || info == XLOG_HEAP_MOVE)
{
- xl_heap_update *xlrec = (xl_heap_update*) rec;
+ xl_heap_update *xlrec = (xl_heap_update *) rec;
+
if (info == XLOG_HEAP_UPDATE)
strcat(buf, "update: ");
else
strcat(buf, "move: ");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_CLEAN)
{
- xl_heap_clean *xlrec = (xl_heap_clean*) rec;
+ xl_heap_clean *xlrec = (xl_heap_clean *) rec;
+
sprintf(buf + strlen(buf), "clean: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
+ xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
}
else
strcat(buf, "UNKNOWN");
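
Beyond the reflowed comments, the heapam.c hunks show the pattern this file uses to emit WAL: each operation builds a chain of XLogRecData entries and hands it to XLogInsert(). Entries with buffer = InvalidBuffer are always written to the log; entries tied to a buffer can be superseded by a full-page backup block, which is why the redo routines above bail out early when record->xl_info has XLR_BKP_BLOCK_1 set. A trimmed sketch of the chain built for a delete, assuming this era's backend headers and the surrounding heap_delete() context (relation, tp, buffer):

	xl_heap_delete	xlrec;
	XLogRecPtr		recptr;
	XLogRecData		rdata[2];

	xlrec.target.node = relation->rd_node;
	xlrec.target.tid = tp.t_self;

	rdata[0].buffer = InvalidBuffer;	/* record body: always logged */
	rdata[0].data = (char *) &xlrec;
	rdata[0].len = SizeOfHeapDelete;
	rdata[0].next = &(rdata[1]);

	rdata[1].buffer = buffer;			/* page reference: may become a backup block */
	rdata[1].data = NULL;
	rdata[1].len = 0;
	rdata[1].next = NULL;

	recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE, rdata);
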
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 64e7262e868..94dedbf87b9 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.35 2001/01/24 19:42:48 momjian Exp $
+ * $Id: hio.c,v 1.36 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
#include "access/hio.h"
/*
- * RelationPutHeapTuple - place tuple at specified page
+ * RelationPutHeapTuple - place tuple at specified page
*
* !!! ELOG(ERROR) IS DISALLOWED HERE !!!
*
@@ -69,7 +69,7 @@ RelationPutHeapTuple(Relation relation,
*
* Returns (locked) buffer with free space >= given len.
*
- * Note that we use LockPage to lock relation for extension. We can
+ * Note that we use LockPage to lock relation for extension. We can
* do this as long as in all other places we use page-level locking
* for indices only. Alternatively, we could define pseudo-table as
* we do for transactions with XactLockTable.
@@ -92,7 +92,7 @@ RelationGetBufferForTuple(Relation relation, Size len)
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %lu, max size %ld",
- (unsigned long)len, MaxTupleSize);
+ (unsigned long) len, MaxTupleSize);
if (!relation->rd_myxactonly)
LockPage(relation, 0, ExclusiveLock);
@@ -140,13 +140,13 @@ RelationGetBufferForTuple(Relation relation, Size len)
{
/* We should not get here given the test at the top */
elog(STOP, "Tuple is too big: size %lu",
- (unsigned long)len);
+ (unsigned long) len);
}
}
if (!relation->rd_myxactonly)
UnlockPage(relation, 0, ExclusiveLock);
- return(buffer);
+ return (buffer);
}
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index a3cf6ae7116..d0e60681e77 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.17 2001/02/15 20:57:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.18 2001/03/22 03:59:13 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -41,12 +41,12 @@
#undef TOAST_DEBUG
-static void toast_delete(Relation rel, HeapTuple oldtup);
-static void toast_delete_datum(Relation rel, Datum value);
-static void toast_insert_or_update(Relation rel, HeapTuple newtup,
- HeapTuple oldtup);
-static Datum toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value);
-static varattrib *toast_fetch_datum(varattrib *attr);
+static void toast_delete(Relation rel, HeapTuple oldtup);
+static void toast_delete_datum(Relation rel, Datum value);
+static void toast_insert_or_update(Relation rel, HeapTuple newtup,
+ HeapTuple oldtup);
+static Datum toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value);
+static varattrib *toast_fetch_datum(varattrib *attr);
/* ----------
@@ -70,14 +70,14 @@ heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
/* ----------
* heap_tuple_fetch_attr -
*
- * Public entry point to get back a toasted value
+ * Public entry point to get back a toasted value
* external storage (possibly still in compressed format).
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_fetch_attr(varattrib *attr)
{
- varattrib *result;
+ varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -94,7 +94,7 @@ heap_tuple_fetch_attr(varattrib *attr)
* ----------
*/
result = attr;
- }
+ }
return result;
}
@@ -107,10 +107,10 @@ heap_tuple_fetch_attr(varattrib *attr)
* or external storage.
* ----------
*/
-varattrib *
+varattrib *
heap_tuple_untoast_attr(varattrib *attr)
{
- varattrib *result;
+ varattrib *result;
if (VARATT_IS_EXTERNAL(attr))
{
@@ -121,14 +121,14 @@ heap_tuple_untoast_attr(varattrib *attr)
* Fetch it from the toast heap and decompress.
* ----------
*/
- varattrib *tmp;
+ varattrib *tmp;
tmp = toast_fetch_datum(attr);
- result = (varattrib *)palloc(attr->va_content.va_external.va_rawsize
- + VARHDRSZ);
+ result = (varattrib *) palloc(attr->va_content.va_external.va_rawsize
+ + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_external.va_rawsize
- + VARHDRSZ;
- pglz_decompress((PGLZ_Header *)tmp, VARATT_DATA(result));
+ + VARHDRSZ;
+ pglz_decompress((PGLZ_Header *) tmp, VARATT_DATA(result));
pfree(tmp);
}
@@ -147,11 +147,11 @@ heap_tuple_untoast_attr(varattrib *attr)
* This is a compressed value inside of the main tuple
* ----------
*/
- result = (varattrib *)palloc(attr->va_content.va_compressed.va_rawsize
- + VARHDRSZ);
+ result = (varattrib *) palloc(attr->va_content.va_compressed.va_rawsize
+ + VARHDRSZ);
VARATT_SIZEP(result) = attr->va_content.va_compressed.va_rawsize
- + VARHDRSZ;
- pglz_decompress((PGLZ_Header *)attr, VARATT_DATA(result));
+ + VARHDRSZ;
+ pglz_decompress((PGLZ_Header *) attr, VARATT_DATA(result));
}
else
/* ----------
@@ -173,21 +173,21 @@ heap_tuple_untoast_attr(varattrib *attr)
static void
toast_delete(Relation rel, HeapTuple oldtup)
{
- TupleDesc tupleDesc;
- Form_pg_attribute *att;
- int numAttrs;
- int i;
- Datum value;
- bool isnull;
+ TupleDesc tupleDesc;
+ Form_pg_attribute *att;
+ int numAttrs;
+ int i;
+ Datum value;
+ bool isnull;
/* ----------
* Get the tuple descriptor, the number of and attribute
* descriptors.
* ----------
*/
- tupleDesc = rel->rd_att;
- numAttrs = tupleDesc->natts;
- att = tupleDesc->attrs;
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+ att = tupleDesc->attrs;
/* ----------
* Check for external stored attributes and delete them
@@ -216,35 +216,35 @@ toast_delete(Relation rel, HeapTuple oldtup)
static void
toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
{
- TupleDesc tupleDesc;
- Form_pg_attribute *att;
- int numAttrs;
- int i;
- bool old_isnull;
- bool new_isnull;
-
- bool need_change = false;
- bool need_free = false;
- bool need_delold = false;
- bool has_nulls = false;
-
- Size maxDataLen;
-
- char toast_action[MaxHeapAttributeNumber];
- char toast_nulls[MaxHeapAttributeNumber];
- Datum toast_values[MaxHeapAttributeNumber];
- int32 toast_sizes[MaxHeapAttributeNumber];
- bool toast_free[MaxHeapAttributeNumber];
- bool toast_delold[MaxHeapAttributeNumber];
+ TupleDesc tupleDesc;
+ Form_pg_attribute *att;
+ int numAttrs;
+ int i;
+ bool old_isnull;
+ bool new_isnull;
+
+ bool need_change = false;
+ bool need_free = false;
+ bool need_delold = false;
+ bool has_nulls = false;
+
+ Size maxDataLen;
+
+ char toast_action[MaxHeapAttributeNumber];
+ char toast_nulls[MaxHeapAttributeNumber];
+ Datum toast_values[MaxHeapAttributeNumber];
+ int32 toast_sizes[MaxHeapAttributeNumber];
+ bool toast_free[MaxHeapAttributeNumber];
+ bool toast_delold[MaxHeapAttributeNumber];
/* ----------
* Get the tuple descriptor, the number of and attribute
* descriptors and the location of the tuple values.
* ----------
*/
- tupleDesc = rel->rd_att;
- numAttrs = tupleDesc->natts;
- att = tupleDesc->attrs;
+ tupleDesc = rel->rd_att;
+ numAttrs = tupleDesc->natts;
+ att = tupleDesc->attrs;
/* ----------
* Then collect information about the values given
@@ -255,14 +255,14 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* 'x' incompressible, but OK to move off
* ----------
*/
- memset(toast_action, ' ', numAttrs * sizeof(char));
- memset(toast_nulls, ' ', numAttrs * sizeof(char));
- memset(toast_free, 0, numAttrs * sizeof(bool));
- memset(toast_delold, 0, numAttrs * sizeof(bool));
+ memset(toast_action, ' ', numAttrs * sizeof(char));
+ memset(toast_nulls, ' ', numAttrs * sizeof(char));
+ memset(toast_free, 0, numAttrs * sizeof(bool));
+ memset(toast_delold, 0, numAttrs * sizeof(bool));
for (i = 0; i < numAttrs; i++)
{
- varattrib *old_value;
- varattrib *new_value;
+ varattrib *old_value;
+ varattrib *new_value;
if (oldtup != NULL)
{
@@ -270,25 +270,25 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For UPDATE get the old and new values of this attribute
* ----------
*/
- old_value = (varattrib *)DatumGetPointer(
- heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
- toast_values[i] =
- heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
- new_value = (varattrib *)DatumGetPointer(toast_values[i]);
+ old_value = (varattrib *) DatumGetPointer(
+ heap_getattr(oldtup, i + 1, tupleDesc, &old_isnull));
+ toast_values[i] =
+ heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
+ new_value = (varattrib *) DatumGetPointer(toast_values[i]);
/* ----------
* If the old value is an external stored one, check if it
* has changed so we have to delete it later.
* ----------
*/
- if (!old_isnull && att[i]->attlen == -1 &&
- VARATT_IS_EXTERNAL(old_value))
+ if (!old_isnull && att[i]->attlen == -1 &&
+ VARATT_IS_EXTERNAL(old_value))
{
if (new_isnull || !VARATT_IS_EXTERNAL(new_value) ||
- old_value->va_content.va_external.va_rowid !=
- new_value->va_content.va_external.va_rowid ||
- old_value->va_content.va_external.va_attno !=
- new_value->va_content.va_external.va_attno)
+ old_value->va_content.va_external.va_rowid !=
+ new_value->va_content.va_external.va_rowid ||
+ old_value->va_content.va_external.va_attno !=
+ new_value->va_content.va_external.va_attno)
{
/* ----------
* The old external store value isn't needed any
@@ -318,8 +318,8 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* For INSERT simply get the new value
* ----------
*/
- toast_values[i] =
- heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
+ toast_values[i] =
+ heap_getattr(newtup, i + 1, tupleDesc, &new_isnull);
}
/* ----------
@@ -356,7 +356,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (VARATT_IS_EXTERNAL(DatumGetPointer(toast_values[i])))
{
toast_values[i] = PointerGetDatum(heap_tuple_untoast_attr(
- (varattrib *)DatumGetPointer(toast_values[i])));
+ (varattrib *) DatumGetPointer(toast_values[i])));
toast_free[i] = true;
need_change = true;
need_free = true;
@@ -366,7 +366,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Remember the size of this attribute
* ----------
*/
- toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
+ toast_sizes[i] = VARATT_SIZE(DatumGetPointer(toast_values[i]));
}
else
{
@@ -375,7 +375,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
toast_action[i] = 'p';
- toast_sizes[i] = att[i]->attlen;
+ toast_sizes[i] = att[i]->attlen;
}
}
@@ -384,7 +384,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*
* 1: Inline compress attributes with attstorage 'x'
* 2: Store attributes with attstorage 'x' or 'e' external
- * 3: Inline compress attributes with attstorage 'm'
+ * 3: Inline compress attributes with attstorage 'm'
* 4: Store attributes with attstorage 'm' external
* ----------
*/
@@ -398,12 +398,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen)
+ maxDataLen)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
- Datum new_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
+ Datum new_value;
/* ----------
* Search for the biggest yet uncompressed internal attribute
@@ -420,7 +420,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -431,24 +431,28 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Attempt to compress it inline
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- new_value = toast_compress_datum(old_value);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ new_value = toast_compress_datum(old_value);
if (DatumGetPointer(new_value) != NULL)
{
/* successful compression */
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_values[i] = new_value;
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
- need_change = true;
- need_free = true;
+ toast_values[i] = new_value;
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ need_change = true;
+ need_free = true;
}
else
{
- /* incompressible data, ignore on subsequent compression passes */
+
+ /*
+ * incompressible data, ignore on subsequent compression
+ * passes
+ */
toast_action[i] = 'x';
}
}
@@ -459,11 +463,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
+ maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
/* ----------
* Search for the biggest yet inlined attribute with
@@ -481,7 +485,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -492,21 +496,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Store this external
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- toast_action[i] = 'p';
- toast_values[i] = toast_save_datum(rel,
- newtup->t_data->t_oid,
- i + 1,
- toast_values[i]);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ toast_action[i] = 'p';
+ toast_values[i] = toast_save_datum(rel,
+ newtup->t_data->t_oid,
+ i + 1,
+ toast_values[i]);
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
need_change = true;
- need_free = true;
+ need_free = true;
}
/* ----------
@@ -515,12 +519,12 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen)
+ maxDataLen)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
- Datum new_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
+ Datum new_value;
/* ----------
* Search for the biggest yet uncompressed internal attribute
@@ -537,7 +541,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -548,24 +552,28 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Attempt to compress it inline
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- new_value = toast_compress_datum(old_value);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ new_value = toast_compress_datum(old_value);
if (DatumGetPointer(new_value) != NULL)
{
/* successful compression */
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_values[i] = new_value;
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
- need_change = true;
- need_free = true;
+ toast_values[i] = new_value;
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ need_change = true;
+ need_free = true;
}
else
{
- /* incompressible data, ignore on subsequent compression passes */
+
+ /*
+ * incompressible data, ignore on subsequent compression
+ * passes
+ */
toast_action[i] = 'x';
}
}
@@ -575,11 +583,11 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
while (MAXALIGN(ComputeDataSize(tupleDesc, toast_values, toast_nulls)) >
- maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
+ maxDataLen && rel->rd_rel->reltoastrelid != InvalidOid)
{
- int biggest_attno = -1;
- int32 biggest_size = MAXALIGN(sizeof(varattrib));
- Datum old_value;
+ int biggest_attno = -1;
+ int32 biggest_size = MAXALIGN(sizeof(varattrib));
+ Datum old_value;
/* ----------
* Search for the biggest yet inlined attribute with
@@ -597,7 +605,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
if (toast_sizes[i] > biggest_size)
{
biggest_attno = i;
- biggest_size = toast_sizes[i];
+ biggest_size = toast_sizes[i];
}
}
@@ -608,21 +616,21 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* Store this external
* ----------
*/
- i = biggest_attno;
- old_value = toast_values[i];
- toast_action[i] = 'p';
- toast_values[i] = toast_save_datum(rel,
- newtup->t_data->t_oid,
- i + 1,
- toast_values[i]);
+ i = biggest_attno;
+ old_value = toast_values[i];
+ toast_action[i] = 'p';
+ toast_values[i] = toast_save_datum(rel,
+ newtup->t_data->t_oid,
+ i + 1,
+ toast_values[i]);
if (toast_free[i])
pfree(DatumGetPointer(old_value));
- toast_free[i] = true;
- toast_sizes[i] = VARATT_SIZE(toast_values[i]);
+ toast_free[i] = true;
+ toast_sizes[i] = VARATT_SIZE(toast_values[i]);
need_change = true;
- need_free = true;
+ need_free = true;
}
/* ----------
@@ -632,10 +640,10 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
*/
if (need_change)
{
- char *new_data;
- int32 new_len;
- MemoryContext oldcxt;
- HeapTupleHeader olddata;
+ char *new_data;
+ int32 new_len;
+ MemoryContext oldcxt;
+ HeapTupleHeader olddata;
/* ----------
* Calculate the new size of the tuple
@@ -662,24 +670,24 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
* ----------
*/
memcpy(new_data, newtup->t_data, newtup->t_data->t_hoff);
- newtup->t_data = (HeapTupleHeader)new_data;
+ newtup->t_data = (HeapTupleHeader) new_data;
newtup->t_len = new_len;
- DataFill((char *)(MAXALIGN((long)new_data +
- offsetof(HeapTupleHeaderData, t_bits) +
- ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
- tupleDesc,
- toast_values,
- toast_nulls,
- &(newtup->t_data->t_infomask),
- has_nulls ? newtup->t_data->t_bits : NULL);
+ DataFill((char *) (MAXALIGN((long) new_data +
+ offsetof(HeapTupleHeaderData, t_bits) +
+ ((has_nulls) ? BITMAPLEN(numAttrs) : 0))),
+ tupleDesc,
+ toast_values,
+ toast_nulls,
+ &(newtup->t_data->t_infomask),
+ has_nulls ? newtup->t_data->t_bits : NULL);
/* ----------
* In the case we modified a previously modified tuple again,
* free the memory from the previous run
* ----------
*/
- if ((char *)olddata != ((char *)newtup + HEAPTUPLESIZE))
+ if ((char *) olddata != ((char *) newtup + HEAPTUPLESIZE))
pfree(olddata);
/* ----------
@@ -723,7 +731,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
Datum
toast_compress_datum(Datum value)
{
- varattrib *tmp;
+ varattrib *tmp;
tmp = (varattrib *) palloc(sizeof(PGLZ_Header) + VARATT_SIZE(value));
pglz_compress(VARATT_DATA(value), VARATT_SIZE(value) - VARHDRSZ,
@@ -754,45 +762,45 @@ toast_compress_datum(Datum value)
static Datum
toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
{
- Relation toastrel;
- Relation toastidx;
- HeapTuple toasttup;
- InsertIndexResult idxres;
- TupleDesc toasttupDesc;
- Datum t_values[3];
- char t_nulls[3];
- varattrib *result;
- char chunk_data[VARHDRSZ + TOAST_MAX_CHUNK_SIZE];
- int32 chunk_size;
- int32 chunk_seq = 0;
- char *data_p;
- int32 data_todo;
+ Relation toastrel;
+ Relation toastidx;
+ HeapTuple toasttup;
+ InsertIndexResult idxres;
+ TupleDesc toasttupDesc;
+ Datum t_values[3];
+ char t_nulls[3];
+ varattrib *result;
+ char chunk_data[VARHDRSZ + TOAST_MAX_CHUNK_SIZE];
+ int32 chunk_size;
+ int32 chunk_seq = 0;
+ char *data_p;
+ int32 data_todo;
/* ----------
* Create the varattrib reference
* ----------
*/
- result = (varattrib *)palloc(sizeof(varattrib));
+ result = (varattrib *) palloc(sizeof(varattrib));
- result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
+ result->va_header = sizeof(varattrib) | VARATT_FLAG_EXTERNAL;
if (VARATT_IS_COMPRESSED(value))
{
result->va_header |= VARATT_FLAG_COMPRESSED;
- result->va_content.va_external.va_rawsize =
- ((varattrib *)value)->va_content.va_compressed.va_rawsize;
+ result->va_content.va_external.va_rawsize =
+ ((varattrib *) value)->va_content.va_compressed.va_rawsize;
}
else
result->va_content.va_external.va_rawsize = VARATT_SIZE(value);
-
- result->va_content.va_external.va_extsize =
- VARATT_SIZE(value) - VARHDRSZ;
- result->va_content.va_external.va_valueid = newoid();
- result->va_content.va_external.va_toastrelid =
- rel->rd_rel->reltoastrelid;
- result->va_content.va_external.va_toastidxid =
- rel->rd_rel->reltoastidxid;
- result->va_content.va_external.va_rowid = mainoid;
- result->va_content.va_external.va_attno = attno;
+
+ result->va_content.va_external.va_extsize =
+ VARATT_SIZE(value) - VARHDRSZ;
+ result->va_content.va_external.va_valueid = newoid();
+ result->va_content.va_external.va_toastrelid =
+ rel->rd_rel->reltoastrelid;
+ result->va_content.va_external.va_toastidxid =
+ rel->rd_rel->reltoastidxid;
+ result->va_content.va_external.va_rowid = mainoid;
+ result->va_content.va_external.va_attno = attno;
/* ----------
* Initialize constant parts of the tuple data
@@ -808,8 +816,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
* Get the data to process
* ----------
*/
- data_p = VARATT_DATA(value);
- data_todo = VARATT_SIZE(value) - VARHDRSZ;
+ data_p = VARATT_DATA(value);
+ data_todo = VARATT_SIZE(value) - VARHDRSZ;
/* ----------
* Open the toast relation
@@ -818,9 +826,9 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
toastrel = heap_open(rel->rd_rel->reltoastrelid, RowExclusiveLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(rel->rd_rel->reltoastidxid);
-
+
/* ----------
- * Split up the item into chunks
+ * Split up the item into chunks
* ----------
*/
while (data_todo > 0)
@@ -848,8 +856,8 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
*/
heap_insert(toastrel, toasttup);
idxres = index_insert(toastidx, t_values, t_nulls,
- &(toasttup->t_self),
- toastrel);
+ &(toasttup->t_self),
+ toastrel);
if (idxres == NULL)
elog(ERROR, "Failed to insert index entry for TOAST tuple");
@@ -888,14 +896,14 @@ toast_save_datum(Relation rel, Oid mainoid, int16 attno, Datum value)
static void
toast_delete_datum(Relation rel, Datum value)
{
- register varattrib *attr = (varattrib *)value;
- Relation toastrel;
- Relation toastidx;
- ScanKeyData toastkey;
- IndexScanDesc toastscan;
- HeapTupleData toasttup;
- RetrieveIndexResult indexRes;
- Buffer buffer;
+ register varattrib *attr = (varattrib *) value;
+ Relation toastrel;
+ Relation toastidx;
+ ScanKeyData toastkey;
+ IndexScanDesc toastscan;
+ HeapTupleData toasttup;
+ RetrieveIndexResult indexRes;
+ Buffer buffer;
if (!VARATT_IS_EXTERNAL(attr))
return;
@@ -904,8 +912,8 @@ toast_delete_datum(Relation rel, Datum value)
* Open the toast relation and it's index
* ----------
*/
- toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
- RowExclusiveLock);
+ toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
+ RowExclusiveLock);
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
/* ----------
@@ -913,10 +921,10 @@ toast_delete_datum(Relation rel, Datum value)
* ----------
*/
ScanKeyEntryInitialize(&toastkey,
- (bits16) 0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ (bits16) 0,
+ (AttrNumber) 1,
+ (RegProcedure) F_OIDEQ,
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/* ----------
* Read the chunks by index
@@ -961,36 +969,36 @@ toast_delete_datum(Relation rel, Datum value)
static varattrib *
toast_fetch_datum(varattrib *attr)
{
- Relation toastrel;
- Relation toastidx;
- ScanKeyData toastkey;
- IndexScanDesc toastscan;
- HeapTupleData toasttup;
- HeapTuple ttup;
- TupleDesc toasttupDesc;
- RetrieveIndexResult indexRes;
- Buffer buffer;
-
- varattrib *result;
- int32 ressize;
- int32 residx;
- int numchunks;
- Pointer chunk;
- bool isnull;
- int32 chunksize;
-
- char *chunks_found;
- char *chunks_expected;
+ Relation toastrel;
+ Relation toastidx;
+ ScanKeyData toastkey;
+ IndexScanDesc toastscan;
+ HeapTupleData toasttup;
+ HeapTuple ttup;
+ TupleDesc toasttupDesc;
+ RetrieveIndexResult indexRes;
+ Buffer buffer;
+
+ varattrib *result;
+ int32 ressize;
+ int32 residx;
+ int numchunks;
+ Pointer chunk;
+ bool isnull;
+ int32 chunksize;
+
+ char *chunks_found;
+ char *chunks_expected;
ressize = attr->va_content.va_external.va_extsize;
- numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
+ numchunks = ((ressize - 1) / TOAST_MAX_CHUNK_SIZE) + 1;
- chunks_found = palloc(numchunks);
+ chunks_found = palloc(numchunks);
chunks_expected = palloc(numchunks);
- memset(chunks_found, 0, numchunks);
+ memset(chunks_found, 0, numchunks);
memset(chunks_expected, 1, numchunks);
- result = (varattrib *)palloc(ressize + VARHDRSZ);
+ result = (varattrib *) palloc(ressize + VARHDRSZ);
VARATT_SIZEP(result) = ressize + VARHDRSZ;
if (VARATT_IS_COMPRESSED(attr))
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
@@ -999,8 +1007,8 @@ toast_fetch_datum(varattrib *attr)
* Open the toast relation and its index
* ----------
*/
- toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
- AccessShareLock);
+ toastrel = heap_open(attr->va_content.va_external.va_toastrelid,
+ AccessShareLock);
toasttupDesc = toastrel->rd_att;
toastidx = index_open(attr->va_content.va_external.va_toastidxid);
@@ -1009,10 +1017,10 @@ toast_fetch_datum(varattrib *attr)
* ----------
*/
ScanKeyEntryInitialize(&toastkey,
- (bits16) 0,
- (AttrNumber) 1,
- (RegProcedure) F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ (bits16) 0,
+ (AttrNumber) 1,
+ (RegProcedure) F_OIDEQ,
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/* ----------
* Read the chunks by index
@@ -1049,7 +1057,7 @@ toast_fetch_datum(varattrib *attr)
elog(ERROR, "unexpected chunk number %d for toast value %d",
residx,
attr->va_content.va_external.va_valueid);
- if (residx < numchunks-1)
+ if (residx < numchunks - 1)
{
if (chunksize != TOAST_MAX_CHUNK_SIZE)
elog(ERROR, "unexpected chunk size %d in chunk %d for toast value %d",
@@ -1072,7 +1080,7 @@ toast_fetch_datum(varattrib *attr)
* Copy the data into proper place in our result
* ----------
*/
- memcpy(((char *)VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
+ memcpy(((char *) VARATT_DATA(result)) + residx * TOAST_MAX_CHUNK_SIZE,
VARATT_DATA(chunk),
chunksize);
@@ -1085,7 +1093,7 @@ toast_fetch_datum(varattrib *attr)
*/
if (memcmp(chunks_found, chunks_expected, numchunks) != 0)
elog(ERROR, "not all toast chunks found for value %d",
- attr->va_content.va_external.va_valueid);
+ attr->va_content.va_external.va_valueid);
pfree(chunks_expected);
pfree(chunks_found);
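For orientation, a minimal standalone sketch (not part of the patch) of the chunk arithmetic that toast_save_datum and toast_fetch_datum above rely on: the chunk count is a ceiling division by the maximum chunk size, every chunk except the last must be exactly that size, and the last chunk carries the remainder. The chunk-size constant and the datum size below are invented stand-ins for TOAST_MAX_CHUNK_SIZE and a real value.

#include <stdio.h>

#define SKETCH_MAX_CHUNK_SIZE 2000	/* stand-in for TOAST_MAX_CHUNK_SIZE */

static int
expected_chunk_size(int ressize, int residx, int numchunks)
{
	if (residx < numchunks - 1)
		return SKETCH_MAX_CHUNK_SIZE;	/* interior chunk: full size */
	return ressize - residx * SKETCH_MAX_CHUNK_SIZE;	/* last chunk: remainder */
}

int
main(void)
{
	int		ressize = 4500;		/* external (uncompressed) value size */
	int		numchunks = ((ressize - 1) / SKETCH_MAX_CHUNK_SIZE) + 1;
	int		residx;

	for (residx = 0; residx < numchunks; residx++)
		printf("chunk %d: %d bytes, copied at offset %d\n",
			   residx,
			   expected_chunk_size(ressize, residx, numchunks),
			   residx * SKETCH_MAX_CHUNK_SIZE);
	return 0;
}

The same arithmetic is what lets toast_fetch_datum size its chunks_found and chunks_expected byte maps up front and detect a missing chunk with a single memcmp.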
diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c
index 1cc2c42c01c..3b016392068 100644
--- a/src/backend/access/index/istrat.c
+++ b/src/backend/access/index/istrat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.48 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.49 2001/03/22 03:59:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -239,8 +239,8 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE:
- result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
- left, right));
+ result = !DatumGetBool(FunctionCall2(&entry->sk_func,
+ left, right));
break;
case SK_COMMUTE:
@@ -249,8 +249,8 @@ StrategyTermEvaluate(StrategyTerm term,
break;
case SK_NEGATE | SK_COMMUTE:
- result = ! DatumGetBool(FunctionCall2(&entry->sk_func,
- right, left));
+ result = !DatumGetBool(FunctionCall2(&entry->sk_func,
+ right, left));
break;
default:
@@ -263,6 +263,7 @@ StrategyTermEvaluate(StrategyTerm term,
return result;
}
+
#endif
/* ----------------
@@ -465,6 +466,7 @@ RelationInvokeStrategy(Relation relation,
}
+
#endif
/* ----------------
@@ -519,7 +521,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
if (!RegProcedureIsValid(entry->sk_procedure))
elog(ERROR,
- "OperatorRelationFillScanKeyEntry: no procedure for operator %u",
+ "OperatorRelationFillScanKeyEntry: no procedure for operator %u",
operatorObjectId);
fmgr_info(entry->sk_procedure, &entry->sk_func);
@@ -597,9 +599,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
}
if (cachesearch)
- {
ReleaseSysCache(tuple);
- }
else
{
heap_endscan(scan);
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 435a7f72dde..fc85906d9b2 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.40 2001/01/24 19:42:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.41 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
*
@@ -150,8 +150,8 @@ btoidvectorcmp(PG_FUNCTION_ARGS)
Datum
btabstimecmp(PG_FUNCTION_ARGS)
{
- AbsoluteTime a = PG_GETARG_ABSOLUTETIME(0);
- AbsoluteTime b = PG_GETARG_ABSOLUTETIME(1);
+ AbsoluteTime a = PG_GETARG_ABSOLUTETIME(0);
+ AbsoluteTime b = PG_GETARG_ABSOLUTETIME(1);
if (AbsoluteTimeIsBefore(a, b))
PG_RETURN_INT32(-1);
@@ -236,9 +236,10 @@ bttextcmp(PG_FUNCTION_ARGS)
if (res == 0 && VARSIZE(a) != VARSIZE(b))
{
+
/*
- * The two strings are the same in the first len bytes,
- * and they are of different lengths.
+ * The two strings are the same in the first len bytes, and they
+ * are of different lengths.
*/
if (VARSIZE(a) < VARSIZE(b))
res = -1;
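The tie-break in the hunk above can be shown in isolation. A minimal sketch, not part of the patch, assuming a plain byte-wise comparison (the real bttextcmp may also be locale-aware): compare the common prefix, and if it matches, the shorter value sorts first.

#include <stdio.h>
#include <string.h>

static int
text_cmp_sketch(const char *a, size_t alen, const char *b, size_t blen)
{
	size_t	len = (alen < blen) ? alen : blen;
	int		res = memcmp(a, b, len);	/* compare the common prefix only */

	if (res == 0 && alen != blen)
		res = (alen < blen) ? -1 : 1;	/* equal prefix: shorter sorts first */
	return res;
}

int
main(void)
{
	printf("%d\n", text_cmp_sketch("abc", 3, "abcd", 4));	/* < 0 */
	printf("%d\n", text_cmp_sketch("abd", 3, "abcd", 4));	/* > 0 */
	return 0;
}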
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 325e585e3cc..f2112de6777 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.81 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.82 2001/03/22 03:59:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,23 +23,23 @@
typedef struct
{
/* context data for _bt_checksplitloc */
- Size newitemsz; /* size of new item to be inserted */
- bool non_leaf; /* T if splitting an internal node */
+ Size newitemsz; /* size of new item to be inserted */
+ bool non_leaf; /* T if splitting an internal node */
- bool have_split; /* found a valid split? */
+ bool have_split; /* found a valid split? */
/* these fields valid only if have_split is true */
- bool newitemonleft; /* new item on left or right of best split */
+ bool newitemonleft; /* new item on left or right of best split */
OffsetNumber firstright; /* best split point */
- int best_delta; /* best size delta so far */
+ int best_delta; /* best size delta so far */
} FindSplitData;
extern bool FixBTree;
-Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
+Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
static void _bt_fixtree(Relation rel, BlockNumber blkno);
-static void _bt_fixbranch(Relation rel, BlockNumber lblkno,
- BlockNumber rblkno, BTStack true_stack);
+static void _bt_fixbranch(Relation rel, BlockNumber lblkno,
+ BlockNumber rblkno, BTStack true_stack);
static void _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit);
static void _bt_fixup(Relation rel, Buffer buf);
static OffsetNumber _bt_getoff(Page page, BlockNumber blkno);
@@ -47,34 +47,34 @@ static OffsetNumber _bt_getoff(Page page, BlockNumber blkno);
static Buffer _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
static TransactionId _bt_check_unique(Relation rel, BTItem btitem,
- Relation heapRel, Buffer buf,
- ScanKey itup_scankey);
+ Relation heapRel, Buffer buf,
+ ScanKey itup_scankey);
static InsertIndexResult _bt_insertonpg(Relation rel, Buffer buf,
- BTStack stack,
- int keysz, ScanKey scankey,
- BTItem btitem,
- OffsetNumber afteritem);
-static void _bt_insertuple(Relation rel, Buffer buf,
- Size itemsz, BTItem btitem, OffsetNumber newitemoff);
+ BTStack stack,
+ int keysz, ScanKey scankey,
+ BTItem btitem,
+ OffsetNumber afteritem);
+static void _bt_insertuple(Relation rel, Buffer buf,
+ Size itemsz, BTItem btitem, OffsetNumber newitemoff);
static Buffer _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
- OffsetNumber newitemoff, Size newitemsz,
- BTItem newitem, bool newitemonleft,
- OffsetNumber *itup_off, BlockNumber *itup_blkno);
+ OffsetNumber newitemoff, Size newitemsz,
+ BTItem newitem, bool newitemonleft,
+ OffsetNumber *itup_off, BlockNumber *itup_blkno);
static OffsetNumber _bt_findsplitloc(Relation rel, Page page,
- OffsetNumber newitemoff,
- Size newitemsz,
- bool *newitemonleft);
+ OffsetNumber newitemoff,
+ Size newitemsz,
+ bool *newitemonleft);
static void _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
- int leftfree, int rightfree,
- bool newitemonleft, Size firstrightitemsz);
+ int leftfree, int rightfree,
+ bool newitemonleft, Size firstrightitemsz);
static Buffer _bt_getstackbuf(Relation rel, BTStack stack, int access);
static void _bt_pgaddtup(Relation rel, Page page,
- Size itemsize, BTItem btitem,
- OffsetNumber itup_off, const char *where);
+ Size itemsize, BTItem btitem,
+ OffsetNumber itup_off, const char *where);
static bool _bt_isequal(TupleDesc itupdesc, Page page, OffsetNumber offnum,
- int keysz, ScanKey scankey);
+ int keysz, ScanKey scankey);
-static Relation _xlheapRel; /* temporary hack */
+static Relation _xlheapRel; /* temporary hack */
/*
* _bt_doinsert() -- Handle insertion of a single btitem in the tree.
@@ -114,8 +114,8 @@ top:
buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE);
/*
- * If we're not allowing duplicates, make sure the key isn't
- * already in the index. XXX this belongs somewhere else, likely
+ * If we're not allowing duplicates, make sure the key isn't already
+ * in the index. XXX this belongs somewhere else, likely
*/
if (index_is_unique)
{
@@ -134,7 +134,7 @@ top:
}
}
- _xlheapRel = heapRel; /* temporary hack */
+ _xlheapRel = heapRel; /* temporary hack */
/* do the insertion */
res = _bt_insertonpg(rel, buf, stack, natts, itup_scankey, btitem, 0);
@@ -150,7 +150,7 @@ top:
* _bt_check_unique() -- Check for violation of unique index constraint
*
* Returns NullTransactionId if there is no conflict, else an xact ID we
- * must wait for to see if it commits a conflicting tuple. If an actual
+ * must wait for to see if it commits a conflicting tuple. If an actual
* conflict is detected, no return --- just elog().
*/
static TransactionId
@@ -171,8 +171,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
maxoff = PageGetMaxOffsetNumber(page);
/*
- * Find first item >= proposed new item. Note we could also get
- * a pointer to end-of-page here.
+ * Find first item >= proposed new item. Note we could also get a
+ * pointer to end-of-page here.
*/
offset = _bt_binsrch(rel, buf, natts, itup_scankey);
@@ -187,24 +187,24 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
BlockNumber nblkno;
/*
- * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
- * how we handling NULLs - and so we must not use _bt_compare
- * in real comparison, but only for ordering/finding items on
- * pages. - vadim 03/24/97
+ * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this is how we
+ * handle NULLs - and so we must not use _bt_compare in real
+ * comparison, but only for ordering/finding items on pages. -
+ * vadim 03/24/97
*
- * make sure the offset points to an actual key
- * before trying to compare it...
+ * make sure the offset points to an actual key before trying to
+ * compare it...
*/
if (offset <= maxoff)
{
- if (! _bt_isequal(itupdesc, page, offset, natts, itup_scankey))
+ if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
break; /* we're past all the equal tuples */
/*
- * Have to check is inserted heap tuple deleted one (i.e.
- * just moved to another place by vacuum)! We only need to
- * do this once, but don't want to do it at all unless
- * we see equal tuples, so as not to slow down unequal case.
+ * Have to check whether the inserted heap tuple is a deleted one
+ * (i.e. just moved to another place by vacuum)! We only need to do
+ * this once, but don't want to do it at all unless we see equal
+ * tuples, so as not to slow down the unequal case.
*/
if (chtup)
{
@@ -220,11 +220,11 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
cbti = (BTItem) PageGetItem(page, PageGetItemId(page, offset));
htup.t_self = cbti->bti_itup.t_tid;
heap_fetch(heapRel, SnapshotDirty, &htup, &buffer);
- if (htup.t_data != NULL) /* it is a duplicate */
+ if (htup.t_data != NULL) /* it is a duplicate */
{
TransactionId xwait =
- (TransactionIdIsValid(SnapshotDirty->xmin)) ?
- SnapshotDirty->xmin : SnapshotDirty->xmax;
+ (TransactionIdIsValid(SnapshotDirty->xmin)) ?
+ SnapshotDirty->xmin : SnapshotDirty->xmax;
/*
* If this tuple is being updated by other transaction
@@ -238,6 +238,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/* Tell _bt_doinsert to wait... */
return xwait;
}
+
/*
* Otherwise we have a definite conflict.
*/
@@ -304,7 +305,7 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
* NOTE: if the new key is equal to one or more existing keys, we can
* legitimately place it anywhere in the series of equal keys --- in fact,
* if the new key is equal to the page's "high key" we can place it on
- * the next page. If it is equal to the high key, and there's not room
+ * the next page. If it is equal to the high key, and there's not room
* to insert the new tuple on the current page without splitting, then
* we can move right hoping to find more free space and avoid a split.
* (We should not move right indefinitely, however, since that leads to
@@ -358,16 +359,14 @@ _bt_insertonpg(Relation rel,
*/
if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
- (unsigned long)itemsz,
- (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
+ (unsigned long) itemsz,
+ (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
/*
* Determine exactly where new item will go.
*/
if (afteritem > 0)
- {
newitemoff = afteritem + 1;
- }
else
{
/*----------
@@ -383,12 +382,12 @@ _bt_insertonpg(Relation rel,
* on every insert. We implement "get tired" as a random choice,
* since stopping after scanning a fixed number of pages wouldn't work
* well (we'd never reach the right-hand side of previously split
- * pages). Currently the probability of moving right is set at 0.99,
+ * pages). Currently the probability of moving right is set at 0.99,
* which may seem too high to change the behavior much, but it does an
* excellent job of preventing O(N^2) behavior with many equal keys.
*----------
*/
- bool movedright = false;
+ bool movedright = false;
while (PageGetFreeSpace(page) < itemsz &&
!P_RIGHTMOST(lpageop) &&
@@ -396,7 +395,7 @@ _bt_insertonpg(Relation rel,
random() > (MAX_RANDOM_VALUE / 100))
{
/* step right one page */
- BlockNumber rblkno = lpageop->btpo_next;
+ BlockNumber rblkno = lpageop->btpo_next;
_bt_relbuf(rel, buf, BT_WRITE);
buf = _bt_getbuf(rel, rblkno, BT_WRITE);
@@ -404,10 +403,11 @@ _bt_insertonpg(Relation rel,
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
movedright = true;
}
+
/*
- * Now we are on the right page, so find the insert position.
- * If we moved right at all, we know we should insert at the
- * start of the page, else must find the position by searching.
+ * Now we are on the right page, so find the insert position. If
+ * we moved right at all, we know we should insert at the start of
+ * the page, else must find the position by searching.
*/
if (movedright)
newitemoff = P_FIRSTDATAKEY(lpageop);
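A minimal standalone sketch, not part of the patch, of the "get tired" policy described in the comment above: while the page lacks room and the new key equals the high key, step right with probability 0.99, implemented as random() > MAX_RANDOM_VALUE / 100. The page predicates below are hypothetical stand-ins for PageGetFreeSpace, P_RIGHTMOST and the high-key comparison.

#include <stdio.h>
#include <stdlib.h>

#define SKETCH_MAX_RANDOM_VALUE 0x7FFFFFFF	/* stand-in for MAX_RANDOM_VALUE */

static int page_has_room(int page)       { return page >= 3; }	/* invented */
static int page_is_rightmost(int page)   { return page == 5; }	/* invented */
static int key_equals_high_key(int page) { return 1; }			/* invented */

int
main(void)
{
	int		page = 0;
	int		movedright = 0;

	while (!page_has_room(page) &&
		   !page_is_rightmost(page) &&
		   key_equals_high_key(page) &&
		   random() > (SKETCH_MAX_RANDOM_VALUE / 100))
	{
		page++;					/* step right one page */
		movedright = 1;
	}
	printf("stopped on page %d, movedright=%d\n", page, movedright);
	return 0;
}

Each step continues with probability of roughly 0.99, so a run of previously split pages is almost always walked to its right-hand end, while the random give-up keeps the scan from becoming an unbounded deterministic walk.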
@@ -418,9 +418,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its
- * result, so this comparison is correct even though we appear to
- * be accounting only for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
+ * so this comparison is correct even though we appear to be
+ * accounting only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@@ -468,7 +468,7 @@ _bt_insertonpg(Relation rel,
if (is_root)
{
- Buffer rootbuf;
+ Buffer rootbuf;
Assert(stack == (BTStack) NULL);
/* create a new root node and release the split buffers */
@@ -481,7 +481,7 @@ _bt_insertonpg(Relation rel,
{
InsertIndexResult newres;
BTItem new_item;
- BTStackData fakestack;
+ BTStackData fakestack;
BTItem ritem;
Buffer pbuf;
@@ -489,10 +489,11 @@ _bt_insertonpg(Relation rel,
if (stack == (BTStack) NULL)
{
elog(DEBUG, "btree: concurrent ROOT page split");
+
/*
* If root page splitter failed to create new root page
- * then old root' btpo_parent still points to metapage.
- * We have to fix root page in this case.
+ * then the old root's btpo_parent still points to metapage. We
+ * have to fix the root page in this case.
*/
if (BTreeInvalidParent(lpageop))
{
@@ -531,9 +532,9 @@ _bt_insertonpg(Relation rel,
* item! We want to find parent pointing to where we are,
* right ? - vadim 05/27/97
*
- * Interestingly, this means we didn't *really* need to stack
- * the parent key at all; all we really care about is the
- * saved block and offset as a starting point for our search...
+ * Interestingly, this means we didn't *really* need to stack the
+ * parent key at all; all we really care about is the saved
+ * block and offset as a starting point for our search...
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -583,25 +584,26 @@ formres:;
}
static void
-_bt_insertuple(Relation rel, Buffer buf,
- Size itemsz, BTItem btitem, OffsetNumber newitemoff)
+_bt_insertuple(Relation rel, Buffer buf,
+ Size itemsz, BTItem btitem, OffsetNumber newitemoff)
{
- Page page = BufferGetPage(buf);
- BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
+ Page page = BufferGetPage(buf);
+ BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
START_CRIT_SECTION();
_bt_pgaddtup(rel, page, itemsz, btitem, newitemoff, "page");
/* XLOG stuff */
{
- xl_btree_insert xlrec;
- uint8 flag = XLOG_BTREE_INSERT;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
- BTItemData truncitem;
- xlrec.target.node = rel->rd_node;
+ xl_btree_insert xlrec;
+ uint8 flag = XLOG_BTREE_INSERT;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
+ BTItemData truncitem;
+
+ xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), BufferGetBlockNumber(buf), newitemoff);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeInsert;
rdata[0].next = &(rdata[1]);
@@ -610,14 +612,14 @@ _bt_insertuple(Relation rel, Buffer buf,
{
truncitem = *btitem;
truncitem.bti_itup.t_info = sizeof(BTItemData);
- rdata[1].data = (char*)&truncitem;
+ rdata[1].data = (char *) &truncitem;
rdata[1].len = sizeof(BTItemData);
}
else
{
- rdata[1].data = (char*)btitem;
- rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
- (sizeof(BTItemData) - sizeof(IndexTupleData));
+ rdata[1].data = (char *) btitem;
+ rdata[1].len = IndexTupleDSize(btitem->bti_itup) +
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
}
rdata[1].buffer = buf;
rdata[1].next = NULL;
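The XLOG blocks in this file all follow the same record-assembly pattern: a fixed-size header and a variable-size payload are linked through next pointers and handed to XLogInsert as one logical record. A minimal standalone sketch, not part of the patch, using a simplified stand-in for XLogRecData that keeps only the data/len/next fields used here.

#include <stdio.h>
#include <string.h>

typedef struct SketchRecData
{
	char	   *data;			/* start of one piece of the record */
	unsigned int len;			/* length of that piece */
	struct SketchRecData *next;	/* next piece, or NULL */
} SketchRecData;

static void
sketch_insert(const SketchRecData *rdata)
{
	unsigned int total = 0;

	for (; rdata != NULL; rdata = rdata->next)
		total += rdata->len;	/* a real insert would copy each piece */
	printf("assembled a logical record of %u bytes\n", total);
}

int
main(void)
{
	char		header[8];
	char		payload[] = "index tuple body";
	SketchRecData rdata[2];

	memset(header, 0, sizeof(header));

	rdata[0].data = header;				/* fixed-size record header */
	rdata[0].len = sizeof(header);
	rdata[0].next = &rdata[1];

	rdata[1].data = payload;			/* variable-size payload */
	rdata[1].len = (unsigned int) sizeof(payload);
	rdata[1].next = NULL;

	sketch_insert(rdata);
	return 0;
}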
@@ -700,8 +702,8 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
/*
* If the page we're splitting is not the rightmost page at its level
- * in the tree, then the first entry on the page is the high key
- * for the page. We need to copy that to the right half. Otherwise
+ * in the tree, then the first entry on the page is the high key for
+ * the page. We need to copy that to the right half. Otherwise
* (meaning the rightmost page case), all the items on the right half
* will be user data.
*/
@@ -779,13 +781,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
if (i < firstright)
{
_bt_pgaddtup(rel, leftpage, itemsz, item, leftoff,
- "left sibling");
+ "left sibling");
leftoff = OffsetNumberNext(leftoff);
}
else
{
_bt_pgaddtup(rel, rightpage, itemsz, item, rightoff,
- "right sibling");
+ "right sibling");
rightoff = OffsetNumberNext(rightoff);
}
}
@@ -812,11 +814,11 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
}
/*
- * We have to grab the right sibling (if any) and fix the prev
- * pointer there. We are guaranteed that this is deadlock-free
- * since no other writer will be holding a lock on that page
- * and trying to move left, and all readers release locks on a page
- * before trying to fetch its neighbors.
+ * We have to grab the right sibling (if any) and fix the prev pointer
+ * there. We are guaranteed that this is deadlock-free since no other
+ * writer will be holding a lock on that page and trying to move left,
+ * and all readers release locks on a page before trying to fetch its
+ * neighbors.
*/
if (!P_RIGHTMOST(ropaque))
@@ -834,12 +836,12 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*/
START_CRIT_SECTION();
{
- xl_btree_split xlrec;
- int flag = (newitemonleft) ?
- XLOG_BTREE_SPLEFT : XLOG_BTREE_SPLIT;
- BlockNumber blkno;
- XLogRecPtr recptr;
- XLogRecData rdata[4];
+ xl_btree_split xlrec;
+ int flag = (newitemonleft) ?
+ XLOG_BTREE_SPLEFT : XLOG_BTREE_SPLIT;
+ BlockNumber blkno;
+ XLogRecPtr recptr;
+ XLogRecData rdata[4];
xlrec.target.node = rel->rd_node;
ItemPointerSet(&(xlrec.target.tid), *itup_blkno, *itup_off);
@@ -856,31 +858,33 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
BlockIdSet(&(xlrec.parentblk), lopaque->btpo_parent);
BlockIdSet(&(xlrec.leftblk), lopaque->btpo_prev);
BlockIdSet(&(xlrec.rightblk), ropaque->btpo_next);
- /*
- * Dirrect access to page is not good but faster - we should
+
+ /*
+ * Direct access to the page is not good, but it is faster - we should
* implement some new func in page API.
*/
- xlrec.leftlen = ((PageHeader)leftpage)->pd_special -
- ((PageHeader)leftpage)->pd_upper;
+ xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
+ ((PageHeader) leftpage)->pd_upper;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeSplit;
rdata[0].next = &(rdata[1]);
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)leftpage + ((PageHeader)leftpage)->pd_upper;
+ rdata[1].data = (char *) leftpage + ((PageHeader) leftpage)->pd_upper;
rdata[1].len = xlrec.leftlen;
rdata[1].next = &(rdata[2]);
rdata[2].buffer = InvalidBuffer;
- rdata[2].data = (char*)rightpage + ((PageHeader)rightpage)->pd_upper;
- rdata[2].len = ((PageHeader)rightpage)->pd_special -
- ((PageHeader)rightpage)->pd_upper;
+ rdata[2].data = (char *) rightpage + ((PageHeader) rightpage)->pd_upper;
+ rdata[2].len = ((PageHeader) rightpage)->pd_special -
+ ((PageHeader) rightpage)->pd_upper;
rdata[2].next = NULL;
if (!P_RIGHTMOST(ropaque))
{
- BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
+ BTPageOpaque sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
+
sopaque->btpo_prev = BufferGetBlockNumber(rbuf);
rdata[2].next = &(rdata[3]);
@@ -942,7 +946,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
*
* We return the index of the first existing tuple that should go on the
* righthand page, plus a boolean indicating whether the new tuple goes on
- * the left or right page. The bool is necessary to disambiguate the case
+ * the left or right page. The bool is necessary to disambiguate the case
* where firstright == newitemoff.
*/
static OffsetNumber
@@ -968,23 +972,23 @@ _bt_findsplitloc(Relation rel,
/* Passed-in newitemsz is MAXALIGNED but does not include line pointer */
newitemsz += sizeof(ItemIdData);
state.newitemsz = newitemsz;
- state.non_leaf = ! P_ISLEAF(opaque);
+ state.non_leaf = !P_ISLEAF(opaque);
state.have_split = false;
/* Total free space available on a btree page, after fixed overhead */
leftspace = rightspace =
PageGetPageSize(page) - sizeof(PageHeaderData) -
MAXALIGN(sizeof(BTPageOpaqueData))
- + sizeof(ItemIdData);
+ +sizeof(ItemIdData);
/*
- * Finding the best possible split would require checking all the possible
- * split points, because of the high-key and left-key special cases.
- * That's probably more work than it's worth; instead, stop as soon as
- * we find a "good-enough" split, where good-enough is defined as an
- * imbalance in free space of no more than pagesize/16 (arbitrary...)
- * This should let us stop near the middle on most pages, instead of
- * plowing to the end.
+ * Finding the best possible split would require checking all the
+ * possible split points, because of the high-key and left-key special
+ * cases. That's probably more work than it's worth; instead, stop as
+ * soon as we find a "good-enough" split, where good-enough is defined
+ * as an imbalance in free space of no more than pagesize/16
+ * (arbitrary...) This should let us stop near the middle on most
+ * pages, instead of plowing to the end.
*/
goodenough = leftspace / 16;
@@ -1024,6 +1028,7 @@ _bt_findsplitloc(Relation rel,
*/
leftfree = leftspace - dataitemstoleft - (int) itemsz;
rightfree = rightspace - (dataitemtotal - dataitemstoleft);
+
/*
* Will the new item go to left or right of split?
*/
@@ -1051,10 +1056,10 @@ _bt_findsplitloc(Relation rel,
}
/*
- * I believe it is not possible to fail to find a feasible split,
- * but just in case ...
+ * I believe it is not possible to fail to find a feasible split, but
+ * just in case ...
*/
- if (! state.have_split)
+ if (!state.have_split)
elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
RelationGetRelationName(rel));
@@ -1071,6 +1076,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
int leftfree, int rightfree,
bool newitemonleft, Size firstrightitemsz)
{
+
/*
* Account for the new item on whichever side it is to be put.
*/
@@ -1078,19 +1084,21 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
leftfree -= (int) state->newitemsz;
else
rightfree -= (int) state->newitemsz;
+
/*
- * If we are not on the leaf level, we will be able to discard the
- * key data from the first item that winds up on the right page.
+ * If we are not on the leaf level, we will be able to discard the key
+ * data from the first item that winds up on the right page.
*/
if (state->non_leaf)
rightfree += (int) firstrightitemsz -
(int) (MAXALIGN(sizeof(BTItemData)) + sizeof(ItemIdData));
+
/*
* If feasible split point, remember best delta.
*/
if (leftfree >= 0 && rightfree >= 0)
{
- int delta = leftfree - rightfree;
+ int delta = leftfree - rightfree;
if (delta < 0)
delta = -delta;
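A minimal standalone sketch, not part of the patch, of the scoring that _bt_findsplitloc and _bt_checksplitloc above apply to each candidate split: the split is feasible only if both halves retain non-negative free space, the smallest imbalance seen so far is remembered, and the scan stops early once the imbalance is within the "good enough" threshold of one sixteenth of the usable page space. The byte counts are invented.

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	int		leftspace = 8000;	/* usable bytes per half, invented */
	int		goodenough = leftspace / 16;
	int		leftfree = 3900;	/* free space left of the candidate split */
	int		rightfree = 4100;	/* free space right of it */

	if (leftfree >= 0 && rightfree >= 0)
	{
		int		delta = abs(leftfree - rightfree);

		if (delta <= goodenough)
			printf("good enough: delta %d <= %d, stop scanning\n",
				   delta, goodenough);
		else
			printf("feasible but unbalanced: delta %d, keep looking\n", delta);
	}
	else
		printf("not feasible: one half cannot hold its items\n");
	return 0;
}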
@@ -1134,10 +1142,11 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
maxoff = PageGetMaxOffsetNumber(page);
start = stack->bts_offset;
+
/*
- * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the
- * case of concurrent ROOT page split. Also, watch out for
- * possibility that page has a high key now when it didn't before.
+ * _bt_insertonpg set bts_offset to InvalidOffsetNumber in the case of
+ * concurrent ROOT page split. Also, watch out for possibility that
+ * page has a high key now when it didn't before.
*/
if (start < P_FIRSTDATAKEY(opaque))
start = P_FIRSTDATAKEY(opaque);
@@ -1159,11 +1168,15 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
return buf;
}
}
- /* by here, the item we're looking for moved right at least one page */
+
+ /*
+ * by here, the item we're looking for moved right at least one
+ * page
+ */
if (P_RIGHTMOST(opaque))
{
_bt_relbuf(rel, buf, access);
- return(InvalidBuffer);
+ return (InvalidBuffer);
}
blkno = opaque->btpo_next;
@@ -1190,27 +1203,27 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
*
* On entry, lbuf (the old root) and rbuf (its new peer) are write-
* locked. On exit, a new root page exists with entries for the
- * two new children, metapage is updated and unlocked/unpinned.
- * The new root buffer is returned to caller which has to unlock/unpin
- * lbuf, rbuf & rootbuf.
+ * two new children, metapage is updated and unlocked/unpinned.
+ * The new root buffer is returned to caller which has to unlock/unpin
+ * lbuf, rbuf & rootbuf.
*/
static Buffer
_bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
{
- Buffer rootbuf;
- Page lpage,
- rpage,
- rootpage;
- BlockNumber lbkno,
- rbkno;
- BlockNumber rootblknum;
- BTPageOpaque rootopaque;
- ItemId itemid;
- BTItem item;
- Size itemsz;
- BTItem new_item;
- Buffer metabuf;
- Page metapg;
+ Buffer rootbuf;
+ Page lpage,
+ rpage,
+ rootpage;
+ BlockNumber lbkno,
+ rbkno;
+ BlockNumber rootblknum;
+ BTPageOpaque rootopaque;
+ ItemId itemid;
+ BTItem item;
+ Size itemsz;
+ BTItem new_item;
+ Buffer metabuf;
+ Page metapg;
BTMetaPageData *metad;
/* get a new root page */
@@ -1236,9 +1249,9 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
rpage = BufferGetPage(rbuf);
/*
- * Make sure pages in old root level have valid parent links --- we will
- * need this in _bt_insertonpg() if a concurrent root split happens (see
- * README).
+ * Make sure pages in old root level have valid parent links --- we
+ * will need this in _bt_insertonpg() if a concurrent root split
+ * happens (see README).
*/
((BTPageOpaque) PageGetSpecialPointer(lpage))->btpo_parent =
((BTPageOpaque) PageGetSpecialPointer(rpage))->btpo_parent =
@@ -1264,8 +1277,8 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
pfree(new_item);
/*
- * Create downlink item for right page. The key for it is obtained from
- * the "high key" position in the left page.
+ * Create downlink item for right page. The key for it is obtained
+ * from the "high key" position in the left page.
*/
itemid = PageGetItemId(lpage, P_HIKEY);
itemsz = ItemIdGetLength(itemid);
@@ -1285,26 +1298,26 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* XLOG stuff */
{
- xl_btree_newroot xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_btree_newroot xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.node = rel->rd_node;
xlrec.level = metad->btm_level;
BlockIdSet(&(xlrec.rootblk), rootblknum);
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeNewroot;
rdata[0].next = &(rdata[1]);
- /*
- * Dirrect access to page is not good but faster - we should
+ /*
+ * Direct access to the page is not good, but it is faster - we should
* implement some new func in page API.
*/
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)rootpage + ((PageHeader) rootpage)->pd_upper;
- rdata[1].len = ((PageHeader)rootpage)->pd_special -
- ((PageHeader)rootpage)->pd_upper;
+ rdata[1].data = (char *) rootpage + ((PageHeader) rootpage)->pd_upper;
+ rdata[1].len = ((PageHeader) rootpage)->pd_special -
+ ((PageHeader) rootpage)->pd_upper;
rdata[1].next = NULL;
recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT, rdata);
@@ -1325,7 +1338,7 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
/* write and let go of metapage buffer */
_bt_wrtbuf(rel, metabuf);
- return(rootbuf);
+ return (rootbuf);
}
/*
@@ -1339,24 +1352,31 @@ _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf)
Buffer
_bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
{
- Buffer rootbuf;
- BlockNumber rootblk;
- Page rootpage;
- XLogRecPtr rootLSN;
- Page oldrootpage = BufferGetPage(oldrootbuf);
- BTPageOpaque oldrootopaque = (BTPageOpaque)
- PageGetSpecialPointer(oldrootpage);
- Buffer buf, leftbuf, rightbuf;
- Page page, leftpage, rightpage;
- BTPageOpaque opaque, leftopaque, rightopaque;
- OffsetNumber newitemoff;
- BTItem btitem, ritem;
- Size itemsz;
-
- if (! P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
+ Buffer rootbuf;
+ BlockNumber rootblk;
+ Page rootpage;
+ XLogRecPtr rootLSN;
+ Page oldrootpage = BufferGetPage(oldrootbuf);
+ BTPageOpaque oldrootopaque = (BTPageOpaque)
+ PageGetSpecialPointer(oldrootpage);
+ Buffer buf,
+ leftbuf,
+ rightbuf;
+ Page page,
+ leftpage,
+ rightpage;
+ BTPageOpaque opaque,
+ leftopaque,
+ rightopaque;
+ OffsetNumber newitemoff;
+ BTItem btitem,
+ ritem;
+ Size itemsz;
+
+ if (!P_LEFTMOST(oldrootopaque) || P_RIGHTMOST(oldrootopaque))
elog(ERROR, "bt_fixroot: not valid old root page");
- /* Read right neighbor and create new root page*/
+ /* Read right neighbor and create new root page */
leftbuf = _bt_getbuf(rel, oldrootopaque->btpo_next, BT_WRITE);
leftpage = BufferGetPage(leftbuf);
leftopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage);
@@ -1377,26 +1397,26 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
*
* If concurrent process will split one of pages on this level then it
* will see either btpo_parent == metablock or btpo_parent == rootblk.
- * In first case it will give up its locks and walk to the leftmost page
- * (oldrootbuf) in _bt_fixup() - ie it will wait for us and let us
- * continue. In second case it will try to lock rootbuf keeping its locks
- * on buffers we already passed, also waiting for us. If we'll have to
- * unlock rootbuf (split it) and that process will have to split page
- * of new level we created (level of rootbuf) then it will wait while
- * we create upper level. Etc.
+ * In the first case it will give up its locks and walk to the leftmost
+ * page (oldrootbuf) in _bt_fixup() - i.e. it will wait for us and let
+ * us continue. In the second case it will try to lock rootbuf while
+ * keeping its locks on buffers we already passed, also waiting for us.
+ * If we have to unlock rootbuf (split it) and that process has to
+ * split a page of the new level we created (the level of rootbuf),
+ * then it will wait while we create the upper level. And so on.
*/
- while(! P_RIGHTMOST(leftopaque))
+ while (!P_RIGHTMOST(leftopaque))
{
rightbuf = _bt_getbuf(rel, leftopaque->btpo_next, BT_WRITE);
rightpage = BufferGetPage(rightbuf);
rightopaque = (BTPageOpaque) PageGetSpecialPointer(rightpage);
/*
- * Update LSN & StartUpID of child page buffer to ensure that
- * it will be written on disk after flushing log record for new
- * root creation. Unfortunately, for the moment (?) we do not
- * log this operation and so possibly break our rule to log entire
- * page content on first after checkpoint modification.
+ * Update LSN & StartUpID of the child page buffer to ensure that it
+ * will be written to disk after flushing the log record for new root
+ * creation. Unfortunately, for the moment (?) we do not log this
+ * operation and so possibly break our rule to log the entire page
+ * content on the first modification after a checkpoint.
*/
HOLD_INTERRUPTS();
rightopaque->btpo_parent = rootblk;
@@ -1416,17 +1436,17 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (PageGetFreeSpace(page) < itemsz)
{
- Buffer newbuf;
- OffsetNumber firstright;
- OffsetNumber itup_off;
- BlockNumber itup_blkno;
- bool newitemonleft;
+ Buffer newbuf;
+ OffsetNumber firstright;
+ OffsetNumber itup_off;
+ BlockNumber itup_blkno;
+ bool newitemonleft;
firstright = _bt_findsplitloc(rel, page,
- newitemoff, itemsz, &newitemonleft);
+ newitemoff, itemsz, &newitemonleft);
newbuf = _bt_split(rel, buf, firstright,
- newitemoff, itemsz, btitem, newitemonleft,
- &itup_off, &itup_blkno);
+ newitemoff, itemsz, btitem, newitemonleft,
+ &itup_off, &itup_blkno);
/* Keep lock on new "root" buffer ! */
if (buf != rootbuf)
_bt_relbuf(rel, buf, BT_WRITE);
@@ -1450,10 +1470,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
/*
* Here we hold locks on old root buffer, new root buffer we've
- * created with _bt_newroot() - rootbuf, - and buf we've used
- * for last insert ops - buf. If rootbuf != buf then we have to
- * create at least one more level. And if "release" is TRUE
- * then we give up oldrootbuf.
+ * created with _bt_newroot() - rootbuf, - and buf we've used for last
+ * insert ops - buf. If rootbuf != buf then we have to create at least
+ * one more level. And if "release" is TRUE then we give up
+ * oldrootbuf.
*/
if (release)
_bt_wrtbuf(rel, oldrootbuf);
@@ -1461,10 +1481,10 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
if (rootbuf != buf)
{
_bt_wrtbuf(rel, buf);
- return(_bt_fixroot(rel, rootbuf, true));
+ return (_bt_fixroot(rel, rootbuf, true));
}
- return(rootbuf);
+ return (rootbuf);
}
/*
@@ -1474,17 +1494,17 @@ _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release)
static void
_bt_fixtree(Relation rel, BlockNumber blkno)
{
- Buffer buf;
- Page page;
- BTPageOpaque opaque;
- BlockNumber pblkno;
+ Buffer buf;
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber pblkno;
- for ( ; ; )
+ for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- if (! P_LEFTMOST(opaque) || P_ISLEAF(opaque))
+ if (!P_LEFTMOST(opaque) || P_ISLEAF(opaque))
elog(ERROR, "bt_fixtree[%s]: invalid start page (need to recreate index)", RelationGetRelationName(rel));
pblkno = opaque->btpo_parent;
@@ -1534,25 +1554,26 @@ _bt_fixtree(Relation rel, BlockNumber blkno)
static void
_bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
{
- BlockNumber blkno = BufferGetBlockNumber(buf);
- Page page;
- BTPageOpaque opaque;
- BlockNumber cblkno[3];
- OffsetNumber coff[3];
- Buffer cbuf[3];
- Page cpage[3];
- BTPageOpaque copaque[3];
- BTItem btitem;
- int cidx, i;
- bool goodbye = false;
- char tbuf[BLCKSZ];
+ BlockNumber blkno = BufferGetBlockNumber(buf);
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber cblkno[3];
+ OffsetNumber coff[3];
+ Buffer cbuf[3];
+ Page cpage[3];
+ BTPageOpaque copaque[3];
+ BTItem btitem;
+ int cidx,
+ i;
+ bool goodbye = false;
+ char tbuf[BLCKSZ];
page = BufferGetPage(buf);
/* copy page to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_READ);
- page = (Page)tbuf;
+ page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/* Initialize first child data */
@@ -1564,20 +1585,21 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
cbuf[0] = _bt_getbuf(rel, cblkno[0], BT_READ);
cpage[0] = BufferGetPage(cbuf[0]);
copaque[0] = (BTPageOpaque) PageGetSpecialPointer(cpage[0]);
- if (P_LEFTMOST(opaque) && ! P_LEFTMOST(copaque[0]))
+ if (P_LEFTMOST(opaque) && !P_LEFTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: non-leftmost child page of leftmost parent (need to recreate index)", RelationGetRelationName(rel));
/* caller should take care and avoid this */
if (P_RIGHTMOST(copaque[0]))
elog(ERROR, "bt_fixtlevel[%s]: invalid start child (need to recreate index)", RelationGetRelationName(rel));
- for ( ; ; )
+ for (;;)
{
+
/*
- * Read up to 2 more child pages and look for pointers
- * to them in *saved* parent page
+ * Read up to 2 more child pages and look for pointers to them in
+ * *saved* parent page
*/
coff[1] = coff[2] = InvalidOffsetNumber;
- for (cidx = 0; cidx < 2; )
+ for (cidx = 0; cidx < 2;)
{
cidx++;
cblkno[cidx] = (copaque[cidx - 1])->btpo_next;
@@ -1609,20 +1631,20 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (coff[1] == InvalidOffsetNumber ||
(cidx == 2 && coff[2] == InvalidOffsetNumber))
{
- Buffer newbuf;
- Page newpage;
- BTPageOpaque newopaque;
- BTItem ritem;
- Size itemsz;
- OffsetNumber newitemoff;
- BlockNumber parblk[3];
- BTStackData stack;
+ Buffer newbuf;
+ Page newpage;
+ BTPageOpaque newopaque;
+ BTItem ritem;
+ Size itemsz;
+ OffsetNumber newitemoff;
+ BlockNumber parblk[3];
+ BTStackData stack;
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
stack.bts_offset = InvalidOffsetNumber;
ItemPointerSet(&(stack.bts_btitem.bti_itup.t_tid),
- cblkno[0], P_HIKEY);
+ cblkno[0], P_HIKEY);
buf = _bt_getstackbuf(rel, &stack, BT_WRITE);
if (buf == InvalidBuffer)
@@ -1644,19 +1666,19 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (coff[i] != InvalidOffsetNumber)
{
if (parblk[i] == parblk[i - 1] &&
- coff[i] != coff[i - 1] + 1)
+ coff[i] != coff[i - 1] + 1)
elog(ERROR, "bt_fixlevel[%s]: invalid item order(2) (need to recreate index)", RelationGetRelationName(rel));
continue;
}
/* Have to check next page ? */
- if ((! P_RIGHTMOST(opaque)) &&
- coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
+ if ((!P_RIGHTMOST(opaque)) &&
+ coff[i - 1] == PageGetMaxOffsetNumber(page)) /* yes */
{
newbuf = _bt_getbuf(rel, opaque->btpo_next, BT_WRITE);
newpage = BufferGetPage(newbuf);
newopaque = (BTPageOpaque) PageGetSpecialPointer(newpage);
coff[i] = _bt_getoff(newpage, cblkno[i]);
- if (coff[i] != InvalidOffsetNumber) /* found ! */
+ if (coff[i] != InvalidOffsetNumber) /* found ! */
{
if (coff[i] != P_FIRSTDATAKEY(newopaque))
elog(ERROR, "bt_fixlevel[%s]: invalid item order(3) (need to recreate index)", RelationGetRelationName(rel));
@@ -1673,7 +1695,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
}
/* insert pointer */
ritem = (BTItem) PageGetItem(cpage[i - 1],
- PageGetItemId(cpage[i - 1], P_HIKEY));
+ PageGetItemId(cpage[i - 1], P_HIKEY));
btitem = _bt_formitem(&(ritem->bti_itup));
ItemPointerSet(&(btitem->bti_itup.t_tid), cblkno[i], P_HIKEY);
itemsz = IndexTupleDSize(btitem->bti_itup)
@@ -1684,16 +1706,16 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
if (PageGetFreeSpace(page) < itemsz)
{
- OffsetNumber firstright;
- OffsetNumber itup_off;
- BlockNumber itup_blkno;
- bool newitemonleft;
+ OffsetNumber firstright;
+ OffsetNumber itup_off;
+ BlockNumber itup_blkno;
+ bool newitemonleft;
firstright = _bt_findsplitloc(rel, page,
- newitemoff, itemsz, &newitemonleft);
+ newitemoff, itemsz, &newitemonleft);
newbuf = _bt_split(rel, buf, firstright,
- newitemoff, itemsz, btitem, newitemonleft,
- &itup_off, &itup_blkno);
+ newitemoff, itemsz, btitem, newitemonleft,
+ &itup_off, &itup_blkno);
/* what buffer we need in ? */
if (newitemonleft)
_bt_relbuf(rel, newbuf, BT_WRITE);
@@ -1720,7 +1742,7 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
/* copy page with pointer to cblkno[cidx] to temp storage */
memmove(tbuf, page, PageGetPageSize(page));
_bt_relbuf(rel, buf, BT_WRITE);
- page = (Page)tbuf;
+ page = (Page) tbuf;
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
}
@@ -1760,18 +1782,19 @@ _bt_fixlevel(Relation rel, Buffer buf, BlockNumber limit)
* but it doesn't guarantee full consistency of tree.)
*/
static void
-_bt_fixbranch(Relation rel, BlockNumber lblkno,
- BlockNumber rblkno, BTStack true_stack)
+_bt_fixbranch(Relation rel, BlockNumber lblkno,
+ BlockNumber rblkno, BTStack true_stack)
{
- BlockNumber blkno = true_stack->bts_blkno;
- BTStackData stack;
- BTPageOpaque opaque;
- Buffer buf, rbuf;
- Page page;
- OffsetNumber offnum;
+ BlockNumber blkno = true_stack->bts_blkno;
+ BTStackData stack;
+ BTPageOpaque opaque;
+ Buffer buf,
+ rbuf;
+ Page page;
+ OffsetNumber offnum;
true_stack = true_stack->bts_parent;
- for ( ; ; )
+ for (;;)
{
buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -1779,8 +1802,8 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
_bt_fixlevel(rel, buf, rblkno);
/*
- * Here parent level should have pointers for both
- * lblkno and rblkno and we have to find them.
+ * Here parent level should have pointers for both lblkno and
+ * rblkno and we have to find them.
*/
stack.bts_parent = NULL;
stack.bts_blkno = blkno;
@@ -1792,7 +1815,7 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
page = BufferGetPage(buf);
offnum = _bt_getoff(page, rblkno);
- if (offnum != InvalidOffsetNumber) /* right pointer found */
+ if (offnum != InvalidOffsetNumber) /* right pointer found */
{
if (offnum <= stack.bts_offset)
elog(ERROR, "bt_fixbranch[%s]: invalid item order (need to recreate index)", RelationGetRelationName(rel));
@@ -1829,10 +1852,10 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
}
/*
- * Well, we are on the level that was root or unexistent when
- * we started traversing tree down. If btpo_parent is updated
- * then we'll use it to continue, else we'll fix/restore upper
- * levels entirely.
+ * Well, we are on the level that was the root or nonexistent when we
+ * started traversing the tree down. If btpo_parent is updated then
+ * we'll use it to continue, else we'll fix/restore the upper levels
+ * entirely.
*/
if (!BTreeInvalidParent(opaque))
{
@@ -1874,18 +1897,18 @@ _bt_fixbranch(Relation rel, BlockNumber lblkno,
static void
_bt_fixup(Relation rel, Buffer buf)
{
- Page page;
- BTPageOpaque opaque;
- BlockNumber blkno;
+ Page page;
+ BTPageOpaque opaque;
+ BlockNumber blkno;
- for ( ; ; )
+ for (;;)
{
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * If someone else already created parent pages
- * then it's time for _bt_fixtree() to check upper
- * levels and fix them, if required.
+ * If someone else already created parent pages then it's time for
+ * _bt_fixtree() to check upper levels and fix them, if required.
*/
if (!BTreeInvalidParent(opaque))
{
@@ -1904,13 +1927,12 @@ _bt_fixup(Relation rel, Buffer buf)
}
/*
- * Ok, we are on the leftmost page, it's write locked
- * by us and its btpo_parent points to meta page - time
- * for _bt_fixroot().
+ * Ok, we are on the leftmost page, it's write locked by us and its
+ * btpo_parent points to meta page - time for _bt_fixroot().
*/
elog(NOTICE, "bt_fixup[%s]: fixing root page", RelationGetRelationName(rel));
- buf = _bt_fixroot(rel, buf, true);
- _bt_relbuf(rel, buf, BT_WRITE);
+ buf = _bt_fixroot(rel, buf, true);
+ _bt_relbuf(rel, buf, BT_WRITE);
return;
}
@@ -1918,23 +1940,23 @@ _bt_fixup(Relation rel, Buffer buf)
static OffsetNumber
_bt_getoff(Page page, BlockNumber blkno)
{
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
- OffsetNumber offnum = P_FIRSTDATAKEY(opaque);
- BlockNumber curblkno;
- ItemId itemid;
- BTItem item;
-
- for ( ; offnum <= maxoff; offnum++)
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
+ OffsetNumber offnum = P_FIRSTDATAKEY(opaque);
+ BlockNumber curblkno;
+ ItemId itemid;
+ BTItem item;
+
+ for (; offnum <= maxoff; offnum++)
{
itemid = PageGetItemId(page, offnum);
item = (BTItem) PageGetItem(page, itemid);
curblkno = ItemPointerGetBlockNumber(&(item->bti_itup.t_tid));
if (curblkno == blkno)
- return(offnum);
+ return (offnum);
}
- return(InvalidOffsetNumber);
+ return (InvalidOffsetNumber);
}
/*
@@ -1961,9 +1983,9 @@ _bt_pgaddtup(Relation rel,
const char *where)
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- BTItemData truncitem;
+ BTItemData truncitem;
- if (! P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
+ if (!P_ISLEAF(opaque) && itup_off == P_FIRSTDATAKEY(opaque))
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 4c854fe7913..460d6c834c1 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.50 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.51 2001/03/22 03:59:14 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -28,7 +28,7 @@
#include "miscadmin.h"
#include "storage/lmgr.h"
-extern bool FixBTree; /* comments in nbtree.c */
+extern bool FixBTree; /* comments in nbtree.c */
extern Buffer _bt_fixroot(Relation rel, Buffer oldrootbuf, bool release);
/*
@@ -100,7 +100,7 @@ _bt_metapinit(Relation rel)
*
* The access type parameter (BT_READ or BT_WRITE) controls whether
* a new root page will be created or not. If access = BT_READ,
- * and no root page exists, we just return InvalidBuffer. For
+ * and no root page exists, we just return InvalidBuffer. For
* BT_WRITE, we try to create the root page if it doesn't exist.
* NOTE that the returned root page will have only a read lock set
* on it even if access = BT_WRITE!
@@ -178,20 +178,20 @@ _bt_getroot(Relation rel, int access)
/* XLOG stuff */
{
- xl_btree_newroot xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata;
+ xl_btree_newroot xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata;
xlrec.node = rel->rd_node;
xlrec.level = 1;
BlockIdSet(&(xlrec.rootblk), rootblkno);
rdata.buffer = InvalidBuffer;
- rdata.data = (char*)&xlrec;
+ rdata.data = (char *) &xlrec;
rdata.len = SizeOfBtreeNewroot;
rdata.next = NULL;
recptr = XLogInsert(RM_BTREE_ID,
- XLOG_BTREE_NEWROOT|XLOG_BTREE_LEAF, &rdata);
+ XLOG_BTREE_NEWROOT | XLOG_BTREE_LEAF, &rdata);
PageSetLSN(rootpage, recptr);
PageSetSUI(rootpage, ThisStartUpID);
@@ -212,6 +212,7 @@ _bt_getroot(Relation rel, int access)
}
else
{
+
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
@@ -232,30 +233,31 @@ _bt_getroot(Relation rel, int access)
/*
* Race condition: If the root page split between the time we looked
* at the metadata page and got the root buffer, then we got the wrong
- * buffer. Release it and try again.
+ * buffer. Release it and try again.
*/
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
- if (! P_ISROOT(rootopaque))
+ if (!P_ISROOT(rootopaque))
{
+
/*
- * It happened, but if root page splitter failed to create
- * new root page then we'll go in loop trying to call
- * _bt_getroot again and again.
+ * It happened, but if the root page splitter failed to create a new
+ * root page then we'll loop trying to call _bt_getroot again and
+ * again.
*/
if (FixBTree)
{
- Buffer newrootbuf;
+ Buffer newrootbuf;
-check_parent:;
- if (BTreeInvalidParent(rootopaque)) /* unupdated! */
+ check_parent:;
+ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_WRITE);
/* handle concurrent fix of root page */
- if (BTreeInvalidParent(rootopaque)) /* unupdated! */
+ if (BTreeInvalidParent(rootopaque)) /* unupdated! */
{
elog(NOTICE, "bt_getroot[%s]: fixing root page", RelationGetRelationName(rel));
newrootbuf = _bt_fixroot(rel, rootbuf, true);
@@ -266,20 +268,22 @@ check_parent:;
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
/* New root might be splitted while changing lock */
if (P_ISROOT(rootopaque))
- return(rootbuf);
+ return (rootbuf);
/* rootbuf is read locked */
goto check_parent;
}
- else /* someone else already fixed root */
+ else
+/* someone else already fixed root */
{
LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
LockBuffer(rootbuf, BT_READ);
}
}
+
/*
- * Ok, here we have old root page with btpo_parent pointing
- * to upper level - check parent page because of there is
- * good chance that parent is root page.
+ * Ok, here we have the old root page with btpo_parent pointing to
+ * an upper level - check the parent page because there is a good
+ * chance that the parent is the root page.
*/
newrootbuf = _bt_getbuf(rel, rootopaque->btpo_parent, BT_READ);
_bt_relbuf(rel, rootbuf, BT_READ);
@@ -287,7 +291,7 @@ check_parent:;
rootpage = BufferGetPage(rootbuf);
rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
if (P_ISROOT(rootopaque))
- return(rootbuf);
+ return (rootbuf);
/* no luck -:( */
}
@@ -366,7 +370,7 @@ _bt_relbuf(Relation rel, Buffer buf, int access)
* and a pin on the buffer.
*
* NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here, the real I/O happens later. Since we can't persuade the
+ * dirty here, the real I/O happens later. Since we can't persuade the
* Unix kernel to schedule disk writes in a particular order, there's not
* much point in worrying about this. The most we can say is that all the
* writes will occur before commit.
@@ -468,14 +472,14 @@ _bt_pagedel(Relation rel, ItemPointer tid)
PageIndexTupleDelete(page, offno);
/* XLOG stuff */
{
- xl_btree_delete xlrec;
- XLogRecPtr recptr;
- XLogRecData rdata[2];
+ xl_btree_delete xlrec;
+ XLogRecPtr recptr;
+ XLogRecData rdata[2];
xlrec.target.node = rel->rd_node;
xlrec.target.tid = *tid;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = SizeOfBtreeDelete;
rdata[0].next = &(rdata[1]);
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index f02dfcbd128..97d99da4fde 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.78 2001/02/07 23:35:33 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.79 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,7 +30,8 @@
bool BuildingBtree = false; /* see comment in btbuild() */
bool FastBuild = true; /* use sort/build instead */
- /* of insertion build */
+
+ /* of insertion build */
/*
@@ -52,12 +53,14 @@ static void _bt_restscan(IndexScanDesc scan);
Datum
btbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -69,9 +72,11 @@ btbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -79,15 +84,16 @@ btbuild(PG_FUNCTION_ARGS)
BTItem btitem;
bool usefast;
Snapshot snapshot;
- TransactionId XmaxRecent;
+ TransactionId XmaxRecent;
+
/*
- * spool2 is needed only when the index is an unique index.
- * Dead tuples are put into spool2 instead of spool in
- * order to avoid uniqueness check.
+ * spool2 is needed only when the index is a unique index. Dead
+ * tuples are put into spool2 instead of spool in order to avoid
+ * the uniqueness check.
*/
- BTSpool *spool2 = NULL;
+ BTSpool *spool2 = NULL;
bool tupleIsAlive;
- int dead_count;
+ int dead_count;
/* note that this is a new btree */
BuildingBtree = true;
@@ -103,7 +109,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifdef BTREE_BUILD_STATS
if (Show_btree_build_stats)
ResetUsage();
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/* initialize the btree index metadata page (if this is a new index) */
if (oldPred == NULL)
@@ -155,10 +161,10 @@ btbuild(PG_FUNCTION_ARGS)
if (usefast)
{
spool = _bt_spoolinit(index, indexInfo->ii_Unique);
+
/*
- * Different from spool,the uniqueness isn't checked
- * for spool2.
- */
+ * Unlike spool, the uniqueness isn't checked for spool2.
+ */
if (indexInfo->ii_Unique)
spool2 = _bt_spoolinit(index, false);
}
@@ -187,12 +193,13 @@ btbuild(PG_FUNCTION_ARGS)
}
else
tupleIsAlive = true;
-
+
MemoryContextReset(econtext->ecxt_per_tuple_memory);
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -253,8 +260,7 @@ btbuild(PG_FUNCTION_ARGS)
* btree pages - NULLs greater NOT_NULLs and NULL = NULL is TRUE.
* Sure, it's just rule for placing/finding items and no more -
* keytest'll return FALSE for a = 5 for items having 'a' isNULL.
- * Look at _bt_compare for how it works.
- * - vadim 03/23/97
+ * Look at _bt_compare for how it works. - vadim 03/23/97
*
* if (itup->t_info & INDEX_NULL_MASK) { pfree(itup); continue; }
*/
@@ -271,7 +277,8 @@ btbuild(PG_FUNCTION_ARGS)
{
if (tupleIsAlive || !spool2)
_bt_spool(btitem, spool);
- else /* dead tuples are put into spool2 */
+ else
+/* dead tuples are put into spool2 */
{
dead_count++;
_bt_spool(btitem, spool2);
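A minimal standalone sketch, not part of the patch, of the routing decision in the hunk above for a unique-index build: live heap tuples go to the main spool, whose sort enforces uniqueness, while dead tuples go to spool2 so they are still indexed but never trip the uniqueness check. The spool type and counters are invented stand-ins.

#include <stdio.h>

typedef struct
{
	const char *name;
	int			count;
} SketchSpool;

static void
route_tuple(int tuple_is_alive, SketchSpool *spool, SketchSpool *spool2)
{
	if (tuple_is_alive || spool2 == NULL)
		spool->count++;			/* normal path, uniqueness checked */
	else
		spool2->count++;		/* dead tuple, uniqueness check skipped */
}

int
main(void)
{
	SketchSpool spool = {"spool", 0};
	SketchSpool spool2 = {"spool2", 0};

	route_tuple(1, &spool, &spool2);	/* live tuple */
	route_tuple(0, &spool, &spool2);	/* dead tuple */

	if (spool2.count == 0)
		printf("spool2 turned out to be unnecessary\n");
	printf("%s: %d tuples, %s: %d tuples\n",
		   spool.name, spool.count, spool2.name, spool2.count);
	return 0;
}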
@@ -288,7 +295,7 @@ btbuild(PG_FUNCTION_ARGS)
/* okay, all heap tuples are indexed */
heap_endscan(hscan);
- if (spool2 && !dead_count) /* spool2 was found to be unnecessary */
+ if (spool2 && !dead_count) /* spool2 was found to be unnecessary */
{
_bt_spooldestroy(spool2);
spool2 = NULL;
@@ -296,9 +303,7 @@ btbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -322,7 +327,7 @@ btbuild(PG_FUNCTION_ARGS)
ShowUsage();
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
/*
* Since we just counted the tuples in the heap, we update its stats
@@ -368,11 +373,11 @@ btbuild(PG_FUNCTION_ARGS)
Datum
btinsert(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
InsertIndexResult res;
BTItem btitem;
IndexTuple itup;
@@ -396,8 +401,8 @@ btinsert(PG_FUNCTION_ARGS)
Datum
btgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/*
@@ -408,10 +413,11 @@ btgettuple(PG_FUNCTION_ARGS)
if (ItemPointerIsValid(&(scan->currentItemData)))
{
+
/*
* Restore scan position using heap TID returned by previous call
- * to btgettuple(). _bt_restscan() re-grabs the read lock on
- * the buffer, too.
+ * to btgettuple(). _bt_restscan() re-grabs the read lock on the
+ * buffer, too.
*/
_bt_restscan(scan);
res = _bt_next(scan, dir);
@@ -421,8 +427,8 @@ btgettuple(PG_FUNCTION_ARGS)
/*
* Save heap TID to use it in _bt_restscan. Then release the read
- * lock on the buffer so that we aren't blocking other backends.
- * NOTE: we do keep the pin on the buffer!
+ * lock on the buffer so that we aren't blocking other backends. NOTE:
+ * we do keep the pin on the buffer!
*/
if (res)
{
@@ -461,11 +467,13 @@ btbeginscan(PG_FUNCTION_ARGS)
Datum
btrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED /* XXX surely it's wrong to ignore this? */
- bool fromEnd = PG_GETARG_BOOL(1);
+ bool fromEnd = PG_GETARG_BOOL(1);
+
#endif
- ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
+ ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2);
ItemPointer iptr;
BTScanOpaque so;
@@ -540,7 +548,7 @@ btmovescan(IndexScanDesc scan, Datum v)
Datum
btendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -578,7 +586,7 @@ btendscan(PG_FUNCTION_ARGS)
Datum
btmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -610,7 +618,7 @@ btmarkpos(PG_FUNCTION_ARGS)
Datum
btrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
ItemPointer iptr;
BTScanOpaque so;
@@ -640,8 +648,8 @@ btrestrpos(PG_FUNCTION_ARGS)
Datum
btdelete(PG_FUNCTION_ARGS)
{
- Relation rel = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation rel = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
/* adjust any active scans that will be affected by this deletion */
_bt_adjscans(rel, tid);
@@ -671,8 +679,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
- * Get back the read lock we were holding on the buffer.
- * (We still have a reference-count pin on it, though.)
+ * Get back the read lock we were holding on the buffer. (We still
+ * have a reference-count pin on it, though.)
*/
LockBuffer(buf, BT_READ);
@@ -689,13 +697,13 @@ _bt_restscan(IndexScanDesc scan)
if (!ItemPointerIsValid(&target))
{
ItemPointerSetOffsetNumber(current,
- OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
+ OffsetNumberPrev(P_FIRSTDATAKEY(opaque)));
return;
}
/*
- * The item we were on may have moved right due to insertions.
- * Find it again.
+ * The item we were on may have moved right due to insertions. Find it
+ * again.
*/
for (;;)
{
@@ -717,7 +725,8 @@ _bt_restscan(IndexScanDesc scan)
}
/*
- * By here, the item we're looking for moved right at least one page
+ * By here, the item we're looking for moved right at least one
+ * page
*/
if (P_RIGHTMOST(opaque))
elog(FATAL, "_bt_restscan: my bits moved right off the end of the world!"
@@ -742,14 +751,14 @@ _bt_restore_page(Page page, char *from, int len)
Size itemsz;
char *end = from + len;
- for ( ; from < end; )
+ for (; from < end;)
{
memcpy(&btdata, from, sizeof(BTItemData));
itemsz = IndexTupleDSize(btdata.bti_itup) +
- (sizeof(BTItemData) - sizeof(IndexTupleData));
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item) from, itemsz,
- FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
+ FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "_bt_restore_page: can't add item to page");
from += itemsz;
}
@@ -758,20 +767,20 @@ _bt_restore_page(Page page, char *from, int len)
static void
btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_delete *xlrec;
- Relation reln;
- Buffer buffer;
- Page page;
+ xl_btree_delete *xlrec;
+ Relation reln;
+ Buffer buffer;
+ Page page;
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
return;
- xlrec = (xl_btree_delete*) XLogRecGetData(record);
+ xlrec = (xl_btree_delete *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "btree_delete_redo: block unfound");
page = (Page) BufferGetPage(buffer);
@@ -796,21 +805,21 @@ btree_xlog_delete(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_insert *xlrec;
- Relation reln;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
+ xl_btree_insert *xlrec;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
if (redo && (record->xl_info & XLR_BKP_BLOCK_1))
return;
- xlrec = (xl_btree_insert*) XLogRecGetData(record);
+ xlrec = (xl_btree_insert *) XLogRecGetData(record);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
return;
- buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ buffer = XLogReadBuffer(false, reln,
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(STOP, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@@ -825,11 +834,11 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
UnlockAndReleaseBuffer(buffer);
return;
}
- if (PageAddItem(page, (Item)((char*)xlrec + SizeOfBtreeInsert),
- record->xl_len - SizeOfBtreeInsert,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
- LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree_insert_redo: failed to add item");
+ if (PageAddItem(page, (Item) ((char *) xlrec + SizeOfBtreeInsert),
+ record->xl_len - SizeOfBtreeInsert,
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ LP_USED) == InvalidOffsetNumber)
+ elog(STOP, "btree_insert_redo: failed to add item");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -840,7 +849,7 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_insert_undo: bad page LSN");
- if (! P_ISLEAF(pageop))
+ if (!P_ISLEAF(pageop))
{
UnlockAndReleaseBuffer(buffer);
return;
@@ -855,14 +864,14 @@ btree_xlog_insert(bool redo, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_split *xlrec = (xl_btree_split*) XLogRecGetData(record);
- Relation reln;
- BlockNumber blkno;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
- char *op = (redo) ? "redo" : "undo";
- bool isleaf = (record->xl_info & XLOG_BTREE_LEAF);
+ xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
+ Relation reln;
+ BlockNumber blkno;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
+ char *op = (redo) ? "redo" : "undo";
+ bool isleaf = (record->xl_info & XLOG_BTREE_LEAF);
reln = XLogOpenRelation(redo, RM_BTREE_ID, xlrec->target.node);
if (!RelationIsValid(reln))
@@ -870,7 +879,7 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
/* Left (original) sibling */
blkno = (onleft) ? ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
- BlockIdGetBlockNumber(&(xlrec->otherblk));
+ BlockIdGetBlockNumber(&(xlrec->otherblk));
buffer = XLogReadBuffer(false, reln, blkno);
if (!BufferIsValid(buffer))
elog(STOP, "btree_split_%s: lost left sibling", op);
@@ -892,13 +901,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
pageop->btpo_next = ItemPointerGetBlockNumber(&(xlrec->target.tid));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
- _bt_restore_page(page, (char*)xlrec + SizeOfBtreeSplit, xlrec->leftlen);
+ _bt_restore_page(page, (char *) xlrec + SizeOfBtreeSplit, xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
- else /* undo */
+ else
+/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad left sibling LSN");
@@ -906,8 +916,8 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
}
/* Right (new) sibling */
- blkno = (onleft) ? BlockIdGetBlockNumber(&(xlrec->otherblk)) :
- ItemPointerGetBlockNumber(&(xlrec->target.tid));
+ blkno = (onleft) ? BlockIdGetBlockNumber(&(xlrec->otherblk)) :
+ ItemPointerGetBlockNumber(&(xlrec->target.tid));
buffer = XLogReadBuffer((redo) ? true : false, reln, blkno);
if (!BufferIsValid(buffer))
elog(STOP, "btree_split_%s: lost right sibling", op);
@@ -922,21 +932,22 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
if (redo)
{
pageop->btpo_parent = BlockIdGetBlockNumber(&(xlrec->parentblk));
- pageop->btpo_prev = (onleft) ?
- ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
- BlockIdGetBlockNumber(&(xlrec->otherblk));
+ pageop->btpo_prev = (onleft) ?
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)) :
+ BlockIdGetBlockNumber(&(xlrec->otherblk));
pageop->btpo_next = BlockIdGetBlockNumber(&(xlrec->rightblk));
pageop->btpo_flags = (isleaf) ? BTP_LEAF : 0;
_bt_restore_page(page,
- (char*)xlrec + SizeOfBtreeSplit + xlrec->leftlen,
- record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
+ (char *) xlrec + SizeOfBtreeSplit + xlrec->leftlen,
+ record->xl_len - SizeOfBtreeSplit - xlrec->leftlen);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
}
- else /* undo */
+ else
+/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
elog(STOP, "btree_split_undo: bad right sibling LSN");
@@ -965,9 +976,9 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
return;
}
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
- pageop->btpo_prev = (onleft) ?
- BlockIdGetBlockNumber(&(xlrec->otherblk)) :
- ItemPointerGetBlockNumber(&(xlrec->target.tid));
+ pageop->btpo_prev = (onleft) ?
+ BlockIdGetBlockNumber(&(xlrec->otherblk)) :
+ ItemPointerGetBlockNumber(&(xlrec->target.tid));
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -977,14 +988,14 @@ btree_xlog_split(bool redo, bool onleft, XLogRecPtr lsn, XLogRecord *record)
static void
btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
{
- xl_btree_newroot *xlrec = (xl_btree_newroot*) XLogRecGetData(record);
- Relation reln;
- Buffer buffer;
- Page page;
- BTPageOpaque pageop;
- Buffer metabuf;
- Page metapg;
- BTMetaPageData md;
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) XLogRecGetData(record);
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ BTPageOpaque pageop;
+ Buffer metabuf;
+ Page metapg;
+ BTMetaPageData md;
if (!redo)
return;
@@ -1011,8 +1022,8 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
if (record->xl_len > SizeOfBtreeNewroot)
_bt_restore_page(page,
- (char*)xlrec + SizeOfBtreeNewroot,
- record->xl_len - SizeOfBtreeNewroot);
+ (char *) xlrec + SizeOfBtreeNewroot,
+ record->xl_len - SizeOfBtreeNewroot);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
@@ -1037,7 +1048,7 @@ btree_xlog_newroot(bool redo, XLogRecPtr lsn, XLogRecord *record)
void
btree_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_DELETE)
@@ -1045,9 +1056,9 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(true, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
- btree_xlog_split(true, false, lsn, record); /* new item on the right */
+ btree_xlog_split(true, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
- btree_xlog_split(true, true, lsn, record); /* new item on the left */
+ btree_xlog_split(true, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(true, lsn, record);
else
@@ -1057,7 +1068,7 @@ btree_redo(XLogRecPtr lsn, XLogRecord *record)
void
btree_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_DELETE)
@@ -1065,9 +1076,9 @@ btree_undo(XLogRecPtr lsn, XLogRecord *record)
else if (info == XLOG_BTREE_INSERT)
btree_xlog_insert(false, lsn, record);
else if (info == XLOG_BTREE_SPLIT)
- btree_xlog_split(false, false, lsn, record);/* new item on the right */
+ btree_xlog_split(false, false, lsn, record); /* new item on the right */
else if (info == XLOG_BTREE_SPLEFT)
- btree_xlog_split(false, true, lsn, record); /* new item on the left */
+ btree_xlog_split(false, true, lsn, record); /* new item on the left */
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(false, lsn, record);
else
@@ -1078,45 +1089,49 @@ static void
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "node %u/%u; tid %u/%u",
- target->node.tblNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.tblNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
-
+
void
-btree_desc(char *buf, uint8 xl_info, char* rec)
+btree_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
info &= ~XLOG_BTREE_LEAF;
if (info == XLOG_BTREE_INSERT)
{
- xl_btree_insert *xlrec = (xl_btree_insert*) rec;
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+
strcat(buf, "insert: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_DELETE)
{
- xl_btree_delete *xlrec = (xl_btree_delete*) rec;
+ xl_btree_delete *xlrec = (xl_btree_delete *) rec;
+
strcat(buf, "delete: ");
out_target(buf, &(xlrec->target));
}
else if (info == XLOG_BTREE_SPLIT || info == XLOG_BTREE_SPLEFT)
{
- xl_btree_split *xlrec = (xl_btree_split*) rec;
- sprintf(buf + strlen(buf), "split(%s): ",
- (info == XLOG_BTREE_SPLIT) ? "right" : "left");
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
+
+ sprintf(buf + strlen(buf), "split(%s): ",
+ (info == XLOG_BTREE_SPLIT) ? "right" : "left");
out_target(buf, &(xlrec->target));
sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- BlockIdGetBlockNumber(&xlrec->otherblk),
- BlockIdGetBlockNumber(&xlrec->rightblk));
+ BlockIdGetBlockNumber(&xlrec->otherblk),
+ BlockIdGetBlockNumber(&xlrec->rightblk));
}
else if (info == XLOG_BTREE_NEWROOT)
{
- xl_btree_newroot *xlrec = (xl_btree_newroot*) rec;
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
+
sprintf(buf + strlen(buf), "root: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- BlockIdGetBlockNumber(&xlrec->rootblk));
+ xlrec->node.tblNode, xlrec->node.relNode,
+ BlockIdGetBlockNumber(&xlrec->rootblk));
}
else
strcat(buf, "UNKNOWN");
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 6f41ab9c847..d8b8e0682a0 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.63 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.64 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,20 +32,20 @@ static RetrieveIndexResult _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
*
* NOTE that the returned buffer is read-locked regardless of the access
* parameter. However, access = BT_WRITE will allow an empty root page
- * to be created and returned. When access = BT_READ, an empty index
+ * to be created and returned. When access = BT_READ, an empty index
* will result in *bufP being set to InvalidBuffer.
*/
BTStack
_bt_search(Relation rel, int keysz, ScanKey scankey,
Buffer *bufP, int access)
{
- BTStack stack_in = NULL;
+ BTStack stack_in = NULL;
/* Get the root page to start with */
*bufP = _bt_getroot(rel, access);
/* If index is empty and access = BT_READ, no root page is created. */
- if (! BufferIsValid(*bufP))
+ if (!BufferIsValid(*bufP))
return (BTStack) NULL;
/* Loop iterates once per level descended in the tree */
@@ -79,13 +79,13 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
- * We need to save the bit image of the index entry we chose in the
- * parent page on a stack. In case we split the tree, we'll use this
- * bit image to figure out what our real parent page is, in case the
- * parent splits while we're working lower in the tree. See the paper
- * by Lehman and Yao for how this is detected and handled. (We use the
- * child link to disambiguate duplicate keys in the index -- Lehman
- * and Yao disallow duplicate keys.)
+ * We need to save the bit image of the index entry we chose in
+ * the parent page on a stack. In case we split the tree, we'll
+ * use this bit image to figure out what our real parent page is,
+ * in case the parent splits while we're working lower in the
+ * tree. See the paper by Lehman and Yao for how this is detected
+ * and handled. (We use the child link to disambiguate duplicate
+ * keys in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -98,9 +98,9 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*bufP = _bt_getbuf(rel, blkno, BT_READ);
/*
- * Race -- the page we just grabbed may have split since we read its
- * pointer in the parent. If it has, we may need to move right to its
- * new sibling. Do that.
+ * Race -- the page we just grabbed may have split since we read
+ * its pointer in the parent. If it has, we may need to move
+ * right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@@ -127,7 +127,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
*
* On entry, we have the buffer pinned and a lock of the proper type.
* If we move right, we release the buffer and lock and acquire the
- * same on the right sibling. Return value is the buffer we stop at.
+ * same on the right sibling. Return value is the buffer we stop at.
*/
Buffer
_bt_moveright(Relation rel,
@@ -153,7 +153,7 @@ _bt_moveright(Relation rel,
_bt_compare(rel, keysz, scankey, page, P_HIKEY) > 0)
{
/* step right one page */
- BlockNumber rblkno = opaque->btpo_next;
+ BlockNumber rblkno = opaque->btpo_next;
_bt_relbuf(rel, buf, access);
buf = _bt_getbuf(rel, rblkno, access);
@@ -184,7 +184,7 @@ _bt_moveright(Relation rel,
* find all leaf keys >= given scankey.
*
* This procedure is not responsible for walking right, it just examines
- * the given page. _bt_binsrch() has no lock or refcount side effects
+ * the given page. _bt_binsrch() has no lock or refcount side effects
* on the buffer.
*/
OffsetNumber
@@ -299,7 +299,7 @@ _bt_compare(Relation rel,
* Force result ">" if target item is first data item on an internal
* page --- see NOTE above.
*/
- if (! P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
+ if (!P_ISLEAF(opaque) && offnum == P_FIRSTDATAKEY(opaque))
return 1;
btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum));
@@ -327,7 +327,7 @@ _bt_compare(Relation rel,
datum = index_getattr(itup, entry->sk_attno, itupdesc, &isNull);
/* see comments about NULLs handling in btbuild */
- if (entry->sk_flags & SK_ISNULL) /* key is NULL */
+ if (entry->sk_flags & SK_ISNULL) /* key is NULL */
{
if (isNull)
result = 0; /* NULL "=" NULL */
@@ -458,10 +458,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so);
/*
- * Quit now if _bt_orderkeys() discovered that the scan keys can
- * never be satisfied (eg, x == 1 AND x > 2).
+ * Quit now if _bt_orderkeys() discovered that the scan keys can never
+ * be satisfied (eg, x == 1 AND x > 2).
*/
- if (! so->qual_ok)
+ if (!so->qual_ok)
return (RetrieveIndexResult) NULL;
/*
@@ -484,17 +484,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
strat = _bt_getstrat(rel, attno,
so->keyData[i].sk_procedure);
+
/*
* Can we use this key as a starting boundary for this attr?
*
- * We can use multiple keys if they look like, say, = >= =
- * but we have to stop after accepting a > or < boundary.
+ * We can use multiple keys if they look like, say, = >= = but we
+ * have to stop after accepting a > or < boundary.
*/
if (strat == strat_total ||
strat == BTEqualStrategyNumber)
- {
nKeyIs[keysCount++] = i;
- }
else if (ScanDirectionIsBackward(dir) &&
(strat == BTLessStrategyNumber ||
strat == BTLessEqualStrategyNumber))
@@ -536,7 +535,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
for (i = 0; i < keysCount; i++)
{
j = nKeyIs[i];
- /* _bt_orderkeys disallows it, but it's place to add some code later */
+
+ /*
+ * _bt_orderkeys disallows it, but it's place to add some code
+ * later
+ */
if (so->keyData[j].sk_flags & SK_ISNULL)
{
pfree(nKeyIs);
@@ -562,7 +565,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/* don't need to keep the stack around... */
_bt_freestack(stack);
- if (! BufferIsValid(buf))
+ if (!BufferIsValid(buf))
{
/* Only get here if index is completely empty */
ItemPointerSetInvalid(current);
@@ -601,6 +604,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
switch (strat_total)
{
case BTLessStrategyNumber:
+
/*
* Back up one to arrive at last item < scankey
*/
@@ -612,6 +616,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTLessEqualStrategyNumber:
+
/*
* We need to find the last item <= scankey, so step forward
* till we find one > scankey, then step back one.
@@ -645,9 +650,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTEqualStrategyNumber:
+
/*
- * Make sure we are on the first equal item; might have to step
- * forward if currently at end of page.
+ * Make sure we are on the first equal item; might have to
+ * step forward if currently at end of page.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@@ -661,7 +667,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
result = _bt_compare(rel, keysCount, scankeys, page, offnum);
if (result != 0)
- goto nomatches; /* no equal items! */
+ goto nomatches; /* no equal items! */
+
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
@@ -685,6 +692,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterEqualStrategyNumber:
+
/*
* We want the first item >= scankey, which is where we are...
* unless we're not anywhere at all...
@@ -700,9 +708,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
break;
case BTGreaterStrategyNumber:
+
/*
- * We want the first item > scankey, so make sure we are on
- * an item and then step over any equal items.
+ * We want the first item > scankey, so make sure we are on an
+ * item and then step over any equal items.
*/
if (offnum > PageGetMaxOffsetNumber(page))
{
@@ -850,11 +859,12 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
*bufP = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
* If the adjacent page just split, then we have to walk
- * right to find the block that's now adjacent to where
- * we were. Because pages only split right, we don't have
- * to worry about this failing to terminate.
+ * right to find the block that's now adjacent to where we
+ * were. Because pages only split right, we don't have to
+ * worry about this failing to terminate.
*/
while (opaque->btpo_next != obknum)
{
@@ -912,12 +922,12 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
/*
* Scan down to the leftmost or rightmost leaf page. This is a
- * simplified version of _bt_search(). We don't maintain a stack
+ * simplified version of _bt_search(). We don't maintain a stack
* since we know we won't need it.
*/
buf = _bt_getroot(rel, BT_READ);
- if (! BufferIsValid(buf))
+ if (!BufferIsValid(buf))
{
/* empty index... */
ItemPointerSetInvalid(current);
@@ -981,7 +991,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
Assert(P_RIGHTMOST(opaque));
start = PageGetMaxOffsetNumber(page);
- if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty page */
+ if (start < P_FIRSTDATAKEY(opaque)) /* watch out for empty
+ * page */
start = P_FIRSTDATAKEY(opaque);
}
else
@@ -995,8 +1006,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
so->btso_curbuf = buf;
/*
- * Left/rightmost page could be empty due to deletions,
- * if so step till we find a nonempty page.
+ * Left/rightmost page could be empty due to deletions, if so step
+ * till we find a nonempty page.
*/
if (start > maxoff)
{
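
The _bt_search()/_bt_moveright() hunks above rewrap the comment about the Lehman & Yao race: the child page just descended into may have split after its pointer was read in the parent, so when the search key exceeds the page's high key the scan must follow right-sibling links until it reaches the page that now covers the key. A toy, single-threaded sketch of that walk, with hypothetical structures and no locking:

#include <limits.h>
#include <stdio.h>

/* Hypothetical page: covers keys up to high_key; the rightmost page is unbounded. */
typedef struct Page {
    int high_key;            /* INT_MAX marks the rightmost page */
    struct Page *right;      /* right sibling, or NULL */
    const char *name;
} Page;

/* Keep stepping right while the search key lies beyond this page's high key. */
static Page *move_right(Page *page, int key)
{
    while (page->high_key != INT_MAX && key > page->high_key && page->right)
        page = page->right;
    return page;
}

int main(void)
{
    /* Simulate a page that split into (A, B) after we read the parent's pointer. */
    Page b = {INT_MAX, NULL, "B"};
    Page a = {10, &b, "A"};
    printf("key 25 lands on page %s\n", move_right(&a, 25)->name);  /* -> B */
    return 0;
}
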
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index e9224a485af..2aca6bf7cfc 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -6,7 +6,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. We load source tuples into leaf-level pages.
+ * for each level. We load source tuples into leaf-level pages.
* Whenever we fill a page at one level, we add a link to it to its
* parent level (starting a new parent level if necessary). When
* done, we write out each final page on each level, adding it to
@@ -35,7 +35,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.59 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.60 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ struct BTSpool
};
/*
- * Status record for a btree page being built. We have one of these
+ * Status record for a btree page being built. We have one of these
* for each active tree level.
*
* The reason we need to store a copy of the minimum key is that we'll
@@ -73,11 +73,13 @@ typedef struct BTPageState
{
Buffer btps_buf; /* current buffer & page */
Page btps_page;
- BTItem btps_minkey; /* copy of minimum key (first item) on page */
+ BTItem btps_minkey; /* copy of minimum key (first item) on
+ * page */
OffsetNumber btps_lastoff; /* last item offset loaded */
int btps_level; /* tree level (0 = leaf) */
- Size btps_full; /* "full" if less than this much free space */
- struct BTPageState *btps_next; /* link to parent level, if any */
+ Size btps_full; /* "full" if less than this much free
+ * space */
+ struct BTPageState *btps_next; /* link to parent level, if any */
} BTPageState;
@@ -92,7 +94,7 @@ static void _bt_blnewpage(Relation index, Buffer *buf, Page *page, int flags);
static BTPageState *_bt_pagestate(Relation index, int flags, int level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
- BTItem btitem, OffsetNumber itup_off);
+ BTItem btitem, OffsetNumber itup_off);
static void _bt_buildadd(Relation index, BTPageState *state, BTItem bti);
static void _bt_uppershutdown(Relation index, BTPageState *state);
static void _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2);
@@ -162,7 +164,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
ShowUsage();
ResetUsage();
}
-#endif /* BTREE_BUILD_STATS */
+#endif /* BTREE_BUILD_STATS */
tuplesort_performsort(btspool->sortstate);
if (btspool2)
@@ -269,9 +271,9 @@ _bt_sortaddtup(Page page,
OffsetNumber itup_off)
{
BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
- BTItemData truncitem;
+ BTItemData truncitem;
- if (! P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
+ if (!P_ISLEAF(opaque) && itup_off == P_FIRSTKEY)
{
memcpy(&truncitem, btitem, sizeof(BTItemData));
truncitem.bti_itup.t_info = sizeof(BTItemData);
@@ -290,7 +292,7 @@ _bt_sortaddtup(Page page,
* We must be careful to observe the page layout conventions of nbtsearch.c:
* - rightmost pages start data items at P_HIKEY instead of at P_FIRSTKEY.
* - on non-leaf pages, the key portion of the first item need not be
- * stored, we should store only the link.
+ * stored, we should store only the link.
*
* A leaf page being built looks like:
*
@@ -347,11 +349,12 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
*/
if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
- (unsigned long)btisz,
- (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
+ (unsigned long) btisz,
+ (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData));
if (pgspc < btisz || pgspc < state->btps_full)
{
+
/*
* Item won't fit on this page, or we feel the page is full enough
* already. Finish off the page and write it out.
@@ -388,9 +391,9 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
- * Link the old buffer into its parent, using its minimum key.
- * If we don't have a parent, we have to create one;
- * this adds a new btree level.
+ * Link the old buffer into its parent, using its minimum key. If
+ * we don't have a parent, we have to create one; this adds a new
+ * btree level.
*/
if (state->btps_next == (BTPageState *) NULL)
{
@@ -405,8 +408,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* Save a copy of the minimum key for the new page. We have to
- * copy it off the old page, not the new one, in case we are
- * not at leaf level.
+ * copy it off the old page, not the new one, in case we are not
+ * at leaf level.
*/
state->btps_minkey = _bt_formitem(&(obti->bti_itup));
@@ -414,13 +417,13 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
* Set the sibling links for both pages, and parent links too.
*
* It's not necessary to set the parent link at all, because it's
- * only used for handling concurrent root splits, but we may as well
- * do it as a debugging aid. Note we set new page's link as well
- * as old's, because if the new page turns out to be the last of
- * the level, _bt_uppershutdown won't change it. The links may be
- * out of date by the time the build finishes, but that's OK; they
- * need only point to a left-sibling of the true parent. See the
- * README file for more info.
+ * only used for handling concurrent root splits, but we may as
+ * well do it as a debugging aid. Note we set new page's link as
+ * well as old's, because if the new page turns out to be the last
+ * of the level, _bt_uppershutdown won't change it. The links may
+ * be out of date by the time the build finishes, but that's OK;
+ * they need only point to a left-sibling of the true parent. See
+ * the README file for more info.
*/
{
BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage);
@@ -434,7 +437,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
}
/*
- * Write out the old page. We never want to see it again, so we
+ * Write out the old page. We never want to see it again, so we
* can give up our lock (if we had one; most likely BuildingBtree
* is set, so we aren't locking).
*/
@@ -449,8 +452,8 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
/*
* If the new item is the first for its page, stash a copy for later.
* Note this will only happen for the first item on a level; on later
- * pages, the first item for a page is copied from the prior page
- * in the code above.
+ * pages, the first item for a page is copied from the prior page in
+ * the code above.
*/
if (last_off == P_HIKEY)
{
@@ -493,8 +496,8 @@ _bt_uppershutdown(Relation index, BTPageState *state)
*
* If we're at the top, it's the root, so attach it to the metapage.
* Otherwise, add an entry for it to its parent using its minimum
- * key. This may cause the last page of the parent level to split,
- * but that's not a problem -- we haven't gotten to it yet.
+ * key. This may cause the last page of the parent level to
+ * split, but that's not a problem -- we haven't gotten to it yet.
*/
if (s->btps_next == (BTPageState *) NULL)
{
@@ -513,7 +516,7 @@ _bt_uppershutdown(Relation index, BTPageState *state)
/*
* This is the rightmost page, so the ItemId array needs to be
- * slid back one slot. Then we can dump out the page.
+ * slid back one slot. Then we can dump out the page.
*/
_bt_slideleft(index, s->btps_buf, s->btps_page);
_bt_wrtbuf(index, s->btps_buf);
@@ -529,22 +532,29 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
{
BTPageState *state = NULL;
bool merge = (btspool2 != NULL);
- BTItem bti, bti2 = NULL;
- bool should_free, should_free2, load1;
+ BTItem bti,
+ bti2 = NULL;
+ bool should_free,
+ should_free2,
+ load1;
TupleDesc tupdes = RelationGetDescr(index);
- int i, keysz = RelationGetNumberOfAttributes(index);
+ int i,
+ keysz = RelationGetNumberOfAttributes(index);
ScanKey indexScanKey = NULL;
if (merge)
{
+
/*
- * Another BTSpool for dead tuples exists.
- * Now we have to merge btspool and btspool2.
- */
- ScanKey entry;
- Datum attrDatum1, attrDatum2;
- bool isFirstNull, isSecondNull;
- int32 compare;
+ * Another BTSpool for dead tuples exists. Now we have to merge
+ * btspool and btspool2.
+ */
+ ScanKey entry;
+ Datum attrDatum1,
+ attrDatum2;
+ bool isFirstNull,
+ isSecondNull;
+ int32 compare;
/* the preparation of merge */
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free);
@@ -552,7 +562,7 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
indexScanKey = _bt_mkscankey_nodata(index);
for (;;)
{
- load1 = true; /* load BTSpool next ? */
+ load1 = true; /* load BTSpool next ? */
if (NULL == bti2)
{
if (NULL == bti)
@@ -564,8 +574,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
for (i = 1; i <= keysz; i++)
{
entry = indexScanKey + i - 1;
- attrDatum1 = index_getattr((IndexTuple)bti, i, tupdes, &isFirstNull);
- attrDatum2 = index_getattr((IndexTuple)bti2, i, tupdes, &isSecondNull);
+ attrDatum1 = index_getattr((IndexTuple) bti, i, tupdes, &isFirstNull);
+ attrDatum2 = index_getattr((IndexTuple) bti2, i, tupdes, &isSecondNull);
if (isFirstNull)
{
if (!isSecondNull)
@@ -586,7 +596,7 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
else if (compare < 0)
break;
- }
+ }
}
}
else
@@ -613,7 +623,8 @@ _bt_load(Relation index, BTSpool *btspool, BTSpool *btspool2)
}
_bt_freeskey(indexScanKey);
}
- else /* merge is unnecessary */
+ else
+/* merge is unnecessary */
{
while (bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, true, &should_free), bti != (BTItem) NULL)
{
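
The _bt_load() hunks above reindent the merge of the two sorted spools: on each step the code compares the current heads of btspool and btspool2 and loads from whichever stream holds the smaller item (the load1 flag), falling back to the non-empty stream once the other runs out. The same idea on plain sorted arrays, as a minimal sketch with hypothetical names and the NULL-ordering rules omitted:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Decide whether to load from stream 1 next, mirroring the load1 flag above. */
static bool load_from_first(const int *a, size_t na, const int *b, size_t nb)
{
    if (nb == 0)
        return true;          /* second stream exhausted */
    if (na == 0)
        return false;         /* first stream exhausted */
    return *a <= *b;          /* smaller current head wins */
}

int main(void)
{
    int live[] = {1, 4, 7}, dead[] = {2, 4, 9};
    size_t i = 0, j = 0, nlive = 3, ndead = 3;

    while (i < nlive || j < ndead)
    {
        if (load_from_first(live + i, nlive - i, dead + j, ndead - j))
            printf("load %d from spool\n", live[i++]);
        else
            printf("load %d from spool2\n", dead[j++]);
    }
    return 0;
}
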
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 507205f2be7..2a37147d68e 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.42 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.43 2001/03/22 03:59:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ _bt_freestack(BTStack stack)
* Construct a BTItem from a plain IndexTuple.
*
* This is now useless code, since a BTItem *is* an index tuple with
- * no extra stuff. We hang onto it for the moment to preserve the
+ * no extra stuff. We hang onto it for the moment to preserve the
* notational distinction, in case we want to add some extra stuff
* again someday.
*/
@@ -165,7 +165,7 @@ _bt_formitem(IndexTuple itup)
* are "x = 1 AND y < 4 AND z < 5", then _bt_checkkeys will reject a tuple
* (1,2,7), but we must continue the scan in case there are tuples (1,3,z).
* But once we reach tuples like (1,4,z) we can stop scanning because no
- * later tuples could match. This is reflected by setting
+ * later tuples could match. This is reflected by setting
* so->numberOfRequiredKeys to the number of leading keys that must be
* matched to continue the scan. numberOfRequiredKeys is equal to the
* number of leading "=" keys plus the key(s) for the first non "="
@@ -178,7 +178,7 @@ _bt_formitem(IndexTuple itup)
*
* XXX this routine is one of many places that fail to handle SK_COMMUTE
* scankeys properly. Currently, the planner is careful never to generate
- * any indexquals that would require SK_COMMUTE to be set. Someday we ought
+ * any indexquals that would require SK_COMMUTE to be set. Someday we ought
* to try to fix this, though it's not real critical as long as indexable
* operators all have commutators...
*
@@ -191,7 +191,7 @@ _bt_formitem(IndexTuple itup)
void
_bt_orderkeys(Relation relation, BTScanOpaque so)
{
- ScanKeyData xform[BTMaxStrategyNumber];
+ ScanKeyData xform[BTMaxStrategyNumber];
bool init[BTMaxStrategyNumber];
uint16 numberOfKeys = so->numberOfKeys;
ScanKey key;
@@ -240,14 +240,14 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
/*
* Initialize for processing of keys for attr 1.
*
- * xform[i] holds a copy of the current scan key of strategy type i+1,
- * if any; init[i] is TRUE if we have found such a key for this attr.
+ * xform[i] holds a copy of the current scan key of strategy type i+1, if
+ * any; init[i] is TRUE if we have found such a key for this attr.
*/
attno = 1;
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
BTMaxStrategyNumber,
attno);
- MemSet(xform, 0, sizeof(xform)); /* not really necessary */
+ MemSet(xform, 0, sizeof(xform)); /* not really necessary */
MemSet(init, 0, sizeof(init));
/*
@@ -255,7 +255,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* pass to handle after-last-key processing. Actual exit from the
* loop is at the "break" statement below.
*/
- for (i = 0; ; cur++, i++)
+ for (i = 0;; cur++, i++)
{
if (i < numberOfKeys)
{
@@ -263,7 +263,9 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (cur->sk_flags & SK_ISNULL)
{
so->qual_ok = false;
- /* Quit processing so we don't try to invoke comparison
+
+ /*
+ * Quit processing so we don't try to invoke comparison
* routines on NULLs.
*/
return;
@@ -271,8 +273,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
/*
- * If we are at the end of the keys for a particular attr,
- * finish up processing and emit the cleaned-up keys.
+ * If we are at the end of the keys for a particular attr, finish
+ * up processing and emit the cleaned-up keys.
*/
if (i == numberOfKeys || cur->sk_attno != attno)
{
@@ -296,7 +298,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
eq = &xform[BTEqualStrategyNumber - 1];
for (j = BTMaxStrategyNumber; --j >= 0;)
{
- if (! init[j] ||
+ if (!init[j] ||
j == (BTEqualStrategyNumber - 1))
continue;
chk = &xform[j];
@@ -313,6 +315,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
}
else
{
+
/*
* No "=" for this key, so we're done with required keys
*/
@@ -355,8 +358,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
* Emit the cleaned-up keys back into the key[] array in the
* correct order. Note we are overwriting our input here!
* It's OK because (a) xform[] is a physical copy of the keys
- * we want, (b) we cannot emit more keys than we input, so
- * we won't overwrite as-yet-unprocessed keys.
+ * we want, (b) we cannot emit more keys than we input, so we
+ * won't overwrite as-yet-unprocessed keys.
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
{
@@ -383,7 +386,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation),
BTMaxStrategyNumber,
attno);
- MemSet(xform, 0, sizeof(xform)); /* not really necessary */
+ MemSet(xform, 0, sizeof(xform)); /* not really necessary */
MemSet(init, 0, sizeof(init));
}
@@ -409,7 +412,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
if (DatumGetBool(test))
xform[j].sk_argument = cur->sk_argument;
else if (j == (BTEqualStrategyNumber - 1))
- so->qual_ok = false; /* key == a && key == b, but a != b */
+ so->qual_ok = false; /* key == a && key == b, but a !=
+ * b */
}
else
{
@@ -473,16 +477,18 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (isNull)
{
+
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual
- * is one of the "must match" subset. On a backward scan,
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. On a backward scan,
* however, we should keep going.
*/
if (keysok < so->numberOfRequiredKeys &&
ScanDirectionIsForward(dir))
*continuescan = false;
+
/*
* In any case, this indextuple doesn't match the qual.
*/
@@ -498,9 +504,10 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
if (DatumGetBool(test) == !!(key->sk_flags & SK_NEGATE))
{
+
/*
- * Tuple fails this qual. If it's a required qual, then
- * we can conclude no further tuples will pass, either.
+ * Tuple fails this qual. If it's a required qual, then we
+ * can conclude no further tuples will pass, either.
*/
if (keysok < so->numberOfRequiredKeys)
*continuescan = false;
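
The _bt_orderkeys()/_bt_checkkeys() hunks above rewrap the comments about "required" keys: only the leading "=" keys plus the first inequality bound the scan, so when a key inside that required prefix fails, a forward scan can stop altogether instead of merely skipping the tuple. A small sketch of that distinction, using a deliberately simplified, hypothetical key representation rather than real ScanKeys:

#include <stdbool.h>
#include <stdio.h>

typedef struct Key {
    int attno;           /* 1-based index attribute this key constrains */
    int upper_bound;     /* toy qual: attribute value must be <= upper_bound */
} Key;

/*
 * Check one "tuple" (an array of attribute values) against the keys.
 * Returns whether it matches; *continuescan is cleared when a key within
 * the required prefix fails, meaning no later tuple can match either.
 */
static bool check_keys(const int *tuple, const Key *keys, int nkeys,
                       int nrequired, bool *continuescan)
{
    *continuescan = true;
    for (int i = 0; i < nkeys; i++)
    {
        if (tuple[keys[i].attno - 1] > keys[i].upper_bound)
        {
            if (i < nrequired)
                *continuescan = false;   /* required key failed: stop the scan */
            return false;                /* this tuple does not match */
        }
    }
    return true;
}

int main(void)
{
    Key keys[] = {{1, 5}, {2, 3}};       /* a <= 5 (required), b <= 3 (not) */
    int tuple[] = {4, 9};
    bool cont;
    bool ok = check_keys(tuple, keys, 2, 1, &cont);
    printf("match=%d continuescan=%d\n", ok, cont);   /* match=0 continuescan=1 */
    return 0;
}
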
diff --git a/src/backend/access/rtree/rtget.c b/src/backend/access/rtree/rtget.c
index df0f5e9c80e..c8fa6b18d68 100644
--- a/src/backend/access/rtree/rtget.c
+++ b/src/backend/access/rtree/rtget.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.24 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.25 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,8 +30,8 @@ static ItemPointer rtheapptr(Relation r, ItemPointer itemp);
Datum
rtgettuple(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
RetrieveIndexResult res;
/* if we have it cached in the scan desc, just return the value */
diff --git a/src/backend/access/rtree/rtproc.c b/src/backend/access/rtree/rtproc.c
index fd610caebe1..400be10ccb3 100644
--- a/src/backend/access/rtree/rtproc.c
+++ b/src/backend/access/rtree/rtproc.c
@@ -6,7 +6,7 @@
* NOTE: for largely-historical reasons, the intersection functions should
* return a NULL pointer (*not* an SQL null value) to indicate "no
* intersection". The size functions must be prepared to accept such
- * a pointer and return 0. This convention means that only pass-by-reference
+ * a pointer and return 0. This convention means that only pass-by-reference
* data types can be used as the output of the union and intersection
* routines, but that's not a big problem.
*
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.31 2001/01/24 19:42:49 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.32 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,6 +70,7 @@ Datum
rt_box_size(PG_FUNCTION_ARGS)
{
BOX *a = PG_GETARG_BOX_P(0);
+
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
@@ -98,8 +99,8 @@ rt_bigbox_size(PG_FUNCTION_ARGS)
Datum
rt_poly_union(PG_FUNCTION_ARGS)
{
- POLYGON *a = PG_GETARG_POLYGON_P(0);
- POLYGON *b = PG_GETARG_POLYGON_P(1);
+ POLYGON *a = PG_GETARG_POLYGON_P(0);
+ POLYGON *b = PG_GETARG_POLYGON_P(1);
POLYGON *p;
p = (POLYGON *) palloc(sizeof(POLYGON));
@@ -122,8 +123,8 @@ rt_poly_union(PG_FUNCTION_ARGS)
Datum
rt_poly_inter(PG_FUNCTION_ARGS)
{
- POLYGON *a = PG_GETARG_POLYGON_P(0);
- POLYGON *b = PG_GETARG_POLYGON_P(1);
+ POLYGON *a = PG_GETARG_POLYGON_P(0);
+ POLYGON *b = PG_GETARG_POLYGON_P(1);
POLYGON *p;
p = (POLYGON *) palloc(sizeof(POLYGON));
@@ -155,13 +156,15 @@ Datum
rt_poly_size(PG_FUNCTION_ARGS)
{
Pointer aptr = PG_GETARG_POINTER(0);
+
/* NB: size is an output argument */
float *size = (float *) PG_GETARG_POINTER(1);
- POLYGON *a;
+ POLYGON *a;
double xdim,
ydim;
- /* Can't just use GETARG because of possibility that input is NULL;
+ /*
+ * Can't just use GETARG because of possibility that input is NULL;
* since POLYGON is toastable, GETARG will try to inspect its value
*/
if (aptr == NULL)
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 45382d5ef3c..3752a59e99a 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.60 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.61 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,12 +68,12 @@ static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup,
static void rttighten(Relation r, RTSTACK *stk, Datum datum, int att_size,
RTSTATE *rtstate);
static InsertIndexResult rtdosplit(Relation r, Buffer buffer, RTSTACK *stack,
- IndexTuple itup, RTSTATE *rtstate);
+ IndexTuple itup, RTSTATE *rtstate);
static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup,
IndexTuple rtup, RTSTATE *rtstate);
static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt);
static void rtpicksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup,
- RTSTATE *rtstate);
+ RTSTATE *rtstate);
static void RTInitBuffer(Buffer b, uint32 f);
static OffsetNumber choose(Relation r, Page p, IndexTuple it,
RTSTATE *rtstate);
@@ -84,12 +84,14 @@ static void initRtstate(RTSTATE *rtstate, Relation index);
Datum
rtbuild(PG_FUNCTION_ARGS)
{
- Relation heap = (Relation) PG_GETARG_POINTER(0);
- Relation index = (Relation) PG_GETARG_POINTER(1);
- IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
- Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+ Relation heap = (Relation) PG_GETARG_POINTER(0);
+ Relation index = (Relation) PG_GETARG_POINTER(1);
+ IndexInfo *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
+ Node *oldPred = (Node *) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+ IndexStrategy istrat = (IndexStrategy) PG_GETARG_POINTER(4);
+
#endif
HeapScanDesc hscan;
HeapTuple htup;
@@ -101,9 +103,11 @@ rtbuild(PG_FUNCTION_ARGS)
int nhtups,
nitups;
Node *pred = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult res = NULL;
@@ -171,6 +175,7 @@ rtbuild(PG_FUNCTION_ARGS)
nhtups++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -232,9 +237,7 @@ rtbuild(PG_FUNCTION_ARGS)
#ifndef OMIT_PARTIAL_INDEX
if (pred != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -278,12 +281,14 @@ rtbuild(PG_FUNCTION_ARGS)
Datum
rtinsert(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- Datum *datum = (Datum *) PG_GETARG_POINTER(1);
- char *nulls = (char *) PG_GETARG_POINTER(2);
- ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ Datum *datum = (Datum *) PG_GETARG_POINTER(1);
+ char *nulls = (char *) PG_GETARG_POINTER(2);
+ ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
+
#ifdef NOT_USED
- Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+ Relation heapRel = (Relation) PG_GETARG_POINTER(4);
+
#endif
InsertIndexResult res;
IndexTuple itup;
@@ -412,7 +417,7 @@ rttighten(Relation r,
p = BufferGetPage(b);
oldud = IndexTupleGetDatum(PageGetItem(p,
- PageGetItemId(p, stk->rts_child)));
+ PageGetItemId(p, stk->rts_child)));
FunctionCall2(&rtstate->sizeFn, oldud,
PointerGetDatum(&old_size));
@@ -564,7 +569,7 @@ rtdosplit(Relation r,
res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData));
/* now insert the new index tuple */
- if (*spl_left == maxoff+1)
+ if (*spl_left == maxoff + 1)
{
if (PageAddItem(left, (Item) itup, IndexTupleSize(itup),
leftoff, LP_USED) == InvalidOffsetNumber)
@@ -576,7 +581,7 @@ rtdosplit(Relation r,
}
else
{
- Assert(*spl_right == maxoff+1);
+ Assert(*spl_right == maxoff + 1);
if (PageAddItem(right, (Item) itup, IndexTupleSize(itup),
rightoff, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "rtdosplit: failed to add index item to %s",
@@ -665,10 +670,10 @@ rtintinsert(Relation r,
old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child));
/*
- * This is a hack. Right now, we force rtree internal keys to be constant
- * size. To fix this, need delete the old key and add both left and
- * right for the two new pages. The insertion of left may force a
- * split if the new left key is bigger than the old key.
+ * This is a hack. Right now, we force rtree internal keys to be
+ * constant size. To fix this, need delete the old key and add both
+ * left and right for the two new pages. The insertion of left may
+ * force a split if the new left key is bigger than the old key.
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
@@ -734,7 +739,7 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
* We return two vectors of index item numbers, one for the items to be
* put on the left page, one for the items to be put on the right page.
* In addition, the item to be added (itup) is listed in the appropriate
- * vector. It is represented by item number N+1 (N = # of items on page).
+ * vector. It is represented by item number N+1 (N = # of items on page).
*
* Both vectors appear in sequence order with a terminating sentinel value
* of InvalidOffsetNumber.
@@ -747,9 +752,9 @@ rtnewroot(Relation r, IndexTuple lt, IndexTuple rt)
*
* We must also deal with a consideration not found in Guttman's algorithm:
* variable-length data. In particular, the incoming item might be
- * large enough that not just any split will work. In the worst case,
+ * large enough that not just any split will work. In the worst case,
* our "split" may have to be the new item on one page and all the existing
- * items on the other. Short of that, we have to take care that we do not
+ * items on the other. Short of that, we have to take care that we do not
* make a split that leaves both pages too full for the new item.
*/
static void
@@ -794,9 +799,10 @@ rtpicksplit(Relation r,
right_avail_space;
/*
- * First, make sure the new item is not so large that we can't possibly
- * fit it on a page, even by itself. (It's sufficient to make this test
- * here, since any oversize tuple must lead to a page split attempt.)
+ * First, make sure the new item is not so large that we can't
+ * possibly fit it on a page, even by itself. (It's sufficient to
+ * make this test here, since any oversize tuple must lead to a page
+ * split attempt.)
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
@@ -804,7 +810,8 @@ rtpicksplit(Relation r,
(unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
maxoff = PageGetMaxOffsetNumber(page);
- newitemoff = OffsetNumberNext(maxoff); /* phony index for new item */
+ newitemoff = OffsetNumberNext(maxoff); /* phony index for new
+ * item */
/* Make arrays big enough for worst case, including sentinel */
nbytes = (maxoff + 2) * sizeof(OffsetNumber);
@@ -827,8 +834,8 @@ rtpicksplit(Relation r,
item_2_sz = IndexTupleTotalSize(item_2);
/*
- * Ignore seed pairs that don't leave room for the new item
- * on either split page.
+ * Ignore seed pairs that don't leave room for the new item on
+ * either split page.
*/
if (newitemsz + item_1_sz > RTPageAvailSpace &&
newitemsz + item_2_sz > RTPageAvailSpace)
@@ -841,8 +848,10 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_union));
inter_d = FunctionCall2(&rtstate->interFn,
datum_alpha, datum_beta);
- /* The interFn may return a NULL pointer (not an SQL null!)
- * to indicate no intersection. sizeFn must cope with this.
+
+ /*
+ * The interFn may return a NULL pointer (not an SQL null!) to
+ * indicate no intersection. sizeFn must cope with this.
*/
FunctionCall2(&rtstate->sizeFn, inter_d,
PointerGetDatum(&size_inter));
@@ -869,6 +878,7 @@ rtpicksplit(Relation r,
if (firsttime)
{
+
/*
* There is no possible split except to put the new item on its
* own page. Since we still have to compute the union rectangles,
@@ -916,14 +926,14 @@ rtpicksplit(Relation r,
for (i = FirstOffsetNumber; i <= newitemoff; i = OffsetNumberNext(i))
{
- bool left_feasible,
- right_feasible,
- choose_left;
+ bool left_feasible,
+ right_feasible,
+ choose_left;
/*
* If we've already decided where to place this item, just put it
- * on the correct list. Otherwise, we need to figure out which page
- * needs the least enlargement in order to store the item.
+ * on the correct list. Otherwise, we need to figure out which
+ * page needs the least enlargement in order to store the item.
*/
if (i == seed_1)
@@ -961,12 +971,13 @@ rtpicksplit(Relation r,
PointerGetDatum(&size_beta));
/*
- * We prefer the page that shows smaller enlargement of its union area
- * (Guttman's algorithm), but we must take care that at least one page
- * will still have room for the new item after this one is added.
+ * We prefer the page that shows smaller enlargement of its union
+ * area (Guttman's algorithm), but we must take care that at least
+ * one page will still have room for the new item after this one
+ * is added.
*
- * (We know that all the old items together can fit on one page,
- * so we need not worry about any other problem than failing to fit
+ * (We know that all the old items together can fit on one page, so
+ * we need not worry about any other problem than failing to fit
* the new item.)
*/
left_feasible = (left_avail_space >= item_1_sz &&
@@ -987,7 +998,7 @@ rtpicksplit(Relation r,
else
{
elog(ERROR, "rtpicksplit: failed to find a workable page split");
- choose_left = false; /* keep compiler quiet */
+ choose_left = false;/* keep compiler quiet */
}
if (choose_left)
@@ -1012,7 +1023,7 @@ rtpicksplit(Relation r,
}
}
- *left = *right = InvalidOffsetNumber; /* add ending sentinels */
+ *left = *right = InvalidOffsetNumber; /* add ending sentinels */
v->spl_ldatum = datum_l;
v->spl_rdatum = datum_r;
@@ -1096,8 +1107,8 @@ freestack(RTSTACK *s)
Datum
rtdelete(PG_FUNCTION_ARGS)
{
- Relation r = (Relation) PG_GETARG_POINTER(0);
- ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
+ Relation r = (Relation) PG_GETARG_POINTER(0);
+ ItemPointer tid = (ItemPointer) PG_GETARG_POINTER(1);
BlockNumber blkno;
OffsetNumber offnum;
Buffer buf;
@@ -1203,14 +1214,14 @@ rtree_redo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "rtree_redo: unimplemented");
}
-
+
void
rtree_undo(XLogRecPtr lsn, XLogRecord *record)
{
elog(STOP, "rtree_undo: unimplemented");
}
-
+
void
-rtree_desc(char *buf, uint8 xl_info, char* rec)
+rtree_desc(char *buf, uint8 xl_info, char *rec)
{
}
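
The rtpicksplit() hunks above rewrap the comments on Guttman's split heuristic and its variable-length twist: each remaining item normally goes to the page whose bounding box would grow the least, but that preference is overridden when only one of the two pages still has room for the incoming item. A toy one-dimensional version of that decision, using hypothetical interval "bounding boxes" instead of real BOX datums:

#include <stdbool.h>
#include <stdio.h>

typedef struct Interval { double lo, hi; } Interval;

/* How much the interval's length grows if it must also cover item. */
static double enlargement(Interval box, Interval item)
{
    double lo = item.lo < box.lo ? item.lo : box.lo;
    double hi = item.hi > box.hi ? item.hi : box.hi;
    return (hi - lo) - (box.hi - box.lo);
}

/* Prefer the page whose union grows less, unless only one side is feasible. */
static bool choose_left(Interval lbox, Interval rbox, Interval item,
                        bool left_feasible, bool right_feasible)
{
    if (left_feasible && !right_feasible)
        return true;
    if (right_feasible && !left_feasible)
        return false;
    return enlargement(lbox, item) <= enlargement(rbox, item);
}

int main(void)
{
    Interval lbox = {0, 10}, rbox = {20, 30}, item = {11, 12};
    /* Left grows by 2, right by 9, both feasible: the item goes left. */
    printf("left? %d\n", choose_left(lbox, rbox, item, true, true));
    return 0;
}
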
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 605d51b5d33..f3e6d52fe67 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.35 2001/01/24 19:42:50 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.36 2001/03/22 03:59:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,9 +75,9 @@ rtbeginscan(PG_FUNCTION_ARGS)
Datum
rtrescan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
- bool fromEnd = PG_GETARG_BOOL(1);
- ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ bool fromEnd = PG_GETARG_BOOL(1);
+ ScanKey key = (ScanKey) PG_GETARG_POINTER(2);
RTreeScanOpaque p;
RegProcedure internal_proc;
int i;
@@ -162,7 +162,7 @@ rtrescan(PG_FUNCTION_ARGS)
Datum
rtmarkpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
RTSTACK *o,
*n,
@@ -198,7 +198,7 @@ rtmarkpos(PG_FUNCTION_ARGS)
Datum
rtrestrpos(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
RTSTACK *o,
*n,
@@ -234,7 +234,7 @@ rtrestrpos(PG_FUNCTION_ARGS)
Datum
rtendscan(PG_FUNCTION_ARGS)
{
- IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
+ IndexScanDesc s = (IndexScanDesc) PG_GETARG_POINTER(0);
RTreeScanOpaque p;
p = (RTreeScanOpaque) s->opaque;
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index b25db74da8c..625b0db3202 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -9,21 +9,21 @@
#include "storage/smgr.h"
#include "commands/sequence.h"
-RmgrData RmgrTable[] = {
-{"XLOG", xlog_redo, xlog_undo, xlog_desc},
-{"Transaction", xact_redo, xact_undo, xact_desc},
-{"Storage", smgr_redo, smgr_undo, smgr_desc},
-{"Reserved 3", NULL, NULL, NULL},
-{"Reserved 4", NULL, NULL, NULL},
-{"Reserved 5", NULL, NULL, NULL},
-{"Reserved 6", NULL, NULL, NULL},
-{"Reserved 7", NULL, NULL, NULL},
-{"Reserved 8", NULL, NULL, NULL},
-{"Reserved 9", NULL, NULL, NULL},
-{"Heap", heap_redo, heap_undo, heap_desc},
-{"Btree", btree_redo, btree_undo, btree_desc},
-{"Hash", hash_redo, hash_undo, hash_desc},
-{"Rtree", rtree_redo, rtree_undo, rtree_desc},
-{"Gist", gist_redo, gist_undo, gist_desc},
-{"Sequence", seq_redo, seq_undo, seq_desc}
+RmgrData RmgrTable[] = {
+ {"XLOG", xlog_redo, xlog_undo, xlog_desc},
+ {"Transaction", xact_redo, xact_undo, xact_desc},
+ {"Storage", smgr_redo, smgr_undo, smgr_desc},
+ {"Reserved 3", NULL, NULL, NULL},
+ {"Reserved 4", NULL, NULL, NULL},
+ {"Reserved 5", NULL, NULL, NULL},
+ {"Reserved 6", NULL, NULL, NULL},
+ {"Reserved 7", NULL, NULL, NULL},
+ {"Reserved 8", NULL, NULL, NULL},
+ {"Reserved 9", NULL, NULL, NULL},
+ {"Heap", heap_redo, heap_undo, heap_desc},
+ {"Btree", btree_redo, btree_undo, btree_desc},
+ {"Hash", hash_redo, hash_undo, hash_desc},
+ {"Rtree", rtree_redo, rtree_undo, rtree_desc},
+ {"Gist", gist_redo, gist_undo, gist_desc},
+ {"Sequence", seq_redo, seq_undo, seq_desc}
};
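
The table above pairs each resource-manager slot with its redo/undo/describe callbacks, and recovery dispatches by using a WAL record's rmgr id as an index into it. A minimal stand-alone analogue of that dispatch, with invented record and table types (not the backend's own), might look like this:

    #include <stdio.h>
    #include <stdint.h>

    typedef struct DemoRecord { uint8_t rmid; uint8_t info; } DemoRecord;

    typedef struct DemoRmgr
    {
        const char *name;
        void (*redo) (const DemoRecord *rec);
        void (*desc) (char *buf, const DemoRecord *rec);
    } DemoRmgr;

    static void heap_demo_redo(const DemoRecord *rec)
    {
        printf("heap redo, info %u\n", (unsigned) rec->info);
    }

    static void heap_demo_desc(char *buf, const DemoRecord *rec)
    {
        sprintf(buf, "heap op %u", (unsigned) rec->info);
    }

    /* one slot per rmgr id, exactly like the table in the diff */
    static const DemoRmgr DemoRmgrTable[] = {
        {"XLOG", NULL, NULL},
        {"Heap", heap_demo_redo, heap_demo_desc},
    };

    int
    main(void)
    {
        DemoRecord rec = {1, 7};        /* rmid 1 selects the "Heap" slot */
        char buf[64] = "";

        DemoRmgrTable[rec.rmid].redo(&rec);
        DemoRmgrTable[rec.rmid].desc(buf, &rec);
        printf("%s: %s\n", DemoRmgrTable[rec.rmid].name, buf);
        return 0;
    }
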
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 64289057926..29e72e84175 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.41 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.42 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -427,8 +427,8 @@ InitializeTransactionLog(void)
TransactionLogUpdate(AmiTransactionId, XID_COMMIT);
TransactionIdStore(AmiTransactionId, &cachedTestXid);
cachedTestXidStatus = XID_COMMIT;
- Assert(!IsUnderPostmaster &&
- ShmemVariableCache->nextXid <= FirstTransactionId);
+ Assert(!IsUnderPostmaster &&
+ ShmemVariableCache->nextXid <= FirstTransactionId);
ShmemVariableCache->nextXid = FirstTransactionId;
}
else if (RecoveryCheckingEnabled())
diff --git a/src/backend/access/transam/transsup.c b/src/backend/access/transam/transsup.c
index e4ff7979cf9..c433506eae6 100644
--- a/src/backend/access/transam/transsup.c
+++ b/src/backend/access/transam/transsup.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.28 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.29 2001/03/22 03:59:17 momjian Exp $
*
* NOTES
* This file contains support functions for the high
@@ -186,7 +186,7 @@ TransBlockGetXidStatus(Block tblock,
bits8 bit2;
BitIndex offset;
- tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
+ tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
@@ -229,7 +229,7 @@ TransBlockSetXidStatus(Block tblock,
Index index;
BitIndex offset;
- tblock = (Block) ((char*) tblock + sizeof(XLogRecPtr));
+ tblock = (Block) ((char *) tblock + sizeof(XLogRecPtr));
/* ----------------
* calculate the index into the transaction data where
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index d6097b2567c..34c607eab9f 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.37 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.38 2001/03/22 03:59:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -23,8 +23,8 @@
#define VAR_OID_PREFETCH 8192
/* Spinlocks for serializing generation of XIDs and OIDs, respectively */
-SPINLOCK XidGenLockId;
-SPINLOCK OidGenLockId;
+SPINLOCK XidGenLockId;
+SPINLOCK OidGenLockId;
/* pointer to "variable cache" in shared memory (set up by shmem.c) */
VariableCache ShmemVariableCache = NULL;
@@ -32,9 +32,10 @@ VariableCache ShmemVariableCache = NULL;
void
GetNewTransactionId(TransactionId *xid)
{
+
/*
- * During bootstrap initialization, we return the special
- * bootstrap transaction id.
+ * During bootstrap initialization, we return the special bootstrap
+ * transaction id.
*/
if (AMI_OVERRIDE)
{
@@ -60,9 +61,10 @@ GetNewTransactionId(TransactionId *xid)
void
ReadNewTransactionId(TransactionId *xid)
{
+
/*
- * During bootstrap initialization, we return the special
- * bootstrap transaction id.
+ * During bootstrap initialization, we return the special bootstrap
+ * transaction id.
*/
if (AMI_OVERRIDE)
{
@@ -80,7 +82,7 @@ ReadNewTransactionId(TransactionId *xid)
* ----------------------------------------------------------------
*/
-static Oid lastSeenOid = InvalidOid;
+static Oid lastSeenOid = InvalidOid;
void
GetNewObjectId(Oid *oid_return)
@@ -119,10 +121,10 @@ CheckMaxObjectId(Oid assigned_oid)
}
/* If we are in the logged oid range, just bump nextOid up */
- if (assigned_oid <= ShmemVariableCache->nextOid +
- ShmemVariableCache->oidCount - 1)
+ if (assigned_oid <= ShmemVariableCache->nextOid +
+ ShmemVariableCache->oidCount - 1)
{
- ShmemVariableCache->oidCount -=
+ ShmemVariableCache->oidCount -=
assigned_oid - ShmemVariableCache->nextOid + 1;
ShmemVariableCache->nextOid = assigned_oid + 1;
SpinRelease(OidGenLockId);
@@ -130,10 +132,9 @@ CheckMaxObjectId(Oid assigned_oid)
}
/*
- * We have exceeded the logged oid range.
- * We should lock the database and kill all other backends
- * but we are loading oid's that we can not guarantee are unique
- * anyway, so we must rely on the user.
+ * We have exceeded the logged oid range. We should lock the database
+ * and kill all other backends but we are loading oid's that we can
+ * not guarantee are unique anyway, so we must rely on the user.
*/
XLogPutNextOid(assigned_oid + VAR_OID_PREFETCH);
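
The hunks above show the prefetch idea behind OID assignment: ids are consumed from a counted range whose upper bound has already been made durable, and a new ceiling (current value plus VAR_OID_PREFETCH) is logged only when the range runs out or is overtaken. A toy, stand-alone version of that scheme — log_next_oid() just prints, and all names here are invented — is sketched below.

    #include <stdio.h>
    #include <stdint.h>

    #define DEMO_OID_PREFETCH 8192

    static uint32_t next_oid = 1;
    static uint32_t oid_count = 0;      /* ids still covered by the logged ceiling */

    static void
    log_next_oid(uint32_t ceiling)
    {
        /* stands in for the WAL record written by the real code */
        printf("log: next oid ceiling %u\n", ceiling);
    }

    static uint32_t
    get_new_oid(void)
    {
        if (oid_count == 0)
        {
            log_next_oid(next_oid + DEMO_OID_PREFETCH);
            oid_count = DEMO_OID_PREFETCH;
        }
        oid_count--;
        return next_oid++;
    }

    int
    main(void)
    {
        for (int i = 0; i < 3; i++)
            printf("assigned %u\n", get_new_oid());
        return 0;
    }
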
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 1331c8e9834..6a8e6c0639f 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.99 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.100 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -222,9 +222,10 @@ int DefaultXactIsoLevel = XACT_READ_COMMITTED;
int XactIsoLevel;
int CommitDelay = 0; /* precommit delay in microseconds */
-int CommitSiblings = 5; /* number of concurrent xacts needed to sleep */
+int CommitSiblings = 5; /* number of concurrent xacts needed to
+ * sleep */
-static void (*_RollbackFunc)(void*) = NULL;
+static void (*_RollbackFunc) (void *) = NULL;
static void *_RollbackData = NULL;
/* ----------------
@@ -666,39 +667,40 @@ RecordTransactionCommit()
if (MyLastRecPtr.xrecoff != 0)
{
- XLogRecData rdata;
- xl_xact_commit xlrec;
- XLogRecPtr recptr;
+ XLogRecData rdata;
+ xl_xact_commit xlrec;
+ XLogRecPtr recptr;
BufmgrCommit();
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&xlrec);
+ rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactCommit;
rdata.next = NULL;
START_CRIT_SECTION();
+
/*
* SHOULD SAVE ARRAY OF RELFILENODE-s TO DROP
*/
recptr = XLogInsert(RM_XACT_ID, XLOG_XACT_COMMIT, &rdata);
- /*
- * Sleep before commit! So we can flush more than one
- * commit records per single fsync. (The idea is some other
- * backend may do the XLogFlush while we're sleeping. This
- * needs work still, because on most Unixen, the minimum
- * select() delay is 10msec or more, which is way too long.)
+ /*
+ * Sleep before commit! So we can flush more than one commit
+ * records per single fsync. (The idea is some other backend may
+ * do the XLogFlush while we're sleeping. This needs work still,
+ * because on most Unixen, the minimum select() delay is 10msec or
+ * more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there
- * are fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there are
+ * fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
CountActiveBackends() >= CommitSiblings)
{
- struct timeval delay;
+ struct timeval delay;
delay.tv_sec = 0;
delay.tv_usec = CommitDelay;
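
The comment in this hunk explains the pre-commit sleep: by pausing briefly before flushing, several backends' commit records can ride on a single fsync, with the caveat that many platforms cannot select() for less than about 10 msec. A minimal stand-alone version of that microsecond pause, using select() with no file descriptors just as the code above does (the surrounding program and the 5000-usec value are only for the demo):

    #include <stdio.h>
    #include <sys/select.h>
    #include <sys/time.h>

    /* pause for roughly usec microseconds, as the backend's commit delay does */
    static void
    commit_delay_usec(long usec)
    {
        struct timeval delay;

        delay.tv_sec = 0;
        delay.tv_usec = usec;
        (void) select(0, NULL, NULL, NULL, &delay);
    }

    int
    main(void)
    {
        printf("sleeping ~5000 usec before the pretend fsync...\n");
        commit_delay_usec(5000);
        printf("flushed\n");
        return 0;
    }
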
@@ -812,13 +814,13 @@ RecordTransactionAbort(void)
*/
if (MyLastRecPtr.xrecoff != 0 && !TransactionIdDidCommit(xid))
{
- XLogRecData rdata;
- xl_xact_abort xlrec;
- XLogRecPtr recptr;
+ XLogRecData rdata;
+ xl_xact_abort xlrec;
+ XLogRecPtr recptr;
xlrec.xtime = time(NULL);
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&xlrec);
+ rdata.data = (char *) (&xlrec);
rdata.len = SizeOfXactAbort;
rdata.next = NULL;
@@ -879,7 +881,7 @@ AtAbort_Memory(void)
{
/* ----------------
* Make sure we are in a valid context (not a child of
- * TransactionCommandContext...). Note that it is possible
+ * TransactionCommandContext...). Note that it is possible
* for this code to be called when we aren't in a transaction
* at all; go directly to TopMemoryContext in that case.
* ----------------
@@ -896,9 +898,7 @@ AtAbort_Memory(void)
MemoryContextResetAndDeleteChildren(TransactionCommandContext);
}
else
- {
MemoryContextSwitchTo(TopMemoryContext);
- }
}
@@ -1021,6 +1021,7 @@ CurrentXactInProgress(void)
{
return CurrentTransactionState->state == TRANS_INPROGRESS;
}
+
#endif
/* --------------------------------
@@ -1106,7 +1107,7 @@ CommitTransaction(void)
AtCommit_Memory();
AtEOXact_Files();
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* done with commit processing, set current transaction
@@ -1143,15 +1144,16 @@ AbortTransaction(void)
/*
* Release any spinlocks or buffer context locks we might be holding
- * as quickly as possible. (Real locks, however, must be held till
- * we finish aborting.) Releasing spinlocks is critical since we
- * might try to grab them again while cleaning up!
+ * as quickly as possible. (Real locks, however, must be held till we
+ * finish aborting.) Releasing spinlocks is critical since we might
+ * try to grab them again while cleaning up!
*/
ProcReleaseSpins(NULL);
UnlockBuffers();
+
/*
- * Also clean up any open wait for lock, since the lock manager
- * will choke if we try to wait for another lock before doing this.
+ * Also clean up any open wait for lock, since the lock manager will
+ * choke if we try to wait for another lock before doing this.
*/
LockWaitCancel();
@@ -1203,7 +1205,7 @@ AbortTransaction(void)
AtEOXact_Files();
AtAbort_Locks();
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
/* ----------------
* State remains TRANS_ABORT until CleanupTransaction().
@@ -1327,8 +1329,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TransactionCommandContext before returning.
- * This is already done if we called StartTransaction, otherwise not.
+ * We must switch to TransactionCommandContext before returning. This
+ * is already done if we called StartTransaction, otherwise not.
*/
Assert(TransactionCommandContext != NULL);
MemoryContextSwitchTo(TransactionCommandContext);
@@ -1757,7 +1759,7 @@ IsTransactionBlock(void)
void
xact_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
@@ -1765,9 +1767,7 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
/* SHOULD REMOVE FILES OF ALL DROPPED RELATIONS */
}
else if (info == XLOG_XACT_ABORT)
- {
TransactionIdAbort(record->xl_xid);
- }
else
elog(STOP, "xact_redo: unknown op code %u", info);
}
@@ -1775,43 +1775,43 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record)
void
xact_undo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
- if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
+ if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
elog(STOP, "xact_undo: can't undo committed xaction");
else if (info != XLOG_XACT_ABORT)
elog(STOP, "xact_redo: unknown op code %u", info);
}
-
+
void
-xact_desc(char *buf, uint8 xl_info, char* rec)
+xact_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
- xl_xact_commit *xlrec = (xl_xact_commit*) rec;
- struct tm *tm = localtime(&xlrec->xtime);
+ xl_xact_commit *xlrec = (xl_xact_commit *) rec;
+ struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
}
else if (info == XLOG_XACT_ABORT)
{
- xl_xact_abort *xlrec = (xl_xact_abort*) rec;
- struct tm *tm = localtime(&xlrec->xtime);
+ xl_xact_abort *xlrec = (xl_xact_abort *) rec;
+ struct tm *tm = localtime(&xlrec->xtime);
sprintf(buf + strlen(buf), "abort: %04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
}
else
strcat(buf, "UNKNOWN");
}
void
-XactPushRollback(void (*func) (void *), void* data)
+ XactPushRollback(void (*func) (void *), void *data)
{
#ifdef XLOG_II
if (_RollbackFunc != NULL)
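
For reference, the xact_desc changes above only rewrap how a commit or abort record's timestamp is rendered: localtime() on the record's xtime, then an appending sprintf(). A self-contained sketch of the same formatting, with an invented buffer size and the current time as input:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    static void
    describe_commit(char *buf, time_t xtime)
    {
        struct tm *tm = localtime(&xtime);

        sprintf(buf + strlen(buf), "commit: %04u-%02u-%02u %02u:%02u:%02u",
                tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
                tm->tm_hour, tm->tm_min, tm->tm_sec);
    }

    int
    main(void)
    {
        char buf[128] = "";

        describe_commit(buf, time(NULL));
        printf("%s\n", buf);
        return 0;
    }
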
diff --git a/src/backend/access/transam/xid.c b/src/backend/access/transam/xid.c
index 6ee28d1a2b0..624d6da850c 100644
--- a/src/backend/access/transam/xid.c
+++ b/src/backend/access/transam/xid.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: xid.c,v 1.29 2001/01/24 19:42:51 momjian Exp $
+ * $Id: xid.c,v 1.30 2001/03/22 03:59:18 momjian Exp $
*
* OLD COMMENTS
* XXX WARNING
@@ -26,8 +26,8 @@
/*
* TransactionId is typedef'd as uint32, so...
*/
-#define PG_GETARG_TRANSACTIONID(n) PG_GETARG_UINT32(n)
-#define PG_RETURN_TRANSACTIONID(x) PG_RETURN_UINT32(x)
+#define PG_GETARG_TRANSACTIONID(n) PG_GETARG_UINT32(n)
+#define PG_RETURN_TRANSACTIONID(x) PG_RETURN_UINT32(x)
extern TransactionId NullTransactionId;
@@ -49,6 +49,7 @@ Datum
xidout(PG_FUNCTION_ARGS)
{
TransactionId transactionId = PG_GETARG_TRANSACTIONID(0);
+
/* maximum 32 bit unsigned integer representation takes 10 chars */
char *representation = palloc(11);
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 9994025dd69..59d783264bb 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.62 2001/03/18 20:18:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.63 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,57 +45,60 @@
/*
* This chunk of hackery attempts to determine which file sync methods
* are available on the current platform, and to choose an appropriate
- * default method. We assume that fsync() is always available, and that
+ * default method. We assume that fsync() is always available, and that
* configure determined whether fdatasync() is.
*/
#define SYNC_METHOD_FSYNC 0
#define SYNC_METHOD_FDATASYNC 1
-#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and O_DSYNC */
+#define SYNC_METHOD_OPEN 2 /* used for both O_SYNC and
+ * O_DSYNC */
#if defined(O_SYNC)
-# define OPEN_SYNC_FLAG O_SYNC
+#define OPEN_SYNC_FLAG O_SYNC
#else
-# if defined(O_FSYNC)
-# define OPEN_SYNC_FLAG O_FSYNC
-# endif
+#if defined(O_FSYNC)
+#define OPEN_SYNC_FLAG O_FSYNC
+#endif
#endif
#if defined(OPEN_SYNC_FLAG)
-# if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
-# define OPEN_DATASYNC_FLAG O_DSYNC
-# endif
+#if defined(O_DSYNC) && (O_DSYNC != OPEN_SYNC_FLAG)
+#define OPEN_DATASYNC_FLAG O_DSYNC
+#endif
#endif
#if defined(OPEN_DATASYNC_FLAG)
-# define DEFAULT_SYNC_METHOD_STR "open_datasync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
-# define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
+#define DEFAULT_SYNC_METHOD_STR "open_datasync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_OPEN
+#define DEFAULT_SYNC_FLAGBIT OPEN_DATASYNC_FLAG
#else
-# if defined(HAVE_FDATASYNC)
-# define DEFAULT_SYNC_METHOD_STR "fdatasync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
-# define DEFAULT_SYNC_FLAGBIT 0
-# else
-# define DEFAULT_SYNC_METHOD_STR "fsync"
-# define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
-# define DEFAULT_SYNC_FLAGBIT 0
-# endif
+#if defined(HAVE_FDATASYNC)
+#define DEFAULT_SYNC_METHOD_STR "fdatasync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FDATASYNC
+#define DEFAULT_SYNC_FLAGBIT 0
+#else
+#define DEFAULT_SYNC_METHOD_STR "fsync"
+#define DEFAULT_SYNC_METHOD SYNC_METHOD_FSYNC
+#define DEFAULT_SYNC_FLAGBIT 0
+#endif
#endif
/* Max time to wait to acquire XLog activity locks */
-#define XLOG_LOCK_TIMEOUT (5*60*1000000) /* 5 minutes */
+#define XLOG_LOCK_TIMEOUT (5*60*1000000) /* 5 minutes */
/* Max time to wait to acquire checkpoint lock */
-#define CHECKPOINT_LOCK_TIMEOUT (20*60*1000000) /* 20 minutes */
+#define CHECKPOINT_LOCK_TIMEOUT (20*60*1000000) /* 20 minutes */
/* User-settable parameters */
int CheckPointSegments = 3;
int XLOGbuffers = 8;
-int XLOGfiles = 0; /* how many files to pre-allocate during ckpt */
+int XLOGfiles = 0; /* how many files to pre-allocate during
+ * ckpt */
int XLOG_DEBUG = 0;
char *XLOG_sync_method = NULL;
const char XLOG_sync_method_default[] = DEFAULT_SYNC_METHOD_STR;
-char XLOG_archive_dir[MAXPGPATH]; /* null string means delete 'em */
+char XLOG_archive_dir[MAXPGPATH]; /* null string means
+ * delete 'em */
/* these are derived from XLOG_sync_method by assign_xlog_sync_method */
static int sync_method = DEFAULT_SYNC_METHOD;
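
The preprocessor cascade reindented above picks a default WAL sync method: an O_DSYNC open flag when it exists and differs from the full-sync flag, otherwise fdatasync() if configure found it, otherwise plain fsync(). The same decision can be reproduced as a small stand-alone program; here HAVE_FDATASYNC merely mimics the configure test (define it on the compile line if your platform has fdatasync()), and the DEMO_ names are not the backend's.

    #include <stdio.h>
    #include <fcntl.h>

    #if defined(O_SYNC)
    #define DEMO_OPEN_SYNC_FLAG O_SYNC
    #elif defined(O_FSYNC)
    #define DEMO_OPEN_SYNC_FLAG O_FSYNC
    #endif

    #if defined(DEMO_OPEN_SYNC_FLAG) && defined(O_DSYNC) && (O_DSYNC != DEMO_OPEN_SYNC_FLAG)
    #define DEMO_DEFAULT_METHOD "open_datasync"
    #elif defined(HAVE_FDATASYNC)
    #define DEMO_DEFAULT_METHOD "fdatasync"
    #else
    #define DEMO_DEFAULT_METHOD "fsync"
    #endif

    int
    main(void)
    {
        printf("default wal sync method here: %s\n", DEMO_DEFAULT_METHOD);
        return 0;
    }
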
@@ -135,7 +138,7 @@ static XLogRecPtr ProcLastRecPtr = {0, 0};
/*
* RedoRecPtr is this backend's local copy of the REDO record pointer
* (which is almost but not quite the same as a pointer to the most recent
- * CHECKPOINT record). We update this from the shared-memory copy,
+ * CHECKPOINT record). We update this from the shared-memory copy,
* XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
* hold the Insert spinlock). See XLogInsert for details.
*/
@@ -164,12 +167,12 @@ SPINLOCK ControlFileLockId;
*
* XLogCtl->LogwrtResult and XLogCtl->Write.LogwrtResult are both "always
* right", since both are updated by a write or flush operation before
- * it releases logwrt_lck. The point of keeping XLogCtl->Write.LogwrtResult
+ * it releases logwrt_lck. The point of keeping XLogCtl->Write.LogwrtResult
* is that it can be examined/modified by code that already holds logwrt_lck
* without needing to grab info_lck as well.
*
* XLogCtl->Insert.LogwrtResult may lag behind the reality of the other two,
- * but is updated when convenient. Again, it exists for the convenience of
+ * but is updated when convenient. Again, it exists for the convenience of
* code that is already holding insert_lck but not the other locks.
*
* The unshared LogwrtResult may lag behind any or all of these, and again
@@ -187,25 +190,25 @@ typedef struct XLogwrtRqst
{
XLogRecPtr Write; /* last byte + 1 to write out */
XLogRecPtr Flush; /* last byte + 1 to flush */
-} XLogwrtRqst;
+} XLogwrtRqst;
typedef struct XLogwrtResult
{
XLogRecPtr Write; /* last byte + 1 written out */
XLogRecPtr Flush; /* last byte + 1 flushed */
-} XLogwrtResult;
+} XLogwrtResult;
/*
* Shared state data for XLogInsert.
*/
typedef struct XLogCtlInsert
{
- XLogwrtResult LogwrtResult; /* a recent value of LogwrtResult */
- XLogRecPtr PrevRecord; /* start of previously-inserted record */
- uint16 curridx; /* current block index in cache */
- XLogPageHeader currpage; /* points to header of block in cache */
- char *currpos; /* current insertion point in cache */
- XLogRecPtr RedoRecPtr; /* current redo point for insertions */
+ XLogwrtResult LogwrtResult; /* a recent value of LogwrtResult */
+ XLogRecPtr PrevRecord; /* start of previously-inserted record */
+ uint16 curridx; /* current block index in cache */
+ XLogPageHeader currpage; /* points to header of block in cache */
+ char *currpos; /* current insertion point in cache */
+ XLogRecPtr RedoRecPtr; /* current redo point for insertions */
} XLogCtlInsert;
/*
@@ -213,8 +216,8 @@ typedef struct XLogCtlInsert
*/
typedef struct XLogCtlWrite
{
- XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
- uint16 curridx; /* cache index of next block to write */
+ XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
+ uint16 curridx; /* cache index of next block to write */
} XLogCtlWrite;
/*
@@ -223,30 +226,31 @@ typedef struct XLogCtlWrite
typedef struct XLogCtlData
{
/* Protected by insert_lck: */
- XLogCtlInsert Insert;
+ XLogCtlInsert Insert;
/* Protected by info_lck: */
- XLogwrtRqst LogwrtRqst;
- XLogwrtResult LogwrtResult;
+ XLogwrtRqst LogwrtRqst;
+ XLogwrtResult LogwrtResult;
/* Protected by logwrt_lck: */
- XLogCtlWrite Write;
+ XLogCtlWrite Write;
+
/*
* These values do not change after startup, although the pointed-to
- * pages and xlblocks values certainly do. Permission to read/write
+ * pages and xlblocks values certainly do. Permission to read/write
* the pages and xlblocks values depends on insert_lck and logwrt_lck.
*/
- char *pages; /* buffers for unwritten XLOG pages */
- XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
- uint32 XLogCacheByte; /* # bytes in xlog buffers */
- uint32 XLogCacheBlck; /* highest allocated xlog buffer index */
- StartUpID ThisStartUpID;
+ char *pages; /* buffers for unwritten XLOG pages */
+ XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
+ uint32 XLogCacheByte; /* # bytes in xlog buffers */
+ uint32 XLogCacheBlck; /* highest allocated xlog buffer index */
+ StartUpID ThisStartUpID;
/* This value is not protected by *any* spinlock... */
- XLogRecPtr RedoRecPtr; /* see SetRedoRecPtr/GetRedoRecPtr */
+ XLogRecPtr RedoRecPtr; /* see SetRedoRecPtr/GetRedoRecPtr */
- slock_t insert_lck; /* XLogInsert lock */
- slock_t info_lck; /* locks shared LogwrtRqst/LogwrtResult */
- slock_t logwrt_lck; /* XLogWrite/XLogFlush lock */
- slock_t chkp_lck; /* checkpoint lock */
+ slock_t insert_lck; /* XLogInsert lock */
+ slock_t info_lck; /* locks shared LogwrtRqst/LogwrtResult */
+ slock_t logwrt_lck; /* XLogWrite/XLogFlush lock */
+ slock_t chkp_lck; /* checkpoint lock */
} XLogCtlData;
static XLogCtlData *XLogCtl = NULL;
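
The comments around XLogCtlData describe a layering of spinlocks (insert_lck, info_lck, logwrt_lck, chkp_lck) plus private, possibly stale copies of LogwrtResult that are refreshed opportunistically — for example with a conditional TAS on info_lck rather than a blocking acquire. A toy model of that "try-lock to refresh a cached copy, otherwise keep the stale one" pattern, using pthread mutexes and invented names (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    typedef struct DemoResult { unsigned write_pos; unsigned flush_pos; } DemoResult;

    static pthread_mutex_t info_lck = PTHREAD_MUTEX_INITIALIZER;
    static DemoResult shared_result = {0, 0};   /* authoritative, under info_lck */

    static DemoResult local_result = {0, 0};    /* private copy, may lag */

    static void
    maybe_refresh_local(void)
    {
        if (pthread_mutex_trylock(&info_lck) == 0)
        {
            local_result = shared_result;       /* cheap struct copy under the lock */
            pthread_mutex_unlock(&info_lck);
        }
        /* else: keep using the stale copy rather than blocking */
    }

    int
    main(void)
    {
        pthread_mutex_lock(&info_lck);
        shared_result.write_pos = 8192;
        shared_result.flush_pos = 4096;
        pthread_mutex_unlock(&info_lck);

        maybe_refresh_local();
        printf("local view: write %u, flush %u\n",
               local_result.write_pos, local_result.flush_pos);
        return 0;
    }
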
@@ -271,7 +275,7 @@ static ControlFileData *ControlFile = NULL;
( \
(recptr).xlogid = XLogCtl->xlblocks[curridx].xlogid, \
(recptr).xrecoff = \
- XLogCtl->xlblocks[curridx].xrecoff - INSERT_FREESPACE(Insert) \
+ XLogCtl->xlblocks[curridx].xrecoff - INSERT_FREESPACE(Insert) \
)
@@ -303,7 +307,7 @@ static ControlFileData *ControlFile = NULL;
* Compute ID and segment from an XLogRecPtr.
*
* For XLByteToSeg, do the computation at face value. For XLByteToPrevSeg,
- * a boundary byte is taken to be in the previous segment. This is suitable
+ * a boundary byte is taken to be in the previous segment. This is suitable
* for deciding which segment to write given a pointer to a record end,
* for example.
*/
@@ -354,8 +358,8 @@ static ControlFileData *ControlFile = NULL;
/* File path names */
-static char XLogDir[MAXPGPATH];
-static char ControlFilePath[MAXPGPATH];
+static char XLogDir[MAXPGPATH];
+static char ControlFilePath[MAXPGPATH];
/*
* Private, possibly out-of-date copy of shared LogwrtResult.
@@ -384,8 +388,10 @@ static int readFile = -1;
static uint32 readId = 0;
static uint32 readSeg = 0;
static uint32 readOff = 0;
+
/* Buffer for currently read page (BLCKSZ bytes) */
static char *readBuf = NULL;
+
/* State information for XLOG reading */
static XLogRecPtr ReadRecPtr;
static XLogRecPtr EndRecPtr;
@@ -397,16 +403,16 @@ static bool InRedo = false;
static bool AdvanceXLInsertBuffer(void);
static void XLogWrite(XLogwrtRqst WriteRqst);
-static int XLogFileInit(uint32 log, uint32 seg,
- bool *use_existent, bool use_lock);
+static int XLogFileInit(uint32 log, uint32 seg,
+ bool *use_existent, bool use_lock);
static int XLogFileOpen(uint32 log, uint32 seg, bool econt);
static void PreallocXlogFiles(XLogRecPtr endptr);
static void MoveOfflineLogs(uint32 log, uint32 seg);
static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer);
static bool ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI);
static XLogRecord *ReadCheckpointRecord(XLogRecPtr RecPtr,
- const char *whichChkpt,
- char *buffer);
+ const char *whichChkpt,
+ char *buffer);
static void WriteControlFile(void);
static void ReadControlFile(void);
static char *str_time(time_t tnow);
@@ -432,44 +438,44 @@ static void issue_xlog_fsync(void);
XLogRecPtr
XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
{
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecord *record;
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ XLogRecord *record;
XLogContRecord *contrecord;
- XLogRecPtr RecPtr;
- XLogRecPtr WriteRqst;
- uint32 freespace;
- uint16 curridx;
- XLogRecData *rdt;
- Buffer dtbuf[XLR_MAX_BKP_BLOCKS];
- bool dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
- BkpBlock dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
- XLogRecPtr dtbuf_lsn[XLR_MAX_BKP_BLOCKS];
- XLogRecData dtbuf_rdt[2 * XLR_MAX_BKP_BLOCKS];
- crc64 rdata_crc;
- uint32 len,
- write_len;
- unsigned i;
- bool do_logwrt;
- bool updrqst;
- bool no_tran = (rmid == RM_XLOG_ID) ? true : false;
+ XLogRecPtr RecPtr;
+ XLogRecPtr WriteRqst;
+ uint32 freespace;
+ uint16 curridx;
+ XLogRecData *rdt;
+ Buffer dtbuf[XLR_MAX_BKP_BLOCKS];
+ bool dtbuf_bkp[XLR_MAX_BKP_BLOCKS];
+ BkpBlock dtbuf_xlg[XLR_MAX_BKP_BLOCKS];
+ XLogRecPtr dtbuf_lsn[XLR_MAX_BKP_BLOCKS];
+ XLogRecData dtbuf_rdt[2 * XLR_MAX_BKP_BLOCKS];
+ crc64 rdata_crc;
+ uint32 len,
+ write_len;
+ unsigned i;
+ bool do_logwrt;
+ bool updrqst;
+ bool no_tran = (rmid == RM_XLOG_ID) ? true : false;
if (info & XLR_INFO_MASK)
{
if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
- elog(STOP, "XLogInsert: invalid info mask %02X",
+ elog(STOP, "XLogInsert: invalid info mask %02X",
(info & XLR_INFO_MASK));
no_tran = true;
info &= ~XLR_INFO_MASK;
}
/*
- * In bootstrap mode, we don't actually log anything but XLOG resources;
- * return a phony record pointer.
+ * In bootstrap mode, we don't actually log anything but XLOG
+ * resources; return a phony record pointer.
*/
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
RecPtr.xlogid = 0;
- RecPtr.xrecoff = SizeOfXLogPHD; /* start of 1st checkpoint record */
+ RecPtr.xrecoff = SizeOfXLogPHD; /* start of 1st checkpoint record */
return (RecPtr);
}
@@ -479,16 +485,17 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* header isn't added into the CRC yet since we don't know the final
* length or info bits quite yet.
*
- * We may have to loop back to here if a race condition is detected below.
- * We could prevent the race by doing all this work while holding the
- * insert spinlock, but it seems better to avoid doing CRC calculations
- * while holding the lock. This means we have to be careful about
- * modifying the rdata list until we know we aren't going to loop back
- * again. The only change we allow ourselves to make earlier is to set
- * rdt->data = NULL in list items we have decided we will have to back
- * up the whole buffer for. This is OK because we will certainly decide
- * the same thing again for those items if we do it over; doing it here
- * saves an extra pass over the list later.
+ * We may have to loop back to here if a race condition is detected
+ * below. We could prevent the race by doing all this work while
+ * holding the insert spinlock, but it seems better to avoid doing CRC
+ * calculations while holding the lock. This means we have to be
+ * careful about modifying the rdata list until we know we aren't
+ * going to loop back again. The only change we allow ourselves to
+ * make earlier is to set rdt->data = NULL in list items we have
+ * decided we will have to back up the whole buffer for. This is OK
+ * because we will certainly decide the same thing again for those
+ * items if we do it over; doing it here saves an extra pass over the
+ * list later.
*/
begin:;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
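
The long comment rewrapped above describes an optimistic pattern: do the expensive CRC work before taking the insert spinlock, then check whether the state it depended on (RedoRecPtr) moved, and if so loop back to begin. The shape of that retry loop, reduced to a stand-alone skeleton — the locking is elided into comments, a plain epoch counter stands in for RedoRecPtr, and the checksum is a trivial stand-in, so this is only a sketch of the control flow, not the backend's algorithm:

    #include <stdio.h>

    static unsigned shared_epoch = 0;   /* bumped by "checkpoints" in the real code */

    static unsigned
    expensive_checksum(const char *data)
    {
        unsigned sum = 0;

        while (*data)
            sum = sum * 31 + (unsigned char) *data++;
        return sum;
    }

    static unsigned
    insert_record(const char *payload)
    {
        unsigned seen_epoch;
        unsigned sum;

    begin:
        seen_epoch = shared_epoch;      /* snapshot before the slow work */
        sum = expensive_checksum(payload);

        /* acquire_lock(); */
        if (seen_epoch != shared_epoch)
        {
            /* release_lock(); */
            goto begin;                 /* state moved under us: redo the work */
        }
        /* ... append the record while holding the lock ... */
        /* release_lock(); */
        return sum;
    }

    int
    main(void)
    {
        printf("crc %u\n", insert_record("hello, wal"));
        return 0;
    }
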
@@ -499,7 +506,7 @@ begin:;
INIT_CRC64(rdata_crc);
len = 0;
- for (rdt = rdata; ; )
+ for (rdt = rdata;;)
{
if (rdt->buffer == InvalidBuffer)
{
@@ -528,13 +535,14 @@ begin:;
{
/* OK, put it in this slot */
dtbuf[i] = rdt->buffer;
+
/*
* XXX We assume page LSN is first data on page
*/
- dtbuf_lsn[i] = *((XLogRecPtr*)BufferGetBlock(rdt->buffer));
+ dtbuf_lsn[i] = *((XLogRecPtr *) BufferGetBlock(rdt->buffer));
if (XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
- crc64 dtcrc;
+ crc64 dtcrc;
dtbuf_bkp[i] = true;
rdt->data = NULL;
@@ -545,7 +553,7 @@ begin:;
dtbuf_xlg[i].node = BufferGetFileNode(dtbuf[i]);
dtbuf_xlg[i].block = BufferGetBlockNumber(dtbuf[i]);
COMP_CRC64(dtcrc,
- (char*) &(dtbuf_xlg[i]) + sizeof(crc64),
+ (char *) &(dtbuf_xlg[i]) + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(dtcrc);
dtbuf_xlg[i].crc = dtcrc;
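
This hunk is the torn-page protection decision: if a buffer's page LSN is at or before the redo pointer of the last checkpoint, this is the first WAL-logged change to the page since that checkpoint, so the whole page image is appended to the record instead of just the change. The comparison in miniature, with a single 64-bit integer standing in for the two-part XLogRecPtr used above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool
    needs_backup_block(uint64_t page_lsn, uint64_t redo_ptr)
    {
        /* first modification since the checkpoint => log the full page */
        return page_lsn <= redo_ptr;
    }

    int
    main(void)
    {
        uint64_t redo_ptr = 5000;

        printf("page lsn 4096: %s\n",
               needs_backup_block(4096, redo_ptr) ? "back up whole page" : "log only the change");
        printf("page lsn 6000: %s\n",
               needs_backup_block(6000, redo_ptr) ? "back up whole page" : "log only the change");
        return 0;
    }
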
@@ -571,7 +579,7 @@ begin:;
/*
* NOTE: the test for len == 0 here is somewhat fishy, since in theory
* all of the rmgr data might have been suppressed in favor of backup
- * blocks. Currently, all callers of XLogInsert provide at least some
+ * blocks. Currently, all callers of XLogInsert provide at least some
* not-in-a-buffer data and so len == 0 should never happen, but that
* may not be true forever. If you need to remove the len == 0 check,
* also remove the check for xl_len == 0 in ReadRecord, below.
@@ -589,16 +597,16 @@ begin:;
/* try to update LogwrtResult while waiting for insert lock */
if (!TAS(&(XLogCtl->info_lck)))
{
- XLogwrtRqst LogwrtRqst;
+ XLogwrtRqst LogwrtRqst;
LogwrtRqst = XLogCtl->LogwrtRqst;
LogwrtResult = XLogCtl->LogwrtResult;
S_UNLOCK(&(XLogCtl->info_lck));
/*
- * If cache is half filled then try to acquire logwrt lock
- * and do LOGWRT work, but only once per XLogInsert call.
- * Ignore any fractional blocks in performing this check.
+ * If cache is half filled then try to acquire logwrt lock and
+ * do LOGWRT work, but only once per XLogInsert call. Ignore
+ * any fractional blocks in performing this check.
*/
LogwrtRqst.Write.xrecoff -= LogwrtRqst.Write.xrecoff % BLCKSZ;
if (do_logwrt &&
@@ -625,8 +633,9 @@ begin:;
/*
* Check to see if my RedoRecPtr is out of date. If so, may have to
- * go back and recompute everything. This can only happen just after a
- * checkpoint, so it's better to be slow in this case and fast otherwise.
+ * go back and recompute everything. This can only happen just after
+ * a checkpoint, so it's better to be slow in this case and fast
+ * otherwise.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
@@ -640,9 +649,10 @@ begin:;
if (dtbuf_bkp[i] == false &&
XLByteLE(dtbuf_lsn[i], RedoRecPtr))
{
+
/*
- * Oops, this buffer now needs to be backed up, but we didn't
- * think so above. Start over.
+ * Oops, this buffer now needs to be backed up, but we
+ * didn't think so above. Start over.
*/
S_UNLOCK(&(XLogCtl->insert_lck));
END_CRIT_SECTION();
@@ -658,8 +668,9 @@ begin:;
* this loop, write_len includes the backup block data.
*
* Also set the appropriate info bits to show which buffers were backed
- * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
- * buffer value (ignoring InvalidBuffer) appearing in the rdata list.
+ * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th
+ * distinct buffer value (ignoring InvalidBuffer) appearing in the
+ * rdata list.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -671,13 +682,13 @@ begin:;
rdt->next = &(dtbuf_rdt[2 * i]);
- dtbuf_rdt[2 * i].data = (char*) &(dtbuf_xlg[i]);
+ dtbuf_rdt[2 * i].data = (char *) &(dtbuf_xlg[i]);
dtbuf_rdt[2 * i].len = sizeof(BkpBlock);
write_len += sizeof(BkpBlock);
rdt = dtbuf_rdt[2 * i].next = &(dtbuf_rdt[2 * i + 1]);
- dtbuf_rdt[2 * i + 1].data = (char*) BufferGetBlock(dtbuf[i]);
+ dtbuf_rdt[2 * i + 1].data = (char *) BufferGetBlock(dtbuf[i]);
dtbuf_rdt[2 * i + 1].len = BLCKSZ;
write_len += BLCKSZ;
dtbuf_rdt[2 * i + 1].next = NULL;
@@ -711,7 +722,7 @@ begin:;
record->xl_rmid = rmid;
/* Now we can finish computing the main CRC */
- COMP_CRC64(rdata_crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(rdata_crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(rdata_crc);
record->xl_crc = rdata_crc;
@@ -729,7 +740,7 @@ begin:;
if (XLOG_DEBUG)
{
- char buf[8192];
+ char buf[8192];
sprintf(buf, "INSERT @ %u/%u: ", RecPtr.xlogid, RecPtr.xrecoff);
xlog_outrec(buf, record);
@@ -791,18 +802,19 @@ begin:;
/* Ensure next record will be properly aligned */
Insert->currpos = (char *) Insert->currpage +
- MAXALIGN(Insert->currpos - (char *) Insert->currpage);
+ MAXALIGN(Insert->currpos - (char *) Insert->currpage);
freespace = INSERT_FREESPACE(Insert);
/*
- * The recptr I return is the beginning of the *next* record.
- * This will be stored as LSN for changed data pages...
+ * The recptr I return is the beginning of the *next* record. This
+ * will be stored as LSN for changed data pages...
*/
INSERT_RECPTR(RecPtr, Insert, curridx);
/* Need to update shared LogwrtRqst if some block was filled up */
if (freespace < SizeOfXLogRecord)
- updrqst = true; /* curridx is filled and available for writing out */
+ updrqst = true; /* curridx is filled and available for
+ * writing out */
else
curridx = PrevBufIdx(curridx);
WriteRqst = XLogCtl->xlblocks[curridx];
@@ -850,9 +862,9 @@ AdvanceXLInsertBuffer(void)
LogwrtResult = Insert->LogwrtResult;
/*
- * Get ending-offset of the buffer page we need to replace (this may be
- * zero if the buffer hasn't been used yet). Fall through if it's already
- * written out.
+ * Get ending-offset of the buffer page we need to replace (this may
+ * be zero if the buffer hasn't been used yet). Fall through if it's
+ * already written out.
*/
OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
if (!XLByteLE(OldPageRqstPtr, LogwrtResult.Write))
@@ -870,7 +882,7 @@ AdvanceXLInsertBuffer(void)
{
if (XLByteLT(XLogCtl->LogwrtRqst.Write, FinishedPageRqstPtr))
XLogCtl->LogwrtRqst.Write = FinishedPageRqstPtr;
- update_needed = false; /* Did the shared-request update */
+ update_needed = false; /* Did the shared-request update */
LogwrtResult = XLogCtl->LogwrtResult;
S_UNLOCK(&(XLogCtl->info_lck));
@@ -883,8 +895,8 @@ AdvanceXLInsertBuffer(void)
}
/*
- * LogwrtResult lock is busy or we know the page is still dirty.
- * Try to acquire logwrt lock and write full blocks.
+ * LogwrtResult lock is busy or we know the page is still
+ * dirty. Try to acquire logwrt lock and write full blocks.
*/
if (!TAS(&(XLogCtl->logwrt_lck)))
{
@@ -896,9 +908,10 @@ AdvanceXLInsertBuffer(void)
Insert->LogwrtResult = LogwrtResult;
break;
}
+
/*
- * Have to write buffers while holding insert lock.
- * This is not good, so only write as much as we absolutely
+ * Have to write buffers while holding insert lock. This
+ * is not good, so only write as much as we absolutely
* must.
*/
WriteRqst.Write = OldPageRqstPtr;
@@ -933,14 +946,15 @@ AdvanceXLInsertBuffer(void)
}
Insert->curridx = nextidx;
Insert->currpage = (XLogPageHeader) (XLogCtl->pages + nextidx * BLCKSZ);
- Insert->currpos = ((char*) Insert->currpage) + SizeOfXLogPHD;
+ Insert->currpos = ((char *) Insert->currpage) + SizeOfXLogPHD;
+
/*
- * Be sure to re-zero the buffer so that bytes beyond what we've written
- * will look like zeroes and not valid XLOG records...
+ * Be sure to re-zero the buffer so that bytes beyond what we've
+ * written will look like zeroes and not valid XLOG records...
*/
- MemSet((char*) Insert->currpage, 0, BLCKSZ);
+ MemSet((char *) Insert->currpage, 0, BLCKSZ);
Insert->currpage->xlp_magic = XLOG_PAGE_MAGIC;
- /* Insert->currpage->xlp_info = 0; */ /* done by memset */
+ /* Insert->currpage->xlp_info = 0; *//* done by memset */
Insert->currpage->xlp_sui = ThisStartUpID;
return update_needed;
@@ -959,11 +973,15 @@ XLogWrite(XLogwrtRqst WriteRqst)
bool ispartialpage;
bool use_existent;
- /* Update local LogwrtResult (caller probably did this already, but...) */
+ /*
+ * Update local LogwrtResult (caller probably did this already,
+ * but...)
+ */
LogwrtResult = Write->LogwrtResult;
while (XLByteLT(LogwrtResult.Write, WriteRqst.Write))
{
+
/*
* Make sure we're not ahead of the insert process. This could
* happen if we're passed a bogus WriteRqst.Write that is past the
@@ -979,6 +997,7 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
+
/*
* Switch to new logfile segment.
*/
@@ -1011,11 +1030,12 @@ XLogWrite(XLogwrtRqst WriteRqst)
ControlFile->logSeg = openLogSeg + 1;
ControlFile->time = time(NULL);
UpdateControlFile();
+
/*
- * Signal postmaster to start a checkpoint if it's been too
- * long since the last one. (We look at local copy of
- * RedoRecPtr which might be a little out of date, but should
- * be close enough for this purpose.)
+ * Signal postmaster to start a checkpoint if it's been
+ * too long since the last one. (We look at local copy of
+ * RedoRecPtr which might be a little out of date, but
+ * should be close enough for this purpose.)
*/
if (IsUnderPostmaster &&
(openLogId != RedoRecPtr.xlogid ||
@@ -1056,14 +1076,14 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* If we just wrote the whole last page of a logfile segment,
* fsync the segment immediately. This avoids having to go back
- * and re-open prior segments when an fsync request comes along later.
- * Doing it here ensures that one and only one backend will perform
- * this fsync.
+ * and re-open prior segments when an fsync request comes along
+ * later. Doing it here ensures that one and only one backend will
+ * perform this fsync.
*/
if (openLogOff >= XLogSegSize && !ispartialpage)
{
issue_xlog_fsync();
- LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
+ LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
}
if (ispartialpage)
@@ -1081,15 +1101,16 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (XLByteLT(LogwrtResult.Flush, WriteRqst.Flush) &&
XLByteLT(LogwrtResult.Flush, LogwrtResult.Write))
{
+
/*
- * Could get here without iterating above loop, in which case
- * we might have no open file or the wrong one. However, we do
- * not need to fsync more than one file.
+ * Could get here without iterating above loop, in which case we
+ * might have no open file or the wrong one. However, we do not
+ * need to fsync more than one file.
*/
if (sync_method != SYNC_METHOD_OPEN)
{
if (openLogFile >= 0 &&
- !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
+ !XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
if (close(openLogFile) != 0)
elog(STOP, "close(logfile %u seg %u) failed: %m",
@@ -1110,8 +1131,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
/*
* Update shared-memory status
*
- * We make sure that the shared 'request' values do not fall behind
- * the 'result' values. This is not absolutely essential, but it saves
+ * We make sure that the shared 'request' values do not fall behind the
+ * 'result' values. This is not absolutely essential, but it saves
* some code in a couple of places.
*/
S_LOCK(&(XLogCtl->info_lck));
@@ -1163,8 +1184,9 @@ XLogFlush(XLogRecPtr record)
* Since fsync is usually a horribly expensive operation, we try to
* piggyback as much data as we can on each fsync: if we see any more
* data entered into the xlog buffer, we'll write and fsync that too,
- * so that the final value of LogwrtResult.Flush is as large as possible.
- * This gives us some chance of avoiding another fsync immediately after.
+ * so that the final value of LogwrtResult.Flush is as large as
+ * possible. This gives us some chance of avoiding another fsync
+ * immediately after.
*/
/* initialize to given target; may increase below */
@@ -1192,9 +1214,7 @@ XLogFlush(XLogRecPtr record)
uint32 freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord) /* buffer is full */
- {
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
- }
else
{
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
@@ -1232,7 +1252,7 @@ XLogFlush(XLogRecPtr record)
* log, seg: identify segment to be created/opened.
*
* *use_existent: if TRUE, OK to use a pre-existing file (else, any
- * pre-existing file will be deleted). On return, TRUE if a pre-existing
+ * pre-existing file will be deleted). On return, TRUE if a pre-existing
* file was used.
*
* use_lock: if TRUE, acquire ControlFileLock spinlock while moving file into
@@ -1257,7 +1277,8 @@ XLogFileInit(uint32 log, uint32 seg,
XLogFileName(path, log, seg);
/*
- * Try to use existent file (checkpoint maker may have created it already)
+ * Try to use existent file (checkpoint maker may have created it
+ * already)
*/
if (*use_existent)
{
@@ -1270,14 +1291,14 @@ XLogFileInit(uint32 log, uint32 seg,
log, seg);
}
else
- return(fd);
+ return (fd);
}
/*
- * Initialize an empty (all zeroes) segment. NOTE: it is possible that
- * another process is doing the same thing. If so, we will end up
- * pre-creating an extra log segment. That seems OK, and better than
- * holding the spinlock throughout this lengthy process.
+ * Initialize an empty (all zeroes) segment. NOTE: it is possible
+ * that another process is doing the same thing. If so, we will end
+ * up pre-creating an extra log segment. That seems OK, and better
+ * than holding the spinlock throughout this lengthy process.
*/
snprintf(tmppath, MAXPGPATH, "%s%cxlogtemp.%d",
XLogDir, SEP_CHAR, (int) getpid());
@@ -1291,10 +1312,10 @@ XLogFileInit(uint32 log, uint32 seg,
elog(STOP, "InitCreate(%s) failed: %m", tmppath);
/*
- * Zero-fill the file. We have to do this the hard way to ensure that
+ * Zero-fill the file. We have to do this the hard way to ensure that
* all the file space has really been allocated --- on platforms that
* allow "holes" in files, just seeking to the end doesn't allocate
- * intermediate space. This way, we know that we have all the space
+ * intermediate space. This way, we know that we have all the space
* and (after the fsync below) that all the indirect blocks are down
* on disk. Therefore, fdatasync(2) or O_DSYNC will be sufficient to
* sync future writes to the log file.
@@ -1304,9 +1325,12 @@ XLogFileInit(uint32 log, uint32 seg,
{
if ((int) write(fd, zbuffer, sizeof(zbuffer)) != (int) sizeof(zbuffer))
{
- int save_errno = errno;
+ int save_errno = errno;
- /* If we fail to make the file, delete it to release disk space */
+ /*
+ * If we fail to make the file, delete it to release disk
+ * space
+ */
unlink(tmppath);
errno = save_errno;
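
The surrounding XLogFileInit code zero-fills a new segment so every block is really allocated on disk (making a later fdatasync or O_DSYNC write sufficient), and on a failed write it unlinks the temp file while preserving errno. A stand-alone version of that loop, with a made-up path and a tiny "segment" size for the demo:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define DEMO_BLCKSZ   8192
    #define DEMO_SEGSIZE  (DEMO_BLCKSZ * 4)     /* tiny segment, demo only */

    static int
    create_zeroed_segment(const char *tmppath)
    {
        char zbuffer[DEMO_BLCKSZ];
        int fd;

        fd = open(tmppath, O_RDWR | O_CREAT | O_EXCL, 0600);
        if (fd < 0)
            return -1;

        memset(zbuffer, 0, sizeof(zbuffer));
        for (long nbytes = 0; nbytes < DEMO_SEGSIZE; nbytes += sizeof(zbuffer))
        {
            if (write(fd, zbuffer, sizeof(zbuffer)) != (ssize_t) sizeof(zbuffer))
            {
                int save_errno = errno;

                close(fd);
                unlink(tmppath);        /* give the disk space back */
                errno = save_errno;
                return -1;
            }
        }
        if (fsync(fd) != 0 || close(fd) != 0)
            return -1;
        return 0;
    }

    int
    main(void)
    {
        if (create_zeroed_segment("demo_xlogtemp") != 0)
            perror("create_zeroed_segment");
        else
            printf("zero-filled %d bytes\n", DEMO_SEGSIZE);
        return 0;
    }
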
@@ -1336,10 +1360,8 @@ XLogFileInit(uint32 log, uint32 seg,
targseg = seg;
strcpy(targpath, path);
- if (! *use_existent)
- {
+ if (!*use_existent)
unlink(targpath);
- }
else
{
while ((fd = BasicOpenFile(targpath, O_RDWR | PG_BINARY,
@@ -1451,10 +1473,10 @@ PreallocXlogFiles(XLogRecPtr endptr)
static void
MoveOfflineLogs(uint32 log, uint32 seg)
{
- DIR *xldir;
- struct dirent *xlde;
- char lastoff[32];
- char path[MAXPGPATH];
+ DIR *xldir;
+ struct dirent *xlde;
+ char lastoff[32];
+ char path[MAXPGPATH];
Assert(XLOG_archive_dir[0] == 0); /* ! implemented yet */
@@ -1471,9 +1493,9 @@ MoveOfflineLogs(uint32 log, uint32 seg)
strspn(xlde->d_name, "0123456789ABCDEF") == 16 &&
strcmp(xlde->d_name, lastoff) <= 0)
{
- elog(LOG, "MoveOfflineLogs: %s %s", (XLOG_archive_dir[0]) ?
+ elog(LOG, "MoveOfflineLogs: %s %s", (XLOG_archive_dir[0]) ?
"archive" : "remove", xlde->d_name);
- sprintf(path, "%s%c%s", XLogDir, SEP_CHAR, xlde->d_name);
+ sprintf(path, "%s%c%s", XLogDir, SEP_CHAR, xlde->d_name);
if (XLOG_archive_dir[0] == 0)
unlink(path);
}
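
The MoveOfflineLogs hunk above scans the xlog directory for names that are exactly 16 hex digits (the segment naming convention) at or before the last segment to keep, and removes them since archiving is not yet implemented. The same directory scan as a stand-alone sketch that only prints; the "./demo_wal" path and the cutoff value are invented for the example:

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *cutoff = "0000000000000003";
        DIR *xldir = opendir("./demo_wal");
        struct dirent *xlde;

        if (xldir == NULL)
        {
            perror("opendir");
            return 1;
        }
        while ((xlde = readdir(xldir)) != NULL)
        {
            if (strlen(xlde->d_name) == 16 &&
                strspn(xlde->d_name, "0123456789ABCDEF") == 16 &&
                strcmp(xlde->d_name, cutoff) <= 0)
                printf("would remove or archive %s\n", xlde->d_name);
        }
        closedir(xldir);
        return 0;
    }
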
@@ -1499,13 +1521,13 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
char *blk;
int i;
- blk = (char*)XLogRecGetData(record) + record->xl_len;
+ blk = (char *) XLogRecGetData(record) + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
continue;
- memcpy((char*)&bkpb, blk, sizeof(BkpBlock));
+ memcpy((char *) &bkpb, blk, sizeof(BkpBlock));
blk += sizeof(BkpBlock);
reln = XLogOpenRelation(true, record->xl_rmid, bkpb.node);
@@ -1516,7 +1538,7 @@ RestoreBkpBlocks(XLogRecord *record, XLogRecPtr lsn)
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
- memcpy((char*)page, blk, BLCKSZ);
+ memcpy((char *) page, blk, BLCKSZ);
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
UnlockAndWriteBuffer(buffer);
@@ -1546,7 +1568,7 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
/* Check CRC of rmgr data and record header */
INIT_CRC64(crc);
COMP_CRC64(crc, XLogRecGetData(record), len);
- COMP_CRC64(crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
@@ -1554,11 +1576,11 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
{
elog(emode, "ReadRecord: bad rmgr data CRC in record at %u/%u",
recptr.xlogid, recptr.xrecoff);
- return(false);
+ return (false);
}
/* Check CRCs of backup blocks, if any */
- blk = (char*)XLogRecGetData(record) + len;
+ blk = (char *) XLogRecGetData(record) + len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (!(record->xl_info & XLR_SET_BKP_BLOCK(i)))
@@ -1569,18 +1591,19 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
COMP_CRC64(crc, blk + sizeof(crc64),
sizeof(BkpBlock) - sizeof(crc64));
FIN_CRC64(crc);
- memcpy((char*)&cbuf, blk, sizeof(crc64)); /* don't assume alignment */
+ memcpy((char *) &cbuf, blk, sizeof(crc64)); /* don't assume
+ * alignment */
if (!EQ_CRC64(cbuf, crc))
{
elog(emode, "ReadRecord: bad bkp block %d CRC in record at %u/%u",
i + 1, recptr.xlogid, recptr.xrecoff);
- return(false);
+ return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
}
- return(true);
+ return (true);
}
/*
@@ -1609,13 +1632,14 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
if (readBuf == NULL)
{
+
/*
* First time through, permanently allocate readBuf. We do it
* this way, rather than just making a static array, for two
- * reasons: (1) no need to waste the storage in most instantiations
- * of the backend; (2) a static char array isn't guaranteed to
- * have any particular alignment, whereas malloc() will provide
- * MAXALIGN'd storage.
+ * reasons: (1) no need to waste the storage in most
+ * instantiations of the backend; (2) a static char array isn't
+ * guaranteed to have any particular alignment, whereas malloc()
+ * will provide MAXALIGN'd storage.
*/
readBuf = (char *) malloc(BLCKSZ);
Assert(readBuf != NULL);
@@ -1656,7 +1680,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
readFile = XLogFileOpen(readId, readSeg, (emode == LOG));
if (readFile < 0)
goto next_record_is_invalid;
- readOff = (uint32) (-1); /* force read to occur below */
+ readOff = (uint32) (-1);/* force read to occur below */
}
targetPageOff = ((RecPtr->xrecoff % XLogSegSize) / BLCKSZ) * BLCKSZ;
@@ -1688,9 +1712,10 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer)
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:;
+
/*
- * Currently, xl_len == 0 must be bad data, but that might not be
- * true forever. See note in XLogInsert.
+ * Currently, xl_len == 0 must be bad data, but that might not be true
+ * forever. See note in XLogInsert.
*/
if (record->xl_len == 0)
{
@@ -1698,8 +1723,10 @@ got_record:;
RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
+
/*
- * Compute total length of record including any appended backup blocks.
+ * Compute total length of record including any appended backup
+ * blocks.
*/
total_len = SizeOfXLogRecord + record->xl_len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
@@ -1708,6 +1735,7 @@ got_record:;
continue;
total_len += sizeof(BkpBlock) + BLCKSZ;
}
+
/*
* Make sure it will fit in buffer (currently, it is mechanically
* impossible for this test to fail, but it seems like a good idea
@@ -1731,7 +1759,7 @@ got_record:;
{
/* Need to reassemble record */
XLogContRecord *contrecord;
- uint32 gotlen = len;
+ uint32 gotlen = len;
memcpy(buffer, record, len);
record = (XLogRecord *) buffer;
@@ -1764,7 +1792,7 @@ got_record:;
goto next_record_is_invalid;
}
contrecord = (XLogContRecord *) ((char *) readBuf + SizeOfXLogPHD);
- if (contrecord->xl_rem_len == 0 ||
+ if (contrecord->xl_rem_len == 0 ||
total_len != (contrecord->xl_rem_len + gotlen))
{
elog(emode, "ReadRecord: invalid cont-record len %u in logfile %u seg %u off %u",
@@ -1774,7 +1802,7 @@ got_record:;
len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
if (contrecord->xl_rem_len > len)
{
- memcpy(buffer, (char *)contrecord + SizeOfXLogContRecord, len);
+ memcpy(buffer, (char *) contrecord + SizeOfXLogContRecord, len);
gotlen += len;
buffer += len;
continue;
@@ -1788,12 +1816,12 @@ got_record:;
if (BLCKSZ - SizeOfXLogRecord >= SizeOfXLogPHD +
SizeOfXLogContRecord + MAXALIGN(contrecord->xl_rem_len))
{
- nextRecord = (XLogRecord *) ((char *) contrecord +
+ nextRecord = (XLogRecord *) ((char *) contrecord +
SizeOfXLogContRecord + MAXALIGN(contrecord->xl_rem_len));
}
EndRecPtr.xlogid = readId;
EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff +
- SizeOfXLogPHD + SizeOfXLogContRecord +
+ SizeOfXLogPHD + SizeOfXLogContRecord +
MAXALIGN(contrecord->xl_rem_len);
ReadRecPtr = *RecPtr;
return record;
@@ -1822,7 +1850,7 @@ next_record_is_invalid:;
* Check whether the xlog header of a page just read in looks valid.
*
* This is just a convenience subroutine to avoid duplicated code in
- * ReadRecord. It's not intended for use from anywhere else.
+ * ReadRecord. It's not intended for use from anywhere else.
*/
static bool
ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
@@ -1839,14 +1867,16 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_info, readId, readSeg, readOff);
return false;
}
+
/*
- * We disbelieve a SUI less than the previous page's SUI, or more
- * than a few counts greater. In theory as many as 512 shutdown
- * checkpoint records could appear on a 32K-sized xlog page, so
- * that's the most differential there could legitimately be.
+ * We disbelieve a SUI less than the previous page's SUI, or more than
+ * a few counts greater. In theory as many as 512 shutdown checkpoint
+ * records could appear on a 32K-sized xlog page, so that's the most
+ * differential there could legitimately be.
*
* Note this check can only be applied when we are reading the next page
- * in sequence, so ReadRecord passes a flag indicating whether to check.
+ * in sequence, so ReadRecord passes a flag indicating whether to
+ * check.
*/
if (checkSUI)
{
@@ -1866,7 +1896,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
* I/O routines for pg_control
*
* *ControlFile is a buffer in shared memory that holds an image of the
- * contents of pg_control. WriteControlFile() initializes pg_control
+ * contents of pg_control. WriteControlFile() initializes pg_control
* given a preloaded buffer, ReadControlFile() loads the buffer from
* the pg_control file (during postmaster or standalone-backend startup),
* and UpdateControlFile() rewrites pg_control after we modify xlog state.
@@ -1890,9 +1920,11 @@ static void
WriteControlFile(void)
{
int fd;
- char buffer[BLCKSZ]; /* need not be aligned */
+ char buffer[BLCKSZ]; /* need not be aligned */
+
#ifdef USE_LOCALE
char *localeptr;
+
#endif
/*
@@ -1911,16 +1943,17 @@ WriteControlFile(void)
if (!localeptr)
elog(STOP, "Invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
+
/*
* Issue warning notice if initdb'ing in a locale that will not permit
- * LIKE index optimization. This is not a clean place to do it, but
- * I don't see a better place either...
+ * LIKE index optimization. This is not a clean place to do it, but I
+ * don't see a better place either...
*/
if (!locale_is_like_safe())
elog(NOTICE, "Initializing database with %s collation order."
"\n\tThis locale setting will prevent use of index optimization for"
"\n\tLIKE and regexp searches. If you are concerned about speed of"
- "\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
+ "\n\tsuch queries, you may wish to set LC_COLLATE to \"C\" and"
"\n\tre-initdb. For more information see the Administrator's Guide.",
ControlFile->lc_collate);
#else
@@ -1930,17 +1963,17 @@ WriteControlFile(void)
/* Contents are protected with a CRC */
INIT_CRC64(ControlFile->crc);
- COMP_CRC64(ControlFile->crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(ControlFile->crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
/*
- * We write out BLCKSZ bytes into pg_control, zero-padding the
- * excess over sizeof(ControlFileData). This reduces the odds
- * of premature-EOF errors when reading pg_control. We'll still
- * fail when we check the contents of the file, but hopefully with
- * a more specific error than "couldn't read pg_control".
+ * We write out BLCKSZ bytes into pg_control, zero-padding the excess
+ * over sizeof(ControlFileData). This reduces the odds of
+ * premature-EOF errors when reading pg_control. We'll still fail
+ * when we check the contents of the file, but hopefully with a more
+ * specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
elog(STOP, "sizeof(ControlFileData) is too large ... fix xlog.c");
@@ -1993,8 +2026,8 @@ ReadControlFile(void)
/* Now check the CRC. */
INIT_CRC64(crc);
- COMP_CRC64(crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(crc);
@@ -2002,14 +2035,15 @@ ReadControlFile(void)
elog(STOP, "Invalid CRC in control file");
/*
- * Do compatibility checking immediately. We do this here for 2 reasons:
+ * Do compatibility checking immediately. We do this here for 2
+ * reasons:
*
- * (1) if the database isn't compatible with the backend executable,
- * we want to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we
+ * want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file
- * for themselves. (These locale settings are considered critical
+ * for themselves. (These locale settings are considered critical
* compatibility items because they can affect sort order of indexes.)
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
@@ -2042,8 +2076,8 @@ UpdateControlFile(void)
int fd;
INIT_CRC64(ControlFile->crc);
- COMP_CRC64(ControlFile->crc,
- (char*) ControlFile + sizeof(crc64),
+ COMP_CRC64(ControlFile->crc,
+ (char *) ControlFile + sizeof(crc64),
sizeof(ControlFileData) - sizeof(crc64));
FIN_CRC64(ControlFile->crc);
@@ -2096,6 +2130,7 @@ XLOGShmemInit(void)
Assert(!found);
memset(XLogCtl, 0, sizeof(XLogCtlData));
+
/*
* Since XLogCtlData contains XLogRecPtr fields, its sizeof should be
* a multiple of the alignment for same, so no extra alignment padding
@@ -2104,9 +2139,10 @@ XLOGShmemInit(void)
XLogCtl->xlblocks = (XLogRecPtr *)
(((char *) XLogCtl) + sizeof(XLogCtlData));
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
+
/*
- * Here, on the other hand, we must MAXALIGN to ensure the page buffers
- * have worst-case alignment.
+ * Here, on the other hand, we must MAXALIGN to ensure the page
+ * buffers have worst-case alignment.
*/
XLogCtl->pages =
((char *) XLogCtl) + MAXALIGN(sizeof(XLogCtlData) +
@@ -2114,8 +2150,8 @@ XLOGShmemInit(void)
memset(XLogCtl->pages, 0, BLCKSZ * XLOGbuffers);
/*
- * Do basic initialization of XLogCtl shared data.
- * (StartupXLOG will fill in additional info.)
+ * Do basic initialization of XLogCtl shared data. (StartupXLOG will
+ * fill in additional info.)
*/
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
@@ -2145,7 +2181,7 @@ BootStrapXLOG(void)
char *buffer;
XLogPageHeader page;
XLogRecord *record;
- bool use_existent;
+ bool use_existent;
crc64 crc;
/* Use malloc() to ensure buffer is MAXALIGNED */
@@ -2180,7 +2216,7 @@ BootStrapXLOG(void)
INIT_CRC64(crc);
COMP_CRC64(crc, &checkPoint, sizeof(checkPoint));
- COMP_CRC64(crc, (char*) record + sizeof(crc64),
+ COMP_CRC64(crc, (char *) record + sizeof(crc64),
SizeOfXLogRecord - sizeof(crc64));
FIN_CRC64(crc);
record->xl_crc = crc;
@@ -2246,8 +2282,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we
- * need not do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need
+ * not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -2297,9 +2333,7 @@ StartupXLOG(void)
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
- {
elog(STOP, "Unable to locate a valid CheckPoint record");
- }
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2320,7 +2354,7 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
ThisStartUpID = checkPoint.ThisStartUpID;
- RedoRecPtr = XLogCtl->Insert.RedoRecPtr =
+ RedoRecPtr = XLogCtl->Insert.RedoRecPtr =
XLogCtl->RedoRecPtr = checkPoint.redo;
if (XLByteLT(RecPtr, checkPoint.redo))
@@ -2328,7 +2362,7 @@ StartupXLOG(void)
if (checkPoint.undo.xrecoff == 0)
checkPoint.undo = RecPtr;
- if (XLByteLT(checkPoint.undo, RecPtr) ||
+ if (XLByteLT(checkPoint.undo, RecPtr) ||
XLByteLT(checkPoint.redo, RecPtr))
{
if (wasShutdown)
@@ -2336,9 +2370,7 @@ StartupXLOG(void)
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
- {
InRecovery = true;
- }
/* REDO */
if (InRecovery)
@@ -2355,7 +2387,8 @@ StartupXLOG(void)
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), STOP, buffer);
- else /* read past CheckPoint record */
+ else
+/* read past CheckPoint record */
record = ReadRecord(NULL, LOG, buffer);
if (record != NULL)
@@ -2369,15 +2402,15 @@ StartupXLOG(void)
ShmemVariableCache->nextXid = record->xl_xid + 1;
if (XLOG_DEBUG)
{
- char buf[8192];
+ char buf[8192];
- sprintf(buf, "REDO @ %u/%u; LSN %u/%u: ",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
- EndRecPtr.xlogid, EndRecPtr.xrecoff);
+ sprintf(buf, "REDO @ %u/%u; LSN %u/%u: ",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
+ EndRecPtr.xlogid, EndRecPtr.xrecoff);
xlog_outrec(buf, record);
strcat(buf, " - ");
- RmgrTable[record->xl_rmid].rm_desc(buf,
- record->xl_info, XLogRecGetData(record));
+ RmgrTable[record->xl_rmid].rm_desc(buf,
+ record->xl_info, XLogRecGetData(record));
fprintf(stderr, "%s\n", buf);
}
@@ -2411,8 +2444,11 @@ StartupXLOG(void)
XLogCtl->xlblocks[0].xrecoff =
((EndOfLog.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
- /* Tricky point here: readBuf contains the *last* block that the LastRec
- * record spans, not the one it starts in, which is what we want.
+
+ /*
+ * Tricky point here: readBuf contains the *last* block that the
+ * LastRec record spans, not the one it starts in, which is what we
+ * want.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2458,6 +2494,7 @@ StartupXLOG(void)
if (InRecovery)
{
+
/*
* In case we had to use the secondary checkpoint, make sure that
* it will still be shown as the secondary checkpoint after this
@@ -2554,7 +2591,7 @@ SetThisStartUpID(void)
/*
* CheckPoint process called by postmaster saves copy of new RedoRecPtr
- * in shmem (using SetRedoRecPtr). When checkpointer completes, postmaster
+ * in shmem (using SetRedoRecPtr). When checkpointer completes, postmaster
* calls GetRedoRecPtr to update its own copy of RedoRecPtr, so that
* subsequently-spawned backends will start out with a reasonably up-to-date
* local RedoRecPtr. Since these operations are not protected by any spinlock
@@ -2605,7 +2642,7 @@ CreateCheckPoint(bool shutdown)
CheckPoint checkPoint;
XLogRecPtr recptr;
XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecData rdata;
+ XLogRecData rdata;
uint32 freespace;
uint32 _logId;
uint32 _logSeg;
@@ -2613,7 +2650,7 @@ CreateCheckPoint(bool shutdown)
if (MyLastRecPtr.xrecoff != 0)
elog(ERROR, "CreateCheckPoint: cannot be called inside transaction block");
-
+
START_CRIT_SECTION();
/* Grab lock, using larger than normal sleep between tries (1 sec) */
@@ -2639,17 +2676,17 @@ CreateCheckPoint(bool shutdown)
/*
* If this isn't a shutdown, and we have not inserted any XLOG records
* since the start of the last checkpoint, skip the checkpoint. The
- * idea here is to avoid inserting duplicate checkpoints when the system
- * is idle. That wastes log space, and more importantly it exposes us to
- * possible loss of both current and previous checkpoint records if the
- * machine crashes just as we're writing the update. (Perhaps it'd make
- * even more sense to checkpoint only when the previous checkpoint record
- * is in a different xlog page?)
+ * idea here is to avoid inserting duplicate checkpoints when the
+ * system is idle. That wastes log space, and more importantly it
+ * exposes us to possible loss of both current and previous checkpoint
+ * records if the machine crashes just as we're writing the update.
+ * (Perhaps it'd make even more sense to checkpoint only when the
+ * previous checkpoint record is in a different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
- * the start of the last checkpoint: current insertion point must match
- * the end of the last checkpoint record, and its redo pointer must point
- * to itself.
+ * the start of the last checkpoint: current insertion point must
+ * match the end of the last checkpoint record, and its redo pointer
+ * must point to itself.
*/
if (!shutdown)
{
@@ -2677,7 +2714,7 @@ CreateCheckPoint(bool shutdown)
* NB: this is NOT necessarily where the checkpoint record itself will
* be, since other backends may insert more XLOG records while we're
* off doing the buffer flush work. Those XLOG records are logically
- * after the checkpoint, even though physically before it. Got that?
+ * after the checkpoint, even though physically before it. Got that?
*/
freespace = INSERT_FREESPACE(Insert);
if (freespace < SizeOfXLogRecord)
@@ -2687,16 +2724,18 @@ CreateCheckPoint(bool shutdown)
freespace = BLCKSZ - SizeOfXLogPHD;
}
INSERT_RECPTR(checkPoint.redo, Insert, Insert->curridx);
+
/*
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock.
*/
RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
+
/*
- * Get UNDO record ptr - this is oldest of PROC->logRec values.
- * We do this while holding insert lock to ensure that we won't miss
- * any about-to-commit transactions (UNDO must include all xacts that
- * have commits after REDO point).
+ * Get UNDO record ptr - this is oldest of PROC->logRec values. We do
+ * this while holding insert lock to ensure that we won't miss any
+ * about-to-commit transactions (UNDO must include all xacts that have
+ * commits after REDO point).
*/
checkPoint.undo = GetUndoRecPtr();
@@ -2720,8 +2759,8 @@ CreateCheckPoint(bool shutdown)
SpinRelease(OidGenLockId);
/*
- * Having constructed the checkpoint record, ensure all shmem disk buffers
- * are flushed to disk.
+ * Having constructed the checkpoint record, ensure all shmem disk
+ * buffers are flushed to disk.
*/
FlushBufferPool();
@@ -2729,7 +2768,7 @@ CreateCheckPoint(bool shutdown)
* Now insert the checkpoint record into XLOG.
*/
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&checkPoint);
+ rdata.data = (char *) (&checkPoint);
rdata.len = sizeof(checkPoint);
rdata.next = NULL;
@@ -2748,11 +2787,11 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog concurrent activity while data base is shutting down");
/*
- * Remember location of prior checkpoint's earliest info.
- * Oldest item is redo or undo, whichever is older; but watch out
- * for case that undo = 0.
+ * Remember location of prior checkpoint's earliest info. Oldest item
+ * is redo or undo, whichever is older; but watch out for case that
+ * undo = 0.
*/
- if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
+ if (ControlFile->checkPointCopy.undo.xrecoff != 0 &&
XLByteLT(ControlFile->checkPointCopy.undo,
ControlFile->checkPointCopy.redo))
XLByteToSeg(ControlFile->checkPointCopy.undo, _logId, _logSeg);
@@ -2801,10 +2840,10 @@ CreateCheckPoint(bool shutdown)
void
XLogPutNextOid(Oid nextOid)
{
- XLogRecData rdata;
+ XLogRecData rdata;
rdata.buffer = InvalidBuffer;
- rdata.data = (char *)(&nextOid);
+ rdata.data = (char *) (&nextOid);
rdata.len = sizeof(Oid);
rdata.next = NULL;
(void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID, &rdata);
@@ -2816,11 +2855,11 @@ XLogPutNextOid(Oid nextOid)
void
xlog_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_NEXTOID)
{
- Oid nextOid;
+ Oid nextOid;
memcpy(&nextOid, XLogRecGetData(record), sizeof(Oid));
if (ShmemVariableCache->nextOid < nextOid)
@@ -2846,9 +2885,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
/* In an ONLINE checkpoint, treat the counters like NEXTOID */
if (ShmemVariableCache->nextXid < checkPoint.nextXid)
- {
ShmemVariableCache->nextXid = checkPoint.nextXid;
- }
if (ShmemVariableCache->nextOid < checkPoint.nextOid)
{
ShmemVariableCache->nextOid = checkPoint.nextOid;
@@ -2856,32 +2893,33 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
}
}
}
-
+
void
xlog_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-
+
void
-xlog_desc(char *buf, uint8 xl_info, char* rec)
+xlog_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE)
{
- CheckPoint *checkpoint = (CheckPoint*) rec;
+ CheckPoint *checkpoint = (CheckPoint *) rec;
+
sprintf(buf + strlen(buf), "checkpoint: redo %u/%u; undo %u/%u; "
- "sui %u; xid %u; oid %u; %s",
- checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
- checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
- checkpoint->ThisStartUpID, checkpoint->nextXid,
- checkpoint->nextOid,
- (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
+ "sui %u; xid %u; oid %u; %s",
+ checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
+ checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
+ checkpoint->ThisStartUpID, checkpoint->nextXid,
+ checkpoint->nextOid,
+ (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
}
else if (info == XLOG_NEXTOID)
{
- Oid nextOid;
+ Oid nextOid;
memcpy(&nextOid, rec, sizeof(Oid));
sprintf(buf + strlen(buf), "nextOid: %u", nextOid);
@@ -2893,13 +2931,13 @@ xlog_desc(char *buf, uint8 xl_info, char* rec)
static void
xlog_outrec(char *buf, XLogRecord *record)
{
- int bkpb;
- int i;
+ int bkpb;
+ int i;
sprintf(buf + strlen(buf), "prev %u/%u; xprev %u/%u; xid %u",
- record->xl_prev.xlogid, record->xl_prev.xrecoff,
- record->xl_xact_prev.xlogid, record->xl_xact_prev.xrecoff,
- record->xl_xid);
+ record->xl_prev.xlogid, record->xl_prev.xrecoff,
+ record->xl_xact_prev.xlogid, record->xl_xact_prev.xrecoff,
+ record->xl_xid);
for (i = 0, bkpb = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
@@ -2912,7 +2950,7 @@ xlog_outrec(char *buf, XLogRecord *record)
sprintf(buf + strlen(buf), "; bkpb %d", bkpb);
sprintf(buf + strlen(buf), ": %s",
- RmgrTable[record->xl_rmid].rm_name);
+ RmgrTable[record->xl_rmid].rm_name);
}
@@ -2923,15 +2961,19 @@ xlog_outrec(char *buf, XLogRecord *record)
bool
check_xlog_sync_method(const char *method)
{
- if (strcasecmp(method, "fsync") == 0) return true;
+ if (strcasecmp(method, "fsync") == 0)
+ return true;
#ifdef HAVE_FDATASYNC
- if (strcasecmp(method, "fdatasync") == 0) return true;
+ if (strcasecmp(method, "fdatasync") == 0)
+ return true;
#endif
#ifdef OPEN_SYNC_FLAG
- if (strcasecmp(method, "open_sync") == 0) return true;
+ if (strcasecmp(method, "open_sync") == 0)
+ return true;
#endif
#ifdef OPEN_DATASYNC_FLAG
- if (strcasecmp(method, "open_datasync") == 0) return true;
+ if (strcasecmp(method, "open_datasync") == 0)
+ return true;
#endif
return false;
}
@@ -2939,8 +2981,8 @@ check_xlog_sync_method(const char *method)
void
assign_xlog_sync_method(const char *method)
{
- int new_sync_method;
- int new_sync_bit;
+ int new_sync_method;
+ int new_sync_bit;
if (strcasecmp(method, "fsync") == 0)
{
@@ -2978,11 +3020,12 @@ assign_xlog_sync_method(const char *method)
if (sync_method != new_sync_method || open_sync_bit != new_sync_bit)
{
+
/*
- * To ensure that no blocks escape unsynced, force an fsync on
- * the currently open log segment (if any). Also, if the open
- * flag is changing, close the log file so it will be reopened
- * (with new flag bit) at next use.
+ * To ensure that no blocks escape unsynced, force an fsync on the
+ * currently open log segment (if any). Also, if the open flag is
+ * changing, close the log file so it will be reopened (with new
+ * flag bit) at next use.
*/
if (openLogFile >= 0)
{
@@ -3011,7 +3054,7 @@ issue_xlog_fsync(void)
{
switch (sync_method)
{
- case SYNC_METHOD_FSYNC:
+ case SYNC_METHOD_FSYNC:
if (pg_fsync(openLogFile) != 0)
elog(STOP, "fsync(logfile %u seg %u) failed: %m",
openLogId, openLogSeg);
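The xlog.c hunks above are mostly whitespace and comment reflow around the pg_control handling: the file's contents are CRC-protected (the CRC is computed over everything after the crc field itself) and written out zero-padded to a full block, so a truncated file fails the CRC check instead of producing a confusing premature-EOF error. Below is a minimal standalone sketch of that pattern, not the PostgreSQL code itself; MyControlData, MY_BLCKSZ and the toy CRC-32 are inventions for the illustration (the real code uses a CRC-64).

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define MY_BLCKSZ 8192              /* assumed block size for the sketch */

    typedef struct MyControlData
    {
        uint32_t    crc;                /* CRC of the rest of the struct */
        uint32_t    version;
        uint32_t    state;
    } MyControlData;

    /* Toy CRC-32 (reflected, poly 0xEDB88320); stands in for the CRC-64. */
    static uint32_t
    toy_crc32(const unsigned char *p, size_t len)
    {
        uint32_t    crc = 0xFFFFFFFFu;

        while (len--)
        {
            crc ^= *p++;
            for (int i = 0; i < 8; i++)
                crc = (crc >> 1) ^ (0xEDB88320u & (0u - (crc & 1u)));
        }
        return crc ^ 0xFFFFFFFFu;
    }

    int
    main(void)
    {
        MyControlData ctl = {0, 71, 1};
        char        buffer[MY_BLCKSZ];

        /* CRC covers everything after the crc field itself */
        ctl.crc = toy_crc32((unsigned char *) &ctl + sizeof(uint32_t),
                            sizeof(ctl) - sizeof(uint32_t));

        /*
         * Zero-pad to a full block so a short read is caught later by the
         * CRC/content checks rather than by a premature-EOF error.
         */
        memset(buffer, 0, MY_BLCKSZ);
        memcpy(buffer, &ctl, sizeof(ctl));

        FILE       *f = fopen("pg_control.sketch", "wb");
        if (f == NULL)
            return 1;
        if (fwrite(buffer, MY_BLCKSZ, 1, f) != 1)
        {
            fclose(f);
            return 1;
        }
        fclose(f);
        return 0;
    }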
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index 8b80c326cab..a3f440ca5f9 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.14 2001/03/13 01:17:05 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.15 2001/03/22 03:59:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,26 +37,26 @@
* xaction/command and return
*
* - -1 if not
- * - 0 if there is no tuple at all
- * - 1 if yes
+ * - 0 if there is no tuple at all
+ * - 1 if yes
*/
int
-XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
- TransactionId xid, CommandId cid)
+XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
+ TransactionId xid, CommandId cid)
{
- Relation reln;
- Buffer buffer;
- Page page;
- ItemId lp;
- HeapTupleHeader htup;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ ItemId lp;
+ HeapTupleHeader htup;
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
- return(0);
+ return (0);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
- return(0);
+ return (0);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -64,13 +64,13 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
- return(0);
+ return (0);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
- return(0);
+ return (0);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -79,11 +79,11 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
if (htup->t_xmin != xid || htup->t_cmin != cid)
{
UnlockAndReleaseBuffer(buffer);
- return(-1);
+ return (-1);
}
UnlockAndReleaseBuffer(buffer);
- return(1);
+ return (1);
}
/*
@@ -95,19 +95,19 @@ XLogIsOwnerOfTuple(RelFileNode hnode, ItemPointer iptr,
bool
XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
- Relation reln;
- Buffer buffer;
- Page page;
- ItemId lp;
- HeapTupleHeader htup;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ ItemId lp;
+ HeapTupleHeader htup;
reln = XLogOpenRelation(false, RM_HEAP_ID, hnode);
if (!RelationIsValid(reln))
- return(false);
+ return (false);
buffer = ReadBuffer(reln, ItemPointerGetBlockNumber(iptr));
if (!BufferIsValid(buffer))
- return(false);
+ return (false);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = (Page) BufferGetPage(buffer);
@@ -115,21 +115,21 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
ItemPointerGetOffsetNumber(iptr) > PageGetMaxOffsetNumber(page))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
if (PageGetSUI(page) != ThisStartUpID)
{
Assert(PageGetSUI(page) < ThisStartUpID);
UnlockAndReleaseBuffer(buffer);
- return(true);
+ return (true);
}
lp = PageGetItemId(page, ItemPointerGetOffsetNumber(iptr));
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
htup = (HeapTupleHeader) PageGetItem(page, lp);
@@ -140,22 +140,22 @@ XLogIsValidTuple(RelFileNode hnode, ItemPointer iptr)
{
if (htup->t_infomask & HEAP_XMIN_INVALID ||
(htup->t_infomask & HEAP_MOVED_IN &&
- TransactionIdDidAbort((TransactionId)htup->t_cmin)) ||
+ TransactionIdDidAbort((TransactionId) htup->t_cmin)) ||
TransactionIdDidAbort(htup->t_xmin))
{
UnlockAndReleaseBuffer(buffer);
- return(false);
+ return (false);
}
}
UnlockAndReleaseBuffer(buffer);
- return(true);
+ return (true);
}
/*
* Open pg_log in recovery
*/
-extern Relation LogRelation; /* pg_log relation */
+extern Relation LogRelation; /* pg_log relation */
void
XLogOpenLogRelation(void)
@@ -189,32 +189,32 @@ XLogOpenLogRelation(void)
Buffer
XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
{
- BlockNumber lastblock = RelationGetNumberOfBlocks(reln);
+ BlockNumber lastblock = RelationGetNumberOfBlocks(reln);
Buffer buffer;
if (blkno >= lastblock)
{
buffer = InvalidBuffer;
- if (extend) /* we do this in recovery only - no locks */
+ if (extend) /* we do this in recovery only - no locks */
{
Assert(InRecovery);
while (lastblock <= blkno)
{
if (buffer != InvalidBuffer)
- ReleaseBuffer(buffer); /* must be WriteBuffer()? */
+ ReleaseBuffer(buffer); /* must be WriteBuffer()? */
buffer = ReadBuffer(reln, P_NEW);
lastblock++;
}
}
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- return(buffer);
+ return (buffer);
}
buffer = ReadBuffer(reln, blkno);
if (buffer != InvalidBuffer)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
- return(buffer);
+ return (buffer);
}
/*
@@ -223,32 +223,33 @@ XLogReadBuffer(bool extend, Relation reln, BlockNumber blkno)
typedef struct XLogRelDesc
{
- RelationData reldata;
- struct XLogRelDesc *lessRecently;
- struct XLogRelDesc *moreRecently;
+ RelationData reldata;
+ struct XLogRelDesc *lessRecently;
+ struct XLogRelDesc *moreRecently;
} XLogRelDesc;
typedef struct XLogRelCacheEntry
{
- RelFileNode rnode;
- XLogRelDesc *rdesc;
+ RelFileNode rnode;
+ XLogRelDesc *rdesc;
} XLogRelCacheEntry;
-static HTAB *_xlrelcache;
-static XLogRelDesc *_xlrelarr = NULL;
-static Form_pg_class _xlpgcarr = NULL;
-static int _xlast = 0;
-static int _xlcnt = 0;
-#define _XLOG_RELCACHESIZE 512
+static HTAB *_xlrelcache;
+static XLogRelDesc *_xlrelarr = NULL;
+static Form_pg_class _xlpgcarr = NULL;
+static int _xlast = 0;
+static int _xlcnt = 0;
+
+#define _XLOG_RELCACHESIZE 512
static void
_xl_init_rel_cache(void)
{
- HASHCTL ctl;
+ HASHCTL ctl;
_xlcnt = _XLOG_RELCACHESIZE;
_xlast = 0;
- _xlrelarr = (XLogRelDesc*) malloc(sizeof(XLogRelDesc) * _xlcnt);
+ _xlrelarr = (XLogRelDesc *) malloc(sizeof(XLogRelDesc) * _xlcnt);
memset(_xlrelarr, 0, sizeof(XLogRelDesc) * _xlcnt);
_xlpgcarr = (Form_pg_class) malloc(sizeof(FormData_pg_class) * _xlcnt);
memset(_xlpgcarr, 0, sizeof(FormData_pg_class) * _xlcnt);
@@ -258,26 +259,26 @@ _xl_init_rel_cache(void)
memset(&ctl, 0, (int) sizeof(ctl));
ctl.keysize = sizeof(RelFileNode);
- ctl.datasize = sizeof(XLogRelDesc*);
+ ctl.datasize = sizeof(XLogRelDesc *);
ctl.hash = tag_hash;
_xlrelcache = hash_create(_XLOG_RELCACHESIZE, &ctl,
- HASH_ELEM | HASH_FUNCTION);
+ HASH_ELEM | HASH_FUNCTION);
}
static void
_xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
{
- XLogRelCacheEntry *hentry;
- bool found;
- XLogRelDesc *rdesc = *edata;
- Form_pg_class tpgc = rdesc->reldata.rd_rel;
+ XLogRelCacheEntry *hentry;
+ bool found;
+ XLogRelDesc *rdesc = *edata;
+ Form_pg_class tpgc = rdesc->reldata.rd_rel;
rdesc->lessRecently->moreRecently = rdesc->moreRecently;
rdesc->moreRecently->lessRecently = rdesc->lessRecently;
- hentry = (XLogRelCacheEntry*) hash_search(_xlrelcache,
- (char*)&(rdesc->reldata.rd_node), HASH_REMOVE, &found);
+ hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
+ (char *) &(rdesc->reldata.rd_node), HASH_REMOVE, &found);
if (hentry == NULL)
elog(STOP, "_xl_remove_hash_entry: can't delete from cache");
@@ -294,16 +295,16 @@ _xl_remove_hash_entry(XLogRelDesc **edata, Datum dummy)
return;
}
-static XLogRelDesc*
+static XLogRelDesc *
_xl_new_reldesc(void)
{
- XLogRelDesc *res;
+ XLogRelDesc *res;
_xlast++;
if (_xlast < _xlcnt)
{
_xlrelarr[_xlast].reldata.rd_rel = &(_xlpgcarr[_xlast]);
- return(&(_xlrelarr[_xlast]));
+ return (&(_xlrelarr[_xlast]));
}
/* reuse */
@@ -312,7 +313,7 @@ _xl_new_reldesc(void)
_xl_remove_hash_entry(&res, 0);
_xlast--;
- return(res);
+ return (res);
}
@@ -344,12 +345,12 @@ XLogCloseRelationCache(void)
Relation
XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
{
- XLogRelDesc *res;
- XLogRelCacheEntry *hentry;
- bool found;
+ XLogRelDesc *res;
+ XLogRelCacheEntry *hentry;
+ bool found;
- hentry = (XLogRelCacheEntry*)
- hash_search(_xlrelcache, (char*)&rnode, HASH_FIND, &found);
+ hentry = (XLogRelCacheEntry *)
+ hash_search(_xlrelcache, (char *) &rnode, HASH_FIND, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: error in cache");
@@ -372,8 +373,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
res->reldata.rd_node = rnode;
- hentry = (XLogRelCacheEntry*)
- hash_search(_xlrelcache, (char*)&rnode, HASH_ENTER, &found);
+ hentry = (XLogRelCacheEntry *)
+ hash_search(_xlrelcache, (char *) &rnode, HASH_ENTER, &found);
if (hentry == NULL)
elog(STOP, "XLogOpenRelation: can't insert into cache");
@@ -385,7 +386,7 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
res->reldata.rd_fd = -1;
res->reldata.rd_fd = smgropen(DEFAULT_SMGR, &(res->reldata),
- true /* allow failure */);
+ true /* allow failure */ );
}
res->moreRecently = &(_xlrelarr[0]);
@@ -393,8 +394,8 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode)
_xlrelarr[0].lessRecently = res;
res->lessRecently->moreRecently = res;
- if (res->reldata.rd_fd < 0) /* file doesn't exist */
- return(NULL);
+ if (res->reldata.rd_fd < 0) /* file doesn't exist */
+ return (NULL);
- return(&(res->reldata));
+ return (&(res->reldata));
}
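The xlogutils.c changes reindent the small relation-descriptor cache used during recovery: descriptors live in a fixed-size array, are looked up through a hash table keyed by RelFileNode, and are kept on a circular doubly linked list (lessRecently/moreRecently) so the least recently used entry can be evicted and reused once the array fills up. A self-contained sketch of just the LRU ring bookkeeping, with invented types and no hash table:

    #include <stdio.h>

    #define NENTRIES 4

    typedef struct CacheEntry
    {
        int         key;
        struct CacheEntry *lessRecently;
        struct CacheEntry *moreRecently;
    } CacheEntry;

    static CacheEntry entries[NENTRIES + 1];    /* entries[0] is a sentinel */

    static void
    init_cache(void)
    {
        /* start with only the sentinel in the ring, pointing at itself */
        entries[0].lessRecently = &entries[0];
        entries[0].moreRecently = &entries[0];
    }

    /* Splice e in next to the sentinel, marking it most recently used. */
    static void
    mark_most_recent(CacheEntry *e)
    {
        e->moreRecently = &entries[0];
        e->lessRecently = entries[0].lessRecently;
        entries[0].lessRecently = e;
        e->lessRecently->moreRecently = e;
    }

    /* Unlink e from the ring (e.g. before reusing it for a different key). */
    static void
    unlink_entry(CacheEntry *e)
    {
        e->lessRecently->moreRecently = e->moreRecently;
        e->moreRecently->lessRecently = e->lessRecently;
    }

    int
    main(void)
    {
        init_cache();
        for (int i = 1; i <= NENTRIES; i++)
        {
            entries[i].key = i;
            mark_most_recent(&entries[i]);
        }

        /* The eviction victim is the least recently used entry. */
        CacheEntry *victim = entries[0].moreRecently;
        printf("victim key = %d\n", victim->key);   /* prints 1 */

        /* Re-touching an entry just unlinks it and re-splices it at the head. */
        unlink_entry(victim);
        mark_most_recent(victim);
        printf("new victim key = %d\n", entries[0].moreRecently->key);  /* 2 */
        return 0;
    }

In this arrangement the sentinel's moreRecently neighbour is the next eviction victim and its lessRecently neighbour is the entry touched last, which appears to match the convention the reformatted code follows.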
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index b9bb36bc0ca..bfc4cc2a454 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.46 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.47 2001/03/22 03:59:18 momjian Exp $
*
* NOTES
* See acl.h.
@@ -34,7 +34,7 @@
#include "utils/syscache.h"
static int32 aclcheck(char *relname, Acl *acl, AclId id,
- AclIdType idtype, AclMode mode);
+ AclIdType idtype, AclMode mode);
/* warning messages, now more explicit. */
/* MUST correspond to the order of the ACLCHK_* result codes in acl.h. */
@@ -59,7 +59,7 @@ dumpacl(Acl *acl)
for (i = 0; i < ACL_NUM(acl); ++i)
elog(DEBUG, " acl[%d]: %s", i,
DatumGetCString(DirectFunctionCall1(aclitemout,
- PointerGetDatum(aip + i))));
+ PointerGetDatum(aip + i))));
}
#endif
@@ -250,8 +250,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
num;
/*
- * If ACL is null, default to "OK" --- this should not happen,
- * since caller should have inserted appropriate default
+ * If ACL is null, default to "OK" --- this should not happen, since
+ * caller should have inserted appropriate default
*/
if (!acl)
{
@@ -265,8 +265,8 @@ aclcheck(char *relname, Acl *acl, AclId id, AclIdType idtype, AclMode mode)
/*
* We'll treat the empty ACL like that, too, although this is more
* like an error (i.e., you manually blew away your ACL array) -- the
- * system never creates an empty ACL, since there must always be
- * a "world" entry in the first slot.
+ * system never creates an empty ACL, since there must always be a
+ * "world" entry in the first slot.
*/
if (num < 1)
{
@@ -352,7 +352,7 @@ pg_aclcheck(char *relname, Oid userid, AclMode mode)
{
int32 result;
HeapTuple tuple;
- char *usename;
+ char *usename;
Datum aclDatum;
bool isNull;
Acl *acl;
@@ -439,7 +439,7 @@ pg_ownercheck(Oid userid,
{
HeapTuple tuple;
AclId owner_id;
- char *usename;
+ char *usename;
tuple = SearchSysCache(SHADOWSYSID,
ObjectIdGetDatum(userid),
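The aclchk.c hunks only rewrap comments, but the comments describe the checking rules themselves: a null ACL is treated as "OK" (the caller should have installed a default), an empty ACL is tolerated the same way, and a well-formed ACL always carries a "world" entry in its first slot with per-user grants after it. Below is a deliberately simplified standalone model of that shape; AclEntry, the mode bits and the boolean result are assumptions, and the lookup order and result codes of the real aclcheck() differ.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define MODE_SELECT 0x01
    #define MODE_UPDATE 0x02

    typedef struct AclEntry
    {
        int         user_id;        /* 0 means the "world" entry */
        uint32_t    modes;          /* bitmask of granted modes */
    } AclEntry;

    /*
     * Simplified check: a NULL/empty ACL means "no restrictions recorded",
     * slot 0 is the world entry, later slots grant rights to specific users.
     */
    static bool
    acl_allows(const AclEntry *acl, int nentries, int user_id, uint32_t mode)
    {
        if (acl == NULL || nentries < 1)
            return true;

        if (acl[0].modes & mode)    /* world entry grants it to everyone */
            return true;

        for (int i = 1; i < nentries; i++)
            if (acl[i].user_id == user_id && (acl[i].modes & mode))
                return true;

        return false;
    }

    int
    main(void)
    {
        AclEntry    acl[] = {{0, MODE_SELECT}, {42, MODE_SELECT | MODE_UPDATE}};

        printf("%d\n", acl_allows(acl, 2, 7, MODE_SELECT));   /* 1: via world */
        printf("%d\n", acl_allows(acl, 2, 7, MODE_UPDATE));   /* 0 */
        printf("%d\n", acl_allows(acl, 2, 42, MODE_UPDATE));  /* 1 */
        return 0;
    }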
diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c
index 0952a079f77..7d10bdea7a7 100644
--- a/src/backend/catalog/catalog.c
+++ b/src/backend/catalog/catalog.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.39 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.40 2001/03/22 03:59:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -105,7 +105,7 @@ relpath_blind(const char *dbname, const char *relname,
return path;
}
-#else /* ! OLD_FILE_NAMING */
+#else /* ! OLD_FILE_NAMING */
/*
* relpath - construct path to a relation's file
@@ -118,7 +118,7 @@ relpath(RelFileNode rnode)
{
char *path;
- if (rnode.tblNode == (Oid) 0) /* "global tablespace" */
+ if (rnode.tblNode == (Oid) 0) /* "global tablespace" */
{
/* Shared system relations live in {datadir}/global */
path = (char *) palloc(strlen(DataDir) + 8 + sizeof(NameData) + 1);
@@ -127,8 +127,8 @@ relpath(RelFileNode rnode)
else
{
path = (char *) palloc(strlen(DataDir) + 6 + 2 * sizeof(NameData) + 3);
- sprintf(path, "%s%cbase%c%u%c%u", DataDir, SEP_CHAR, SEP_CHAR,
- rnode.tblNode, SEP_CHAR, rnode.relNode);
+ sprintf(path, "%s%cbase%c%u%c%u", DataDir, SEP_CHAR, SEP_CHAR,
+ rnode.tblNode, SEP_CHAR, rnode.relNode);
}
return path;
}
@@ -144,7 +144,7 @@ GetDatabasePath(Oid tblNode)
{
char *path;
- if (tblNode == (Oid) 0) /* "global tablespace" */
+ if (tblNode == (Oid) 0) /* "global tablespace" */
{
/* Shared system relations live in {datadir}/global */
path = (char *) palloc(strlen(DataDir) + 8);
@@ -158,7 +158,7 @@ GetDatabasePath(Oid tblNode)
return path;
}
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
/*
* IsSystemRelationName
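catalog.c's relpath() builds the physical file name for a relation: shared relations (database OID zero, the "global tablespace") live under {datadir}/global, and everything else under {datadir}/base/{database oid}/{relfilenode}. A rough sketch of that mapping with made-up inputs; the exact buffer sizing and error handling of the original are omitted.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Illustrative only: not the real relpath(). */
    static char *
    sketch_relpath(const char *datadir, unsigned dboid, unsigned relnode)
    {
        char       *path = malloc(strlen(datadir) + 64);

        if (path == NULL)
            return NULL;
        if (dboid == 0)             /* "global tablespace" */
            sprintf(path, "%s/global/%u", datadir, relnode);
        else
            sprintf(path, "%s/base/%u/%u", datadir, dboid, relnode);
        return path;
    }

    int
    main(void)
    {
        char       *p = sketch_relpath("/usr/local/pgsql/data", 16384, 24576);

        if (p != NULL)
        {
            puts(p);                /* /usr/local/pgsql/data/base/16384/24576 */
            free(p);
        }
        return 0;
    }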
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index f0aa9042e02..34a22412c39 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.160 2001/02/14 21:34:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.161 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -68,7 +68,7 @@
static void AddNewRelationTuple(Relation pg_class_desc,
- Relation new_rel_desc, Oid new_rel_oid, Oid new_type_oid,
+ Relation new_rel_desc, Oid new_rel_oid, Oid new_type_oid,
int natts, char relkind, char *temp_relname);
static void DeleteAttributeTuples(Relation rel);
static void DeleteRelationTuple(Relation rel);
@@ -76,7 +76,7 @@ static void DeleteTypeTuple(Relation rel);
static void RelationRemoveIndexes(Relation relation);
static void RelationRemoveInheritance(Relation relation);
static void AddNewRelationType(char *typeName, Oid new_rel_oid,
- Oid new_type_oid);
+ Oid new_type_oid);
static void StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
bool updatePgAttribute);
static void StoreRelCheck(Relation rel, char *ccname, char *ccbin);
@@ -178,13 +178,13 @@ heap_create(char *relname,
{
static unsigned int uniqueId = 0;
- Oid relid;
- Relation rel;
- bool nailme = false;
- int natts = tupDesc->natts;
- int i;
- MemoryContext oldcxt;
- Oid tblNode = MyDatabaseId;
+ Oid relid;
+ Relation rel;
+ bool nailme = false;
+ int natts = tupDesc->natts;
+ int i;
+ MemoryContext oldcxt;
+ Oid tblNode = MyDatabaseId;
/* ----------------
* sanity checks
@@ -270,7 +270,11 @@ heap_create(char *relname,
if (istemp)
{
- /* replace relname of caller with a unique name for a temp relation */
+
+ /*
+ * replace relname of caller with a unique name for a temp
+ * relation
+ */
snprintf(relname, NAMEDATALEN, "pg_temp.%d.%u",
(int) MyProcPid, uniqueId++);
}
@@ -738,6 +742,7 @@ AddNewRelationTuple(Relation pg_class_desc,
static void
AddNewRelationType(char *typeName, Oid new_rel_oid, Oid new_type_oid)
{
+
/*
* The sizes are set to oid size because it makes implementing sets
* MUCH easier, and no one (we hope) uses these fields to figure out
@@ -1025,9 +1030,7 @@ RelationRemoveInheritance(Relation relation)
&entry);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
simple_heap_delete(catalogRelation, &tuple->t_self);
- }
heap_endscan(scan);
heap_close(catalogRelation, RowExclusiveLock);
@@ -1152,8 +1155,8 @@ RelationTruncateIndexes(Oid heapId)
/*
* We have to re-open the heap rel each time through this loop
* because index_build will close it again. We need grab no lock,
- * however, because we assume heap_truncate is holding an exclusive
- * lock on the heap rel.
+ * however, because we assume heap_truncate is holding an
+ * exclusive lock on the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
@@ -1164,8 +1167,8 @@ RelationTruncateIndexes(Oid heapId)
LockRelation(currentIndex, AccessExclusiveLock);
/*
- * Drop any buffers associated with this index. If they're
- * dirty, they're just dropped without bothering to flush to disk.
+ * Drop any buffers associated with this index. If they're dirty,
+ * they're just dropped without bothering to flush to disk.
*/
DropRelationBuffers(currentIndex);
@@ -1177,6 +1180,7 @@ RelationTruncateIndexes(Oid heapId)
InitIndexStrategy(indexInfo->ii_NumIndexAttrs,
currentIndex, accessMethodId);
index_build(heapRelation, currentIndex, indexInfo, NULL);
+
/*
* index_build will close both the heap and index relations (but
* not give up the locks we hold on them).
@@ -1514,7 +1518,7 @@ heap_drop_with_catalog(const char *relname,
if (has_toasttable)
{
- char toast_relname[NAMEDATALEN];
+ char toast_relname[NAMEDATALEN];
sprintf(toast_relname, "pg_toast_%u", rid);
heap_drop_with_catalog(toast_relname, true);
@@ -1553,16 +1557,16 @@ StoreAttrDefault(Relation rel, AttrNumber attnum, char *adbin,
* deparse it
*/
adsrc = deparse_expression(expr,
- deparse_context_for(RelationGetRelationName(rel),
- RelationGetRelid(rel)),
+ deparse_context_for(RelationGetRelationName(rel),
+ RelationGetRelid(rel)),
false);
values[Anum_pg_attrdef_adrelid - 1] = RelationGetRelid(rel);
values[Anum_pg_attrdef_adnum - 1] = attnum;
values[Anum_pg_attrdef_adbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(adbin));
+ CStringGetDatum(adbin));
values[Anum_pg_attrdef_adsrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(adsrc));
+ CStringGetDatum(adsrc));
adrel = heap_openr(AttrDefaultRelationName, RowExclusiveLock);
tuple = heap_formtuple(adrel->rd_att, values, nulls);
heap_insert(adrel, tuple);
@@ -1631,17 +1635,17 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
* deparse it
*/
ccsrc = deparse_expression(expr,
- deparse_context_for(RelationGetRelationName(rel),
- RelationGetRelid(rel)),
+ deparse_context_for(RelationGetRelationName(rel),
+ RelationGetRelid(rel)),
false);
values[Anum_pg_relcheck_rcrelid - 1] = RelationGetRelid(rel);
values[Anum_pg_relcheck_rcname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(ccname));
+ CStringGetDatum(ccname));
values[Anum_pg_relcheck_rcbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(ccbin));
+ CStringGetDatum(ccbin));
values[Anum_pg_relcheck_rcsrc - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(ccsrc));
+ CStringGetDatum(ccsrc));
rcrel = heap_openr(RelCheckRelationName, RowExclusiveLock);
tuple = heap_formtuple(rcrel->rd_att, values, nulls);
heap_insert(rcrel, tuple);
@@ -1981,9 +1985,7 @@ RemoveAttrDefault(Relation rel)
adscan = heap_beginscan(adrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(adscan, 0)))
- {
simple_heap_delete(adrel, &tup->t_self);
- }
heap_endscan(adscan);
heap_close(adrel, RowExclusiveLock);
@@ -2005,9 +2007,7 @@ RemoveRelCheck(Relation rel)
rcscan = heap_beginscan(rcrel, 0, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tup = heap_getnext(rcscan, 0)))
- {
simple_heap_delete(rcrel, &tup->t_self);
- }
heap_endscan(rcscan);
heap_close(rcrel, RowExclusiveLock);
@@ -2044,9 +2044,7 @@ RemoveStatistics(Relation rel)
scan = heap_beginscan(pgstatistic, false, SnapshotNow, 1, &key);
while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
simple_heap_delete(pgstatistic, &tuple->t_self);
- }
heap_endscan(scan);
heap_close(pgstatistic, RowExclusiveLock);
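Among the heap.c changes, the reindented block in heap_create() shows how temporary relations get their on-disk names: the caller's name is replaced by pg_temp.<backend pid>.<per-backend counter>, which stays unique across concurrent backends without any coordination. A small sketch of the same scheme; the NAMEDATALEN value is assumed and this is not the real function.

    #include <stdio.h>
    #include <unistd.h>

    #define NAMEDATALEN 32          /* assumed identifier limit for the sketch */

    /* Unique temp name from process id plus a per-process counter. */
    static void
    make_temp_relname(char *buf)
    {
        static unsigned int uniqueId = 0;

        snprintf(buf, NAMEDATALEN, "pg_temp.%d.%u", (int) getpid(), uniqueId++);
    }

    int
    main(void)
    {
        char        name[NAMEDATALEN];

        make_temp_relname(name);
        puts(name);
        make_temp_relname(name);
        puts(name);                 /* same pid, counter advanced */
        return 0;
    }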
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 6dd28ed02cb..103c4ccc016 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.142 2001/02/23 09:31:52 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.143 2001/03/22 03:59:19 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -63,19 +63,19 @@ static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName,
bool istemp);
static TupleDesc BuildFuncTupleDesc(Oid funcOid);
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
- int numatts, AttrNumber *attNums);
+ int numatts, AttrNumber *attNums);
static void ConstructIndexReldesc(Relation indexRelation, Oid amoid);
static Oid UpdateRelationRelation(Relation indexRelation, char *temp_relname);
static void InitializeAttributeOids(Relation indexRelation,
int numatts, Oid indexoid);
static void AppendAttributeTuples(Relation indexRelation, int numatts);
static void UpdateIndexRelation(Oid indexoid, Oid heapoid,
- IndexInfo *indexInfo,
- Oid *classOids,
- bool islossy, bool primary);
+ IndexInfo *indexInfo,
+ Oid *classOids,
+ bool islossy, bool primary);
static void DefaultBuild(Relation heapRelation, Relation indexRelation,
- IndexInfo *indexInfo, Node *oldPred,
- IndexStrategy indexStrategy);
+ IndexInfo *indexInfo, Node *oldPred,
+ IndexStrategy indexStrategy);
static Oid IndexGetRelation(Oid indexId);
static bool activate_index(Oid indexId, bool activate, bool inplace);
@@ -301,7 +301,8 @@ ConstructTupleDescriptor(Relation heapRelation,
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
/*
- * Fix the stuff that should not be the same as the underlying attr
+ * Fix the stuff that should not be the same as the underlying
+ * attr
*/
to->attnum = i + 1;
@@ -311,9 +312,9 @@ ConstructTupleDescriptor(Relation heapRelation,
to->attcacheoff = -1;
/*
- * We do not yet have the correct relation OID for the index,
- * so just set it invalid for now. InitializeAttributeOids()
- * will fix it later.
+ * We do not yet have the correct relation OID for the index, so
+ * just set it invalid for now. InitializeAttributeOids() will
+ * fix it later.
*/
to->attrelid = InvalidOid;
}
@@ -331,7 +332,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* typically CacheMemoryContext).
*
* There was a note here about adding indexing, but I don't see a need
- * for it. There are so few tuples in pg_am that an indexscan would
+ * for it. There are so few tuples in pg_am that an indexscan would
* surely be slower.
* ----------------------------------------------------------------
*/
@@ -394,7 +395,7 @@ static void
ConstructIndexReldesc(Relation indexRelation, Oid amoid)
{
indexRelation->rd_am = AccessMethodObjectIdGetForm(amoid,
- CacheMemoryContext);
+ CacheMemoryContext);
/* ----------------
* XXX missing the initialization of some other fields
@@ -625,12 +626,12 @@ UpdateIndexRelation(Oid indexoid,
{
predString = nodeToString(indexInfo->ii_Predicate);
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(predString)));
+ CStringGetDatum(predString)));
pfree(predString);
}
else
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum("")));
+ CStringGetDatum("")));
predLen = VARSIZE(predText);
itupLen = predLen + sizeof(FormData_pg_index);
@@ -646,7 +647,7 @@ UpdateIndexRelation(Oid indexoid,
indexForm->indproc = indexInfo->ii_FuncOid;
indexForm->indisclustered = false;
indexForm->indislossy = islossy;
- indexForm->indhaskeytype = true; /* not actually used anymore */
+ indexForm->indhaskeytype = true; /* not actually used anymore */
indexForm->indisunique = indexInfo->ii_Unique;
indexForm->indisprimary = primary;
memcpy((char *) &indexForm->indpred, (char *) predText, predLen);
@@ -747,12 +748,12 @@ UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate)
{
predString = nodeToString(newPred);
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(predString)));
+ CStringGetDatum(predString)));
pfree(predString);
}
else
predText = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum("")));
+ CStringGetDatum("")));
/* open the index system catalog relation */
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
@@ -911,15 +912,15 @@ index_create(char *heapRelationName,
else
indexTupDesc = ConstructTupleDescriptor(heapRelation,
indexInfo->ii_NumKeyAttrs,
- indexInfo->ii_KeyAttrNumbers);
+ indexInfo->ii_KeyAttrNumbers);
if (istemp)
{
/* save user relation name because heap_create changes it */
- temp_relname = pstrdup(indexRelationName); /* save original value */
+ temp_relname = pstrdup(indexRelationName); /* save original value */
indexRelationName = palloc(NAMEDATALEN);
- strcpy(indexRelationName, temp_relname); /* heap_create will
- * change this */
+ strcpy(indexRelationName, temp_relname); /* heap_create will
+ * change this */
}
/* ----------------
@@ -1008,9 +1009,7 @@ index_create(char *heapRelationName,
/* XXX shouldn't we close the heap and index rels here? */
}
else
- {
index_build(heapRelation, indexRelation, indexInfo, NULL);
- }
}
/* ----------------------------------------------------------------
@@ -1081,12 +1080,12 @@ index_drop(Oid indexId)
heap_freetuple(tuple);
/*
- * Update the pg_class tuple for the owning relation. We are presently
- * too lazy to attempt to compute the new correct value of relhasindex
- * (the next VACUUM will fix it if necessary). But we must send out a
- * shared-cache-inval notice on the owning relation to ensure other
- * backends update their relcache lists of indexes. So, unconditionally
- * do setRelhasindex(true).
+ * Update the pg_class tuple for the owning relation. We are
+ * presently too lazy to attempt to compute the new correct value of
+ * relhasindex (the next VACUUM will fix it if necessary). But we
+ * must send out a shared-cache-inval notice on the owning relation to
+ * ensure other backends update their relcache lists of indexes. So,
+ * unconditionally do setRelhasindex(true).
*/
setRelhasindex(heapId, true);
@@ -1160,11 +1159,11 @@ index_drop(Oid indexId)
*
* IndexInfo stores the information about the index that's needed by
* FormIndexDatum, which is used for both index_build() and later insertion
- * of individual index tuples. Normally we build an IndexInfo for an index
+ * of individual index tuples. Normally we build an IndexInfo for an index
* just once per command, and then use it for (potentially) many tuples.
* ----------------
*/
-IndexInfo *
+IndexInfo *
BuildIndexInfo(HeapTuple indexTuple)
{
Form_pg_index indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
@@ -1199,7 +1198,7 @@ BuildIndexInfo(HeapTuple indexTuple)
{
ii->ii_NumIndexAttrs = 1;
/* Do a lookup on the function, too */
- fmgr_info(indexStruct->indproc, & ii->ii_FuncInfo);
+ fmgr_info(indexStruct->indproc, &ii->ii_FuncInfo);
}
else
ii->ii_NumIndexAttrs = numKeys;
@@ -1213,7 +1212,7 @@ BuildIndexInfo(HeapTuple indexTuple)
char *predString;
predString = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&indexStruct->indpred)));
+ PointerGetDatum(&indexStruct->indpred)));
ii->ii_Predicate = stringToNode(predString);
pfree(predString);
}
@@ -1262,8 +1261,8 @@ FormIndexDatum(IndexInfo *indexInfo,
* Functional index --- compute the single index attribute
* ----------------
*/
- FunctionCallInfoData fcinfo;
- bool anynull = false;
+ FunctionCallInfoData fcinfo;
+ bool anynull = false;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &indexInfo->ii_FuncInfo;
@@ -1326,8 +1325,8 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
Relation relationRelation;
/*
- * NOTE: get and hold RowExclusiveLock on pg_class, because caller will
- * probably modify the rel's pg_class tuple later on.
+ * NOTE: get and hold RowExclusiveLock on pg_class, because caller
+ * will probably modify the rel's pg_class tuple later on.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
classTuple = SearchSysCache(RELOID, PointerGetDatum(relid),
@@ -1342,7 +1341,7 @@ LockClassinfoForUpdate(Oid relid, HeapTuple rtup,
while (1)
{
- ItemPointerData tidsave;
+ ItemPointerData tidsave;
ItemPointerCopy(&(rtup->t_self), &tidsave);
test = heap_mark4update(relationRelation, rtup, buffer);
@@ -1393,7 +1392,7 @@ IndexesAreActive(Oid relid, bool confirmCommitted)
if (!LockClassinfoForUpdate(relid, &tuple, &buffer, confirmCommitted))
elog(ERROR, "IndexesAreActive couldn't lock %u", relid);
if (((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_RELATION &&
- ((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_TOASTVALUE)
+ ((Form_pg_class) GETSTRUCT(&tuple))->relkind != RELKIND_TOASTVALUE)
elog(ERROR, "relation %u isn't an indexable relation", relid);
isactive = ((Form_pg_class) GETSTRUCT(&tuple))->relhasindex;
ReleaseBuffer(buffer);
@@ -1438,7 +1437,7 @@ setRelhasindex(Oid relid, bool hasindex)
if (!IsIgnoringSystemIndexes())
#else
if (!IsIgnoringSystemIndexes() && (!IsReindexProcessing() || pg_class->rd_rel->relhasindex))
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
{
tuple = SearchSysCacheCopy(RELOID,
ObjectIdGetDatum(relid),
@@ -1513,18 +1512,19 @@ setRelhasindex(Oid relid, bool hasindex)
void
setNewRelfilenode(Relation relation)
{
- Relation pg_class, idescs[Num_pg_class_indices];
- Oid newrelfilenode;
+ Relation pg_class,
+ idescs[Num_pg_class_indices];
+ Oid newrelfilenode;
bool in_place_update = false;
- HeapTupleData lockTupleData;
- HeapTuple classTuple = NULL;
+ HeapTupleData lockTupleData;
+ HeapTuple classTuple = NULL;
Buffer buffer;
- RelationData workrel;
-
+ RelationData workrel;
+
Assert(!IsSystemRelationName(NameStr(relation->rd_rel->relname)) || relation->rd_rel->relkind == RELKIND_INDEX);
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
- /* Fetch and lock the classTuple associated with this relation */
+ /* Fetch and lock the classTuple associated with this relation */
if (!LockClassinfoForUpdate(relation->rd_id, &lockTupleData, &buffer, true))
elog(ERROR, "setNewRelfilenode impossible to lock class tuple");
if (IsIgnoringSystemIndexes())
@@ -1567,7 +1567,7 @@ setNewRelfilenode(Relation relation)
if (!in_place_update && pg_class->rd_rel->relhasindex)
{
CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices,
- idescs);
+ idescs);
CatalogIndexInsert(idescs, Num_pg_class_indices, pg_class, classTuple);
CatalogCloseIndices(Num_pg_class_indices, idescs);
}
@@ -1577,7 +1577,8 @@ setNewRelfilenode(Relation relation)
/* Make sure the relfilenode change */
CommandCounterIncrement();
}
-#endif /* OLD_FILE_NAMING */
+
+#endif /* OLD_FILE_NAMING */
/* ----------------
* UpdateStats
@@ -1639,7 +1640,7 @@ UpdateStats(Oid relid, long reltuples)
in_place_upd = (IsReindexProcessing() || IsBootstrapProcessingMode());
#else
in_place_upd = (IsIgnoringSystemIndexes() || IsReindexProcessing());
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
if (!in_place_upd)
{
@@ -1713,9 +1714,10 @@ UpdateStats(Oid relid, long reltuples)
*/
if (in_place_upd)
{
+
/*
* At bootstrap time, we don't need to worry about concurrency or
- * visibility of changes, so we cheat. Also cheat if REINDEX.
+ * visibility of changes, so we cheat. Also cheat if REINDEX.
*/
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
@@ -1777,7 +1779,7 @@ DefaultBuild(Relation heapRelation,
Relation indexRelation,
IndexInfo *indexInfo,
Node *oldPred,
- IndexStrategy indexStrategy) /* not used */
+ IndexStrategy indexStrategy) /* not used */
{
HeapScanDesc scan;
HeapTuple heapTuple;
@@ -1787,9 +1789,11 @@ DefaultBuild(Relation heapRelation,
long reltuples,
indtuples;
Node *predicate = indexInfo->ii_Predicate;
+
#ifndef OMIT_PARTIAL_INDEX
TupleTable tupleTable;
TupleTableSlot *slot;
+
#endif
ExprContext *econtext;
InsertIndexResult insertResult;
@@ -1855,6 +1859,7 @@ DefaultBuild(Relation heapRelation,
reltuples++;
#ifndef OMIT_PARTIAL_INDEX
+
/*
* If oldPred != NULL, this is an EXTEND INDEX command, so skip
* this tuple if it was already in the existing partial index
@@ -1906,9 +1911,7 @@ DefaultBuild(Relation heapRelation,
#ifndef OMIT_PARTIAL_INDEX
if (predicate != NULL || oldPred != NULL)
- {
ExecDropTupleTable(tupleTable, true);
- }
#endif /* OMIT_PARTIAL_INDEX */
FreeExprContext(econtext);
@@ -1972,7 +1975,7 @@ index_build(Relation heapRelation,
PointerGetDatum(indexRelation),
PointerGetDatum(indexInfo),
PointerGetDatum(oldPred),
- PointerGetDatum(RelationGetIndexStrategy(indexRelation)));
+ PointerGetDatum(RelationGetIndexStrategy(indexRelation)));
else
DefaultBuild(heapRelation,
indexRelation,
@@ -2087,21 +2090,22 @@ reindex_index(Oid indexId, bool force, bool inplace)
#ifndef OLD_FILE_NAMING
if (!inplace)
- {
- inplace = IsSharedSystemRelationName(NameStr(iRel->rd_rel->relname));
+ {
+ inplace = IsSharedSystemRelationName(NameStr(iRel->rd_rel->relname));
if (!inplace)
setNewRelfilenode(iRel);
}
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
/* Obtain exclusive lock on it, just to be sure */
LockRelation(iRel, AccessExclusiveLock);
if (inplace)
{
+
/*
- * Release any buffers associated with this index. If they're dirty,
- * they're just dropped without bothering to flush to disk.
- */
+ * Release any buffers associated with this index. If they're
+ * dirty, they're just dropped without bothering to flush to disk.
+ */
DropRelationBuffers(iRel);
/* Now truncate the actual data and set blocks to zero */
@@ -2115,7 +2119,7 @@ reindex_index(Oid indexId, bool force, bool inplace)
/*
* index_build will close both the heap and index relations (but not
- * give up the locks we hold on them). So we're done.
+ * give up the locks we hold on them). So we're done.
*/
SetReindexProcessing(old);
@@ -2164,31 +2168,37 @@ reindex_relation(Oid relid, bool force)
bool old,
reindexed;
- bool deactivate_needed, overwrite, upd_pg_class_inplace;
+ bool deactivate_needed,
+ overwrite,
+ upd_pg_class_inplace;
+
#ifdef OLD_FILE_NAMING
- overwrite = upd_pg_class_inplace = deactivate_needed = true;
+ overwrite = upd_pg_class_inplace = deactivate_needed = true;
#else
- Relation rel;
- overwrite = upd_pg_class_inplace = deactivate_needed = false;
+ Relation rel;
+
+ overwrite = upd_pg_class_inplace = deactivate_needed = false;
+
/*
- * avoid heap_update() pg_class tuples while processing
- * reindex for pg_class.
- */
+ * avoid heap_update() pg_class tuples while processing reindex for
+ * pg_class.
+ */
if (IsIgnoringSystemIndexes())
upd_pg_class_inplace = true;
+
/*
* ignore the indexes of the target system relation while processing
* reindex.
- */
+ */
rel = RelationIdGetRelation(relid);
if (!IsIgnoringSystemIndexes() && IsSystemRelationName(NameStr(rel->rd_rel->relname)))
deactivate_needed = true;
-#ifndef ENABLE_REINDEX_NAILED_RELATIONS
- /*
- * nailed relations are never updated.
- * We couldn't keep the consistency between the relation
- * descriptors and pg_class tuples.
- */
+#ifndef ENABLE_REINDEX_NAILED_RELATIONS
+
+ /*
+ * nailed relations are never updated. We couldn't keep the
+ * consistency between the relation descriptors and pg_class tuples.
+ */
if (rel->rd_isnailed)
{
if (IsIgnoringSystemIndexes())
@@ -2199,10 +2209,11 @@ reindex_relation(Oid relid, bool force)
else
elog(ERROR, "the target relation %u is nailed", relid);
}
-#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+#endif /* ENABLE_REINDEX_NAILED_RELATIONS */
+
/*
- * Shared system indexes must be overwritten because it's
- * impossible to update pg_class tuples of all databases.
+ * Shared system indexes must be overwritten because it's impossible
+ * to update pg_class tuples of all databases.
*/
if (IsSharedSystemRelationName(NameStr(rel->rd_rel->relname)))
{
@@ -2215,7 +2226,7 @@ reindex_relation(Oid relid, bool force)
elog(ERROR, "the target relation %u is shared", relid);
}
RelationClose(rel);
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
old = SetReindexProcessing(true);
if (deactivate_needed)
{
@@ -2252,24 +2263,27 @@ reindex_relation(Oid relid, bool force)
heap_endscan(scan);
heap_close(indexRelation, AccessShareLock);
if (reindexed)
- /*
- * Ok,we could use the reindexed indexes of the target
- * system relation now.
- */
- {
+
+ /*
+ * Ok,we could use the reindexed indexes of the target system
+ * relation now.
+ */
+ {
if (deactivate_needed)
{
if (!overwrite && relid == RelOid_pg_class)
{
- /*
- * For pg_class, relhasindex should be set
- * to true here in place.
+
+ /*
+ * For pg_class, relhasindex should be set to true here in
+ * place.
*/
setRelhasindex(relid, true);
CommandCounterIncrement();
- /*
- * However the following setRelhasindex()
- * is needed to keep consistency with WAL.
+
+ /*
+ * However the following setRelhasindex() is needed to
+ * keep consistency with WAL.
*/
}
setRelhasindex(relid, true);
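Several of the index.c hunks touch BuildIndexInfo()/FormIndexDatum(), which also cover functional indexes: instead of copying key columns directly, such an index stores a single attribute computed by applying the index's function to the row. A toy standalone version of that idea, with an invented Person row type and lower_name() standing in for the index function:

    #include <stdio.h>
    #include <ctype.h>

    typedef struct Person
    {
        char        name[32];
        int         age;
    } Person;

    /* The "index function": lower-case the name column. */
    static void
    lower_name(const Person *p, char *out, size_t outlen)
    {
        size_t      i;

        for (i = 0; i + 1 < outlen && p->name[i] != '\0'; i++)
            out[i] = (char) tolower((unsigned char) p->name[i]);
        out[i] = '\0';
    }

    /* "FormIndexDatum" for this toy index: one key, produced by the function. */
    static void
    form_index_key(const Person *p, char *key, size_t keylen)
    {
        lower_name(p, key, keylen);
    }

    int
    main(void)
    {
        Person      p = {"Alice", 30};
        char        key[32];

        form_index_key(&p, key, sizeof(key));
        printf("index key: %s\n", key);     /* "alice" */
        return 0;
    }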
diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c
index 51337004cbe..8261e9dcfcb 100644
--- a/src/backend/catalog/indexing.c
+++ b/src/backend/catalog/indexing.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.76 2001/01/24 19:42:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.77 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -124,7 +124,7 @@ CatalogCloseIndices(int nIndices, Relation *idescs)
* NOTE: since this routine looks up all the pg_index data on each call,
* it's relatively inefficient for inserting a large number of tuples into
* the same catalog. We use it only for inserting one or a few tuples
- * in a given command. See ExecOpenIndices() and related routines if you
+ * in a given command. See ExecOpenIndices() and related routines if you
* are inserting tuples in bulk.
*
* NOTE: we do not bother to handle partial indices. Nor do we try to
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index e9a0450a7a1..7157ffb2c98 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.37 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.38 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,7 +58,7 @@ AggregateCreate(char *aggName,
Datum values[Natts_pg_aggregate];
Form_pg_proc proc;
Oid transfn;
- Oid finalfn = InvalidOid; /* can be omitted */
+ Oid finalfn = InvalidOid; /* can be omitted */
Oid basetype;
Oid transtype;
Oid finaltype;
@@ -79,8 +79,8 @@ AggregateCreate(char *aggName,
/*
* Handle the aggregate's base type (input data type). This can be
- * specified as 'ANY' for a data-independent transition function,
- * such as COUNT(*).
+ * specified as 'ANY' for a data-independent transition function, such
+ * as COUNT(*).
*/
basetype = GetSysCacheOid(TYPENAME,
PointerGetDatum(aggbasetypeName),
@@ -118,9 +118,7 @@ AggregateCreate(char *aggName,
nargs = 2;
}
else
- {
nargs = 1;
- }
tup = SearchSysCache(PROCNAME,
PointerGetDatum(aggtransfnName),
Int32GetDatum(nargs),
@@ -134,16 +132,17 @@ AggregateCreate(char *aggName,
if (proc->prorettype != transtype)
elog(ERROR, "AggregateCreate: return type of '%s' is not '%s'",
aggtransfnName, aggtranstypeName);
+
/*
- * If the transfn is strict and the initval is NULL, make sure
- * input type and transtype are the same (or at least binary-
- * compatible), so that it's OK to use the first input value
- * as the initial transValue.
+ * If the transfn is strict and the initval is NULL, make sure input
+ * type and transtype are the same (or at least binary- compatible),
+ * so that it's OK to use the first input value as the initial
+ * transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
if (basetype != transtype &&
- ! IS_BINARY_COMPATIBLE(basetype, transtype))
+ !IS_BINARY_COMPATIBLE(basetype, transtype))
elog(ERROR, "AggregateCreate: must not omit initval when transfn is strict and transtype is not compatible with input type");
}
ReleaseSysCache(tup);
@@ -168,6 +167,7 @@ AggregateCreate(char *aggName,
}
else
{
+
/*
* If no finalfn, aggregate result type is type of the state value
*/
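pg_aggregate.c's rewrapped comment states the rule that matters here: if the transition function is strict and no initial value is supplied, the first input value is used as the initial transition state, so the input type and the state type must be the same or at least binary-compatible. A worked standalone example of an aggregate evaluated under exactly that rule (max over ints, with no initval); the names are inventions for the sketch.

    #include <stdio.h>
    #include <stdbool.h>

    /* Strict transition function: max of current state and next input. */
    static int
    trans_max(int state, int next)
    {
        return (next > state) ? next : state;
    }

    static bool
    agg_max(const int *vals, int n, int *result)
    {
        bool        have_state = false;
        int         state = 0;

        for (int i = 0; i < n; i++)
        {
            if (!have_state)
            {
                state = vals[i];    /* first input seeds the state */
                have_state = true;
            }
            else
                state = trans_max(state, vals[i]);
        }
        if (have_state)
            *result = state;
        return have_state;          /* false means no rows: result is NULL */
    }

    int
    main(void)
    {
        int         vals[] = {3, 9, 4};
        int         r;

        if (agg_max(vals, 3, &r))
            printf("max = %d\n", r);        /* 9 */
        return 0;
    }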
diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c
index 688b96ed84b..2becb34929f 100644
--- a/src/backend/catalog/pg_largeobject.c
+++ b/src/backend/catalog/pg_largeobject.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.7 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_largeobject.c,v 1.8 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ LargeObjectCreate(Oid loid)
*/
for (i = 0; i < Natts_pg_largeobject; i++)
{
- values[i] = (Datum)NULL;
+ values[i] = (Datum) NULL;
nulls[i] = ' ';
}
@@ -60,7 +60,7 @@ LargeObjectCreate(Oid loid)
values[i++] = Int32GetDatum(0);
values[i++] = DirectFunctionCall1(byteain,
CStringGetDatum(""));
-
+
ntup = heap_formtuple(pg_largeobject->rd_att, values, nulls);
/*
@@ -77,7 +77,7 @@ LargeObjectCreate(Oid loid)
CatalogIndexInsert(idescs, Num_pg_largeobject_indices, pg_largeobject, ntup);
CatalogCloseIndices(Num_pg_largeobject_indices, idescs);
}
-
+
heap_close(pg_largeobject, RowExclusiveLock);
heap_freetuple(ntup);
@@ -91,9 +91,9 @@ LargeObjectDrop(Oid loid)
bool found = false;
Relation pg_largeobject;
Relation pg_lo_idx;
- ScanKeyData skey[1];
+ ScanKeyData skey[1];
IndexScanDesc sd;
- RetrieveIndexResult indexRes;
+ RetrieveIndexResult indexRes;
HeapTupleData tuple;
Buffer buffer;
@@ -139,9 +139,9 @@ LargeObjectExists(Oid loid)
bool retval = false;
Relation pg_largeobject;
Relation pg_lo_idx;
- ScanKeyData skey[1];
+ ScanKeyData skey[1];
IndexScanDesc sd;
- RetrieveIndexResult indexRes;
+ RetrieveIndexResult indexRes;
HeapTupleData tuple;
Buffer buffer;
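pg_largeobject.c stores large-object data as one catalog row per chunk, keyed by (loid, pageno); LargeObjectCreate() above seeds page 0 with an empty bytea. Mapping a byte offset within the object onto a chunk number is then simple integer arithmetic, sketched below with an assumed chunk size (the real size is a compile-time constant and not necessarily 2048).

    #include <stdio.h>

    #define LOBLKSIZE 2048          /* assumed large-object chunk size */

    int
    main(void)
    {
        long        offset = 5000;
        long        pageno = offset / LOBLKSIZE;
        long        pageoff = offset % LOBLKSIZE;

        printf("offset %ld -> page %ld, byte %ld within page\n",
               offset, pageno, pageoff);    /* page 2, byte 904 */
        return 0;
    }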
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index d9834783414..25ecf12f3b6 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.56 2001/03/22 03:59:20 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -263,7 +263,7 @@ OperatorShellMakeWithOpenRelation(Relation pg_operator_desc,
values[i++] = NameGetDatum(&oname);
values[i++] = Int32GetDatum(GetUserId());
values[i++] = UInt16GetDatum(0);
- values[i++] = CharGetDatum('b'); /* assume it's binary */
+ values[i++] = CharGetDatum('b'); /* assume it's binary */
values[i++] = BoolGetDatum(false);
values[i++] = BoolGetDatum(false);
values[i++] = ObjectIdGetDatum(leftObjectId); /* <-- left oid */
@@ -595,7 +595,7 @@ OperatorDef(char *operatorName,
*/
if (restrictionName)
{ /* optional */
- Oid restOid;
+ Oid restOid;
MemSet(typeId, 0, FUNC_MAX_ARGS * sizeof(Oid));
typeId[0] = OIDOID; /* operator OID */
@@ -623,7 +623,7 @@ OperatorDef(char *operatorName,
*/
if (joinName)
{ /* optional */
- Oid joinOid;
+ Oid joinOid;
MemSet(typeId, 0, FUNC_MAX_ARGS * sizeof(Oid));
typeId[0] = OIDOID; /* operator OID */
@@ -745,7 +745,7 @@ OperatorDef(char *operatorName,
otherRightTypeName);
if (!OidIsValid(other_oid))
elog(ERROR,
- "OperatorDef: can't create operator shell \"%s\"",
+ "OperatorDef: can't create operator shell \"%s\"",
name[j]);
values[i++] = ObjectIdGetDatum(other_oid);
}
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index e9918baebcd..0872eb6e977 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.53 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.54 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -156,7 +156,7 @@ ProcedureCreate(char *procedureName,
text *prosrctext;
prosrctext = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(prosrc)));
+ CStringGetDatum(prosrc)));
retval = GetSysCacheOid(PROSRC,
PointerGetDatum(prosrctext),
0, 0, 0);
@@ -237,18 +237,18 @@ ProcedureCreate(char *procedureName,
prosrc = procedureName;
if (fmgr_internal_function(prosrc) == InvalidOid)
elog(ERROR,
- "ProcedureCreate: there is no builtin function named \"%s\"",
+ "ProcedureCreate: there is no builtin function named \"%s\"",
prosrc);
}
/*
* If this is a dynamically loadable procedure, make sure that the
* library file exists, is loadable, and contains the specified link
- * symbol. Also check for a valid function information record.
+ * symbol. Also check for a valid function information record.
*
* We used to perform these checks only when the function was first
- * called, but it seems friendlier to verify the library's validity
- * at CREATE FUNCTION time.
+ * called, but it seems friendlier to verify the library's validity at
+ * CREATE FUNCTION time.
*/
if (languageObjectId == ClanguageId)
@@ -355,7 +355,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlist = parse->targetList;
/*
- * The last query must be a SELECT if and only if there is a return type.
+ * The last query must be a SELECT if and only if there is a return
+ * type.
*/
if (rettype == InvalidOid)
{
@@ -375,8 +376,8 @@ checkretval(Oid rettype, List *queryTreeList)
tlistlen = ExecCleanTargetListLength(tlist);
/*
- * For base-type returns, the target list should have exactly one entry,
- * and its type should agree with what the user declared.
+ * For base-type returns, the target list should have exactly one
+ * entry, and its type should agree with what the user declared.
*/
typerelid = typeidTypeRelid(rettype);
if (typerelid == InvalidOid)
@@ -388,7 +389,7 @@ checkretval(Oid rettype, List *queryTreeList)
resnode = (Resdom *) ((TargetEntry *) lfirst(tlist))->resdom;
if (resnode->restype != rettype)
elog(ERROR, "return type mismatch in function: declared to return %s, returns %s",
- typeidTypeName(rettype), typeidTypeName(resnode->restype));
+ typeidTypeName(rettype), typeidTypeName(resnode->restype));
return;
}
@@ -397,8 +398,8 @@ checkretval(Oid rettype, List *queryTreeList)
* If the target list is of length 1, and the type of the varnode in
* the target list is the same as the declared return type, this is
* okay. This can happen, for example, where the body of the function
- * is 'SELECT (x = func2())', where func2 has the same return type
- * as the function that's calling it.
+ * is 'SELECT (x = func2())', where func2 has the same return type as
+ * the function that's calling it.
*/
if (tlistlen == 1)
{
@@ -408,10 +409,10 @@ checkretval(Oid rettype, List *queryTreeList)
}
/*
- * By here, the procedure returns a tuple or set of tuples. This part of
- * the typechecking is a hack. We look up the relation that is the
- * declared return type, and be sure that attributes 1 .. n in the target
- * list match the declared types.
+ * By here, the procedure returns a tuple or set of tuples. This part
+ * of the typechecking is a hack. We look up the relation that is the
+ * declared return type, and be sure that attributes 1 .. n in the
+ * target list match the declared types.
*/
reln = heap_open(typerelid, AccessShareLock);
relid = reln->rd_id;
@@ -436,7 +437,7 @@ checkretval(Oid rettype, List *queryTreeList)
typeidTypeName(rettype),
typeidTypeName(tletype),
typeidTypeName(reln->rd_att->attrs[i]->atttypid),
- i+1);
+ i + 1);
i++;
}
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 714ea737aec..0fbadb55b2d 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.59 2001/02/12 20:07:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.60 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -171,24 +171,24 @@ TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName)
*/
i = 0;
namestrcpy(&name, typeName);
- values[i++] = NameGetDatum(&name); /* 1 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 2 */
- values[i++] = Int16GetDatum(0); /* 3 */
- values[i++] = Int16GetDatum(0); /* 4 */
- values[i++] = BoolGetDatum(false); /* 5 */
- values[i++] = CharGetDatum(0); /* 6 */
- values[i++] = BoolGetDatum(false); /* 7 */
- values[i++] = CharGetDatum(0); /* 8 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 9 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 10 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 11 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 12 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 13 */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* 14 */
- values[i++] = CharGetDatum('i'); /* 15 */
- values[i++] = CharGetDatum('p'); /* 16 */
+ values[i++] = NameGetDatum(&name); /* 1 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 2 */
+ values[i++] = Int16GetDatum(0); /* 3 */
+ values[i++] = Int16GetDatum(0); /* 4 */
+ values[i++] = BoolGetDatum(false); /* 5 */
+ values[i++] = CharGetDatum(0); /* 6 */
+ values[i++] = BoolGetDatum(false); /* 7 */
+ values[i++] = CharGetDatum(0); /* 8 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 9 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 10 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 11 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 12 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 13 */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* 14 */
+ values[i++] = CharGetDatum('i'); /* 15 */
+ values[i++] = CharGetDatum('p'); /* 16 */
values[i++] = DirectFunctionCall1(textin,
- CStringGetDatum(typeName)); /* 17 */
+ CStringGetDatum(typeName)); /* 17 */
/* ----------------
* create a new type tuple with FormHeapTuple
@@ -368,16 +368,16 @@ TypeCreate(char *typeName,
*/
i = 0;
namestrcpy(&name, typeName);
- values[i++] = NameGetDatum(&name); /* 1 */
+ values[i++] = NameGetDatum(&name); /* 1 */
values[i++] = Int32GetDatum(GetUserId()); /* 2 */
values[i++] = Int16GetDatum(internalSize); /* 3 */
values[i++] = Int16GetDatum(externalSize); /* 4 */
values[i++] = BoolGetDatum(passedByValue); /* 5 */
values[i++] = CharGetDatum(typeType); /* 6 */
- values[i++] = BoolGetDatum(true); /* 7 */
+ values[i++] = BoolGetDatum(true); /* 7 */
values[i++] = CharGetDatum(typDelim); /* 8 */
values[i++] = ObjectIdGetDatum(typeType == 'c' ? relationOid : InvalidOid); /* 9 */
- values[i++] = ObjectIdGetDatum(elementObjectId); /* 10 */
+ values[i++] = ObjectIdGetDatum(elementObjectId); /* 10 */
procs[0] = inputProcedure;
procs[1] = outputProcedure;
@@ -386,7 +386,7 @@ TypeCreate(char *typeName,
for (j = 0; j < 4; ++j)
{
- Oid procOid;
+ Oid procOid;
procname = procs[j];
@@ -438,27 +438,27 @@ TypeCreate(char *typeName,
func_error("TypeCreate", procname, 1, argList, NULL);
}
- values[i++] = ObjectIdGetDatum(procOid); /* 11 - 14 */
+ values[i++] = ObjectIdGetDatum(procOid); /* 11 - 14 */
}
/* ----------------
* set default alignment
* ----------------
*/
- values[i++] = CharGetDatum(alignment); /* 15 */
+ values[i++] = CharGetDatum(alignment); /* 15 */
/* ----------------
* set default storage for TOAST
* ----------------
*/
- values[i++] = CharGetDatum(storage); /* 16 */
+ values[i++] = CharGetDatum(storage); /* 16 */
/* ----------------
* initialize the default value for this type.
* ----------------
*/
- values[i] = DirectFunctionCall1(textin, /* 17 */
- CStringGetDatum(defaultTypeValue ? defaultTypeValue : "-"));
+ values[i] = DirectFunctionCall1(textin, /* 17 */
+ CStringGetDatum(defaultTypeValue ? defaultTypeValue : "-"));
/* ----------------
* open pg_type and begin a scan for the type name.
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 4db0068da82..f4e056bd0a7 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.14 2001/02/16 03:16:58 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.15 2001/03/22 03:59:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,9 +86,10 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
CommitTransactionCommand();
return;
}
+
/*
- * We can VACUUM ANALYZE any table except pg_statistic.
- * see update_relstats
+ * We can VACUUM ANALYZE any table except pg_statistic. see
+ * update_relstats
*/
if (strcmp(NameStr(((Form_pg_class) GETSTRUCT(tuple))->relname),
StatisticRelationName) == 0)
@@ -104,10 +105,12 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (!pg_ownercheck(GetUserId(), RelationGetRelationName(onerel),
RELNAME))
{
- /* we already did an elog during vacuum
- elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it",
- RelationGetRelationName(onerel));
- */
+
+ /*
+ * we already did an elog during vacuum:
+ * elog(NOTICE, "Skipping \"%s\" --- only table owner can VACUUM it",
+ *      RelationGetRelationName(onerel));
+ */
heap_close(onerel, NoLock);
CommitTransactionCommand();
return;
@@ -136,7 +139,7 @@ analyze_rel(Oid relid, List *anal_cols2, int MESSAGE_LEVEL)
if (namestrcmp(&(attr[i]->attname), col) == 0)
break;
}
- if (i < attr_cnt) /* found */
+ if (i < attr_cnt) /* found */
attnums[tcnt++] = i;
else
{
@@ -295,15 +298,16 @@ attr_stats(Relation onerel, int attr_cnt, VacAttrStats *vacattrstats, HeapTuple
stats->nonnull_cnt++;
/*
- * If the value is toasted, detoast it to avoid repeated detoastings
- * and resultant memory leakage inside the comparison routines.
+ * If the value is toasted, detoast it to avoid repeated
+ * detoastings and resultant memory leakage inside the comparison
+ * routines.
*/
if (!stats->attr->attbyval && stats->attr->attlen == -1)
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
else
value = origvalue;
- if (! stats->initialized)
+ if (!stats->initialized)
{
bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
/* best_cnt gets incremented below */
@@ -433,7 +437,7 @@ bucketcpy(Form_pg_attribute attr, Datum value, Datum *bucket, int *bucket_len)
* Of course, this only works for fixed-size never-null columns, but
* dispersion is.
*
- * pg_statistic rows are just added normally. This means that
+ * pg_statistic rows are just added normally. This means that
* pg_statistic will probably contain some deleted rows at the
* completion of a vacuum cycle, unless it happens to get vacuumed last.
*
@@ -467,7 +471,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
VacAttrStats *stats;
attp = (Form_pg_attribute) GETSTRUCT(atup);
- if (attp->attnum <= 0) /* skip system attributes for now */
+ if (attp->attnum <= 0) /* skip system attributes for now */
continue;
for (i = 0; i < natts; i++)
@@ -476,47 +480,45 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
break;
}
if (i >= natts)
- continue; /* skip attr if no stats collected */
+ continue; /* skip attr if no stats collected */
stats = &(vacattrstats[i]);
if (VacAttrStatsEqValid(stats))
{
- float4 selratio; /* average ratio of rows selected
- * for a random constant */
+ float4 selratio; /* average ratio of rows selected
+ * for a random constant */
/* Compute dispersion */
if (stats->nonnull_cnt == 0 && stats->null_cnt == 0)
{
/*
- * empty relation, so put a dummy value in
- * attdispersion
+ * empty relation, so put a dummy value in attdispersion
*/
selratio = 0;
}
else if (stats->null_cnt <= 1 && stats->best_cnt == 1)
{
+
/*
- * looks like we have a unique-key attribute --- flag
- * this with special -1.0 flag value.
+ * looks like we have a unique-key attribute --- flag this
+ * with special -1.0 flag value.
*
- * The correct dispersion is 1.0/numberOfRows, but since
- * the relation row count can get updated without
- * recomputing dispersion, we want to store a
- * "symbolic" value and figure 1.0/numberOfRows on the
- * fly.
+ * The correct dispersion is 1.0/numberOfRows, but since the
+ * relation row count can get updated without recomputing
+ * dispersion, we want to store a "symbolic" value and
+ * figure 1.0/numberOfRows on the fly.
*/
selratio = -1;
}
else
{
if (VacAttrStatsLtGtValid(stats) &&
- stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
+ stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
{
/*
- * exact result when there are just 1 or 2
- * values...
+ * exact result when there are just 1 or 2 values...
*/
double min_cnt_d = stats->min_cnt,
max_cnt_d = stats->max_cnt,
@@ -552,12 +554,12 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
/*
* Create pg_statistic tuples for the relation, if we have
- * gathered the right data. del_stats() previously
- * deleted all the pg_statistic tuples for the rel, so we
- * just have to insert new ones here.
+ * gathered the right data. del_stats() previously deleted
+ * all the pg_statistic tuples for the rel, so we just have to
+ * insert new ones here.
*
- * Note analyze_rel() has seen to it that we won't come here
- * when vacuuming pg_statistic itself.
+ * Note analyze_rel() has seen to it that we won't come here when
+ * vacuuming pg_statistic itself.
*/
if (VacAttrStatsLtGtValid(stats) && stats->initialized)
{
@@ -567,7 +569,7 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
char *out_string;
double best_cnt_d = stats->best_cnt,
null_cnt_d = stats->null_cnt,
- nonnull_cnt_d = stats->nonnull_cnt; /* prevent overflow */
+ nonnull_cnt_d = stats->nonnull_cnt; /* prevent overflow */
Datum values[Natts_pg_statistic];
char nulls[Natts_pg_statistic];
Relation irelations[Num_pg_statistic_indices];
@@ -585,31 +587,31 @@ update_attstats(Oid relid, int natts, VacAttrStats *vacattrstats)
* ----------------
*/
i = 0;
- values[i++] = ObjectIdGetDatum(relid); /* starelid */
- values[i++] = Int16GetDatum(attp->attnum); /* staattnum */
- values[i++] = ObjectIdGetDatum(stats->op_cmplt); /* staop */
- values[i++] = Float4GetDatum(nullratio); /* stanullfrac */
- values[i++] = Float4GetDatum(bestratio); /* stacommonfrac */
+ values[i++] = ObjectIdGetDatum(relid); /* starelid */
+ values[i++] = Int16GetDatum(attp->attnum); /* staattnum */
+ values[i++] = ObjectIdGetDatum(stats->op_cmplt); /* staop */
+ values[i++] = Float4GetDatum(nullratio); /* stanullfrac */
+ values[i++] = Float4GetDatum(bestratio); /* stacommonfrac */
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->best,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* stacommonval */
- CStringGetDatum(out_string));
+ stats->best,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* stacommonval */
+ CStringGetDatum(out_string));
pfree(out_string);
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->min,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* staloval */
- CStringGetDatum(out_string));
+ stats->min,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* staloval */
+ CStringGetDatum(out_string));
pfree(out_string);
out_string = DatumGetCString(FunctionCall3(&out_function,
- stats->max,
- ObjectIdGetDatum(stats->typelem),
- Int32GetDatum(stats->attr->atttypmod)));
- values[i++] = DirectFunctionCall1(textin, /* stahival */
- CStringGetDatum(out_string));
+ stats->max,
+ ObjectIdGetDatum(stats->typelem),
+ Int32GetDatum(stats->attr->atttypmod)));
+ values[i++] = DirectFunctionCall1(textin, /* stahival */
+ CStringGetDatum(out_string));
pfree(out_string);
stup = heap_formtuple(sd->rd_att, values, nulls);
@@ -682,6 +684,3 @@ del_stats(Oid relid, int attcnt, int *attnums)
*/
heap_close(pgstatistic, NoLock);
}
-
-
-
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 134f3b7af0e..1eb29dcc99a 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.76 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.77 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,7 +130,7 @@ static void NotifyMyFrontEnd(char *relname, int32 listenerPID);
static int AsyncExistsPendingNotify(char *relname);
static void ClearPendingNotifies(void);
-bool Trace_notify = false;
+bool Trace_notify = false;
/*
@@ -161,6 +161,7 @@ Async_Notify(char *relname)
/* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname))
{
+
/*
* We allocate list memory from the global malloc pool to ensure
* that it will live until we want to use it. This is probably
@@ -349,9 +350,7 @@ Async_UnlistenAll()
sRel = heap_beginscan(lRel, 0, SnapshotNow, 1, key);
while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0)))
- {
simple_heap_delete(lRel, &lTuple->t_self);
- }
heap_endscan(sRel);
heap_close(lRel, AccessExclusiveLock);
@@ -499,6 +498,7 @@ AtCommit_Notify()
*/
if (kill(listenerPID, SIGUSR2) < 0)
{
+
/*
* Get rid of pg_listener entry if it refers to a PID
* that no longer exists. Presumably, that backend
@@ -794,7 +794,7 @@ ProcessIncomingNotify(void)
if (Trace_notify)
elog(DEBUG, "ProcessIncomingNotify: received %s from %d",
- relname, (int) sourcePID);
+ relname, (int) sourcePID);
NotifyMyFrontEnd(relname, sourcePID);
/* Rewrite the tuple with 0 in notification column */
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 76f805ca86c..826407c8eb6 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.64 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.65 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
#include "utils/temprel.h"
-static Oid copy_heap(Oid OIDOldHeap, char *NewName, bool istemp);
+static Oid copy_heap(Oid OIDOldHeap, char *NewName, bool istemp);
static void copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName);
static void rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
@@ -75,8 +75,8 @@ cluster(char *oldrelname, char *oldindexname)
StrNCpy(saveoldindexname, oldindexname, NAMEDATALEN);
/*
- * We grab exclusive access to the target rel and index for the duration
- * of the transaction.
+ * We grab exclusive access to the target rel and index for the
+ * duration of the transaction.
*/
OldHeap = heap_openr(saveoldrelname, AccessExclusiveLock);
OIDOldHeap = RelationGetRelid(OldHeap);
@@ -154,8 +154,8 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
- * Need to make a copy of the tuple descriptor,
- * since heap_create_with_catalog modifies it.
+ * Need to make a copy of the tuple descriptor, since
+ * heap_create_with_catalog modifies it.
*/
tupdesc = CreateTupleDescCopyConstr(OldHeapDesc);
@@ -164,16 +164,15 @@ copy_heap(Oid OIDOldHeap, char *NewName, bool istemp)
allowSystemTableMods);
/*
- * Advance command counter so that the newly-created
- * relation's catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's
+ * catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the new relation.
- * Note that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will
- * be visible for insertion.
+ * If necessary, create a TOAST table for the new relation. Note that
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
+ * that the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(NewName, true);
@@ -198,12 +197,12 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
/*
* Create a new index like the old one. To do this I get the info
- * from pg_index, and add a new index with a temporary name (that
- * will be changed later).
+ * from pg_index, and add a new index with a temporary name (that will
+ * be changed later).
*
- * NOTE: index_create will cause the new index to be a temp relation
- * if its parent table is, so we don't need to do anything special
- * for the temp-table case here.
+ * NOTE: index_create will cause the new index to be a temp relation if
+ * its parent table is, so we don't need to do anything special for
+ * the temp-table case here.
*/
Old_pg_index_Tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(OIDOldIndex),
@@ -214,7 +213,7 @@ copy_index(Oid OIDOldIndex, Oid OIDNewHeap, char *NewIndexName)
indexInfo = BuildIndexInfo(Old_pg_index_Tuple);
Old_pg_index_relation_Tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(OIDOldIndex),
+ ObjectIdGetDatum(OIDOldIndex),
0, 0, 0);
Assert(Old_pg_index_relation_Tuple);
Old_pg_index_relation_Form = (Form_pg_class) GETSTRUCT(Old_pg_index_relation_Tuple);
@@ -266,13 +265,15 @@ rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
LocalHeapTuple.t_datamcxt = NULL;
LocalHeapTuple.t_data = NULL;
heap_fetch(LocalOldHeap, SnapshotNow, &LocalHeapTuple, &LocalBuffer);
- if (LocalHeapTuple.t_data != NULL) {
+ if (LocalHeapTuple.t_data != NULL)
+ {
+
/*
* We must copy the tuple because heap_insert() will overwrite
* the commit-status fields of the tuple it's handed, and the
* retrieved tuple will actually be in a disk buffer! Thus,
- * the source relation would get trashed, which is bad news
- * if we abort later on. (This was a bug in releases thru 7.0)
+ * the source relation would get trashed, which is bad news if
+ * we abort later on. (This was a bug in releases thru 7.0)
*/
HeapTuple copiedTuple = heap_copytuple(&LocalHeapTuple);
diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c
index 8a3be15a052..49d1edf4c4b 100644
--- a/src/backend/commands/command.c
+++ b/src/backend/commands/command.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.122 2001/02/27 22:07:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.123 2001/03/22 03:59:21 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
@@ -173,29 +173,29 @@ PerformPortalFetch(char *name,
* at the end of the available tuples in that direction. If so, do
* nothing. (This check exists because not all plan node types are
* robust about being called again if they've already returned NULL
- * once.) If it's OK to do the fetch, call the executor. Then,
+ * once.) If it's OK to do the fetch, call the executor. Then,
* update the atStart/atEnd state depending on the number of tuples
* that were retrieved.
* ----------------
*/
if (forward)
{
- if (! portal->atEnd)
+ if (!portal->atEnd)
{
ExecutorRun(queryDesc, estate, EXEC_FOR, (long) count);
if (estate->es_processed > 0)
- portal->atStart = false; /* OK to back up now */
+ portal->atStart = false; /* OK to back up now */
if (count <= 0 || (int) estate->es_processed < count)
- portal->atEnd = true; /* we retrieved 'em all */
+ portal->atEnd = true; /* we retrieved 'em all */
}
}
else
{
- if (! portal->atStart)
+ if (!portal->atStart)
{
ExecutorRun(queryDesc, estate, EXEC_BACK, (long) count);
if (estate->es_processed > 0)
- portal->atEnd = false; /* OK to go forward now */
+ portal->atEnd = false; /* OK to go forward now */
if (count <= 0 || (int) estate->es_processed < count)
portal->atStart = true; /* we retrieved 'em all */
}
@@ -502,8 +502,8 @@ AlterTableAddColumn(const char *relationName,
heap_close(rel, NoLock);
/*
- * Automatically create the secondary relation for TOAST
- * if it formerly had no such but now has toastable attributes.
+ * Automatically create the secondary relation for TOAST if it
+ * formerly had no such but now has toastable attributes.
*/
CommandCounterIncrement();
AlterTableCreateToastTable(relationName, true);
@@ -842,7 +842,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
relcheck = (Form_pg_relcheck) GETSTRUCT(htup);
ccbin = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(&relcheck->rcbin)));
+ PointerGetDatum(&relcheck->rcbin)));
node = stringToNode(ccbin);
pfree(ccbin);
if (find_attribute_in_node(node, attnum))
@@ -890,7 +890,7 @@ RemoveColumnReferences(Oid reloid, int attnum, bool checkonly, HeapTuple reltup)
else
{
htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(index->indexrelid),
+ ObjectIdGetDatum(index->indexrelid),
0, 0, 0);
RemoveIndex(NameStr(((Form_pg_class) GETSTRUCT(htup))->relname));
ReleaseSysCache(htup);
@@ -1106,339 +1106,361 @@ AlterTableAddConstraint(char *relationName,
#endif
/* Disallow ADD CONSTRAINT on views, indexes, sequences, etc */
- if (! is_relation(relationName))
+ if (!is_relation(relationName))
elog(ERROR, "ALTER TABLE ADD CONSTRAINT: %s is not a table",
relationName);
switch (nodeTag(newConstraint))
{
case T_Constraint:
- {
- Constraint *constr = (Constraint *) newConstraint;
-
- switch (constr->contype)
{
- case CONSTR_CHECK:
+ Constraint *constr = (Constraint *) newConstraint;
+
+ switch (constr->contype)
{
- ParseState *pstate;
- bool successful = true;
- HeapScanDesc scan;
- ExprContext *econtext;
- TupleTableSlot *slot;
- HeapTuple tuple;
- RangeTblEntry *rte;
- List *qual;
- List *constlist;
- Relation rel;
- Node *expr;
- char *name;
-
- if (constr->name)
- name = constr->name;
- else
- name = "<unnamed>";
-
- constlist = makeList1(constr);
-
- rel = heap_openr(relationName, AccessExclusiveLock);
-
- /* make sure it is not a view */
- if (rel->rd_rel->relkind == RELKIND_VIEW)
- elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
-
- /*
- * Scan all of the rows, looking for a false match
- */
- scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
- AssertState(scan != NULL);
-
- /*
- * We need to make a parse state and range table to allow
- * us to transformExpr and fix_opids to get a version of
- * the expression we can pass to ExecQual
- */
- pstate = make_parsestate(NULL);
- rte = addRangeTableEntry(pstate, relationName, NULL,
- false, true);
- addRTEtoQuery(pstate, rte, true, true);
-
- /* Convert the A_EXPR in raw_expr into an EXPR */
- expr = transformExpr(pstate, constr->raw_expr,
- EXPR_COLUMN_FIRST);
-
- /*
- * Make sure it yields a boolean result.
- */
- if (exprType(expr) != BOOLOID)
- elog(ERROR, "CHECK '%s' does not yield boolean result",
- name);
-
- /*
- * Make sure no outside relations are referred to.
- */
- if (length(pstate->p_rtable) != 1)
- elog(ERROR, "Only relation '%s' can be referenced in CHECK",
- relationName);
-
- /*
- * Might as well try to reduce any constant expressions.
- */
- expr = eval_const_expressions(expr);
-
- /* And fix the opids */
- fix_opids(expr);
-
- qual = makeList1(expr);
-
- /* Make tuple slot to hold tuples */
- slot = MakeTupleTableSlot();
- ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
- /* Make an expression context for ExecQual */
- econtext = MakeExprContext(slot, CurrentMemoryContext);
-
- /*
- * Scan through the rows now, checking the expression
- * at each row.
- */
- while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
- ExecStoreTuple(tuple, slot, InvalidBuffer, false);
- if (!ExecQual(qual, econtext, true))
+ case CONSTR_CHECK:
{
- successful=false;
- break;
- }
- ResetExprContext(econtext);
- }
+ ParseState *pstate;
+ bool successful = true;
+ HeapScanDesc scan;
+ ExprContext *econtext;
+ TupleTableSlot *slot;
+ HeapTuple tuple;
+ RangeTblEntry *rte;
+ List *qual;
+ List *constlist;
+ Relation rel;
+ Node *expr;
+ char *name;
+
+ if (constr->name)
+ name = constr->name;
+ else
+ name = "<unnamed>";
+
+ constlist = makeList1(constr);
+
+ rel = heap_openr(relationName, AccessExclusiveLock);
+
+ /* make sure it is not a view */
+ if (rel->rd_rel->relkind == RELKIND_VIEW)
+ elog(ERROR, "ALTER TABLE: cannot add constraint to a view");
+
+ /*
+ * Scan all of the rows, looking for a false
+ * match
+ */
+ scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
+ AssertState(scan != NULL);
+
+ /*
+ * We need to make a parse state and range
+ * table to allow us to transformExpr and
+ * fix_opids to get a version of the
+ * expression we can pass to ExecQual
+ */
+ pstate = make_parsestate(NULL);
+ rte = addRangeTableEntry(pstate, relationName, NULL,
+ false, true);
+ addRTEtoQuery(pstate, rte, true, true);
+
+ /* Convert the A_EXPR in raw_expr into an EXPR */
+ expr = transformExpr(pstate, constr->raw_expr,
+ EXPR_COLUMN_FIRST);
+
+ /*
+ * Make sure it yields a boolean result.
+ */
+ if (exprType(expr) != BOOLOID)
+ elog(ERROR, "CHECK '%s' does not yield boolean result",
+ name);
+
+ /*
+ * Make sure no outside relations are referred
+ * to.
+ */
+ if (length(pstate->p_rtable) != 1)
+ elog(ERROR, "Only relation '%s' can be referenced in CHECK",
+ relationName);
+
+ /*
+ * Might as well try to reduce any constant
+ * expressions.
+ */
+ expr = eval_const_expressions(expr);
+
+ /* And fix the opids */
+ fix_opids(expr);
+
+ qual = makeList1(expr);
+
+ /* Make tuple slot to hold tuples */
+ slot = MakeTupleTableSlot();
+ ExecSetSlotDescriptor(slot, RelationGetDescr(rel), false);
+ /* Make an expression context for ExecQual */
+ econtext = MakeExprContext(slot, CurrentMemoryContext);
+
+ /*
+ * Scan through the rows now, checking the
+ * expression at each row.
+ */
+ while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ {
+ ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+ if (!ExecQual(qual, econtext, true))
+ {
+ successful = false;
+ break;
+ }
+ ResetExprContext(econtext);
+ }
- FreeExprContext(econtext);
- pfree(slot);
+ FreeExprContext(econtext);
+ pfree(slot);
- heap_endscan(scan);
+ heap_endscan(scan);
- if (!successful)
- {
- heap_close(rel, NoLock);
- elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
- }
- /*
- * Call AddRelationRawConstraints to do the real adding --
- * It duplicates some of the above, but does not check the
- * validity of the constraint against tuples already in
- * the table.
- */
- AddRelationRawConstraints(rel, NIL, constlist);
- heap_close(rel, NoLock);
- pfree(constlist);
-
- break;
+ if (!successful)
+ {
+ heap_close(rel, NoLock);
+ elog(ERROR, "AlterTableAddConstraint: rejected due to CHECK constraint %s", name);
+ }
+
+ /*
+ * Call AddRelationRawConstraints to do the
+ * real adding -- It duplicates some of the
+ * above, but does not check the validity of
+ * the constraint against tuples already in
+ * the table.
+ */
+ AddRelationRawConstraints(rel, NIL, constlist);
+ heap_close(rel, NoLock);
+ pfree(constlist);
+
+ break;
+ }
+ default:
+ elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
}
- default:
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT is not implemented for that constraint type.");
+ break;
}
- break;
- }
case T_FkConstraint:
- {
- FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
- Relation rel, pkrel;
- HeapScanDesc scan;
- HeapTuple tuple;
- Trigger trig;
- List *list;
- int count;
- List *indexoidlist,
- *indexoidscan;
- Form_pg_attribute *rel_attrs = NULL;
- int i;
- bool found = false;
-
- if (is_temp_rel_name(fkconstraint->pktable_name) &&
- !is_temp_rel_name(relationName))
- elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
-
- /*
- * Grab an exclusive lock on the pk table, so that someone
- * doesn't delete rows out from under us.
- */
-
- pkrel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
- if (pkrel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referenced table \"%s\" not a relation",
- fkconstraint->pktable_name);
-
- /*
- * Grab an exclusive lock on the fk table, and then scan
- * through each tuple, calling the RI_FKey_Match_Ins
- * (insert trigger) as if that tuple had just been
- * inserted. If any of those fail, it should elog(ERROR)
- * and that's that.
- */
- rel = heap_openr(relationName, AccessExclusiveLock);
- if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "referencing table \"%s\" not a relation",
- relationName);
-
- /* First we check for limited correctness of the constraint */
-
- rel_attrs = pkrel->rd_att->attrs;
- indexoidlist = RelationGetIndexList(pkrel);
-
- foreach(indexoidscan, indexoidlist)
{
- Oid indexoid = lfirsti(indexoidscan);
- HeapTuple indexTuple;
- Form_pg_index indexStruct;
-
- indexTuple = SearchSysCache(INDEXRELID,
- ObjectIdGetDatum(indexoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(indexTuple))
- elog(ERROR, "transformFkeyGetPrimaryKey: index %u not found",
- indexoid);
- indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
-
- if (indexStruct->indisunique)
+ FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
+ Relation rel,
+ pkrel;
+ HeapScanDesc scan;
+ HeapTuple tuple;
+ Trigger trig;
+ List *list;
+ int count;
+ List *indexoidlist,
+ *indexoidscan;
+ Form_pg_attribute *rel_attrs = NULL;
+ int i;
+ bool found = false;
+
+ if (is_temp_rel_name(fkconstraint->pktable_name) &&
+ !is_temp_rel_name(relationName))
+ elog(ERROR, "ALTER TABLE / ADD CONSTRAINT: Unable to reference temporary table from permanent table constraint.");
+
+ /*
+ * Grab an exclusive lock on the pk table, so that someone
+ * doesn't delete rows out from under us.
+ */
+
+ pkrel = heap_openr(fkconstraint->pktable_name, AccessExclusiveLock);
+ if (pkrel->rd_rel->relkind != RELKIND_RELATION)
+ elog(ERROR, "referenced table \"%s\" not a relation",
+ fkconstraint->pktable_name);
+
+ /*
+ * Grab an exclusive lock on the fk table, and then scan
+ * through each tuple, calling the RI_FKey_Match_Ins
+ * (insert trigger) as if that tuple had just been
+ * inserted. If any of those fail, it should elog(ERROR)
+ * and that's that.
+ */
+ rel = heap_openr(relationName, AccessExclusiveLock);
+ if (rel->rd_rel->relkind != RELKIND_RELATION)
+ elog(ERROR, "referencing table \"%s\" not a relation",
+ relationName);
+
+ /*
+ * First we check for limited correctness of the
+ * constraint
+ */
+
+ rel_attrs = pkrel->rd_att->attrs;
+ indexoidlist = RelationGetIndexList(pkrel);
+
+ foreach(indexoidscan, indexoidlist)
{
- List *attrl;
-
- /* Make sure this index has the same number of keys -- It obviously
- * won't match otherwise. */
- for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
- if (i!=length(fkconstraint->pk_attrs))
- found=false;
- else {
- /* go through the fkconstraint->pk_attrs list */
- foreach(attrl, fkconstraint->pk_attrs)
- {
- Ident *attr=lfirst(attrl);
+ Oid indexoid = lfirsti(indexoidscan);
+ HeapTuple indexTuple;
+ Form_pg_index indexStruct;
+
+ indexTuple = SearchSysCache(INDEXRELID,
+ ObjectIdGetDatum(indexoid),
+ 0, 0, 0);
+ if (!HeapTupleIsValid(indexTuple))
+ elog(ERROR, "transformFkeyGetPrimaryKey: index %u not found",
+ indexoid);
+ indexStruct = (Form_pg_index) GETSTRUCT(indexTuple);
+
+ if (indexStruct->indisunique)
+ {
+ List *attrl;
+
+ /*
+ * Make sure this index has the same number of
+ * keys -- It obviously won't match otherwise.
+ */
+ for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++);
+ if (i != length(fkconstraint->pk_attrs))
found = false;
- for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
+ else
+ {
+ /* go through the fkconstraint->pk_attrs list */
+ foreach(attrl, fkconstraint->pk_attrs)
{
- int pkattno = indexStruct->indkey[i];
- if (pkattno>0)
+ Ident *attr = lfirst(attrl);
+
+ found = false;
+ for (i = 0; i < INDEX_MAX_KEYS && indexStruct->indkey[i] != 0; i++)
{
- char *name = NameStr(rel_attrs[pkattno-1]->attname);
- if (strcmp(name, attr->name)==0)
+ int pkattno = indexStruct->indkey[i];
+
+ if (pkattno > 0)
{
- found = true;
- break;
+ char *name = NameStr(rel_attrs[pkattno - 1]->attname);
+
+ if (strcmp(name, attr->name) == 0)
+ {
+ found = true;
+ break;
+ }
}
}
+ if (!found)
+ break;
}
- if (!found)
- break;
}
}
+ ReleaseSysCache(indexTuple);
+ if (found)
+ break;
}
- ReleaseSysCache(indexTuple);
- if (found)
- break;
- }
- if (!found)
- elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
- fkconstraint->pktable_name);
+ if (!found)
+ elog(ERROR, "UNIQUE constraint matching given keys for referenced table \"%s\" not found",
+ fkconstraint->pktable_name);
- freeList(indexoidlist);
- heap_close(pkrel, NoLock);
+ freeList(indexoidlist);
+ heap_close(pkrel, NoLock);
- rel_attrs = rel->rd_att->attrs;
- if (fkconstraint->fk_attrs!=NIL) {
- List *fkattrs;
- Ident *fkattr;
+ rel_attrs = rel->rd_att->attrs;
+ if (fkconstraint->fk_attrs != NIL)
+ {
+ List *fkattrs;
+ Ident *fkattr;
- found = false;
- foreach(fkattrs, fkconstraint->fk_attrs) {
- int count;
found = false;
- fkattr=lfirst(fkattrs);
- for (count = 0; count < rel->rd_att->natts; count++) {
- char *name = NameStr(rel->rd_att->attrs[count]->attname);
- if (strcmp(name, fkattr->name)==0) {
- found = true;
- break;
+ foreach(fkattrs, fkconstraint->fk_attrs)
+ {
+ int count;
+
+ found = false;
+ fkattr = lfirst(fkattrs);
+ for (count = 0; count < rel->rd_att->natts; count++)
+ {
+ char *name = NameStr(rel->rd_att->attrs[count]->attname);
+
+ if (strcmp(name, fkattr->name) == 0)
+ {
+ found = true;
+ break;
+ }
}
+ if (!found)
+ break;
}
if (!found)
- break;
+ elog(ERROR, "columns referenced in foreign key constraint not found.");
}
- if (!found)
- elog(ERROR, "columns referenced in foreign key constraint not found.");
- }
- trig.tgoid = 0;
- if (fkconstraint->constr_name)
- trig.tgname = fkconstraint->constr_name;
- else
- trig.tgname = "<unknown>";
- trig.tgfoid = 0;
- trig.tgtype = 0;
- trig.tgenabled = TRUE;
- trig.tgisconstraint = TRUE;
- trig.tginitdeferred = FALSE;
- trig.tgdeferrable = FALSE;
-
- trig.tgargs = (char **) palloc(
- sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
- + length(fkconstraint->pk_attrs)));
-
- if (fkconstraint->constr_name)
- trig.tgargs[0] = fkconstraint->constr_name;
- else
- trig.tgargs[0] = "<unknown>";
- trig.tgargs[1] = (char *) relationName;
- trig.tgargs[2] = fkconstraint->pktable_name;
- trig.tgargs[3] = fkconstraint->match_type;
- count = 4;
- foreach(list, fkconstraint->fk_attrs)
+ trig.tgoid = 0;
+ if (fkconstraint->constr_name)
+ trig.tgname = fkconstraint->constr_name;
+ else
+ trig.tgname = "<unknown>";
+ trig.tgfoid = 0;
+ trig.tgtype = 0;
+ trig.tgenabled = TRUE;
+ trig.tgisconstraint = TRUE;
+ trig.tginitdeferred = FALSE;
+ trig.tgdeferrable = FALSE;
+
+ trig.tgargs = (char **) palloc(
+ sizeof(char *) * (4 + length(fkconstraint->fk_attrs)
+ + length(fkconstraint->pk_attrs)));
+
+ if (fkconstraint->constr_name)
+ trig.tgargs[0] = fkconstraint->constr_name;
+ else
+ trig.tgargs[0] = "<unknown>";
+ trig.tgargs[1] = (char *) relationName;
+ trig.tgargs[2] = fkconstraint->pktable_name;
+ trig.tgargs[3] = fkconstraint->match_type;
+ count = 4;
+ foreach(list, fkconstraint->fk_attrs)
{
Ident *fk_at = lfirst(list);
trig.tgargs[count] = fk_at->name;
- count+=2;
+ count += 2;
}
- count = 5;
- foreach(list, fkconstraint->pk_attrs)
+ count = 5;
+ foreach(list, fkconstraint->pk_attrs)
{
Ident *pk_at = lfirst(list);
trig.tgargs[count] = pk_at->name;
- count+=2;
+ count += 2;
}
- trig.tgnargs = count-1;
+ trig.tgnargs = count - 1;
- scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
- AssertState(scan != NULL);
+ scan = heap_beginscan(rel, false, SnapshotNow, 0, NULL);
+ AssertState(scan != NULL);
- while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
- {
- /* Make a call to the check function */
- /* No parameters are passed, but we do set a context */
- FunctionCallInfoData fcinfo;
- TriggerData trigdata;
+ while (HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ {
+ /* Make a call to the check function */
+ /* No parameters are passed, but we do set a context */
+ FunctionCallInfoData fcinfo;
+ TriggerData trigdata;
- MemSet(&fcinfo, 0, sizeof(fcinfo));
- /* We assume RI_FKey_check_ins won't look at flinfo... */
+ MemSet(&fcinfo, 0, sizeof(fcinfo));
+ /* We assume RI_FKey_check_ins won't look at flinfo... */
- trigdata.type = T_TriggerData;
- trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
- trigdata.tg_relation = rel;
- trigdata.tg_trigtuple = tuple;
- trigdata.tg_newtuple = NULL;
- trigdata.tg_trigger = &trig;
+ trigdata.type = T_TriggerData;
+ trigdata.tg_event = TRIGGER_EVENT_INSERT | TRIGGER_EVENT_ROW;
+ trigdata.tg_relation = rel;
+ trigdata.tg_trigtuple = tuple;
+ trigdata.tg_newtuple = NULL;
+ trigdata.tg_trigger = &trig;
- fcinfo.context = (Node *) &trigdata;
+ fcinfo.context = (Node *) &trigdata;
- RI_FKey_check_ins(&fcinfo);
- }
- heap_endscan(scan);
- heap_close(rel, NoLock); /* close rel but keep
- * lock! */
+ RI_FKey_check_ins(&fcinfo);
+ }
+ heap_endscan(scan);
+ heap_close(rel, NoLock); /* close rel but keep
+ * lock! */
- pfree(trig.tgargs);
- break;
- }
+ pfree(trig.tgargs);
+ break;
+ }
default:
elog(ERROR, "ALTER TABLE / ADD CONSTRAINT unable to determine type of constraint passed");
}
@@ -1464,15 +1486,15 @@ AlterTableDropConstraint(const char *relationName,
void
AlterTableOwner(const char *relationName, const char *newOwnerName)
{
- Relation class_rel;
- HeapTuple tuple;
+ Relation class_rel;
+ HeapTuple tuple;
int32 newOwnerSysid;
Relation idescs[Num_pg_class_indices];
/*
* first check that we are a superuser
*/
- if (! superuser())
+ if (!superuser())
elog(ERROR, "ALTER TABLE: permission denied");
/*
@@ -1537,21 +1559,21 @@ AlterTableOwner(const char *relationName, const char *newOwnerName)
void
AlterTableCreateToastTable(const char *relationName, bool silent)
{
- Relation rel;
- Oid myrelid;
- HeapTuple reltup;
- HeapTupleData classtuple;
- TupleDesc tupdesc;
- Relation class_rel;
- Buffer buffer;
- Relation ridescs[Num_pg_class_indices];
- Oid toast_relid;
- Oid toast_idxid;
- char toast_relname[NAMEDATALEN + 1];
- char toast_idxname[NAMEDATALEN + 1];
- Relation toast_idxrel;
- IndexInfo *indexInfo;
- Oid classObjectId[1];
+ Relation rel;
+ Oid myrelid;
+ HeapTuple reltup;
+ HeapTupleData classtuple;
+ TupleDesc tupdesc;
+ Relation class_rel;
+ Buffer buffer;
+ Relation ridescs[Num_pg_class_indices];
+ Oid toast_relid;
+ Oid toast_idxid;
+ char toast_relname[NAMEDATALEN + 1];
+ char toast_idxname[NAMEDATALEN + 1];
+ Relation toast_idxrel;
+ IndexInfo *indexInfo;
+ Oid classObjectId[1];
/*
* permissions checking. XXX exactly what is appropriate here?
@@ -1618,7 +1640,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
/*
* Check to see whether the table actually needs a TOAST table.
*/
- if (! needs_toast_table(rel))
+ if (!needs_toast_table(rel))
{
if (silent)
{
@@ -1652,10 +1674,11 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
"chunk_data",
BYTEAOID,
-1, 0, false);
+
/*
- * Ensure that the toast table doesn't itself get toasted,
- * or we'll be toast :-(. This is essential for chunk_data because
- * type bytea is toastable; hit the other two just to be sure.
+ * Ensure that the toast table doesn't itself get toasted, or we'll be
+ * toast :-(. This is essential for chunk_data because type bytea is
+ * toastable; hit the other two just to be sure.
*/
tupdesc->attrs[0]->attstorage = 'p';
tupdesc->attrs[1]->attstorage = 'p';
@@ -1733,7 +1756,7 @@ AlterTableCreateToastTable(const char *relationName, bool silent)
}
/*
- * Check to see whether the table needs a TOAST table. It does only if
+ * Check to see whether the table needs a TOAST table. It does only if
* (1) there are any toastable attributes, and (2) the maximum length
* of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to
* create a toast table for something like "f1 varchar(20)".)
@@ -1745,7 +1768,7 @@ needs_toast_table(Relation rel)
bool maxlength_unknown = false;
bool has_toastable_attrs = false;
TupleDesc tupdesc;
- Form_pg_attribute *att;
+ Form_pg_attribute *att;
int32 tuple_length;
int i;
@@ -1762,8 +1785,8 @@ needs_toast_table(Relation rel)
}
else
{
- int32 maxlen = type_maximum_size(att[i]->atttypid,
- att[i]->atttypmod);
+ int32 maxlen = type_maximum_size(att[i]->atttypid,
+ att[i]->atttypmod);
if (maxlen < 0)
maxlength_unknown = true;
@@ -1798,7 +1821,7 @@ LockTableCommand(LockStmt *lockstmt)
rel = heap_openr(lockstmt->relname, NoLock);
if (rel->rd_rel->relkind != RELKIND_RELATION)
- elog(ERROR, "LOCK TABLE: %s is not a table", lockstmt->relname);
+ elog(ERROR, "LOCK TABLE: %s is not a table", lockstmt->relname);
if (lockstmt->mode == AccessShareLock)
aclresult = pg_aclcheck(lockstmt->relname, GetUserId(), ACL_RD);
@@ -1817,9 +1840,9 @@ LockTableCommand(LockStmt *lockstmt)
static bool
is_relation(char *name)
{
- Relation rel = heap_openr(name, NoLock);
+ Relation rel = heap_openr(name, NoLock);
- bool retval = (rel->rd_rel->relkind == RELKIND_RELATION);
+ bool retval = (rel->rd_rel->relkind == RELKIND_RELATION);
heap_close(rel, NoLock);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 46e8b8057ec..06397ab323f 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1999, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.26 2001/01/23 04:32:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.27 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@ static void CommentAggregate(char *aggregate, List *arguments, char *comment);
static void CommentProc(char *function, List *arguments, char *comment);
static void CommentOperator(char *opname, List *arguments, char *comment);
static void CommentTrigger(char *trigger, char *relation, char *comments);
-static void CreateComments(Oid oid, char *comment);
+static void CreateComments(Oid oid, char *comment);
/*------------------------------------------------------------------
* CommentObject --
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index b518ef572e1..f586869b078 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.134 2001/03/14 21:47:50 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.135 2001/03/22 03:59:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,6 +76,7 @@ static StringInfoData attribute_buf;
#ifdef MULTIBYTE
static int client_encoding;
static int server_encoding;
+
#endif
@@ -285,6 +286,7 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
elog(ERROR, "You must have Postgres superuser privilege to do a COPY "
"directly to or from a file. Anyone can COPY to stdout or "
"from stdin. Psql's \\copy command also works for anyone.");
+
/*
* This restriction is unfortunate, but necessary until the frontend
* COPY protocol is redesigned to be binary-safe...
@@ -344,8 +346,8 @@ DoCopy(char *relname, bool binary, bool oids, bool from, bool pipe,
mode_t oumask; /* Pre-existing umask value */
/*
- * Prevent write to relative path ... too easy to shoot oneself
- * in the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot
+ * oneself in the foot by overwriting a database file ...
*/
if (filename[0] != '/')
elog(ERROR, "Relative path not allowed for server side"
@@ -408,7 +410,10 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
attr_count = rel->rd_att->natts;
attr = rel->rd_att->attrs;
- /* For binary copy we really only need isvarlena, but compute it all... */
+ /*
+ * For binary copy we really only need isvarlena, but compute it
+ * all...
+ */
out_functions = (FmgrInfo *) palloc(attr_count * sizeof(FmgrInfo));
elements = (Oid *) palloc(attr_count * sizeof(Oid));
isvarlena = (bool *) palloc(attr_count * sizeof(bool));
@@ -417,7 +422,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Oid out_func_oid;
if (!getTypeOutputInfo(attr[i]->atttypid,
- &out_func_oid, &elements[i], &isvarlena[i]))
+ &out_func_oid, &elements[i], &isvarlena[i]))
elog(ERROR, "COPY: couldn't lookup info for type %u",
attr[i]->atttypid);
fmgr_info(out_func_oid, &out_functions[i]);
@@ -454,7 +459,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (binary)
{
/* Binary per-tuple header */
- int16 fld_count = attr_count;
+ int16 fld_count = attr_count;
CopySendData(&fld_count, sizeof(int16), fp);
/* Send OID if wanted --- note fld_count doesn't include it */
@@ -471,7 +476,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (oids)
{
string = DatumGetCString(DirectFunctionCall1(oidout,
- ObjectIdGetDatum(tuple->t_data->t_oid)));
+ ObjectIdGetDatum(tuple->t_data->t_oid)));
CopySendString(string, fp);
pfree(string);
need_delim = true;
@@ -497,20 +502,22 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
{
if (!binary)
{
- CopySendString(null_print, fp); /* null indicator */
+ CopySendString(null_print, fp); /* null indicator */
}
else
{
- fld_size = 0; /* null marker */
+ fld_size = 0; /* null marker */
CopySendData(&fld_size, sizeof(int16), fp);
}
}
else
{
+
/*
- * If we have a toasted datum, forcibly detoast it to avoid
- * memory leakage inside the type's output routine (or
- * for binary case, becase we must output untoasted value).
+ * If we have a toasted datum, forcibly detoast it to
+ * avoid memory leakage inside the type's output routine
+ * (or for binary case, because we must output untoasted
+ * value).
*/
if (isvarlena[i])
value = PointerGetDatum(PG_DETOAST_DATUM(origvalue));
@@ -520,9 +527,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (!binary)
{
string = DatumGetCString(FunctionCall3(&out_functions[i],
- value,
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod)));
+ value,
+ ObjectIdGetDatum(elements[i]),
+ Int32GetDatum(attr[i]->atttypmod)));
CopyAttributeOut(fp, string, delim);
pfree(string);
}
@@ -552,8 +559,9 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
- * We need this horsing around because we don't know
- * how shorter data values are aligned within a Datum.
+ * We need this horsing around because we don't
+ * know how shorter data values are aligned within
+ * a Datum.
*/
store_att_byval(&datumBuf, value, fld_size);
CopySendData(&datumBuf,
@@ -577,7 +585,7 @@ CopyTo(Relation rel, bool binary, bool oids, FILE *fp,
if (binary)
{
/* Generate trailer for a binary copy */
- int16 fld_count = -1;
+ int16 fld_count = -1;
CopySendData(&fld_count, sizeof(int16), fp);
}
@@ -609,7 +617,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
int done = 0;
char *string;
ResultRelInfo *resultRelInfo;
- EState *estate = CreateExecutorState(); /* for ExecConstraints() */
+ EState *estate = CreateExecutorState(); /* for ExecConstraints() */
TupleTable tupleTable;
TupleTableSlot *slot;
Oid loaded_oid = InvalidOid;
@@ -622,11 +630,11 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
/*
* We need a ResultRelInfo so we can use the regular executor's
- * index-entry-making machinery. (There used to be a huge amount
- * of code here that basically duplicated execUtils.c ...)
+ * index-entry-making machinery. (There used to be a huge amount of
+ * code here that basically duplicated execUtils.c ...)
*/
resultRelInfo = makeNode(ResultRelInfo);
- resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
+ resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
resultRelInfo->ri_RelationDesc = rel;
ExecOpenIndices(resultRelInfo);
@@ -673,7 +681,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: bogus file header (missing flags)");
file_has_oids = (tmp & (1 << 16)) != 0;
- tmp &= ~ (1 << 16);
+ tmp &= ~(1 << 16);
if ((tmp >> 16) != 0)
elog(ERROR, "COPY BINARY: unrecognized critical flags in header");
/* Header extension length */
@@ -727,7 +735,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
else
{
loaded_oid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(string)));
+ CStringGetDatum(string)));
if (loaded_oid == InvalidOid)
elog(ERROR, "COPY TEXT: Invalid Oid");
}
@@ -747,8 +755,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
{
values[i] = FunctionCall3(&in_functions[i],
CStringGetDatum(string),
- ObjectIdGetDatum(elements[i]),
- Int32GetDatum(attr[i]->atttypmod));
+ ObjectIdGetDatum(elements[i]),
+ Int32GetDatum(attr[i]->atttypmod));
nulls[i] = ' ';
}
}
@@ -757,8 +765,8 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
}
else
{ /* binary */
- int16 fld_count,
- fld_size;
+ int16 fld_count,
+ fld_size;
CopyGetData(&fld_count, sizeof(int16), fp);
if (CopyGetEof(fp) ||
@@ -791,15 +799,15 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
if (CopyGetEof(fp))
elog(ERROR, "COPY BINARY: unexpected EOF");
if (fld_size == 0)
- continue; /* it's NULL; nulls[i] already set */
+ continue; /* it's NULL; nulls[i] already set */
if (fld_size != attr[i]->attlen)
elog(ERROR, "COPY BINARY: sizeof(field %d) is %d, expected %d",
- i+1, (int) fld_size, (int) attr[i]->attlen);
+ i + 1, (int) fld_size, (int) attr[i]->attlen);
if (fld_size == -1)
{
/* varlena field */
- int32 varlena_size;
- Pointer varlena_ptr;
+ int32 varlena_size;
+ Pointer varlena_ptr;
CopyGetData(&varlena_size, sizeof(int32), fp);
if (CopyGetEof(fp))
@@ -818,7 +826,7 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
else if (!attr[i]->attbyval)
{
/* fixed-length pass-by-reference */
- Pointer refval_ptr;
+ Pointer refval_ptr;
Assert(fld_size > 0);
refval_ptr = (Pointer) palloc(fld_size);
@@ -833,8 +841,9 @@ CopyFrom(Relation rel, bool binary, bool oids, FILE *fp,
Datum datumBuf;
/*
- * We need this horsing around because we don't know
- * how shorter data values are aligned within a Datum.
+ * We need this horsing around because we don't
+ * know how shorter data values are aligned within
+ * a Datum.
*/
Assert(fld_size > 0 && fld_size <= sizeof(Datum));
CopyGetData(&datumBuf, fld_size, fp);
@@ -1163,6 +1172,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
char *string_start;
int mblen;
int i;
+
#endif
#ifdef MULTIBYTE
@@ -1182,7 +1192,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
#endif
#ifdef MULTIBYTE
- for (; (mblen = (server_encoding == client_encoding? 1 : pg_encoding_mblen(client_encoding, string))) &&
+ for (; (mblen = (server_encoding == client_encoding ? 1 : pg_encoding_mblen(client_encoding, string))) &&
((c = *string) != '\0'); string += mblen)
#else
for (; (c = *string) != '\0'; string++)
@@ -1199,7 +1209,7 @@ CopyAttributeOut(FILE *fp, char *server_string, char *delim)
}
#ifdef MULTIBYTE
- if (client_encoding != server_encoding)
+ if (client_encoding != server_encoding)
pfree(string_start); /* pfree pg_server_to_client result */
#endif
}
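
For context on the CopyFrom() binary-header hunk above, a minimal standalone sketch of the flag-word test it reformats: bit 16 of the 32-bit flag field marks a file that carries OIDs, and any other bit in the upper half is a critical flag the reader must reject. The helper name check_binary_flags and the plain int32_t/stdio types are illustrative only, not backend code.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative sketch of the COPY BINARY flag handling shown in the
 * hunk above: test bit 16, clear it, then reject any remaining bit in
 * the upper (critical) half of the word.
 */
static int
check_binary_flags(int32_t tmp, int *file_has_oids)
{
    *file_has_oids = (tmp & (1 << 16)) != 0;    /* bit 16: OIDs present */
    tmp &= ~(1 << 16);                          /* clear the known flag */
    if ((tmp >> 16) != 0)                       /* any other critical bit set? */
        return -1;                              /* unrecognized critical flag */
    return 0;
}

int
main(void)
{
    int     has_oids;

    if (check_binary_flags(1 << 16, &has_oids) == 0)
        printf("header ok, file_has_oids = %d\n", has_oids);
    return 0;
}
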
diff --git a/src/backend/commands/creatinh.c b/src/backend/commands/creatinh.c
index a043cf0b8e0..c4a5eaa00e9 100644
--- a/src/backend/commands/creatinh.c
+++ b/src/backend/commands/creatinh.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.72 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.73 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
static int checkAttrExists(const char *attributeName,
const char *attributeType, List *schema);
static List *MergeAttributes(List *schema, List *supers, bool istemp,
- List **supOids, List **supconstr);
+ List **supOids, List **supconstr);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
@@ -150,10 +150,10 @@ DefineRelation(CreateStmt *stmt, char relkind)
CommandCounterIncrement();
/*
- * Open the new relation and acquire exclusive lock on it. This isn't
+ * Open the new relation and acquire exclusive lock on it. This isn't
* really necessary for locking out other backends (since they can't
- * see the new rel anyway until we commit), but it keeps the lock manager
- * from complaining about deadlock risks.
+ * see the new rel anyway until we commit), but it keeps the lock
+ * manager from complaining about deadlock risks.
*/
rel = heap_openr(relname, AccessExclusiveLock);
@@ -242,7 +242,7 @@ TruncateRelation(char *name)
* Varattnos of pg_relcheck.rcbin should be rewritten when
* subclasses inherit the constraints from the super class.
* Note that these functions rewrite varattnos while walking
- * through a node tree.
+ * through a node tree.
*/
static bool
change_varattnos_walker(Node *node, const AttrNumber *newattno)
@@ -251,15 +251,15 @@ change_varattnos_walker(Node *node, const AttrNumber *newattno)
return false;
if (IsA(node, Var))
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
if (var->varlevelsup == 0 && var->varno == 1)
{
+
/*
- * ??? the following may be a problem when the
- * node is multiply referenced though
- * stringToNode() doesn't create such a node
- * currently.
+ * ??? the following may be a problem when the node is
+ * multiply referenced though stringToNode() doesn't create
+ * such a node currently.
*/
Assert(newattno[var->varattno - 1] > 0);
var->varattno = newattno[var->varattno - 1];
@@ -373,9 +373,12 @@ MergeAttributes(List *schema, List *supers, bool istemp,
AttrNumber attrno;
TupleDesc tupleDesc;
TupleConstr *constr;
- AttrNumber *newattno, *partialAttidx;
- Node *expr;
- int i, attidx, attno_exist;
+ AttrNumber *newattno,
+ *partialAttidx;
+ Node *expr;
+ int i,
+ attidx,
+ attno_exist;
relation = heap_openr(name, AccessShareLock);
@@ -385,7 +388,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && is_temp_rel_name(name))
elog(ERROR, "CREATE TABLE: cannot inherit from temp relation \"%s\"", name);
- /* We should have an UNDER permission flag for this, but for now,
+ /*
+ * We should have an UNDER permission flag for this, but for now,
* demand that creator of a child table own the parent.
*/
if (!pg_ownercheck(GetUserId(), name, RELNAME))
@@ -397,14 +401,15 @@ MergeAttributes(List *schema, List *supers, bool istemp,
/* allocate a new attribute number table and initialize */
newattno = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
- newattno [i] = 0;
+ newattno[i] = 0;
+
/*
- * searching and storing order are different.
- * another table is needed.
- */
+ * searching and storing order are different. another table is
+ * needed.
+ */
partialAttidx = (AttrNumber *) palloc(tupleDesc->natts * sizeof(AttrNumber));
for (i = 0; i < tupleDesc->natts; i++)
- partialAttidx [i] = 0;
+ partialAttidx[i] = 0;
constr = tupleDesc->constr;
attidx = 0;
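
The MergeAttributes()/change_varattnos_walker() hunks above build and apply newattno[], a table indexed by the parent's attribute number (minus one) that yields the child's attribute number, so inherited constraint expressions can be rewritten for the child's column order. A standalone sketch of that rewrite follows; MiniVar and remap_var are illustrative stand-ins, not backend structures.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the fields of Var that the walker touches. */
typedef struct MiniVar
{
    int     varno;          /* range-table index */
    int     varlevelsup;    /* query nesting depth */
    int16_t varattno;       /* attribute number */
} MiniVar;

/*
 * Sketch of change_varattnos_walker(): a Var referencing the parent
 * (varno 1, varlevelsup 0) has its attribute number pushed through the
 * newattno[] map so the inherited constraint uses the child's numbering.
 */
static void
remap_var(MiniVar *var, const int16_t *newattno)
{
    if (var->varlevelsup == 0 && var->varno == 1)
    {
        assert(newattno[var->varattno - 1] > 0);    /* must be an inherited column */
        var->varattno = newattno[var->varattno - 1];
    }
}

int
main(void)
{
    int16_t map[3] = {2, 0, 1};     /* parent attno 1 -> child 2, attno 3 -> child 1 */
    MiniVar v = {1, 0, 3};

    remap_var(&v, map);
    printf("remapped attno = %d\n", v.varattno);    /* prints 1 */
    return 0;
}
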
@@ -577,9 +582,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
Datum datum[Natts_pg_inherits];
char nullarr[Natts_pg_inherits];
- datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
- datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
- datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
+ datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
+ datum[1] = ObjectIdGetDatum(entryOid); /* inhparent */
+ datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
nullarr[1] = ' ';
@@ -730,7 +735,7 @@ checkAttrExists(const char *attributeName, const char *attributeType,
List *schema)
{
List *s;
- int i = 0;
+ int i = 0;
foreach(s, schema)
{
@@ -756,9 +761,9 @@ checkAttrExists(const char *attributeName, const char *attributeType,
static void
setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
{
- Relation relationRelation;
- HeapTuple tuple;
- Relation idescs[Num_pg_class_indices];
+ Relation relationRelation;
+ HeapTuple tuple;
+ Relation idescs[Num_pg_class_indices];
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index c450f1b400a..cd409781b2b 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.73 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.74 2001/03/22 03:59:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@
/* non-export function prototypes */
static bool get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
- int *encodingP, bool *dbIsTemplateP,
- Oid *dbLastSysOidP, char *dbpath);
+ int *encodingP, bool *dbIsTemplateP,
+ Oid *dbLastSysOidP, char *dbpath);
static bool get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb);
static char *resolve_alt_dbpath(const char *dbpath, Oid dboid);
static bool remove_dbdirs(const char *real_loc, const char *altloc);
@@ -82,12 +82,12 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: may not be called in a transaction block");
/*
- * Check for db name conflict. There is a race condition here, since
+ * Check for db name conflict. There is a race condition here, since
* another backend could create the same DB name before we commit.
- * However, holding an exclusive lock on pg_database for the whole time
- * we are copying the source database doesn't seem like a good idea,
- * so accept possibility of race to create. We will check again after
- * we grab the exclusive lock.
+ * However, holding an exclusive lock on pg_database for the whole
+ * time we are copying the source database doesn't seem like a good
+ * idea, so accept possibility of race to create. We will check again
+ * after we grab the exclusive lock.
*/
if (get_db_info(dbname, NULL, NULL, NULL, NULL, NULL, NULL))
elog(ERROR, "CREATE DATABASE: database \"%s\" already exists", dbname);
@@ -96,15 +96,16 @@ createdb(const char *dbname, const char *dbpath,
* Lookup database (template) to be cloned.
*/
if (!dbtemplate)
- dbtemplate = "template1"; /* Default template database name */
+ dbtemplate = "template1"; /* Default template database name */
if (!get_db_info(dbtemplate, &src_dboid, &src_owner, &src_encoding,
&src_istemplate, &src_lastsysoid, src_dbpath))
elog(ERROR, "CREATE DATABASE: template \"%s\" does not exist",
dbtemplate);
+
/*
- * Permission check: to copy a DB that's not marked datistemplate,
- * you must be superuser or the owner thereof.
+ * Permission check: to copy a DB that's not marked datistemplate, you
+ * must be superuser or the owner thereof.
*/
if (!src_istemplate)
{
@@ -112,6 +113,7 @@ createdb(const char *dbname, const char *dbpath,
elog(ERROR, "CREATE DATABASE: permission to copy \"%s\" denied",
dbtemplate);
}
+
/*
* Determine physical path of source database
*/
@@ -133,14 +135,16 @@ createdb(const char *dbname, const char *dbpath,
if (encoding < 0)
encoding = src_encoding;
- /*
- * Preassign OID for pg_database tuple, so that we can compute db path.
+ /*
+ * Preassign OID for pg_database tuple, so that we can compute db
+ * path.
*/
dboid = newoid();
/*
- * Compute nominal location (where we will try to access the database),
- * and resolve alternate physical location if one is specified.
+ * Compute nominal location (where we will try to access the
+ * database), and resolve alternate physical location if one is
+ * specified.
*/
nominal_loc = GetDatabasePath(dboid);
alt_loc = resolve_alt_dbpath(dbpath, dboid);
@@ -155,8 +159,8 @@ createdb(const char *dbname, const char *dbpath,
/*
* Force dirty buffers out to disk, to ensure source database is
- * up-to-date for the copy. (We really only need to flush buffers
- * for the source database...)
+ * up-to-date for the copy. (We really only need to flush buffers for
+ * the source database...)
*/
BufferSync();
@@ -231,7 +235,8 @@ createdb(const char *dbname, const char *dbpath,
tuple = heap_formtuple(pg_database_dsc, new_record, new_record_nulls);
- tuple->t_data->t_oid = dboid; /* override heap_insert's OID selection */
+ tuple->t_data->t_oid = dboid; /* override heap_insert's OID
+ * selection */
heap_insert(pg_database_rel, tuple);
@@ -273,9 +278,9 @@ dropdb(const char *dbname)
bool db_istemplate;
bool use_super;
Oid db_id;
- char *alt_loc;
- char *nominal_loc;
- char dbpath[MAXPGPATH];
+ char *alt_loc;
+ char *nominal_loc;
+ char dbpath[MAXPGPATH];
Relation pgdbrel;
HeapScanDesc pgdbscan;
ScanKeyData key;
@@ -311,8 +316,8 @@ dropdb(const char *dbname)
elog(ERROR, "DROP DATABASE: permission denied");
/*
- * Disallow dropping a DB that is marked istemplate. This is just
- * to prevent people from accidentally dropping template0 or template1;
+ * Disallow dropping a DB that is marked istemplate. This is just to
+ * prevent people from accidentally dropping template0 or template1;
* they can do so if they're really determined ...
*/
if (db_istemplate)
@@ -338,6 +343,7 @@ dropdb(const char *dbname)
tup = heap_getnext(pgdbscan, 0);
if (!HeapTupleIsValid(tup))
{
+
/*
* This error should never come up since the existence of the
* database is checked earlier
@@ -437,7 +443,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
tmptext = DatumGetTextP(heap_getattr(tuple,
Anum_pg_database_datpath,
- RelationGetDescr(relation),
+ RelationGetDescr(relation),
&isnull));
if (!isnull)
{
@@ -481,11 +487,11 @@ get_user_info(Oid use_sysid, bool *use_super, bool *use_createdb)
static char *
-resolve_alt_dbpath(const char * dbpath, Oid dboid)
+resolve_alt_dbpath(const char *dbpath, Oid dboid)
{
- const char * prefix;
- char * ret;
- size_t len;
+ const char *prefix;
+ char *ret;
+ size_t len;
if (dbpath == NULL || dbpath[0] == '\0')
return NULL;
@@ -502,7 +508,8 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
else
{
/* must be environment variable */
- char * var = getenv(dbpath);
+ char *var = getenv(dbpath);
+
if (!var)
elog(ERROR, "Postmaster environment variable '%s' not set", dbpath);
if (var[0] != '/')
@@ -519,11 +526,11 @@ resolve_alt_dbpath(const char * dbpath, Oid dboid)
static bool
-remove_dbdirs(const char * nominal_loc, const char * alt_loc)
+remove_dbdirs(const char *nominal_loc, const char *alt_loc)
{
- const char *target_dir;
- char buf[MAXPGPATH + 100];
- bool success = true;
+ const char *target_dir;
+ char buf[MAXPGPATH + 100];
+ bool success = true;
target_dir = alt_loc ? alt_loc : nominal_loc;
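
The resolve_alt_dbpath() hunks above keep the same two-way rule for alternate database locations: a dbpath beginning with '/' is taken literally, anything else names a postmaster environment variable whose value must itself be an absolute path. A small standalone sketch of that rule, with malloc/strdup standing in for palloc and a NULL return standing in for elog(ERROR):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Illustrative sketch of the prefix resolution in resolve_alt_dbpath():
 * returns a malloc'd copy of the resolved prefix, or NULL where the
 * backend would report an error or no alternate location.
 */
static char *
resolve_prefix(const char *dbpath)
{
    const char *prefix;

    if (dbpath == NULL || dbpath[0] == '\0')
        return NULL;            /* no alternate location requested */

    if (dbpath[0] == '/')
        prefix = dbpath;        /* literal absolute path */
    else
    {
        prefix = getenv(dbpath);        /* must be an environment variable */
        if (prefix == NULL || prefix[0] != '/')
            return NULL;        /* unset, or not an absolute path */
    }
    return strdup(prefix);
}

int
main(void)
{
    char   *p = resolve_prefix("/mnt/alt_pgdata");

    if (p != NULL)
    {
        printf("prefix: %s\n", p);
        free(p);
    }
    return 0;
}
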
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index 4f5f8a47f64..c8a2726a8f7 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.52 2001/02/12 20:07:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.53 2001/03/22 03:59:22 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -70,7 +70,7 @@ case_translate_language_name(const char *input, char *output)
--------------------------------------------------------------------------*/
int i;
- for (i = 0; i < NAMEDATALEN-1 && input[i]; ++i)
+ for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
output[i] = '\0';
@@ -110,12 +110,12 @@ compute_full_attributes(List *parameters,
Note: currently, only two of these parameters actually do anything:
* canCache means the optimizer's constant-folder is allowed to
- pre-evaluate the function when all its inputs are constants.
+ pre-evaluate the function when all its inputs are constants.
* isStrict means the function should not be called when any NULL
- inputs are present; instead a NULL result value should be assumed.
+ inputs are present; instead a NULL result value should be assumed.
- The other four parameters are not used anywhere. They used to be
+ The other four parameters are not used anywhere. They used to be
used in the "expensive functions" optimizer, but that's been dead code
for a long time.
@@ -217,21 +217,26 @@ void
CreateFunction(ProcedureStmt *stmt, CommandDest dest)
{
char *probin_str;
+
/* pathname of executable file that executes this function, if any */
char *prosrc_str;
+
/* SQL that executes this function, if any */
char *prorettype;
+
/* Type of return value (or member of set of values) from function */
char languageName[NAMEDATALEN];
+
/*
- * name of language of function, with case adjusted: "C",
- * "internal", "sql", etc.
+ * name of language of function, with case adjusted: "C", "internal",
+ * "sql", etc.
*/
bool returnsSet;
+
/* The function returns a set of values, as opposed to a singleton. */
/*
@@ -257,7 +262,7 @@ CreateFunction(ProcedureStmt *stmt, CommandDest dest)
if (!superuser())
elog(ERROR,
"Only users with Postgres superuser privilege are "
- "permitted to create a function in the '%s' language.\n\t"
+ "permitted to create a function in the '%s' language.\n\t"
"Others may use the 'sql' language "
"or the created procedural languages.",
languageName);
@@ -380,14 +385,14 @@ DefineOperator(char *oprName,
{
typeName1 = defGetString(defel);
if (IsA(defel->arg, TypeName)
- && ((TypeName *) defel->arg)->setof)
+ &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for leftarg");
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
typeName2 = defGetString(defel);
if (IsA(defel->arg, TypeName)
- && ((TypeName *) defel->arg)->setof)
+ &&((TypeName *) defel->arg)->setof)
elog(ERROR, "setof type not implemented for rightarg");
}
else if (strcasecmp(defel->defname, "procedure") == 0)
@@ -478,8 +483,8 @@ DefineAggregate(char *aggName, List *parameters)
DefElem *defel = (DefElem *) lfirst(pl);
/*
- * sfunc1, stype1, and initcond1 are accepted as obsolete spellings
- * for sfunc, stype, initcond.
+ * sfunc1, stype1, and initcond1 are accepted as obsolete
+ * spellings for sfunc, stype, initcond.
*/
if (strcasecmp(defel->defname, "sfunc") == 0)
transfuncName = defGetString(defel);
@@ -515,12 +520,12 @@ DefineAggregate(char *aggName, List *parameters)
/*
* Most of the argument-checking is done inside of AggregateCreate
*/
- AggregateCreate(aggName, /* aggregate name */
- transfuncName, /* step function name */
- finalfuncName, /* final function name */
- baseType, /* type of data being aggregated */
- transType, /* transition data type */
- initval); /* initial condition */
+ AggregateCreate(aggName, /* aggregate name */
+ transfuncName, /* step function name */
+ finalfuncName, /* final function name */
+ baseType, /* type of data being aggregated */
+ transType, /* transition data type */
+ initval); /* initial condition */
}
/*
@@ -543,13 +548,13 @@ DefineType(char *typeName, List *parameters)
char delimiter = DEFAULT_TYPDELIM;
char *shadow_type;
List *pl;
- char alignment = 'i'; /* default alignment */
+ char alignment = 'i';/* default alignment */
char storage = 'p'; /* default storage in TOAST */
/*
- * Type names must be one character shorter than other names,
- * allowing room to create the corresponding array type name with
- * prepended "_".
+ * Type names must be one character shorter than other names, allowing
+ * room to create the corresponding array type name with prepended
+ * "_".
*/
if (strlen(typeName) > (NAMEDATALEN - 2))
{
@@ -692,14 +697,16 @@ defGetString(DefElem *def)
switch (nodeTag(def->arg))
{
case T_Integer:
- {
- char *str = palloc(32);
+ {
+ char *str = palloc(32);
- snprintf(str, 32, "%ld", (long) intVal(def->arg));
- return str;
- }
+ snprintf(str, 32, "%ld", (long) intVal(def->arg));
+ return str;
+ }
case T_Float:
- /* T_Float values are kept in string form, so this type cheat
+
+ /*
+ * T_Float values are kept in string form, so this type cheat
* works (and doesn't risk losing precision)
*/
return strVal(def->arg);
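
The defGetString() hunk above reformats the T_Integer branch, which turns an integer option value back into an allocated string so every DefElem argument can be consumed as text. A standalone sketch of that branch, with malloc standing in for palloc:

#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative sketch of defGetString()'s integer case: format the
 * value into a freshly allocated 32-byte buffer and hand it back.
 */
static char *
int_option_as_string(long val)
{
    char   *str = malloc(32);   /* 32 bytes is ample for any long */

    if (str != NULL)
        snprintf(str, 32, "%ld", val);
    return str;
}

int
main(void)
{
    char   *s = int_option_as_string(16384);

    printf("%s\n", s);
    free(s);
    return 0;
}
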
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 31f24d88a6f..672ec54cb02 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.64 2001/01/27 01:41:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.65 2001/03/22 03:59:22 momjian Exp $
*
*/
@@ -271,7 +271,7 @@ explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es)
stringStringInfo(rte->relname));
if (strcmp(rte->eref->relname, rte->relname) != 0)
appendStringInfo(str, " %s",
- stringStringInfo(rte->eref->relname));
+ stringStringInfo(rte->eref->relname));
}
break;
case T_SubqueryScan:
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 6497da615b8..2d3e70c427b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.45 2001/02/23 09:26:14 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.46 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,15 +49,15 @@ static void CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid);
static void CheckPredExpr(Node *predicate, List *rangeTable, Oid baseRelOid);
static void CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid);
static void FuncIndexArgs(IndexInfo *indexInfo, Oid *classOidP,
- IndexElem *funcIndex,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ IndexElem *funcIndex,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static void NormIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
-static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
+static Oid GetAttrOpClass(IndexElem *attribute, Oid attrType,
+ char *accessMethodName, Oid accessMethodId);
static char *GetDefaultOpClass(Oid atttypid);
/*
@@ -118,9 +118,9 @@ DefineIndex(char *heapRelationName,
accessMethodName);
/*
- * XXX Hardwired hacks to check for limitations on supported index types.
- * We really ought to be learning this info from entries in the pg_am
- * table, instead of having it wired in here!
+ * XXX Hardwired hacks to check for limitations on supported index
+ * types. We really ought to be learning this info from entries in the
+ * pg_am table, instead of having it wired in here!
*/
if (unique && accessMethodId != BTREE_AM_OID)
elog(ERROR, "DefineIndex: unique indices are only available with the btree access method");
@@ -161,7 +161,8 @@ DefineIndex(char *heapRelationName,
elog(ERROR, "Existing indexes are inactive. REINDEX first");
/*
- * Prepare arguments for index_create, primarily an IndexInfo structure
+ * Prepare arguments for index_create, primarily an IndexInfo
+ * structure
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_Predicate = (Node *) cnfPred;
@@ -207,7 +208,7 @@ DefineIndex(char *heapRelationName,
/*
* We update the relation's pg_class tuple even if it already has
- * relhasindex = true. This is needed to cause a shared-cache-inval
+ * relhasindex = true. This is needed to cause a shared-cache-inval
* message to be sent for the pg_class tuple, which will cause other
* backends to flush their relcache entries and in particular their
* cached lists of the indexes for this relation.
@@ -415,8 +416,8 @@ FuncIndexArgs(IndexInfo *indexInfo,
* has exact-match or binary-compatible input types.
* ----------------
*/
- if (! func_get_detail(funcIndex->name, nargs, argTypes,
- &funcid, &rettype, &retset, &true_typeids))
+ if (!func_get_detail(funcIndex->name, nargs, argTypes,
+ &funcid, &rettype, &retset, &true_typeids))
func_error("DefineIndex", funcIndex->name, nargs, argTypes, NULL);
if (retset)
@@ -425,7 +426,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
for (i = 0; i < nargs; i++)
{
if (argTypes[i] != true_typeids[i] &&
- ! IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
+ !IS_BINARY_COMPATIBLE(argTypes[i], true_typeids[i]))
func_error("DefineIndex", funcIndex->name, nargs, argTypes,
"Index function must be binary-compatible with table datatype");
}
@@ -439,7 +440,7 @@ FuncIndexArgs(IndexInfo *indexInfo,
indexInfo->ii_FuncOid = funcid;
/* Need to do the fmgr function lookup now, too */
- fmgr_info(funcid, & indexInfo->ii_FuncInfo);
+ fmgr_info(funcid, &indexInfo->ii_FuncInfo);
}
static void
@@ -477,7 +478,7 @@ NormIndexAttrs(IndexInfo *indexInfo,
indexInfo->ii_KeyAttrNumbers[attn] = attform->attnum;
classOidP[attn] = GetAttrOpClass(attribute, attform->atttypid,
- accessMethodName, accessMethodId);
+ accessMethodName, accessMethodId);
ReleaseSysCache(atttuple);
attn++;
@@ -515,8 +516,8 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
attribute->class);
/*
- * Assume the opclass is supported by this index access method
- * if we can find at least one relevant entry in pg_amop.
+ * Assume the opclass is supported by this index access method if we
+ * can find at least one relevant entry in pg_amop.
*/
ScanKeyEntryInitialize(&entry[0], 0,
Anum_pg_amop_amopid,
@@ -530,7 +531,7 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
relation = heap_openr(AccessMethodOperatorRelationName, AccessShareLock);
scan = heap_beginscan(relation, false, SnapshotNow, 2, entry);
- if (! HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
+ if (!HeapTupleIsValid(tuple = heap_getnext(scan, 0)))
elog(ERROR, "DefineIndex: opclass \"%s\" not supported by access method \"%s\"",
attribute->class, accessMethodName);
@@ -540,17 +541,18 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
heap_close(relation, AccessShareLock);
/*
- * Make sure the operators associated with this opclass actually accept
- * the column data type. This prevents possible coredumps caused by
- * user errors like applying text_ops to an int4 column. We will accept
- * an opclass as OK if the operator's input datatype is binary-compatible
- * with the actual column datatype. Note we assume that all the operators
- * associated with an opclass accept the same datatypes, so checking the
- * first one we happened to find in the table is sufficient.
+ * Make sure the operators associated with this opclass actually
+ * accept the column data type. This prevents possible coredumps
+ * caused by user errors like applying text_ops to an int4 column. We
+ * will accept an opclass as OK if the operator's input datatype is
+ * binary-compatible with the actual column datatype. Note we assume
+ * that all the operators associated with an opclass accept the same
+ * datatypes, so checking the first one we happened to find in the
+ * table is sufficient.
*
* If the opclass was the default for the datatype, assume we can skip
- * this check --- that saves a few cycles in the most common case.
- * If pg_opclass is wrong then we're probably screwed anyway...
+ * this check --- that saves a few cycles in the most common case. If
+ * pg_opclass is wrong then we're probably screwed anyway...
*/
if (doTypeCheck)
{
@@ -560,11 +562,11 @@ GetAttrOpClass(IndexElem *attribute, Oid attrType,
if (HeapTupleIsValid(tuple))
{
Form_pg_operator optup = (Form_pg_operator) GETSTRUCT(tuple);
- Oid opInputType = (optup->oprkind == 'l') ?
- optup->oprright : optup->oprleft;
+ Oid opInputType = (optup->oprkind == 'l') ?
+ optup->oprright : optup->oprleft;
if (attrType != opInputType &&
- ! IS_BINARY_COMPATIBLE(attrType, opInputType))
+ !IS_BINARY_COMPATIBLE(attrType, opInputType))
elog(ERROR, "DefineIndex: opclass \"%s\" does not accept datatype \"%s\"",
attribute->class, typeidTypeName(attrType));
ReleaseSysCache(tuple);
@@ -660,7 +662,7 @@ ReindexIndex(const char *name, bool force /* currently unused */ )
if (IsIgnoringSystemIndexes())
overwrite = true;
if (!reindex_index(tuple->t_data->t_oid, force, overwrite))
-#endif /* OLD_FILE_NAMING */
+#endif /* OLD_FILE_NAMING */
elog(NOTICE, "index \"%s\" wasn't reindexed", name);
ReleaseSysCache(tuple);
@@ -752,18 +754,18 @@ ReindexDatabase(const char *dbname, bool force, bool all)
elog(ERROR, "REINDEX DATABASE: Can be executed only on the currently open database.");
/*
- * We cannot run inside a user transaction block; if we were
- * inside a transaction, then our commit- and
- * start-transaction-command calls would not have the intended effect!
+ * We cannot run inside a user transaction block; if we were inside a
+ * transaction, then our commit- and start-transaction-command calls
+ * would not have the intended effect!
*/
if (IsTransactionBlock())
elog(ERROR, "REINDEX DATABASE cannot run inside a BEGIN/END block");
/*
- * Create a memory context that will survive forced transaction commits
- * we do below. Since it is a child of QueryContext, it will go away
- * eventually even if we suffer an error; there's no need for special
- * abort cleanup logic.
+ * Create a memory context that will survive forced transaction
+ * commits we do below. Since it is a child of QueryContext, it will
+ * go away eventually even if we suffer an error; there's no need for
+ * special abort cleanup logic.
*/
private_context = AllocSetContextCreate(QueryContext,
"ReindexDatabase",
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index bbf008c918e..ca1dbf3cbe4 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -111,7 +111,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
i = 0;
values[i++] = PointerGetDatum(languageName);
- values[i++] = BoolGetDatum(true); /* lanispl */
+ values[i++] = BoolGetDatum(true); /* lanispl */
values[i++] = BoolGetDatum(stmt->pltrusted);
values[i++] = ObjectIdGetDatum(procTup->t_data->t_oid);
values[i++] = DirectFunctionCall1(textin,
diff --git a/src/backend/commands/remove.c b/src/backend/commands/remove.c
index 2c271758e08..da5ad74d8ba 100644
--- a/src/backend/commands/remove.c
+++ b/src/backend/commands/remove.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.59 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.60 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,8 +40,8 @@
*/
void
RemoveOperator(char *operatorName, /* operator name */
- char *typeName1, /* left argument type name */
- char *typeName2) /* right argument type name */
+ char *typeName1, /* left argument type name */
+ char *typeName2) /* right argument type name */
{
Relation relation;
HeapTuple tup;
diff --git a/src/backend/commands/rename.c b/src/backend/commands/rename.c
index 3630cdd0d19..52568f29f5f 100644
--- a/src/backend/commands/rename.c
+++ b/src/backend/commands/rename.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.55 2001/01/24 19:42:52 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.56 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -189,15 +189,15 @@ renamerel(const char *oldrelname, const char *newrelname)
newrelname);
/*
- * Check for renaming a temp table, which only requires altering
- * the temp-table mapping, not the underlying table.
+ * Check for renaming a temp table, which only requires altering the
+ * temp-table mapping, not the underlying table.
*/
if (rename_temp_relation(oldrelname, newrelname))
return; /* all done... */
/*
- * Instead of using heap_openr(), do it the hard way, so that we
- * can rename indexes as well as regular relations.
+ * Instead of using heap_openr(), do it the hard way, so that we can
+ * rename indexes as well as regular relations.
*/
targetrelation = RelationNameGetRelation(oldrelname);
@@ -219,8 +219,9 @@ renamerel(const char *oldrelname, const char *newrelname)
heap_close(targetrelation, NoLock);
/*
- * Flush the relcache entry (easier than trying to change it at exactly
- * the right instant). It'll get rebuilt on next access to relation.
+ * Flush the relcache entry (easier than trying to change it at
+ * exactly the right instant). It'll get rebuilt on next access to
+ * relation.
*
* XXX What if relation is myxactonly?
*
@@ -244,8 +245,8 @@ renamerel(const char *oldrelname, const char *newrelname)
elog(ERROR, "renamerel: relation \"%s\" exists", newrelname);
/*
- * Update pg_class tuple with new relname. (Scribbling on reltup
- * is OK because it's a copy...)
+ * Update pg_class tuple with new relname. (Scribbling on reltup is
+ * OK because it's a copy...)
*/
StrNCpy(NameStr(((Form_pg_class) GETSTRUCT(reltup))->relname),
newrelname, NAMEDATALEN);
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 04398423b67..85a8b740048 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.51 2001/03/07 21:20:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.52 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,7 +33,7 @@
* so we pre-log a few fetches in advance. In the event of
* crash we can lose as much as we pre-logged.
*/
-#define SEQ_LOG_VALS 32
+#define SEQ_LOG_VALS 32
typedef struct sequence_magic
{
@@ -140,7 +140,7 @@ DefineSequence(CreateSeqStmt *seq)
case SEQ_COL_LOG:
typnam->name = "int4";
coldef->colname = "log_cnt";
- value[i - 1] = Int32GetDatum((int32)1);
+ value[i - 1] = Int32GetDatum((int32) 1);
break;
case SEQ_COL_CYCLE:
typnam->name = "char";
@@ -247,7 +247,7 @@ nextval(PG_FUNCTION_ARGS)
logit = true;
}
- while (fetch) /* try to fetch cache [+ log ] numbers */
+ while (fetch) /* try to fetch cache [+ log ] numbers */
{
/*
@@ -292,8 +292,8 @@ nextval(PG_FUNCTION_ARGS)
log--;
rescnt++;
last = next;
- if (rescnt == 1) /* if it's first result - */
- result = next; /* it's what to return */
+ if (rescnt == 1) /* if it's first result - */
+ result = next; /* it's what to return */
}
}
@@ -306,12 +306,12 @@ nextval(PG_FUNCTION_ARGS)
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@@ -319,17 +319,17 @@ nextval(PG_FUNCTION_ARGS)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader)page)->pd_special -
- ((PageHeader)page)->pd_upper;
+ rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
+ rdata[1].len = ((PageHeader) page)->pd_special -
+ ((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
+ recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
- if (fetch) /* not all numbers were fetched */
+ if (fetch) /* not all numbers were fetched */
log -= fetch;
}
@@ -374,15 +374,15 @@ currval(PG_FUNCTION_ARGS)
PG_RETURN_INT32(result);
}
-/*
+/*
* Main internal procedure that handles 2 & 3 arg forms of SETVAL.
*
* Note that the 3 arg version (which sets the is_called flag) is
* only for use in pg_dump, and setting the is_called flag may not
- * work if multiple users are attached to the database and referencing
+ * work if multiple users are attached to the database and referencing
* the sequence (unlikely if pg_dump is restoring it).
*
- * It is necessary to have the 3 arg version so that pg_dump can
+ * It is necessary to have the 3 arg version so that pg_dump can
* restore the state of a sequence exactly during data-only restores -
* it is the only way to clear the is_called flag in an existing
* sequence.
@@ -409,18 +409,19 @@ do_setval(char *seqname, int32 next, bool iscalled)
/* save info in local cache */
elm->last = next; /* last returned number */
- elm->cached = next; /* last cached number (forget cached values) */
+ elm->cached = next; /* last cached number (forget cached
+ * values) */
START_CRIT_SECTION();
{
xl_seq_rec xlrec;
XLogRecPtr recptr;
- XLogRecData rdata[2];
+ XLogRecData rdata[2];
Page page = BufferGetPage(buf);
xlrec.node = elm->rel->rd_node;
rdata[0].buffer = InvalidBuffer;
- rdata[0].data = (char*)&xlrec;
+ rdata[0].data = (char *) &xlrec;
rdata[0].len = sizeof(xl_seq_rec);
rdata[0].next = &(rdata[1]);
@@ -428,12 +429,12 @@ do_setval(char *seqname, int32 next, bool iscalled)
seq->is_called = 't';
seq->log_cnt = 0;
rdata[1].buffer = InvalidBuffer;
- rdata[1].data = (char*)page + ((PageHeader) page)->pd_upper;
- rdata[1].len = ((PageHeader)page)->pd_special -
- ((PageHeader)page)->pd_upper;
+ rdata[1].data = (char *) page + ((PageHeader) page)->pd_upper;
+ rdata[1].len = ((PageHeader) page)->pd_special -
+ ((PageHeader) page)->pd_upper;
rdata[1].next = NULL;
- recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG|XLOG_NO_TRAN, rdata);
+ recptr = XLogInsert(RM_SEQ_ID, XLOG_SEQ_LOG | XLOG_NO_TRAN, rdata);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@@ -496,7 +497,7 @@ static char *
get_seq_name(text *seqin)
{
char *rawname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(seqin)));
+ PointerGetDatum(seqin)));
int rawlen = strlen(rawname);
char *seqname;
@@ -511,6 +512,7 @@ get_seq_name(text *seqin)
else
{
seqname = rawname;
+
/*
* It's important that this match the identifier downcasing code
* used by backend/parser/scan.l.
@@ -752,15 +754,16 @@ get_param(DefElem *def)
return -1;
}
-void seq_redo(XLogRecPtr lsn, XLogRecord *record)
+void
+seq_redo(XLogRecPtr lsn, XLogRecord *record)
{
- uint8 info = record->xl_info & ~XLR_INFO_MASK;
- Relation reln;
- Buffer buffer;
- Page page;
- char *item;
- Size itemsz;
- xl_seq_rec *xlrec = (xl_seq_rec*) XLogRecGetData(record);
+ uint8 info = record->xl_info & ~XLR_INFO_MASK;
+ Relation reln;
+ Buffer buffer;
+ Page page;
+ char *item;
+ Size itemsz;
+ xl_seq_rec *xlrec = (xl_seq_rec *) XLogRecGetData(record);
sequence_magic *sm;
if (info != XLOG_SEQ_LOG)
@@ -772,8 +775,8 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
- elog(STOP, "seq_redo: can't read block of %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
+ elog(STOP, "seq_redo: can't read block of %u/%u",
+ xlrec->node.tblNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
@@ -781,10 +784,10 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
sm = (sequence_magic *) PageGetSpecialPointer(page);
sm->magic = SEQ_MAGIC;
- item = (char*)xlrec + sizeof(xl_seq_rec);
+ item = (char *) xlrec + sizeof(xl_seq_rec);
itemsz = record->xl_len - sizeof(xl_seq_rec);
itemsz = MAXALIGN(itemsz);
- if (PageAddItem(page, (Item)item, itemsz,
+ if (PageAddItem(page, (Item) item, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(STOP, "seq_redo: failed to add item to page");
@@ -795,14 +798,16 @@ void seq_redo(XLogRecPtr lsn, XLogRecord *record)
return;
}
-void seq_undo(XLogRecPtr lsn, XLogRecord *record)
+void
+seq_undo(XLogRecPtr lsn, XLogRecord *record)
{
}
-void seq_desc(char *buf, uint8 xl_info, char* rec)
+void
+seq_desc(char *buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
- xl_seq_rec *xlrec = (xl_seq_rec*) rec;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
+ xl_seq_rec *xlrec = (xl_seq_rec *) rec;
if (info == XLOG_SEQ_LOG)
strcat(buf, "log: ");
@@ -813,5 +818,5 @@ void seq_desc(char *buf, uint8 xl_info, char* rec)
}
sprintf(buf + strlen(buf), "node %u/%u",
- xlrec->node.tblNode, xlrec->node.relNode);
+ xlrec->node.tblNode, xlrec->node.relNode);
}
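
The nextval()/do_setval()/seq_redo() hunks above all build a two-piece XLogRecData chain: the first entry points at the xl_seq_rec header, the second at the page contents between pd_upper and pd_special, and XLogInsert() concatenates the chain into one WAL record. A standalone sketch of that chain-walking idea follows; MockRecData mirrors only the data/len/next fields and omits the buffer field and all real backend types.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the pieces of XLogRecData used above. */
typedef struct MockRecData
{
    const char *data;               /* start of this chunk */
    size_t      len;                /* its length */
    struct MockRecData *next;       /* next chunk, or NULL */
} MockRecData;

/* Walk the chain and concatenate the chunks into one record buffer. */
static size_t
assemble_record(const MockRecData *rdata, char *out, size_t outsize)
{
    size_t      total = 0;

    for (; rdata != NULL; rdata = rdata->next)
    {
        if (total + rdata->len > outsize)
            break;                  /* caller's buffer too small */
        memcpy(out + total, rdata->data, rdata->len);
        total += rdata->len;
    }
    return total;
}

int
main(void)
{
    const char  hdr[] = "HDR";      /* stands in for the xl_seq_rec struct */
    const char  payload[] = "PAGE"; /* stands in for pd_upper..pd_special */
    MockRecData rdata[2];
    char        record[16];

    rdata[0].data = hdr;
    rdata[0].len = 3;
    rdata[0].next = &rdata[1];
    rdata[1].data = payload;
    rdata[1].len = 4;
    rdata[1].next = NULL;

    printf("record is %zu bytes\n",
           assemble_record(rdata, record, sizeof(record)));
    return 0;
}
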
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 4a6ddef9283..034b49887e7 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.88 2001/03/14 21:50:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.89 2001/03/22 03:59:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,8 +36,8 @@ static void DescribeTrigger(TriggerDesc *trigdesc, Trigger *trigger);
static HeapTuple GetTupleForTrigger(EState *estate, ItemPointer tid,
TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(Trigger *trigger,
- TriggerData *trigdata,
- MemoryContext per_tuple_context);
+ TriggerData *trigdata,
+ MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(Relation rel, int event,
HeapTuple oldtup, HeapTuple newtup);
@@ -87,7 +87,9 @@ CreateTrigger(CreateTrigStmt *stmt)
constrrelid = InvalidOid;
else
{
- /* NoLock is probably sufficient here, since we're only
+
+ /*
+ * NoLock is probably sufficient here, since we're only
* interested in getting the relation's OID...
*/
rel = heap_openr(stmt->constrrelname, NoLock);
@@ -192,7 +194,7 @@ CreateTrigger(CreateTrigStmt *stmt)
values[Anum_pg_trigger_tgrelid - 1] = ObjectIdGetDatum(RelationGetRelid(rel));
values[Anum_pg_trigger_tgname - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->trigname));
+ CStringGetDatum(stmt->trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
values[Anum_pg_trigger_tgenabled - 1] = BoolGetDatum(true);
@@ -211,7 +213,7 @@ CreateTrigger(CreateTrigStmt *stmt)
foreach(le, stmt->args)
{
- char *ar = ((Value*) lfirst(le))->val.str;
+ char *ar = ((Value *) lfirst(le))->val.str;
len += strlen(ar) + 4;
for (; *ar; ar++)
@@ -224,7 +226,7 @@ CreateTrigger(CreateTrigStmt *stmt)
args[0] = '\0';
foreach(le, stmt->args)
{
- char *s = ((Value*) lfirst(le))->val.str;
+ char *s = ((Value *) lfirst(le))->val.str;
char *d = args + strlen(args);
while (*s)
@@ -237,7 +239,7 @@ CreateTrigger(CreateTrigStmt *stmt)
}
values[Anum_pg_trigger_tgnargs - 1] = Int16GetDatum(nargs);
values[Anum_pg_trigger_tgargs - 1] = DirectFunctionCall1(byteain,
- CStringGetDatum(args));
+ CStringGetDatum(args));
}
else
{
@@ -569,15 +571,16 @@ RelationBuildTriggers(Relation relation)
sizeof(Trigger));
else
triggers = (Trigger *) repalloc(triggers,
- (found + 1) * sizeof(Trigger));
+ (found + 1) * sizeof(Trigger));
build = &(triggers[found]);
build->tgoid = htup->t_data->t_oid;
build->tgname = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&pg_trigger->tgname))));
build->tgfoid = pg_trigger->tgfoid;
- build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as uninitialized */
+ build->tgfunc.fn_oid = InvalidOid; /* mark FmgrInfo as
+ * uninitialized */
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
build->tgisconstraint = pg_trigger->tgisconstraint;
@@ -836,22 +839,22 @@ ExecCallTriggerFunc(Trigger *trigger,
TriggerData *trigdata,
MemoryContext per_tuple_context)
{
- FunctionCallInfoData fcinfo;
- Datum result;
- MemoryContext oldContext;
+ FunctionCallInfoData fcinfo;
+ Datum result;
+ MemoryContext oldContext;
/*
- * Fmgr lookup info is cached in the Trigger structure,
- * so that we need not repeat the lookup on every call.
+ * Fmgr lookup info is cached in the Trigger structure, so that we
+ * need not repeat the lookup on every call.
*/
if (trigger->tgfunc.fn_oid == InvalidOid)
fmgr_info(trigger->tgfoid, &trigger->tgfunc);
/*
- * Do the function evaluation in the per-tuple memory context,
- * so that leaked memory will be reclaimed once per tuple.
- * Note in particular that any new tuple created by the trigger function
- * will live till the end of the tuple cycle.
+ * Do the function evaluation in the per-tuple memory context, so that
+ * leaked memory will be reclaimed once per tuple. Note in particular
+ * that any new tuple created by the trigger function will live till
+ * the end of the tuple cycle.
*/
oldContext = MemoryContextSwitchTo(per_tuple_context);
@@ -868,8 +871,8 @@ ExecCallTriggerFunc(Trigger *trigger,
MemoryContextSwitchTo(oldContext);
/*
- * Trigger protocol allows function to return a null pointer,
- * but NOT to set the isnull result flag.
+ * Trigger protocol allows function to return a null pointer, but NOT
+ * to set the isnull result flag.
*/
if (fcinfo.isnull)
elog(ERROR, "ExecCallTriggerFunc: function %u returned NULL",
@@ -885,7 +888,7 @@ ExecBRInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
Trigger **trigger = rel->trigdesc->tg_before_row[TRIGGER_EVENT_INSERT];
HeapTuple newtuple = trigtuple;
HeapTuple oldtuple;
- TriggerData LocTriggerData;
+ TriggerData LocTriggerData;
int i;
LocTriggerData.type = T_TriggerData;
@@ -915,9 +918,7 @@ ExecARInsertTriggers(EState *estate, Relation rel, HeapTuple trigtuple)
if (rel->trigdesc->n_after_row[TRIGGER_EVENT_INSERT] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_UPDATE] > 0 ||
rel->trigdesc->n_after_row[TRIGGER_EVENT_DELETE] > 0)
- {
DeferredTriggerSaveEvent(rel, TRIGGER_EVENT_INSERT, NULL, trigtuple);
- }
}
bool
@@ -1240,10 +1241,11 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate)
static void
deferredTriggerAddEvent(DeferredTriggerEvent event)
{
+
/*
* Since the event list could grow quite long, we keep track of the
- * list tail and append there, rather than just doing a stupid "lappend".
- * This avoids O(N^2) behavior for large numbers of events.
+ * list tail and append there, rather than just doing a stupid
+ * "lappend". This avoids O(N^2) behavior for large numbers of events.
*/
event->dte_next = NULL;
if (deftrig_event_tail == NULL)
@@ -1291,7 +1293,7 @@ deferredTriggerGetPreviousEvent(Oid relid, ItemPointer ctid)
if (previous == NULL)
elog(ERROR,
- "deferredTriggerGetPreviousEvent: event for tuple %s not found",
+ "deferredTriggerGetPreviousEvent: event for tuple %s not found",
DatumGetCString(DirectFunctionCall1(tidout,
PointerGetDatum(ctid))));
return previous;
@@ -1528,7 +1530,7 @@ DeferredTriggerBeginXact(void)
if (deftrig_cxt != NULL)
elog(ERROR,
- "DeferredTriggerBeginXact() called while inside transaction");
+ "DeferredTriggerBeginXact() called while inside transaction");
/* ----------
* Create the per transaction memory context and copy all states
@@ -1671,7 +1673,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
l = deftrig_dfl_trigstates;
while (l != NIL)
{
- List *next = lnext(l);
+ List *next = lnext(l);
pfree(lfirst(l));
pfree(l);
@@ -1700,7 +1702,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
l = deftrig_trigstates;
while (l != NIL)
{
- List *next = lnext(l);
+ List *next = lnext(l);
pfree(lfirst(l));
pfree(l);
@@ -1912,7 +1914,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* Called by ExecAR...Triggers() to add the event to the queue.
*
* NOTE: should be called only if we've determined that an event must
- * be added to the queue. We must save *all* events if there is either
+ * be added to the queue. We must save *all* events if there is either
* an UPDATE or a DELETE deferred trigger; see uses of
* deferredTriggerGetPreviousEvent.
* ----------
@@ -2099,15 +2101,15 @@ DeferredTriggerSaveEvent(Relation rel, int event,
TRIGGER_DEFERRED_ROW_INSERTED)
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&(rel->rd_rel->relname)))));
if (prev_event->dte_item[i].dti_state &
TRIGGER_DEFERRED_KEY_CHANGED)
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
- DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ DatumGetCString(DirectFunctionCall1(nameout,
+ NameGetDatum(&(rel->rd_rel->relname)))));
}
/* ----------
@@ -2142,7 +2144,7 @@ DeferredTriggerSaveEvent(Relation rel, int event,
elog(ERROR, "triggered data change violation "
"on relation \"%s\"",
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&(rel->rd_rel->relname)))));
+ NameGetDatum(&(rel->rd_rel->relname)))));
break;
}
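
The deferredTriggerAddEvent() hunk above explains why the event queue keeps a tail pointer: appending at a remembered tail is O(1) per event, whereas walking to the end for every new event (a plain lappend) would cost O(N^2) over a long transaction. A standalone sketch of that append; MockEvent stands in for DeferredTriggerEvent:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a queued deferred-trigger event. */
typedef struct MockEvent
{
    int     id;
    struct MockEvent *next;
} MockEvent;

static MockEvent *event_head = NULL;
static MockEvent *event_tail = NULL;

/* Append in O(1) by remembering the list tail, as the hunk describes. */
static void
add_event(MockEvent *ev)
{
    ev->next = NULL;
    if (event_tail == NULL)
        event_head = ev;            /* first event in the queue */
    else
        event_tail->next = ev;      /* append after current tail */
    event_tail = ev;
}

int
main(void)
{
    int     i;
    MockEvent *ev;

    for (i = 0; i < 3; i++)
    {
        ev = malloc(sizeof(MockEvent));
        ev->id = i;
        add_event(ev);
    }
    for (ev = event_head; ev != NULL; ev = ev->next)
        printf("event %d\n", ev->id);
    return 0;
}
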
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 378620cb3b7..ede41b64cc8 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.73 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.74 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -122,7 +122,7 @@ write_password_file(Relation rel)
CRYPT_PWD_FILE_SEPSTR
"%s\n",
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(DatumGetName(datum_n)))),
+ NameGetDatum(DatumGetName(datum_n)))),
null_p ? "" :
DatumGetCString(DirectFunctionCall1(textout, datum_p)),
null_v ? "\\N" :
@@ -248,7 +248,7 @@ CreateUser(CreateUserStmt *stmt)
* Build a tuple to insert
*/
new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
+ CStringGetDatum(stmt->user));
new_record[Anum_pg_shadow_usesysid - 1] = Int32GetDatum(havesysid ? stmt->sysid : max_id + 1);
AssertState(BoolIsValid(stmt->createdb));
@@ -312,7 +312,7 @@ CreateUser(CreateUserStmt *stmt)
* this in */
ags.action = +1;
ags.listUsers = makeList1(makeInteger(havesysid ?
- stmt->sysid : max_id + 1));
+ stmt->sysid : max_id + 1));
AlterGroup(&ags, "CREATE USER");
}
@@ -377,7 +377,7 @@ AlterUser(AlterUserStmt *stmt)
* Build a tuple to update, perusing the information just obtained
*/
new_record[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(stmt->user));
+ CStringGetDatum(stmt->user));
new_record_nulls[Anum_pg_shadow_usename - 1] = ' ';
/* sysid - leave as is */
@@ -561,7 +561,7 @@ DropUser(DropUserStmt *stmt)
elog(ERROR, "DROP USER: user \"%s\" owns database \"%s\", cannot be removed%s",
user,
DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(DatumGetName(datum)))),
+ NameGetDatum(DatumGetName(datum)))),
(length(stmt->users) > 1) ? " (no users removed)" : ""
);
}
@@ -603,6 +603,7 @@ DropUser(DropUserStmt *stmt)
}
heap_endscan(scan);
heap_close(pg_rel, AccessExclusiveLock);
+
/*
* Advance command counter so that later iterations of this loop
* will see the changes already made. This is essential if, for
@@ -873,7 +874,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
{
/* Get the uid of the proposed user to add. */
tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(strVal(lfirst(item))),
+ PointerGetDatum(strVal(lfirst(item))),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "%s: user \"%s\" does not exist",
@@ -995,7 +996,7 @@ AlterGroup(AlterGroupStmt *stmt, const char *tag)
{
/* Get the uid of the proposed user to drop. */
tuple = SearchSysCache(SHADOWNAME,
- PointerGetDatum(strVal(lfirst(item))),
+ PointerGetDatum(strVal(lfirst(item))),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "ALTER GROUP: user \"%s\" does not exist", strVal(lfirst(item)));
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 3606d05f741..078c9b53475 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.187 2001/03/14 08:40:57 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.188 2001/03/22 03:59:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,11 +47,11 @@
#include "utils/syscache.h"
#include "utils/temprel.h"
-extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
- char *unused, int unlen);
-extern XLogRecPtr log_heap_move(Relation reln,
- Buffer oldbuf, ItemPointerData from,
- Buffer newbuf, HeapTuple newtup);
+extern XLogRecPtr log_heap_clean(Relation reln, Buffer buffer,
+ char *unused, int unlen);
+extern XLogRecPtr log_heap_move(Relation reln,
+ Buffer oldbuf, ItemPointerData from,
+ Buffer newbuf, HeapTuple newtup);
static MemoryContext vac_context = NULL;
@@ -78,9 +78,9 @@ static void vpage_insert(VacPageList vacpagelist, VacPage vpnew);
static void get_indices(Relation relation, int *nindices, Relation **Irel);
static void close_indices(int nindices, Relation *Irel);
static IndexInfo **get_index_desc(Relation onerel, int nindices,
- Relation *Irel);
+ Relation *Irel);
static void *vac_find_eq(void *bot, int nelem, int size, void *elm,
- int (*compar) (const void *, const void *));
+ int (*compar) (const void *, const void *));
static int vac_cmp_blk(const void *left, const void *right);
static int vac_cmp_offno(const void *left, const void *right);
static int vac_cmp_vtlinks(const void *left, const void *right);
@@ -120,9 +120,9 @@ vacuum(char *vacrel, bool verbose, bool analyze, List *anal_cols)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of QueryContext, it will go away eventually
- * even if we suffer an error; there's no need for special abort
- * cleanup logic.
+ * Since it is a child of QueryContext, it will go away eventually even
+ * if we suffer an error; there's no need for special abort cleanup
+ * logic.
*/
vac_context = AllocSetContextCreate(QueryContext,
"Vacuum",
@@ -215,8 +215,8 @@ vacuum_shutdown()
/*
* Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete
- * the active context!
+ * StartTransactionCommand, else we might be trying to delete the
+ * active context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@@ -360,10 +360,10 @@ vacuum_rel(Oid relid)
{
Relation onerel;
LockRelId onerelid;
- VacPageListData vacuum_pages; /* List of pages to vacuum and/or clean
- * indices */
- VacPageListData fraged_pages; /* List of pages with space enough for
- * re-using */
+ VacPageListData vacuum_pages; /* List of pages to vacuum and/or
+ * clean indices */
+ VacPageListData fraged_pages; /* List of pages with space enough
+ * for re-using */
Relation *Irel;
int32 nindices,
i;
@@ -411,10 +411,10 @@ vacuum_rel(Oid relid)
}
/*
- * Get a session-level exclusive lock too. This will protect our
- * exclusive access to the relation across multiple transactions,
- * so that we can vacuum the relation's TOAST table (if any) secure
- * in the knowledge that no one is diddling the parent relation.
+ * Get a session-level exclusive lock too. This will protect our
+ * exclusive access to the relation across multiple transactions, so
+ * that we can vacuum the relation's TOAST table (if any) secure in
+ * the knowledge that no one is diddling the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@@ -458,10 +458,11 @@ vacuum_rel(Oid relid)
vacrelstats->hasindex = true;
else
vacrelstats->hasindex = false;
-#ifdef NOT_USED
+#ifdef NOT_USED
+
/*
- * reindex in VACUUM is dangerous under WAL.
- * ifdef out until it becomes safe.
+ * reindex in VACUUM is dangerous under WAL. ifdef out until it
+ * becomes safe.
*/
if (reindex)
{
@@ -470,7 +471,7 @@ vacuum_rel(Oid relid)
Irel = (Relation *) NULL;
activate_indexes_of_a_table(relid, false);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* Clean/scan index relation(s) */
if (Irel != (Relation *) NULL)
@@ -506,6 +507,7 @@ vacuum_rel(Oid relid)
}
else
{
+
/*
* Flush dirty pages out to disk. We must do this even if we
* didn't do anything else, because we want to ensure that all
@@ -518,10 +520,10 @@ vacuum_rel(Oid relid)
i);
}
}
-#ifdef NOT_USED
+#ifdef NOT_USED
if (reindex)
activate_indexes_of_a_table(relid, true);
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* all done with this class, but hold lock until commit */
heap_close(onerel, NoLock);
@@ -537,11 +539,11 @@ vacuum_rel(Oid relid)
CommitTransactionCommand();
/*
- * If the relation has a secondary toast one, vacuum that too
- * while we still hold the session lock on the master table.
- * We don't need to propagate "analyze" to it, because the toaster
- * always uses hardcoded index access and statistics are
- * totally unimportant for toast relations
+ * If the relation has a secondary toast one, vacuum that too while we
+ * still hold the session lock on the master table. We don't need to
+ * propagate "analyze" to it, because the toaster always uses
+ * hardcoded index access and statistics are totally unimportant for
+ * toast relations
*/
if (toast_relid != InvalidOid)
vacuum_rel(toast_relid);
@@ -563,7 +565,7 @@ vacuum_rel(Oid relid)
*/
static void
scan_heap(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages)
+ VacPageList vacuum_pages, VacPageList fraged_pages)
{
BlockNumber nblocks,
blkno;
@@ -845,7 +847,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
* dead tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
* determine how much space will be available after
- * removal of dead tuples. But note we are NOT changing
+ * removal of dead tuples. But note we are NOT changing
* the real page yet...
*/
if (tempPage == (Page) NULL)
@@ -964,8 +966,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
new_pages, num_tuples, tups_vacuumed,
nkeep, vacrelstats->num_vtlinks, ncrash,
- nunused, (unsigned long)min_tlen, (unsigned long)max_tlen,
- (unsigned long)free_size, (unsigned long)usable_free_size,
+ nunused, (unsigned long) min_tlen, (unsigned long) max_tlen,
+ (unsigned long) free_size, (unsigned long) usable_free_size,
empty_end_pages, fraged_pages->num_pages,
show_rusage(&ru0));
@@ -984,8 +986,8 @@ Re-using: Free/Avail. Space %lu/%lu; EndEmpty/Avail. Pages %u/%u. %s",
*/
static void
repair_frag(VRelStats *vacrelstats, Relation onerel,
- VacPageList vacuum_pages, VacPageList fraged_pages,
- int nindices, Relation *Irel)
+ VacPageList vacuum_pages, VacPageList fraged_pages,
+ int nindices, Relation *Irel)
{
TransactionId myXID;
CommandId myCID;
@@ -1077,7 +1079,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
dowrite = false;
if (blkno == last_vacuum_block) /* it's reaped page */
{
- if (last_vacuum_page->offsets_free > 0) /* there are dead tuples */
+ if (last_vacuum_page->offsets_free > 0) /* there are dead tuples */
{ /* on this page - clean */
Assert(!isempty);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1100,7 +1102,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_vacuum_block = -1;
}
if (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno ==
+ fraged_pages->pagedesc[num_fraged_pages - 1]->blkno ==
(BlockNumber) blkno)
{
/* page is in fraged_pages too; remove it */
@@ -1142,8 +1144,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* If this (chain) tuple is moved by me already then I
- * have to check is it in vacpage or not - i.e. is it moved
- * while cleaning this page or some previous one.
+ * have to check is it in vacpage or not - i.e. is it
+ * moved while cleaning this page or some previous one.
*/
if (tuple.t_data->t_infomask & HEAP_MOVED_OFF)
{
@@ -1232,8 +1234,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be
* removed and seems that this should be handled
- * in scan_heap(), but it's not implemented at
- * the moment and so we just stop shrinking here.
+ * in scan_heap(), but it's not implemented at the
+ * moment and so we just stop shrinking here.
*/
ReleaseBuffer(Cbuf);
pfree(vtmove);
@@ -1256,15 +1258,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
/*
- * if to_vacpage no longer has enough free space to be
- * useful, remove it from fraged_pages list
+ * if to_vacpage no longer has enough free space
+ * to be useful, remove it from fraged_pages list
*/
if (to_vacpage != NULL &&
- !enough_space(to_vacpage, vacrelstats->min_tlen))
+ !enough_space(to_vacpage, vacrelstats->min_tlen))
{
Assert(num_fraged_pages > to_item);
memmove(fraged_pages->pagedesc + to_item,
- fraged_pages->pagedesc + to_item + 1,
+ fraged_pages->pagedesc + to_item + 1,
sizeof(VacPage) * (num_fraged_pages - to_item - 1));
num_fraged_pages--;
}
@@ -1326,10 +1328,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vtld.new_tid = tp.t_self;
vtlp = (VTupleLink)
vac_find_eq((void *) (vacrelstats->vtlinks),
- vacrelstats->num_vtlinks,
- sizeof(VTupleLinkData),
- (void *) &vtld,
- vac_cmp_vtlinks);
+ vacrelstats->num_vtlinks,
+ sizeof(VTupleLinkData),
+ (void *) &vtld,
+ vac_cmp_vtlinks);
if (vtlp == NULL)
elog(ERROR, "Parent tuple was not found");
tp.t_self = vtlp->this_tid;
@@ -1416,7 +1418,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++)
{
- VacPage destvacpage = vtmove[ti].vacpage;
+ VacPage destvacpage = vtmove[ti].vacpage;
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
@@ -1460,21 +1462,22 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
*
* NOTE: a nasty bug used to lurk here. It is possible
* for the source and destination pages to be the same
- * (since this tuple-chain member can be on a page lower
- * than the one we're currently processing in the outer
- * loop). If that's true, then after vacuum_page() the
- * source tuple will have been moved, and tuple.t_data
- * will be pointing at garbage. Therefore we must do
- * everything that uses tuple.t_data BEFORE this step!!
+ * (since this tuple-chain member can be on a page
+ * lower than the one we're currently processing in
+ * the outer loop). If that's true, then after
+ * vacuum_page() the source tuple will have been
+ * moved, and tuple.t_data will be pointing at
+ * garbage. Therefore we must do everything that uses
+ * tuple.t_data BEFORE this step!!
*
* This path is different from the other callers of
- * vacuum_page, because we have already incremented the
- * vacpage's offsets_used field to account for the
+ * vacuum_page, because we have already incremented
+ * the vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is
- * wrong. But since that's a good debugging check for
- * all other callers, we work around it here rather
- * than remove it.
+ * vacuum_page's check for offsets_used == 0 is wrong.
+ * But since that's a good debugging check for all
+ * other callers, we work around it here rather than
+ * remove it.
*/
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{
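[Editor's illustration, not part of the patch] The rewrapped comment above preserves a warning worth spelling out: once the destination page is compacted by vacuum_page(), a raw pointer into it (tuple.t_data) may point at garbage, so everything that reads the tuple must happen before the compaction step. A self-contained toy sketch of that ordering hazard (the "page" here is just a byte buffer, not a PostgreSQL page) is:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 64

/* toy "page": live bytes are letters, dead bytes are '.' */
static void compact(char *page)
{
    char tmp[PAGE_SIZE];
    int  n = 0;

    for (int i = 0; i < PAGE_SIZE && page[i]; i++)
        if (page[i] != '.')
            tmp[n++] = page[i];
    memset(page, 0, PAGE_SIZE);
    memcpy(page, tmp, n);           /* live data moves toward the front */
}

int main(void)
{
    char  page[PAGE_SIZE] = "..AB...CD.";
    char *tuple = &page[7];         /* points at "CD" before compaction */
    char  saved[3];

    /* correct order: use (copy) the tuple BEFORE compacting the page */
    memcpy(saved, tuple, 2);
    saved[2] = '\0';
    compact(page);

    /* 'tuple' may now point at stale bytes; 'saved' is still valid */
    printf("saved copy: %s, page after compaction: %s\n", saved, page);
    return 0;
}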
@@ -1498,7 +1501,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (newoff == InvalidOffsetNumber)
{
elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
- (unsigned long)tuple_len, destvacpage->blkno);
+ (unsigned long) tuple_len, destvacpage->blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
@@ -1507,9 +1510,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSet(&(newtup.t_self), destvacpage->blkno, newoff);
{
- XLogRecPtr recptr =
- log_heap_move(onerel, Cbuf, tuple.t_self,
- cur_buffer, &newtup);
+ XLogRecPtr recptr =
+ log_heap_move(onerel, Cbuf, tuple.t_self,
+ cur_buffer, &newtup);
if (Cbuf != cur_buffer)
{
@@ -1526,7 +1529,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain otherwise.
+ * tuple in chain, and to next tuple in chain
+ * otherwise.
*/
if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self;
@@ -1552,13 +1556,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (Irel != (Relation *) NULL)
{
+
/*
* XXX using CurrentMemoryContext here means
- * intra-vacuum memory leak for functional indexes.
- * Should fix someday.
+ * intra-vacuum memory leak for functional
+ * indexes. Should fix someday.
*
* XXX This code fails to handle partial indexes!
- * Probably should change it to use ExecOpenIndices.
+ * Probably should change it to use
+ * ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@@ -1653,8 +1659,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
{
elog(STOP, "\
failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
- (unsigned long)tuple_len, cur_page->blkno, (unsigned long)cur_page->free,
- cur_page->offsets_used, cur_page->offsets_free);
+ (unsigned long) tuple_len, cur_page->blkno, (unsigned long) cur_page->free,
+ cur_page->offsets_used, cur_page->offsets_free);
}
newitemid = PageGetItemId(ToPage, newoff);
pfree(newtup.t_data);
@@ -1673,9 +1679,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
{
- XLogRecPtr recptr =
- log_heap_move(onerel, buf, tuple.t_self,
- cur_buffer, &newtup);
+ XLogRecPtr recptr =
+ log_heap_move(onerel, buf, tuple.t_self,
+ cur_buffer, &newtup);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
@@ -1698,13 +1704,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* insert index' tuples if needed */
if (Irel != (Relation *) NULL)
{
+
/*
- * XXX using CurrentMemoryContext here means
- * intra-vacuum memory leak for functional indexes.
- * Should fix someday.
+ * XXX using CurrentMemoryContext here means intra-vacuum
+ * memory leak for functional indexes. Should fix someday.
*
- * XXX This code fails to handle partial indexes!
- * Probably should change it to use ExecOpenIndices.
+ * XXX This code fails to handle partial indexes! Probably
+ * should change it to use ExecOpenIndices.
*/
for (i = 0; i < nindices; i++)
{
@@ -1803,14 +1809,15 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (num_moved > 0)
{
+
/*
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indices,
- * etc. For now, a quick hack: record status of current transaction
- * as committed, and continue.
+ * etc. For now, a quick hack: record status of current
+ * transaction as committed, and continue.
*/
RecordTransactionCommit();
}
@@ -1873,7 +1880,7 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
nblocks, blkno, num_moved,
show_rusage(&ru0));
- /*
+ /*
* Reflect the motion of system tuples to catalog cache here.
*/
CommandCounterIncrement();
@@ -1883,13 +1890,13 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/* vacuum indices again if needed */
if (Irel != (Relation *) NULL)
{
- VacPage *vpleft,
+ VacPage *vpleft,
*vpright,
vpsave;
/* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
+ vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft;
@@ -1906,9 +1913,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
if (vacpage->blkno == (BlockNumber) (blkno - 1) &&
vacpage->offsets_free > 0)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- int uncnt;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ int uncnt;
buf = ReadBuffer(onerel, vacpage->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
@@ -1943,8 +1950,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buf, (char*)unused,
- (char*)(&(unused[uncnt])) - (char*)unused);
+
+ recptr = log_heap_clean(onerel, buf, (char *) unused,
+ (char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@@ -1962,9 +1970,9 @@ failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)"
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all tuples
- * have correct on-row commit status on disk (see bufmgr.c's comments
- * for FlushRelationBuffers()).
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
i = FlushRelationBuffers(onerel, blkno);
if (i < 0)
@@ -2005,8 +2013,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
int i;
nblocks = vacuum_pages->num_pages;
- nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with
- * them */
+ nblocks -= vacuum_pages->empty_end_pages; /* nothing to do with them */
for (i = 0, vacpage = vacuum_pages->pagedesc; i < nblocks; i++, vacpage++)
{
@@ -2022,9 +2029,9 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
- * we don't need to truncate, because we want to ensure that all tuples
- * have correct on-row commit status on disk (see bufmgr.c's comments
- * for FlushRelationBuffers()).
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
Assert(vacrelstats->num_pages >= vacuum_pages->empty_end_pages);
nblocks = vacrelstats->num_pages - vacuum_pages->empty_end_pages;
@@ -2042,7 +2049,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
vacrelstats->num_pages, nblocks);
nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
Assert(nblocks >= 0);
- vacrelstats->num_pages = nblocks; /* set new number of blocks */
+ vacrelstats->num_pages = nblocks; /* set new number of
+ * blocks */
}
}
@@ -2053,12 +2061,12 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
static void
vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
{
- char unbuf[BLCKSZ];
- OffsetNumber *unused = (OffsetNumber*)unbuf;
- int uncnt;
- Page page = BufferGetPage(buffer);
- ItemId itemid;
- int i;
+ char unbuf[BLCKSZ];
+ OffsetNumber *unused = (OffsetNumber *) unbuf;
+ int uncnt;
+ Page page = BufferGetPage(buffer);
+ ItemId itemid;
+ int i;
/* There shouldn't be any tuples moved onto the page yet! */
Assert(vacpage->offsets_used == 0);
@@ -2072,8 +2080,9 @@ vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage)
uncnt = PageRepairFragmentation(page, unused);
{
XLogRecPtr recptr;
- recptr = log_heap_clean(onerel, buffer, (char*)unused,
- (char*)(&(unused[uncnt])) - (char*)unused);
+
+ recptr = log_heap_clean(onerel, buffer, (char *) unused,
+ (char *) (&(unused[uncnt])) - (char *) unused);
PageSetLSN(page, recptr);
PageSetSUI(page, ThisStartUpID);
}
@@ -2220,8 +2229,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
vp = &vacpage;
vpp = (VacPage *) vac_find_eq((void *) (vacpagelist->pagedesc),
- vacpagelist->num_pages, sizeof(VacPage), (void *) &vp,
- vac_cmp_blk);
+ vacpagelist->num_pages, sizeof(VacPage), (void *) &vp,
+ vac_cmp_blk);
if (vpp == (VacPage *) NULL)
return (VacPage) NULL;
@@ -2235,8 +2244,8 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
}
voff = (OffsetNumber *) vac_find_eq((void *) (vp->offsets),
- vp->offsets_free, sizeof(OffsetNumber), (void *) &ioffno,
- vac_cmp_offno);
+ vp->offsets_free, sizeof(OffsetNumber), (void *) &ioffno,
+ vac_cmp_offno);
if (voff == (OffsetNumber *) NULL)
return (VacPage) NULL;
@@ -2265,7 +2274,7 @@ tid_reaped(ItemPointer itemptr, VacPageList vacpagelist)
*/
static void
update_relstats(Oid relid, int num_pages, int num_tuples, bool hasindex,
- VRelStats *vacrelstats)
+ VRelStats *vacrelstats)
{
Relation rd;
HeapTupleData rtup;
@@ -2313,7 +2322,7 @@ update_relstats(Oid relid, int num_pages, int num_tuples, bool hasindex,
static void
reap_page(VacPageList vacpagelist, VacPage vacpage)
{
- VacPage newvacpage;
+ VacPage newvacpage;
/* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) + vacpage->offsets_free * sizeof(OffsetNumber));
@@ -2354,7 +2363,7 @@ vpage_insert(VacPageList vacpagelist, VacPage vpnew)
static void *
vac_find_eq(void *bot, int nelem, int size, void *elm,
- int (*compar) (const void *, const void *))
+ int (*compar) (const void *, const void *))
{
int res;
int last = nelem - 1;
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 6f07bff095d..cc5f64f41a0 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.45 2001/01/24 19:42:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.46 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -453,6 +453,7 @@ parse_DefaultXactIsoLevel(char *value)
{
#if 0
TransactionState s = CurrentTransactionState;
+
#endif
if (value == NULL)
@@ -632,7 +633,7 @@ parse_client_encoding(char *value)
}
#else
if (value &&
- strcasecmp(value, pg_encoding_to_char(pg_get_client_encoding())) != 0)
+ strcasecmp(value, pg_encoding_to_char(pg_get_client_encoding())) != 0)
elog(ERROR, "Client encoding %s is not supported", value);
#endif
return TRUE;
@@ -701,28 +702,27 @@ reset_server_encoding(void)
void
SetPGVariable(const char *name, const char *value)
{
- char *mvalue = value ? pstrdup(value) : ((char*) NULL);
-
- /*
- * Special cases ought to be removed and handled separately
- * by TCOP
- */
- if (strcasecmp(name, "datestyle")==0)
- parse_date(mvalue);
- else if (strcasecmp(name, "timezone")==0)
- parse_timezone(mvalue);
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- parse_DefaultXactIsoLevel(mvalue);
- else if (strcasecmp(name, "XactIsoLevel")==0)
- parse_XactIsoLevel(mvalue);
- else if (strcasecmp(name, "client_encoding")==0)
- parse_client_encoding(mvalue);
- else if (strcasecmp(name, "server_encoding")==0)
- parse_server_encoding(mvalue);
- else if (strcasecmp(name, "random_seed")==0)
- parse_random_seed(mvalue);
- else
- SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
+ char *mvalue = value ? pstrdup(value) : ((char *) NULL);
+
+ /*
+ * Special cases ought to be removed and handled separately by TCOP
+ */
+ if (strcasecmp(name, "datestyle") == 0)
+ parse_date(mvalue);
+ else if (strcasecmp(name, "timezone") == 0)
+ parse_timezone(mvalue);
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ parse_DefaultXactIsoLevel(mvalue);
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ parse_XactIsoLevel(mvalue);
+ else if (strcasecmp(name, "client_encoding") == 0)
+ parse_client_encoding(mvalue);
+ else if (strcasecmp(name, "server_encoding") == 0)
+ parse_server_encoding(mvalue);
+ else if (strcasecmp(name, "random_seed") == 0)
+ parse_random_seed(mvalue);
+ else
+ SetConfigOption(name, value, superuser() ? PGC_SUSET : PGC_USERSET);
if (mvalue)
pfree(mvalue);
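[Editor's illustration, not part of the patch] The reindented SetPGVariable above still dispatches through a chain of strcasecmp tests with SetConfigOption as the fallback, and its comment says the special cases ought to be handled elsewhere. A hypothetical table-driven variant of the same case-insensitive dispatch, with invented handler names, could be sketched as:

#include <stdio.h>
#include <string.h>
#include <strings.h>            /* strcasecmp (POSIX) */

typedef void (*var_handler)(const char *value);

static void set_datestyle(const char *v) { printf("datestyle := %s\n", v); }
static void set_timezone(const char *v)  { printf("timezone := %s\n", v); }
static void set_generic(const char *name, const char *v)
{
    printf("generic option %s := %s\n", name, v);
}

static const struct { const char *name; var_handler fn; } special[] = {
    { "datestyle", set_datestyle },
    { "timezone",  set_timezone  },
};

static void set_variable(const char *name, const char *value)
{
    for (size_t i = 0; i < sizeof(special) / sizeof(special[0]); i++)
        if (strcasecmp(name, special[i].name) == 0)
        {
            special[i].fn(value);
            return;
        }
    set_generic(name, value);   /* everything else goes through one path */
}

int main(void)
{
    set_variable("DateStyle", "ISO");
    set_variable("random_page_cost", "2.0");
    return 0;
}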
@@ -732,44 +732,45 @@ SetPGVariable(const char *name, const char *value)
void
GetPGVariable(const char *name)
{
- if (strcasecmp(name, "datestyle")==0)
- show_date();
- else if (strcasecmp(name, "timezone")==0)
- show_timezone();
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- show_DefaultXactIsoLevel();
- else if (strcasecmp(name, "XactIsoLevel")==0)
- show_XactIsoLevel();
- else if (strcasecmp(name, "client_encoding")==0)
- show_client_encoding();
- else if (strcasecmp(name, "server_encoding")==0)
- show_server_encoding();
- else if (strcasecmp(name, "random_seed")==0)
- show_random_seed();
- else
- {
- const char * val = GetConfigOption(name);
- elog(NOTICE, "%s is %s", name, val);
- }
-}
+ if (strcasecmp(name, "datestyle") == 0)
+ show_date();
+ else if (strcasecmp(name, "timezone") == 0)
+ show_timezone();
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ show_DefaultXactIsoLevel();
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ show_XactIsoLevel();
+ else if (strcasecmp(name, "client_encoding") == 0)
+ show_client_encoding();
+ else if (strcasecmp(name, "server_encoding") == 0)
+ show_server_encoding();
+ else if (strcasecmp(name, "random_seed") == 0)
+ show_random_seed();
+ else
+ {
+ const char *val = GetConfigOption(name);
+
+ elog(NOTICE, "%s is %s", name, val);
+ }
+}
void
ResetPGVariable(const char *name)
{
- if (strcasecmp(name, "datestyle")==0)
- reset_date();
- else if (strcasecmp(name, "timezone")==0)
- reset_timezone();
- else if (strcasecmp(name, "DefaultXactIsoLevel")==0)
- reset_DefaultXactIsoLevel();
- else if (strcasecmp(name, "XactIsoLevel")==0)
- reset_XactIsoLevel();
- else if (strcasecmp(name, "client_encoding")==0)
- reset_client_encoding();
- else if (strcasecmp(name, "server_encoding")==0)
- reset_server_encoding();
- else if (strcasecmp(name, "random_seed")==0)
- reset_random_seed();
- else
- SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
-}
+ if (strcasecmp(name, "datestyle") == 0)
+ reset_date();
+ else if (strcasecmp(name, "timezone") == 0)
+ reset_timezone();
+ else if (strcasecmp(name, "DefaultXactIsoLevel") == 0)
+ reset_DefaultXactIsoLevel();
+ else if (strcasecmp(name, "XactIsoLevel") == 0)
+ reset_XactIsoLevel();
+ else if (strcasecmp(name, "client_encoding") == 0)
+ reset_client_encoding();
+ else if (strcasecmp(name, "server_encoding") == 0)
+ reset_server_encoding();
+ else if (strcasecmp(name, "random_seed") == 0)
+ reset_random_seed();
+ else
+ SetConfigOption(name, NULL, superuser() ? PGC_SUSET : PGC_USERSET);
+}
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 99481d4d54b..320f2c08e92 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: view.c,v 1.53 2001/01/24 19:42:53 momjian Exp $
+ * $Id: view.c,v 1.54 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,7 +57,7 @@ DefineVirtualRelation(char *relname, List *tlist)
TargetEntry *entry = lfirst(t);
Resdom *res = entry->resdom;
- if (! res->resjunk)
+ if (!res->resjunk)
{
char *resname = res->resname;
char *restypename = typeidTypeName(res->restype);
@@ -118,9 +118,9 @@ MakeRetrieveViewRuleName(char *viewName)
snprintf(buf, buflen, "_RET%s", viewName);
/* clip to less than NAMEDATALEN bytes, if necessary */
#ifdef MULTIBYTE
- maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN-1);
+ maxlen = pg_mbcliplen(buf, strlen(buf), NAMEDATALEN - 1);
#else
- maxlen = NAMEDATALEN-1;
+ maxlen = NAMEDATALEN - 1;
#endif
if (maxlen < buflen)
buf[maxlen] = '\0';
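[Editor's illustration, not part of the patch] The hunk above builds the view rule name as "_RET<viewname>" and clips it to at most NAMEDATALEN-1 bytes, using pg_mbcliplen when MULTIBYTE is defined so a multibyte character is never cut in half. A minimal single-byte sketch of that construction, with an assumed NAMEDATALEN and without the multibyte-aware clip, is:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAMEDATALEN 32          /* assumed era-typical value; illustrative */

static char *make_view_rule_name(const char *view_name)
{
    size_t buflen = strlen(view_name) + 5;      /* "_RET" plus NUL */
    char  *buf = malloc(buflen);

    if (buf == NULL)
        return NULL;
    snprintf(buf, buflen, "_RET%s", view_name);

    /* clip to at most NAMEDATALEN-1 bytes (single-byte encodings only) */
    if (strlen(buf) > NAMEDATALEN - 1)
        buf[NAMEDATALEN - 1] = '\0';
    return buf;
}

int main(void)
{
    char *rule = make_view_rule_name("a_rather_long_view_name_indeed_xyz");

    if (rule != NULL)
    {
        printf("%s (%zu bytes)\n", rule, strlen(rule));
        free(rule);
    }
    return 0;
}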
@@ -211,12 +211,12 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
*rt_entry2;
/*
- * Make a copy of the given parsetree. It's not so much that we
- * don't want to scribble on our input, it's that the parser has
- * a bad habit of outputting multiple links to the same subtree
- * for constructs like BETWEEN, and we mustn't have OffsetVarNodes
- * increment the varno of a Var node twice. copyObject will expand
- * any multiply-referenced subtree into multiple copies.
+ * Make a copy of the given parsetree. It's not so much that we don't
+ * want to scribble on our input, it's that the parser has a bad habit
+ * of outputting multiple links to the same subtree for constructs
+ * like BETWEEN, and we mustn't have OffsetVarNodes increment the
+ * varno of a Var node twice. copyObject will expand any
+ * multiply-referenced subtree into multiple copies.
*/
viewParse = (Query *) copyObject(viewParse);
@@ -261,6 +261,7 @@ UpdateRangeTableOfViewParse(char *viewName, Query *viewParse)
void
DefineView(char *viewName, Query *viewParse)
{
+
/*
* Create the "view" relation NOTE: if it already exists, the xact
* will be aborted.
@@ -295,9 +296,10 @@ DefineView(char *viewName, Query *viewParse)
void
RemoveView(char *viewName)
{
+
/*
- * We just have to drop the relation; the associated rules will
- * be cleaned up automatically.
+ * We just have to drop the relation; the associated rules will be
+ * cleaned up automatically.
*/
heap_drop_with_catalog(viewName, allowSystemTableMods);
}
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 0888e2638b2..a9c5bd40372 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: execAmi.c,v 1.56 2001/01/24 19:42:53 momjian Exp $
+ * $Id: execAmi.c,v 1.57 2001/03/22 03:59:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
* ExecInsert \ executor interface / aminsert
* ExecReScanR / to access methods \ amrescan
* ExecMarkPos / \ ammarkpos
- * ExecRestrPos / \ amrestpos
+ * ExecRestrPos / \ amrestpos
*/
#include "postgres.h"
@@ -91,7 +91,7 @@ ExecOpenScanR(Oid relOid,
* on whether this is a heap relation or an index relation.
*
* For a table, acquire AccessShareLock for the duration of the query
- * execution. For indexes, acquire no lock here; the index machinery
+ * execution. For indexes, acquire no lock here; the index machinery
* does its own locks and unlocks. (We rely on having some kind of
* lock on the parent table to ensure the index won't go away!)
* ----------------
@@ -413,7 +413,7 @@ ExecMarkPos(Plan *node)
{
switch (nodeTag(node))
{
- case T_SeqScan:
+ case T_SeqScan:
ExecSeqMarkPos((SeqScan *) node);
break;
@@ -455,7 +455,7 @@ ExecRestrPos(Plan *node)
{
switch (nodeTag(node))
{
- case T_SeqScan:
+ case T_SeqScan:
ExecSeqRestrPos((SeqScan *) node);
break;
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index f23ba273462..d288a8de735 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.25 2001/01/29 00:39:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.26 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -265,6 +265,7 @@ ExecInitJunkFilter(List *targetList, TupleDesc tupType)
void
ExecFreeJunkFilter(JunkFilter *junkfilter)
{
+
/*
* Since the junkfilter is inside its own context, we just have to
* delete the context and we're set.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 929134209ba..fc1dccd0467 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -27,7 +27,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.138 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.139 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,15 +51,15 @@ static TupleDesc InitPlan(CmdType operation,
Plan *plan,
EState *estate);
static void initResultRelInfo(ResultRelInfo *resultRelInfo,
- Index resultRelationIndex,
- List *rangeTable,
- CmdType operation);
+ Index resultRelationIndex,
+ List *rangeTable,
+ CmdType operation);
static void EndPlan(Plan *plan, EState *estate);
static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan,
- CmdType operation,
- long numberTuples,
- ScanDirection direction,
- DestReceiver *destfunc);
+ CmdType operation,
+ long numberTuples,
+ ScanDirection direction,
+ DestReceiver *destfunc);
static void ExecRetrieve(TupleTableSlot *slot,
DestReceiver *destfunc,
EState *estate);
@@ -72,9 +72,9 @@ static void ExecReplace(TupleTableSlot *slot, ItemPointer tupleid,
static TupleTableSlot *EvalPlanQualNext(EState *estate);
static void EndEvalPlanQual(EState *estate);
static void ExecCheckQueryPerms(CmdType operation, Query *parseTree,
- Plan *plan);
+ Plan *plan);
static void ExecCheckPlanPerms(Plan *plan, List *rangeTable,
- CmdType operation);
+ CmdType operation);
static void ExecCheckRTPerms(List *rangeTable, CmdType operation);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
@@ -91,7 +91,7 @@ static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
* be returned by the query.
*
* NB: the CurrentMemoryContext when this is called must be the context
- * to be used as the per-query context for the query plan. ExecutorRun()
+ * to be used as the per-query context for the query plan. ExecutorRun()
* and ExecutorEnd() must be called in this same memory context.
* ----------------------------------------------------------------
*/
@@ -287,6 +287,7 @@ ExecutorEnd(QueryDesc *queryDesc, EState *estate)
static void
ExecCheckQueryPerms(CmdType operation, Query *parseTree, Plan *plan)
{
+
/*
* Check RTEs in the query's primary rangetable.
*/
@@ -339,7 +340,7 @@ ExecCheckPlanPerms(Plan *plan, List *rangeTable, CmdType operation)
{
case T_SubqueryScan:
{
- SubqueryScan *scan = (SubqueryScan *) plan;
+ SubqueryScan *scan = (SubqueryScan *) plan;
RangeTblEntry *rte;
/* Recursively check the subquery */
@@ -405,12 +406,13 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
relName = rte->relname;
/*
- * userid to check as: current user unless we have a setuid indication.
+ * userid to check as: current user unless we have a setuid
+ * indication.
*
- * Note: GetUserId() is presently fast enough that there's no harm
- * in calling it separately for each RTE. If that stops being true,
- * we could call it once in ExecCheckQueryPerms and pass the userid
- * down from there. But for now, no need for the extra clutter.
+ * Note: GetUserId() is presently fast enough that there's no harm in
+ * calling it separately for each RTE. If that stops being true, we
+ * could call it once in ExecCheckQueryPerms and pass the userid down
+ * from there. But for now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@@ -426,6 +428,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
if (rte->checkForWrite)
{
+
/*
* Note: write access in a SELECT context means SELECT FOR UPDATE.
* Right now we don't distinguish that from true update as far as
@@ -519,6 +522,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (resultRelations != NIL)
{
+
/*
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
@@ -541,8 +545,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
+
/*
- * Single result relation identified by parseTree->resultRelation
+ * Single result relation identified by
+ * parseTree->resultRelation
*/
numResultRelations = 1;
resultRelInfos = (ResultRelInfo *) palloc(sizeof(ResultRelInfo));
@@ -559,6 +565,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
}
else
{
+
/*
* if no result relation, then set state appropriately
*/
@@ -616,10 +623,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
tupType = ExecGetTupType(plan); /* tuple descriptor */
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries need
- * a filter if there are any junk attrs in the tlist. UPDATE and
- * DELETE always need one, since there's always a junk 'ctid' attribute
- * present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries
+ * need a filter if there are any junk attrs in the tlist. UPDATE and
+ * DELETE always need one, since there's always a junk 'ctid'
+ * attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -650,11 +657,12 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
if (junk_filter_needed)
{
+
/*
- * If there are multiple result relations, each one needs
- * its own junk filter. Note this is only possible for
- * UPDATE/DELETE, so we can't be fooled by some needing
- * a filter and some not.
+ * If there are multiple result relations, each one needs its
+ * own junk filter. Note this is only possible for
+ * UPDATE/DELETE, so we can't be fooled by some needing a
+ * filter and some not.
*/
if (parseTree->resultRelations != NIL)
{
@@ -678,6 +686,7 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
resultRelInfo++;
subplans = lnext(subplans);
}
+
/*
* Set active junkfilter too; at this point ExecInitAppend
* has already selected an active result relation...
@@ -750,10 +759,10 @@ InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate)
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into relation.
- * Note that AlterTableCreateToastTable ends with
- * CommandCounterIncrement(), so that the TOAST table will
- * be visible for insertion.
+ * If necessary, create a TOAST table for the into
+ * relation. Note that AlterTableCreateToastTable ends
+ * with CommandCounterIncrement(), so that the TOAST table
+ * will be visible for insertion.
*/
AlterTableCreateToastTable(intoName, true);
@@ -817,9 +826,8 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
/*
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
- * index entries for the tuples we add/update. We need not do
- * this for a DELETE, however, since deletion doesn't affect
- * indexes.
+ * index entries for the tuples we add/update. We need not do this
+ * for a DELETE, however, since deletion doesn't affect indexes.
*/
if (resultRelationDesc->rd_rel->relhasindex &&
operation != CMD_DELETE)
@@ -857,8 +865,8 @@ EndPlan(Plan *plan, EState *estate)
estate->es_tupleTable = NULL;
/*
- * close the result relation(s) if any, but hold locks
- * until xact commit. Also clean up junkfilters if present.
+ * close the result relation(s) if any, but hold locks until xact
+ * commit. Also clean up junkfilters if present.
*/
resultRelInfo = estate->es_result_relations;
for (i = estate->es_num_result_relations; i > 0; i--)
@@ -1033,7 +1041,7 @@ lnext: ;
/*
* Unlike the UPDATE/DELETE case, a null result is
* possible here, when the referenced table is on the
- * nullable side of an outer join. Ignore nulls.
+ * nullable side of an outer join. Ignore nulls.
*/
if (isNull)
continue;
@@ -1216,7 +1224,7 @@ ExecAppend(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelationDesc->trigdesc &&
- resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1227,11 +1235,12 @@ ExecAppend(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
+
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself.
- * The tuple table slot should not try to clear it.
+ * memory context, and therefore will go away by itself. The
+ * tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@@ -1294,7 +1303,7 @@ ExecDelete(TupleTableSlot *slot,
/* BEFORE ROW DELETE Triggers */
if (resultRelationDesc->trigdesc &&
- resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
+ resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_DELETE] > 0)
{
bool dodelete;
@@ -1323,7 +1332,7 @@ ldelete:;
else if (!(ItemPointerEquals(tupleid, &ctid)))
{
TupleTableSlot *epqslot = EvalPlanQual(estate,
- resultRelInfo->ri_RangeTableIndex, &ctid);
+ resultRelInfo->ri_RangeTableIndex, &ctid);
if (!TupIsNull(epqslot))
{
@@ -1400,7 +1409,7 @@ ExecReplace(TupleTableSlot *slot,
/* BEFORE ROW UPDATE Triggers */
if (resultRelationDesc->trigdesc &&
- resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
+ resultRelationDesc->trigdesc->n_before_row[TRIGGER_EVENT_UPDATE] > 0)
{
HeapTuple newtuple;
@@ -1411,11 +1420,12 @@ ExecReplace(TupleTableSlot *slot,
if (newtuple != tuple) /* modified by Trigger(s) */
{
+
/*
* Insert modified tuple into tuple table slot, replacing the
* original. We assume that it was allocated in per-tuple
- * memory context, and therefore will go away by itself.
- * The tuple table slot should not try to clear it.
+ * memory context, and therefore will go away by itself. The
+ * tuple table slot should not try to clear it.
*/
ExecStoreTuple(newtuple, slot, InvalidBuffer, false);
tuple = newtuple;
@@ -1447,7 +1457,7 @@ lreplace:;
else if (!(ItemPointerEquals(tupleid, &ctid)))
{
TupleTableSlot *epqslot = EvalPlanQual(estate,
- resultRelInfo->ri_RangeTableIndex, &ctid);
+ resultRelInfo->ri_RangeTableIndex, &ctid);
if (!TupIsNull(epqslot))
{
@@ -1469,10 +1479,10 @@ lreplace:;
/*
* Note: instead of having to update the old index tuples associated
- * with the heap tuple, all we do is form and insert new index
- * tuples. This is because replaces are actually deletes and inserts
- * and index tuple deletion is done automagically by the vacuum
- * daemon. All we do is insert new index tuples. -cim 9/27/89
+ * with the heap tuple, all we do is form and insert new index tuples.
+ * This is because replaces are actually deletes and inserts and index
+ * tuple deletion is done automagically by the vacuum daemon. All we
+ * do is insert new index tuples. -cim 9/27/89
*/
/*
@@ -1525,8 +1535,8 @@ ExecRelCheck(ResultRelInfo *resultRelInfo,
}
/*
- * We will use the EState's per-tuple context for evaluating constraint
- * expressions (creating it if it's not already there).
+ * We will use the EState's per-tuple context for evaluating
+ * constraint expressions (creating it if it's not already there).
*/
econtext = GetPerTupleExprContext(estate);
@@ -1568,10 +1578,10 @@ ExecConstraints(char *caller, ResultRelInfo *resultRelInfo,
for (attrChk = 1; attrChk <= natts; attrChk++)
{
- if (rel->rd_att->attrs[attrChk-1]->attnotnull &&
+ if (rel->rd_att->attrs[attrChk - 1]->attnotnull &&
heap_attisnull(tuple, attrChk))
elog(ERROR, "%s: Fail to add null value in not null attribute %s",
- caller, NameStr(rel->rd_att->attrs[attrChk-1]->attname));
+ caller, NameStr(rel->rd_att->attrs[attrChk - 1]->attname));
}
}
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index bab2851df9d..79873073b7a 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.83 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.84 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,22 +46,22 @@
/* static function decls */
static Datum ExecEvalAggref(Aggref *aggref, ExprContext *econtext,
- bool *isNull);
+ bool *isNull);
static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalOper(Expr *opClause, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFunc(Expr *funcClause, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static ExprDoneCond ExecEvalFuncArgs(FunctionCachePtr fcache,
- List *argList,
- ExprContext *econtext);
+ List *argList,
+ ExprContext *econtext);
static Datum ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull);
static Datum ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull);
static Datum ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull);
static Datum ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
/*----------
@@ -77,7 +77,7 @@ static Datum ExecEvalCase(CaseExpr *caseExpr, ExprContext *econtext,
* done in versions up through 7.0) then an assignment like
* UPDATE table SET arrayfield[4] = NULL
* will result in setting the whole array to NULL, which is certainly not
- * very desirable. By returning the source array we make the assignment
+ * very desirable. By returning the source array we make the assignment
* into a no-op, instead. (Eventually we need to redesign arrays so that
* individual elements can be NULL, but for now, let's try to protect users
* from shooting themselves in the foot.)
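[Editor's illustration, not part of the patch] The comment being rewrapped here explains that assigning NULL to one array element is turned into a no-op that returns the source array, rather than nulling the whole array. A standalone sketch of that behaviour, with plain C ints standing in for datums and invented names, is:

#include <stdio.h>

/* returns the array to use as the assignment result */
static int *assign_element(int *array, int nelems, int idx, const int *newval)
{
    if (newval == NULL)
        return array;           /* no-op: keep the original array */
    if (idx >= 0 && idx < nelems)
        array[idx] = *newval;
    return array;
}

int main(void)
{
    int arr[4] = {10, 20, 30, 40};
    int v = 99;

    assign_element(arr, 4, 2, &v);      /* normal assignment: arr[2] = 99 */
    assign_element(arr, 4, 3, NULL);    /* "SET arr[3] = NULL": unchanged */

    for (int i = 0; i < 4; i++)
        printf("%d ", arr[i]);
    printf("\n");
    return 0;
}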
@@ -112,10 +112,11 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
isDone));
+
/*
* If refexpr yields NULL, result is always NULL, for now anyway.
- * (This means you cannot assign to an element or slice of an array
- * that's NULL; it'll just stay NULL.)
+ * (This means you cannot assign to an element or slice of an
+ * array that's NULL; it'll just stay NULL.)
*/
if (*isNull)
return (Datum) NULL;
@@ -147,7 +148,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
/* If any index expr yields NULL, result is NULL or source array */
if (*isNull)
{
- if (! isAssignment || array_source == NULL)
+ if (!isAssignment || array_source == NULL)
return (Datum) NULL;
*isNull = false;
return PointerGetDatum(array_source);
@@ -166,10 +167,14 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL));
- /* If any index expr yields NULL, result is NULL or source array */
+
+ /*
+ * If any index expr yields NULL, result is NULL or source
+ * array
+ */
if (*isNull)
{
- if (! isAssignment || array_source == NULL)
+ if (!isAssignment || array_source == NULL)
return (Datum) NULL;
*isNull = false;
return PointerGetDatum(array_source);
@@ -189,9 +194,10 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
econtext,
isNull,
NULL);
+
/*
- * For now, can't cope with inserting NULL into an array,
- * so make it a no-op per discussion above...
+ * For now, can't cope with inserting NULL into an array, so make
+ * it a no-op per discussion above...
*/
if (*isNull)
{
@@ -202,7 +208,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
}
if (array_source == NULL)
- return sourceData; /* XXX do something else? */
+ return sourceData; /* XXX do something else? */
if (lIndex == NULL)
resultArray = array_set(array_source, i,
@@ -215,7 +221,7 @@ ExecEvalArrayRef(ArrayRef *arrayRef,
else
resultArray = array_set_slice(array_source, i,
upper.indx, lower.indx,
- (ArrayType *) DatumGetPointer(sourceData),
+ (ArrayType *) DatumGetPointer(sourceData),
arrayRef->refelembyval,
arrayRef->refelemlength,
arrayRef->refattrlength,
@@ -587,12 +593,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
int i;
List *arg;
- argIsDone = ExprSingleResult; /* default assumption */
+ argIsDone = ExprSingleResult; /* default assumption */
i = 0;
foreach(arg, argList)
{
- ExprDoneCond thisArgIsDone;
+ ExprDoneCond thisArgIsDone;
fcache->fcinfo.arg[i] = ExecEvalExpr((Node *) lfirst(arg),
econtext,
@@ -601,10 +607,12 @@ ExecEvalFuncArgs(FunctionCachePtr fcache,
if (thisArgIsDone != ExprSingleResult)
{
+
/*
* We allow only one argument to have a set value; we'd need
- * much more complexity to keep track of multiple set arguments
- * (cf. ExecTargetList) and it doesn't seem worth it.
+ * much more complexity to keep track of multiple set
+ * arguments (cf. ExecTargetList) and it doesn't seem worth
+ * it.
*/
if (argIsDone != ExprSingleResult)
elog(ERROR, "Functions and operators can take only one set argument");
@@ -632,15 +640,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
bool *isNull,
ExprDoneCond *isDone)
{
- Datum result;
- ExprDoneCond argDone;
- int i;
+ Datum result;
+ ExprDoneCond argDone;
+ int i;
/*
* arguments is a list of expressions to evaluate before passing to
* the function manager. We skip the evaluation if it was already
- * done in the previous call (ie, we are continuing the evaluation
- * of a set-valued function). Otherwise, collect the current argument
+ * done in the previous call (ie, we are continuing the evaluation of
+ * a set-valued function). Otherwise, collect the current argument
* values into fcache->fcinfo.
*/
if (fcache->fcinfo.nargs > 0 && !fcache->argsValid)
@@ -664,28 +672,30 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
*/
if (fcache->func.fn_retset || fcache->hasSetArg)
{
+
/*
- * We need to return a set result. Complain if caller not ready
+ * We need to return a set result. Complain if caller not ready
* to accept one.
*/
if (isDone == NULL)
elog(ERROR, "Set-valued function called in context that cannot accept a set");
/*
- * This loop handles the situation where we have both a set argument
- * and a set-valued function. Once we have exhausted the function's
- * value(s) for a particular argument value, we have to get the next
- * argument value and start the function over again. We might have
- * to do it more than once, if the function produces an empty result
- * set for a particular input value.
+ * This loop handles the situation where we have both a set
+ * argument and a set-valued function. Once we have exhausted the
+ * function's value(s) for a particular argument value, we have to
+ * get the next argument value and start the function over again.
+ * We might have to do it more than once, if the function produces
+ * an empty result set for a particular input value.
*/
for (;;)
{
+
/*
* If function is strict, and there are any NULL arguments,
* skip calling the function (at least for this set of args).
*/
- bool callit = true;
+ bool callit = true;
if (fcache->func.fn_strict)
{
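[Editor's illustration, not part of the patch] The rewrapped comment in this hunk describes the nested iteration ExecMakeFunctionResult performs when both the argument and the function are set-valued: the function is restarted for each argument value, and an empty result set for one argument value simply advances to the next. A self-contained toy sketch of that control flow, with invented generator functions, is:

#include <stdio.h>
#include <stdbool.h>

/* set-valued "argument": yields 1, 2, 3, then reports exhaustion */
static bool next_arg(int *arg)
{
    static int cur = 0;

    if (cur >= 3)
        return false;
    *arg = ++cur;
    return true;
}

/* set-returning "function": yields arg copies of arg, none for arg == 2 */
static bool next_result(int arg, int *out)
{
    static int emitted = 0;
    static int last_arg = -1;

    if (arg != last_arg)        /* restarted with a new argument value */
    {
        last_arg = arg;
        emitted = 0;
    }
    if (arg == 2 || emitted >= arg)     /* empty set, or exhausted */
        return false;
    emitted++;
    *out = arg;
    return true;
}

int main(void)
{
    int arg, out;

    while (next_arg(&arg))              /* outer loop: argument values */
    {
        while (next_result(arg, &out))  /* inner loop: function results */
            printf("result %d (from arg %d)\n", out, arg);
        /* an empty inner set (arg == 2) just falls through to the next arg */
    }
    return 0;
}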
@@ -716,13 +726,15 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
if (*isDone != ExprEndResult)
{
+
/*
- * Got a result from current argument. If function itself
- * returns set, flag that we want to reuse current argument
- * values on next call.
+ * Got a result from current argument. If function itself
+ * returns set, flag that we want to reuse current
+ * argument values on next call.
*/
if (fcache->func.fn_retset)
fcache->argsValid = true;
+
/*
* Make sure we say we are returning a set, even if the
* function itself doesn't return sets.
@@ -762,11 +774,12 @@ ExecMakeFunctionResult(FunctionCachePtr fcache,
}
else
{
+
/*
* Non-set case: much easier.
*
- * If function is strict, and there are any NULL arguments,
- * skip calling the function and return NULL.
+ * If function is strict, and there are any NULL arguments, skip
+ * calling the function and return NULL.
*/
if (fcache->func.fn_strict)
{
@@ -852,9 +865,9 @@ ExecEvalFunc(Expr *funcClause,
FunctionCachePtr fcache;
/*
- * we extract the oid of the function associated with the func node and
- * then pass the work onto ExecMakeFunctionResult which evaluates the
- * arguments and returns the result of calling the function on the
+ * we extract the oid of the function associated with the func node
+ * and then pass the work onto ExecMakeFunctionResult which evaluates
+ * the arguments and returns the result of calling the function on the
* evaluated arguments.
*
* this is nearly identical to the ExecEvalOper code.
@@ -915,7 +928,7 @@ ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull)
* evaluation of 'not' is simple.. expr is false, then return 'true'
* and vice versa.
*/
- return BoolGetDatum(! DatumGetBool(expr_value));
+ return BoolGetDatum(!DatumGetBool(expr_value));
}
/* ----------------------------------------------------------------
@@ -999,7 +1012,7 @@ ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull)
*/
if (*isNull)
AnyNull = true; /* remember we got a null */
- else if (! DatumGetBool(clause_value))
+ else if (!DatumGetBool(clause_value))
return clause_value;
}
@@ -1079,7 +1092,7 @@ ExecEvalFieldSelect(FieldSelect *fselect,
bool *isNull,
ExprDoneCond *isDone)
{
- Datum result;
+ Datum result;
TupleTableSlot *resSlot;
result = ExecEvalExpr(fselect->arg, econtext, isNull, isDone);
@@ -1111,7 +1124,7 @@ ExecEvalFieldSelect(FieldSelect *fselect,
*
* A caller that can only accept a singleton (non-set) result should pass
* NULL for isDone; if the expression computes a set result then an elog()
- * error will be reported. If the caller does pass an isDone pointer then
+ * error will be reported. If the caller does pass an isDone pointer then
* *isDone is set to one of these three states:
* ExprSingleResult singleton result (not a set)
* ExprMultipleResult return value is one element of a set
@@ -1127,7 +1140,7 @@ ExecEvalFieldSelect(FieldSelect *fselect,
* The caller should already have switched into the temporary memory
* context econtext->ecxt_per_tuple_memory. The convenience entry point
* ExecEvalExprSwitchContext() is provided for callers who don't prefer to
- * do the switch in an outer loop. We do not do the switch here because
+ * do the switch in an outer loop. We do not do the switch here because
* it'd be a waste of cycles during recursive entries to ExecEvalExpr().
*
* This routine is an inner loop routine and must be as fast as possible.
@@ -1353,15 +1366,15 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull)
{
if (resultForNull == false)
{
- result = false; /* treat NULL as FALSE */
+ result = false; /* treat NULL as FALSE */
break;
}
}
else
{
- if (! DatumGetBool(expr_value))
+ if (!DatumGetBool(expr_value))
{
- result = false; /* definitely FALSE */
+ result = false; /* definitely FALSE */
break;
}
}
@@ -1383,7 +1396,7 @@ ExecTargetListLength(List *targetlist)
foreach(tl, targetlist)
{
- TargetEntry *curTle = (TargetEntry *) lfirst(tl);
+ TargetEntry *curTle = (TargetEntry *) lfirst(tl);
if (curTle->resdom != NULL)
len++;
@@ -1404,17 +1417,15 @@ ExecCleanTargetListLength(List *targetlist)
foreach(tl, targetlist)
{
- TargetEntry *curTle = (TargetEntry *) lfirst(tl);
+ TargetEntry *curTle = (TargetEntry *) lfirst(tl);
if (curTle->resdom != NULL)
{
- if (! curTle->resdom->resjunk)
+ if (!curTle->resdom->resjunk)
len++;
}
else
- {
len += curTle->fjoin->fj_nNodes;
- }
}
return len;
}
@@ -1440,6 +1451,7 @@ ExecTargetList(List *targetlist,
ExprDoneCond *isDone)
{
MemoryContext oldContext;
+
#define NPREALLOCDOMAINS 64
char nullsArray[NPREALLOCDOMAINS];
bool fjIsNullArray[NPREALLOCDOMAINS];
@@ -1484,10 +1496,11 @@ ExecTargetList(List *targetlist,
* we have a really large targetlist. otherwise we use the stack.
*
* We also allocate a bool array that is used to hold fjoin result state,
- * and another array that holds the isDone status for each targetlist item.
- * The isDone status is needed so that we can iterate, generating multiple
- * tuples, when one or more tlist items return sets. (We expect the caller
- * to call us again if we return *isDone = ExprMultipleResult.)
+ * and another array that holds the isDone status for each targetlist
+ * item. The isDone status is needed so that we can iterate,
+ * generating multiple tuples, when one or more tlist items return
+ * sets. (We expect the caller to call us again if we return *isDone
+ * = ExprMultipleResult.)
*/
if (nodomains > NPREALLOCDOMAINS)
{
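[Editor's illustration, not part of the patch] The rewrapped comment above describes the allocation strategy in ExecTargetList: small per-column arrays live on the stack (NPREALLOCDOMAINS entries), and only unusually wide target lists pay for a heap allocation. A minimal sketch of that pattern, with illustrative sizes and names rather than PostgreSQL's, is:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NPREALLOC 64            /* stand-in for NPREALLOCDOMAINS */

static void process_domains(int ndomains)
{
    char  stack_nulls[NPREALLOC];
    char *nulls = stack_nulls;

    if (ndomains > NPREALLOC)   /* unusually wide target list: use the heap */
    {
        nulls = malloc(ndomains);
        if (nulls == NULL)
            return;
    }

    memset(nulls, ' ', ndomains);       /* ... per-column work goes here ... */
    printf("%d domains handled %s\n", ndomains,
           nulls == stack_nulls ? "on the stack" : "via malloc");

    if (nulls != stack_nulls)   /* free only if we actually allocated */
        free(nulls);
}

int main(void)
{
    process_domains(8);
    process_domains(200);
    return 0;
}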
@@ -1507,7 +1520,7 @@ ExecTargetList(List *targetlist,
*/
if (isDone)
- *isDone = ExprSingleResult; /* until proven otherwise */
+ *isDone = ExprSingleResult; /* until proven otherwise */
haveDoneSets = false; /* any exhausted set exprs in tlist? */
@@ -1554,8 +1567,10 @@ ExecTargetList(List *targetlist,
ExecEvalFjoin(tle, econtext, fjIsNull, isDone);
- /* XXX this is wrong, but since fjoin code is completely broken
- * anyway, I'm not going to worry about it now --- tgl 8/23/00
+ /*
+ * XXX this is wrong, but since fjoin code is completely
+ * broken anyway, I'm not going to worry about it now --- tgl
+ * 8/23/00
*/
if (isDone && *isDone == ExprEndResult)
{
@@ -1594,6 +1609,7 @@ ExecTargetList(List *targetlist,
if (haveDoneSets)
{
+
/*
* note: can't get here unless we verified isDone != NULL
*/
@@ -1601,7 +1617,8 @@ ExecTargetList(List *targetlist,
{
/*
- * all sets are done, so report that tlist expansion is complete.
+ * all sets are done, so report that tlist expansion is
+ * complete.
*/
*isDone = ExprEndResult;
MemoryContextSwitchTo(oldContext);
@@ -1612,7 +1629,7 @@ ExecTargetList(List *targetlist,
{
/*
- * We have some done and some undone sets. Restart the done
+ * We have some done and some undone sets. Restart the done
* ones so that we can deliver a tuple (if possible).
*/
foreach(tl, targetlist)
@@ -1628,7 +1645,7 @@ ExecTargetList(List *targetlist,
values[resind] = ExecEvalExpr(tle->expr,
econtext,
&isNull,
- &itemIsDone[resind]);
+ &itemIsDone[resind]);
nulls[resind] = isNull ? 'n' : ' ';
if (itemIsDone[resind] == ExprEndResult)
@@ -1644,10 +1661,11 @@ ExecTargetList(List *targetlist,
}
}
}
+
/*
- * If we cannot make a tuple because some sets are empty,
- * we still have to cycle the nonempty sets to completion,
- * else resources will not be released from subplans etc.
+ * If we cannot make a tuple because some sets are empty, we
+ * still have to cycle the nonempty sets to completion, else
+ * resources will not be released from subplans etc.
*/
if (*isDone == ExprEndResult)
{
@@ -1752,8 +1770,8 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone)
/*
* store the tuple in the projection slot and return the slot.
*/
- return ExecStoreTuple(newTuple, /* tuple to store */
- slot, /* slot to store in */
- InvalidBuffer, /* tuple has no buffer */
+ return ExecStoreTuple(newTuple, /* tuple to store */
+ slot, /* slot to store in */
+ InvalidBuffer, /* tuple has no buffer */
true);
}
diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c
index 3a2e79dbdd4..58a3b5edea4 100644
--- a/src/backend/executor/execScan.c
+++ b/src/backend/executor/execScan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.15 2001/01/24 19:42:54 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.16 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -46,7 +46,7 @@
*/
TupleTableSlot *
ExecScan(Scan *node,
- ExecScanAccessMtd accessMtd) /* function returning a tuple */
+ ExecScanAccessMtd accessMtd) /* function returning a tuple */
{
CommonScanState *scanstate;
EState *estate;
@@ -81,7 +81,7 @@ ExecScan(Scan *node,
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a scan tuple.
* ----------------
*/
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index e5f1a269d81..3e75aef337c 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.46 2001/01/29 00:39:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.47 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -40,7 +40,7 @@
* TupIsNull - true when slot contains no tuple(Macro)
*
* CONVENIENCE INITIALIZATION ROUTINES
- * ExecInitResultTupleSlot \ convenience routines to initialize
+ * ExecInitResultTupleSlot \ convenience routines to initialize
* ExecInitScanTupleSlot \ the various tuple slots for nodes
* ExecInitExtraTupleSlot / which store copies of tuples.
* ExecInitNullTupleSlot /
@@ -422,7 +422,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
slot->val = (HeapTuple) NULL;
- slot->ttc_shouldFree = true; /* probably useless code... */
+ slot->ttc_shouldFree = true;/* probably useless code... */
/* ----------------
* Drop the pin on the referenced buffer, if there is one.
@@ -446,7 +446,7 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
void
ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */
TupleDesc tupdesc, /* new tuple descriptor */
- bool shouldFree) /* is desc owned by slot? */
+ bool shouldFree) /* is desc owned by slot? */
{
if (slot->ttc_shouldFreeDesc &&
slot->ttc_tupleDescriptor != NULL)
@@ -482,7 +482,7 @@ ExecSetSlotDescriptorIsNew(TupleTableSlot *slot, /* slot to change */
* ExecInit{Result,Scan,Extra}TupleSlot
*
* These are convenience routines to initialize the specified slot
- * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
+ * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot
* is used for initializing special-purpose slots.
* --------------------------------
*/
@@ -541,11 +541,13 @@ ExecInitExtraTupleSlot(EState *estate)
TupleTableSlot *
ExecInitNullTupleSlot(EState *estate, TupleDesc tupType)
{
- TupleTableSlot* slot = ExecInitExtraTupleSlot(estate);
+ TupleTableSlot *slot = ExecInitExtraTupleSlot(estate);
+
/*
* Since heap_getattr() will treat attributes beyond a tuple's t_natts
- * as being NULL, we can make an all-nulls tuple just by making it be of
- * zero length. However, the slot descriptor must match the real tupType.
+ * as being NULL, we can make an all-nulls tuple just by making it be
+ * of zero length. However, the slot descriptor must match the real
+ * tupType.
*/
HeapTuple nullTuple;
Datum values[1];
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 6b030b64a0e..6ee0d2e26ed 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.73 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.74 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,6 +148,7 @@ ExecAssignExprContext(EState *estate, CommonState *commonstate)
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = CurrentMemoryContext;
+
/*
* Create working memory for expression evaluation in this context.
*/
@@ -184,14 +185,16 @@ MakeExprContext(TupleTableSlot *slot,
econtext->ecxt_innertuple = NULL;
econtext->ecxt_outertuple = NULL;
econtext->ecxt_per_query_memory = queryContext;
+
/*
* We make the temporary context a child of current working context,
* not of the specified queryContext. This seems reasonable but I'm
* not totally sure about it...
*
* Expression contexts made via this routine typically don't live long
- * enough to get reset, so specify a minsize of 0. That avoids alloc'ing
- * any memory in the common case where expr eval doesn't use any.
+ * enough to get reset, so specify a minsize of 0. That avoids
+ * alloc'ing any memory in the common case where expr eval doesn't use
+ * any.
*/
econtext->ecxt_per_tuple_memory =
AllocSetContextCreate(CurrentMemoryContext,
@@ -209,7 +212,7 @@ MakeExprContext(TupleTableSlot *slot,
/*
* Free an ExprContext made by MakeExprContext, including the temporary
- * context used for expression evaluation. Note this will cause any
+ * context used for expression evaluation. Note this will cause any
* pass-by-reference expression result to go away!
*/
void
@@ -447,7 +450,7 @@ ExecAssignScanTypeFromOuterPlan(Plan *node, CommonScanState *csstate)
* resultRelInfo->ri_RelationDesc.
*
* This used to be horribly ugly code, and slow too because it
- * did a sequential scan of pg_index. Now we rely on the relcache
+ * did a sequential scan of pg_index. Now we rely on the relcache
* to cache a list of the OIDs of the indices associated with any
* specific relation, and we use the pg_index syscache to get the
* entries we need from pg_index.
@@ -467,7 +470,7 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
resultRelInfo->ri_NumIndices = 0;
/* checks for disabled indexes */
- if (! RelationGetForm(resultRelation)->relhasindex)
+ if (!RelationGetForm(resultRelation)->relhasindex)
return;
if (IsIgnoringSystemIndexes() &&
IsSystemRelationName(RelationGetRelationName(resultRelation)))
@@ -635,8 +638,9 @@ ExecInsertIndexTuples(TupleTableSlot *slot,
heapDescriptor = RelationGetDescr(heapRelation);
/*
- * We will use the EState's per-tuple context for evaluating predicates
- * and functional-index functions (creating it if it's not already there).
+ * We will use the EState's per-tuple context for evaluating
+ * predicates and functional-index functions (creating it if it's not
+ * already there).
*/
econtext = GetPerTupleExprContext(estate);
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 575f33d84b6..4cc1dc27926 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.43 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.44 2001/03/22 03:59:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,15 +70,15 @@ typedef SQLFunctionCache *SQLFunctionCachePtr;
/* non-export function prototypes */
static execution_state *init_execution_state(char *src,
- Oid *argOidVect, int nargs);
+ Oid *argOidVect, int nargs);
static void init_sql_fcache(FmgrInfo *finfo);
static void postquel_start(execution_state *es);
static TupleTableSlot *postquel_getnext(execution_state *es);
static void postquel_end(execution_state *es);
static void postquel_sub_params(execution_state *es, FunctionCallInfo fcinfo);
static Datum postquel_execute(execution_state *es,
- FunctionCallInfo fcinfo,
- SQLFunctionCachePtr fcache);
+ FunctionCallInfo fcinfo,
+ SQLFunctionCachePtr fcache);
static execution_state *
@@ -180,7 +180,7 @@ init_sql_fcache(FmgrInfo *finfo)
* ----------------
*/
typeTuple = SearchSysCache(TYPEOID,
- ObjectIdGetDatum(procedureStruct->prorettype),
+ ObjectIdGetDatum(procedureStruct->prorettype),
0, 0, 0);
if (!HeapTupleIsValid(typeTuple))
elog(ERROR, "init_sql_fcache: Cache lookup failed for type %u",
@@ -235,9 +235,7 @@ init_sql_fcache(FmgrInfo *finfo)
nargs * sizeof(Oid));
}
else
- {
argOidVect = (Oid *) NULL;
- }
tmp = SysCacheGetAttr(PROCOID,
procedureTuple,
@@ -346,8 +344,8 @@ copy_function_result(SQLFunctionCachePtr fcache,
return resultSlot; /* no need to copy result */
/*
- * If first time through, we have to initialize the funcSlot's
- * tuple descriptor.
+ * If first time through, we have to initialize the funcSlot's tuple
+ * descriptor.
*/
if (funcSlot->ttc_tupleDescriptor == NULL)
{
@@ -415,12 +413,14 @@ postquel_execute(execution_state *es,
/*
* If we are supposed to return a tuple, we return the tuple slot
- * pointer converted to Datum. If we are supposed to return a simple
- * value, then project out the first attribute of the result tuple
- * (ie, take the first result column of the final SELECT).
+ * pointer converted to Datum. If we are supposed to return a
+ * simple value, then project out the first attribute of the
+ * result tuple (ie, take the first result column of the final
+ * SELECT).
*/
if (fcache->returnsTuple)
{
+
/*
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
@@ -434,6 +434,7 @@ postquel_execute(execution_state *es,
1,
resSlot->ttc_tupleDescriptor,
&(fcinfo->isnull));
+
/*
* Note: if result type is pass-by-reference then we are
* returning a pointer into the tuple copied by
@@ -546,7 +547,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
*/
if (fcinfo->flinfo->fn_retset)
{
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = ExprEndResult;
@@ -572,7 +573,7 @@ fmgr_sql(PG_FUNCTION_ARGS)
*/
if (fcinfo->flinfo->fn_retset)
{
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (rsi && IsA(rsi, ReturnSetInfo))
rsi->isDone = ExprMultipleResult;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index f60f499e6be..e0f50bd66d1 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -19,7 +19,7 @@
* The agg's input type and transtype must be the same in this case!
*
* If transfunc is marked "strict" then NULL input_values are skipped,
- * keeping the previous transvalue. If transfunc is not strict then it
+ * keeping the previous transvalue. If transfunc is not strict then it
* is called for every input tuple and must deal with NULL initcond
* or NULL input_value for itself.
*
@@ -34,7 +34,7 @@
* are not allowed to accumulate until end of query. We do this by
* "ping-ponging" between two memory contexts; successive calls to the
* transfunc are executed in alternate contexts, passing the previous
- * transvalue that is in the other context. At the beginning of each
+ * transvalue that is in the other context. At the beginning of each
* tuple cycle we can reset the current output context to avoid memory
* usage growth. Note: we must use MemoryContextContains() to check
* whether the transfunc has perhaps handed us back one of its input
@@ -46,7 +46,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.75 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.76 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,8 +130,8 @@ typedef struct AggStatePerAggData
* an input tuple group and updated for each input tuple.
*
* For a simple (non DISTINCT) aggregate, we just feed the input values
- * straight to the transition function. If it's DISTINCT, we pass
- * the input values into a Tuplesort object; then at completion of the
+ * straight to the transition function. If it's DISTINCT, we pass the
+ * input values into a Tuplesort object; then at completion of the
* input tuple group, we scan the sorted values, eliminate duplicates,
* and run the transition function on the rest.
*/
@@ -144,20 +144,21 @@ typedef struct AggStatePerAggData
bool noTransValue; /* true if transValue not set yet */
/*
- * Note: noTransValue initially has the same value as transValueIsNull,
- * and if true both are cleared to false at the same time. They are
- * not the same though: if transfn later returns a NULL, we want to
- * keep that NULL and not auto-replace it with a later input value.
- * Only the first non-NULL input will be auto-substituted.
+ * Note: noTransValue initially has the same value as
+ * transValueIsNull, and if true both are cleared to false at the same
+ * time. They are not the same though: if transfn later returns a
+ * NULL, we want to keep that NULL and not auto-replace it with a
+ * later input value. Only the first non-NULL input will be
+ * auto-substituted.
*/
} AggStatePerAggData;
static void initialize_aggregate(AggStatePerAgg peraggstate);
static void advance_transition_function(AggStatePerAgg peraggstate,
- Datum newVal, bool isNull);
+ Datum newVal, bool isNull);
static void process_sorted_aggregate(AggState *aggstate,
- AggStatePerAgg peraggstate);
+ AggStatePerAgg peraggstate);
static void finalize_aggregate(AggStatePerAgg peraggstate,
Datum *resultVal, bool *resultIsNull);
@@ -195,8 +196,8 @@ initialize_aggregate(AggStatePerAgg peraggstate)
* (Re)set transValue to the initial value.
*
* Note that when the initial value is pass-by-ref, we just reuse it
- * without copying for each group. Hence, transition function
- * had better not scribble on its input, or it will fail for GROUP BY!
+ * without copying for each group. Hence, transition function had
+ * better not scribble on its input, or it will fail for GROUP BY!
*/
peraggstate->transValue = peraggstate->initValue;
peraggstate->transValueIsNull = peraggstate->initValueIsNull;
@@ -222,50 +223,55 @@ static void
advance_transition_function(AggStatePerAgg peraggstate,
Datum newVal, bool isNull)
{
- FunctionCallInfoData fcinfo;
+ FunctionCallInfoData fcinfo;
if (peraggstate->transfn.fn_strict)
{
if (isNull)
{
+
/*
- * For a strict transfn, nothing happens at a NULL input tuple;
- * we just keep the prior transValue. However, if the transtype
- * is pass-by-ref, we have to copy it into the new context
- * because the old one is going to get reset.
+ * For a strict transfn, nothing happens at a NULL input
+ * tuple; we just keep the prior transValue. However, if the
+ * transtype is pass-by-ref, we have to copy it into the new
+ * context because the old one is going to get reset.
*/
if (!peraggstate->transValueIsNull)
peraggstate->transValue = datumCopy(peraggstate->transValue,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
return;
}
if (peraggstate->noTransValue)
{
+
/*
- * transValue has not been initialized. This is the first non-NULL
- * input value. We use it as the initial value for transValue.
- * (We already checked that the agg's input type is binary-
- * compatible with its transtype, so straight copy here is OK.)
+ * transValue has not been initialized. This is the first
+ * non-NULL input value. We use it as the initial value for
+ * transValue. (We already checked that the agg's input type
+ * is binary- compatible with its transtype, so straight copy
+ * here is OK.)
*
- * We had better copy the datum if it is pass-by-ref, since
- * the given pointer may be pointing into a scan tuple that
- * will be freed on the next iteration of the scan.
+ * We had better copy the datum if it is pass-by-ref, since the
+ * given pointer may be pointing into a scan tuple that will
+ * be freed on the next iteration of the scan.
*/
peraggstate->transValue = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
+ peraggstate->transtypeByVal,
+ peraggstate->transtypeLen);
peraggstate->transValueIsNull = false;
peraggstate->noTransValue = false;
return;
}
if (peraggstate->transValueIsNull)
{
+
/*
* Don't call a strict function with NULL inputs. Note it is
- * possible to get here despite the above tests, if the transfn
- * is strict *and* returned a NULL on a prior cycle. If that
- * happens we will propagate the NULL all the way to the end.
+ * possible to get here despite the above tests, if the
+ * transfn is strict *and* returned a NULL on a prior cycle.
+ * If that happens we will propagate the NULL all the way to
+ * the end.
*/
return;
}
@@ -283,14 +289,14 @@ advance_transition_function(AggStatePerAgg peraggstate,
newVal = FunctionCallInvoke(&fcinfo);
/*
- * If the transition function was uncooperative, it may have
- * given us a pass-by-ref result that points at the scan tuple
- * or the prior-cycle working memory. Copy it into the active
- * context if it doesn't look right.
+ * If the transition function was uncooperative, it may have given us
+ * a pass-by-ref result that points at the scan tuple or the
+ * prior-cycle working memory. Copy it into the active context if it
+ * doesn't look right.
*/
if (!peraggstate->transtypeByVal && !fcinfo.isnull &&
- ! MemoryContextContains(CurrentMemoryContext,
- DatumGetPointer(newVal)))
+ !MemoryContextContains(CurrentMemoryContext,
+ DatumGetPointer(newVal)))
newVal = datumCopy(newVal,
peraggstate->transtypeByVal,
peraggstate->transtypeLen);
@@ -302,7 +308,7 @@ advance_transition_function(AggStatePerAgg peraggstate,
/*
* Run the transition function for a DISTINCT aggregate. This is called
* after we have completed entering all the input values into the sort
- * object. We complete the sort, read out the values in sorted order,
+ * object. We complete the sort, read out the values in sorted order,
* and run the transition function on each non-duplicate value.
*
* When called, CurrentMemoryContext should be the per-query context.
@@ -321,19 +327,21 @@ process_sorted_aggregate(AggState *aggstate,
/*
* Note: if input type is pass-by-ref, the datums returned by the sort
- * are freshly palloc'd in the per-query context, so we must be careful
- * to pfree them when they are no longer needed.
+ * are freshly palloc'd in the per-query context, so we must be
+ * careful to pfree them when they are no longer needed.
*/
while (tuplesort_getdatum(peraggstate->sortstate, true,
&newVal, &isNull))
{
+
/*
* DISTINCT always suppresses nulls, per SQL spec, regardless of
* the transition function's strictness.
*/
if (isNull)
continue;
+
/*
* Clear and select the current working context for evaluation of
* the equality function and transition function.
@@ -349,6 +357,7 @@ process_sorted_aggregate(AggState *aggstate,
/* equal to prior, so forget this one */
if (!peraggstate->inputtypeByVal)
pfree(DatumGetPointer(newVal));
+
/*
* note we do NOT flip contexts in this case, so no need to
* copy prior transValue to other context.
@@ -357,6 +366,7 @@ process_sorted_aggregate(AggState *aggstate,
else
{
advance_transition_function(peraggstate, newVal, false);
+
/*
* Make the other context current so that this transition
* result is preserved.
@@ -389,12 +399,13 @@ static void
finalize_aggregate(AggStatePerAgg peraggstate,
Datum *resultVal, bool *resultIsNull)
{
+
/*
* Apply the agg's finalfn if one is provided, else return transValue.
*/
if (OidIsValid(peraggstate->finalfn_oid))
{
- FunctionCallInfoData fcinfo;
+ FunctionCallInfoData fcinfo;
MemSet(&fcinfo, 0, sizeof(fcinfo));
fcinfo.flinfo = &peraggstate->finalfn;
@@ -422,9 +433,9 @@ finalize_aggregate(AggStatePerAgg peraggstate,
/*
* If result is pass-by-ref, make sure it is in the right context.
*/
- if (!peraggstate->resulttypeByVal && ! *resultIsNull &&
- ! MemoryContextContains(CurrentMemoryContext,
- DatumGetPointer(*resultVal)))
+ if (!peraggstate->resulttypeByVal && !*resultIsNull &&
+ !MemoryContextContains(CurrentMemoryContext,
+ DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal,
peraggstate->resulttypeByVal,
peraggstate->resulttypeLen);
@@ -480,7 +491,8 @@ ExecAgg(Agg *node)
peragg = aggstate->peragg;
/*
- * We loop retrieving groups until we find one matching node->plan.qual
+ * We loop retrieving groups until we find one matching
+ * node->plan.qual
*/
do
{
@@ -578,19 +590,19 @@ ExecAgg(Agg *node)
* calculation, and stash results in the per-output-tuple context.
*
* This is a bit tricky when there are both DISTINCT and plain
- * aggregates: we must first finalize all the plain aggs and then all
- * the DISTINCT ones. This is needed because the last transition
- * values for the plain aggs are stored in the not-current working
- * context, and we have to evaluate those aggs (and stash the results
- * in the output tup_cxt!) before we start flipping contexts again
- * in process_sorted_aggregate.
+ * aggregates: we must first finalize all the plain aggs and then
+ * all the DISTINCT ones. This is needed because the last
+ * transition values for the plain aggs are stored in the
+ * not-current working context, and we have to evaluate those aggs
+ * (and stash the results in the output tup_cxt!) before we start
+ * flipping contexts again in process_sorted_aggregate.
*/
oldContext = MemoryContextSwitchTo(aggstate->tup_cxt);
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
{
AggStatePerAgg peraggstate = &peragg[aggno];
- if (! peraggstate->aggref->aggdistinct)
+ if (!peraggstate->aggref->aggdistinct)
finalize_aggregate(peraggstate,
&aggvalues[aggno], &aggnulls[aggno]);
}
@@ -766,21 +778,22 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
ExecAssignExprContext(estate, &aggstate->csstate.cstate);
/*
- * We actually need three separate expression memory contexts: one
- * for calculating per-output-tuple values (ie, the finished aggregate
+ * We actually need three separate expression memory contexts: one for
+ * calculating per-output-tuple values (ie, the finished aggregate
* results), and two that we ping-pong between for per-input-tuple
* evaluation of input expressions and transition functions. The
- * context made by ExecAssignExprContext() is used as the output context.
+ * context made by ExecAssignExprContext() is used as the output
+ * context.
*/
aggstate->tup_cxt =
aggstate->csstate.cstate.cs_ExprContext->ecxt_per_tuple_memory;
- aggstate->agg_cxt[0] =
+ aggstate->agg_cxt[0] =
AllocSetContextCreate(CurrentMemoryContext,
"AggExprContext1",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
- aggstate->agg_cxt[1] =
+ aggstate->agg_cxt[1] =
AllocSetContextCreate(CurrentMemoryContext,
"AggExprContext2",
ALLOCSET_DEFAULT_MINSIZE,
@@ -882,30 +895,32 @@ ExecInitAgg(Agg *node, EState *estate, Plan *parent)
/*
* If the transfn is strict and the initval is NULL, make sure
* input type and transtype are the same (or at least binary-
- * compatible), so that it's OK to use the first input value
- * as the initial transValue. This should have been checked at
- * agg definition time, but just in case...
+ * compatible), so that it's OK to use the first input value as
+ * the initial transValue. This should have been checked at agg
+ * definition time, but just in case...
*/
if (peraggstate->transfn.fn_strict && peraggstate->initValueIsNull)
{
+
/*
- * Note: use the type from the input expression here,
- * not aggform->aggbasetype, because the latter might be 0.
+ * Note: use the type from the input expression here, not
+ * aggform->aggbasetype, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
if (inputType != aggform->aggtranstype &&
- ! IS_BINARY_COMPATIBLE(inputType, aggform->aggtranstype))
+ !IS_BINARY_COMPATIBLE(inputType, aggform->aggtranstype))
elog(ERROR, "Aggregate %s needs to have compatible input type and transition type",
aggname);
}
if (aggref->aggdistinct)
{
+
/*
- * Note: use the type from the input expression here,
- * not aggform->aggbasetype, because the latter might be 0.
+ * Note: use the type from the input expression here, not
+ * aggform->aggbasetype, because the latter might be 0.
* (Consider COUNT(*).)
*/
Oid inputType = exprType(aggref->target);
@@ -947,12 +962,14 @@ ExecEndAgg(Agg *node)
Plan *outerPlan;
ExecFreeProjectionInfo(&aggstate->csstate.cstate);
+
/*
* Make sure ExecFreeExprContext() frees the right expr context...
*/
aggstate->csstate.cstate.cs_ExprContext->ecxt_per_tuple_memory =
aggstate->tup_cxt;
ExecFreeExprContext(&aggstate->csstate.cstate);
+
/*
* ... and I free the others.
*/
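The rewrapped comments above spell out the strict-transfn rules: a NULL input is skipped and the prior transValue kept, the first non-NULL input seeds a transValue whose initial value was NULL, and a NULL returned by the transfn on a prior cycle is propagated to the end of the group. A minimal self-contained C sketch of those rules, with invented names and a plain long running sum standing in for the real transition function (an illustration only, not the backend code):

/* Simplified model of the strict-transition-function rules described above. */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	long	transValue;			/* running state */
	bool	transValueIsNull;
	bool	noTransValue;		/* state not yet seeded from input */
} AggSketch;

static long
sum_transfn(long state, long newVal)
{
	return state + newVal;		/* stand-in for the real strict transfn */
}

static void
advance_strict(AggSketch *agg, long newVal, bool isNull)
{
	if (isNull)
		return;					/* strict: ignore NULL inputs, keep prior state */
	if (agg->noTransValue)
	{
		/* first non-NULL input seeds the state (initcond was NULL) */
		agg->transValue = newVal;
		agg->transValueIsNull = false;
		agg->noTransValue = false;
		return;
	}
	if (agg->transValueIsNull)
		return;					/* transfn returned NULL earlier; propagate it */
	agg->transValue = sum_transfn(agg->transValue, newVal);
}

int
main(void)
{
	AggSketch	agg = {0, true, true};
	long		vals[] = {5, 7, 3};
	bool		nulls[] = {true, false, false};

	for (int i = 0; i < 3; i++)
		advance_strict(&agg, vals[i], nulls[i]);
	printf("result: %ld (null=%d)\n", agg.transValue, agg.transValueIsNull);
	return 0;					/* prints: result: 10 (null=0) */
}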
diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c
index e4ede51852a..500e9c07c43 100644
--- a/src/backend/executor/nodeGroup.c
+++ b/src/backend/executor/nodeGroup.c
@@ -15,7 +15,7 @@
* locate group boundaries.
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.41 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.42 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,8 +88,8 @@ ExecGroupEveryTuple(Group *node)
tupdesc = ExecGetScanType(&grpstate->csstate);
/*
- * We need not call ResetExprContext here because execTuplesMatch
- * will reset the per-tuple memory context once per input tuple.
+ * We need not call ResetExprContext here because execTuplesMatch will
+ * reset the per-tuple memory context once per input tuple.
*/
/* if we haven't returned first tuple of a new group yet ... */
@@ -199,8 +199,8 @@ ExecGroupOneTuple(Group *node)
tupdesc = ExecGetScanType(&grpstate->csstate);
/*
- * We need not call ResetExprContext here because execTuplesMatch
- * will reset the per-tuple memory context once per input tuple.
+ * We need not call ResetExprContext here because execTuplesMatch will
+ * reset the per-tuple memory context once per input tuple.
*/
firsttuple = grpstate->grp_firstTuple;
@@ -465,8 +465,8 @@ execTuplesMatch(HeapTuple tuple1,
/* Apply the type-specific equality function */
- if (! DatumGetBool(FunctionCall2(&eqfunctions[i],
- attr1, attr2)))
+ if (!DatumGetBool(FunctionCall2(&eqfunctions[i],
+ attr1, attr2)))
{
result = false; /* they aren't equal */
break;
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 070354ace7c..7b5e3d4cced 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $Id: nodeHash.c,v 1.54 2001/01/24 19:42:54 momjian Exp $
+ * $Id: nodeHash.c,v 1.55 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -540,9 +540,7 @@ ExecHashGetBucket(HashJoinTable hashtable,
* ------------------
*/
if (isNull)
- {
bucketno = 0;
- }
else
{
bucketno = hashFunc(keyval,
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index a3fc2f545cb..dae06d2c937 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.36 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.37 2001/03/22 03:59:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -105,7 +105,7 @@ ExecHashJoin(HashJoin *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* ----------------
*/
@@ -155,6 +155,7 @@ ExecHashJoin(HashJoin *node)
for (;;)
{
+
/*
* If we don't have an outer tuple, get the next one
*/
@@ -276,14 +277,15 @@ ExecHashJoin(HashJoin *node)
*/
hjstate->hj_NeedNewOuter = true;
- if (! hjstate->hj_MatchedOuter &&
+ if (!hjstate->hj_MatchedOuter &&
node->join.jointype == JOIN_LEFT)
{
+
/*
* We are doing an outer join and there were no join matches
* for this outer tuple. Generate a fake join tuple with
- * nulls for the inner tuple, and return it if it passes
- * the non-join quals.
+ * nulls for the inner tuple, and return it if it passes the
+ * non-join quals.
*/
econtext->ecxt_innertuple = hjstate->hj_NullInnerTupleSlot;
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index c0369e8f4cd..a6e6e45e9dc 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.57 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.58 2001/03/22 03:59:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -224,7 +224,7 @@ IndexNext(IndexScan *node)
qual = lnext(qual);
}
if (!prev_matches)
- return slot; /* OK to return tuple */
+ return slot;/* OK to return tuple */
/* Duplicate tuple, so drop it and loop back for another */
ExecClearTuple(slot);
}
@@ -326,7 +326,8 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
estate = node->scan.plan.state;
indexstate = node->indxstate;
- econtext = indexstate->iss_RuntimeContext; /* context for runtime keys */
+ econtext = indexstate->iss_RuntimeContext; /* context for runtime
+ * keys */
direction = estate->es_direction;
numIndices = indexstate->iss_NumIndices;
scanDescs = indexstate->iss_ScanDescs;
@@ -340,16 +341,18 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
if (econtext)
{
+
/*
- * If we are being passed an outer tuple,
- * save it for runtime key calc
+ * If we are being passed an outer tuple, save it for runtime key
+ * calc
*/
if (exprCtxt != NULL)
econtext->ecxt_outertuple = exprCtxt->ecxt_outertuple;
+
/*
- * Reset the runtime-key context so we don't leak memory as
- * each outer tuple is scanned. Note this assumes that we
- * will recalculate *all* runtime keys on each call.
+ * Reset the runtime-key context so we don't leak memory as each
+ * outer tuple is scanned. Note this assumes that we will
+ * recalculate *all* runtime keys on each call.
*/
ResetExprContext(econtext);
}
@@ -385,8 +388,8 @@ ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent)
* outer tuple. We then stick the result into the scan
* key.
*
- * Note: the result of the eval could be a pass-by-ref
- * value that's stored in the outer scan's tuple, not in
+ * Note: the result of the eval could be a pass-by-ref value
+ * that's stored in the outer scan's tuple, not in
* econtext->ecxt_per_tuple_memory. We assume that the
* outer tuple will stay put throughout our scan. If this
* is wrong, we could copy the result into our context
@@ -790,7 +793,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
Assert(leftop != NULL);
- if (IsA(leftop, Var) && var_is_rel((Var *) leftop))
+ if (IsA(leftop, Var) &&var_is_rel((Var *) leftop))
{
/* ----------------
* if the leftop is a "rel-var", then it means
@@ -862,7 +865,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
Assert(rightop != NULL);
- if (IsA(rightop, Var) && var_is_rel((Var *) rightop))
+ if (IsA(rightop, Var) &&var_is_rel((Var *) rightop))
{
/* ----------------
* here we make sure only one op identifies the
@@ -986,7 +989,7 @@ ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent)
*/
if (have_runtime_keys)
{
- ExprContext *stdecontext = scanstate->cstate.cs_ExprContext;
+ ExprContext *stdecontext = scanstate->cstate.cs_ExprContext;
ExecAssignExprContext(estate, &scanstate->cstate);
indexstate->iss_RuntimeKeyInfo = runtimeKeyInfo;
diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c
index c7cc76f0a70..534c3a419d1 100644
--- a/src/backend/executor/nodeLimit.c
+++ b/src/backend/executor/nodeLimit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.3 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeLimit.c,v 1.4 2001/03/22 03:59:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,7 +59,7 @@ ExecLimit(Limit *node)
* may not be set until now.)
* ----------------
*/
- if (! limitstate->parmsSet)
+ if (!limitstate->parmsSet)
recompute_limits(node);
netlimit = limitstate->offset + limitstate->count;
@@ -89,7 +89,7 @@ ExecLimit(Limit *node)
{
if (limitstate->atEnd)
return NULL;
- if (! limitstate->noCount && limitstate->position > netlimit)
+ if (!limitstate->noCount && limitstate->position > netlimit)
return NULL;
}
else
@@ -104,13 +104,14 @@ ExecLimit(Limit *node)
slot = ExecProcNode(outerPlan, (Plan *) node);
if (TupIsNull(slot))
{
+
/*
* We are at start or end of the subplan. Update local state
* appropriately, but always return NULL.
*/
if (ScanDirectionIsForward(direction))
{
- Assert(! limitstate->atEnd);
+ Assert(!limitstate->atEnd);
/* must bump position to stay in sync for backwards fetch */
limitstate->position++;
limitstate->atEnd = true;
@@ -122,6 +123,7 @@ ExecLimit(Limit *node)
}
return NULL;
}
+
/*
* We got the next subplan tuple successfully, so adjust state.
*/
@@ -135,7 +137,7 @@ ExecLimit(Limit *node)
limitstate->atEnd = false;
/* ----------------
- * Now, is this a tuple we want? If not, loop around to fetch
+ * Now, is this a tuple we want? If not, loop around to fetch
* another tuple from the subplan.
* ----------------
*/
@@ -185,9 +187,9 @@ recompute_limits(Limit *node)
if (node->limitCount)
{
limitstate->count = DatumGetInt32(ExecEvalExpr(node->limitCount,
- econtext,
- &isNull,
- NULL));
+ econtext,
+ &isNull,
+ NULL));
/* Interpret NULL count as no count (LIMIT ALL) */
if (isNull)
limitstate->noCount = true;
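The hunks above keep the same OFFSET/LIMIT bookkeeping: netlimit is offset plus count, a NULL count means LIMIT ALL (noCount), and a tuple is wanted only once position is past the offset but not past netlimit. A small self-contained sketch of that arithmetic over a dummy tuple stream (names invented for illustration, not the executor node):

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	int		offset = 2;
	int		count = 3;
	bool	noCount = false;	/* true would correspond to LIMIT ALL */
	int		netlimit = offset + count;
	int		position = 0;

	for (int tuple = 100; tuple < 110; tuple++)
	{
		position++;
		if (position <= offset)
			continue;			/* still skipping the OFFSET rows */
		if (!noCount && position > netlimit)
			break;				/* past the window: stop fetching */
		printf("emit %d\n", tuple);		/* emits 102, 103, 104 */
	}
	return 0;
}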
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index dd09c6ac1c1..1f55f852f0e 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.33 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.34 2001/03/22 03:59:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
*
* The first time this is called, ExecMaterial retrieves tuples
* from this node's outer subplan and inserts them into a tuplestore
- * (a temporary tuple storage structure). The first tuple is then
+ * (a temporary tuple storage structure). The first tuple is then
* returned. Successive calls to ExecMaterial return successive
* tuples from the tuplestore.
*
@@ -85,7 +85,7 @@ ExecMaterial(Material *node)
* Initialize tuplestore module.
* ----------------
*/
- tuplestorestate = tuplestore_begin_heap(true, /* randomAccess */
+ tuplestorestate = tuplestore_begin_heap(true, /* randomAccess */
SortMem);
matstate->tuplestorestate = (void *) tuplestorestate;
@@ -250,7 +250,7 @@ ExecEndMaterial(Material *node)
void
ExecMaterialMarkPos(Material *node)
{
- MaterialState *matstate = node->matstate;
+ MaterialState *matstate = node->matstate;
/* ----------------
* if we haven't materialized yet, just return.
@@ -271,7 +271,7 @@ ExecMaterialMarkPos(Material *node)
void
ExecMaterialRestrPos(Material *node)
{
- MaterialState *matstate = node->matstate;
+ MaterialState *matstate = node->matstate;
/* ----------------
* if we haven't materialized yet, just return.
@@ -299,8 +299,8 @@ ExecMaterialReScan(Material *node, ExprContext *exprCtxt, Plan *parent)
MaterialState *matstate = node->matstate;
/*
- * If we haven't materialized yet, just return. If outerplan' chgParam is
- * not NULL then it will be re-scanned by ExecProcNode, else - no
+ * If we haven't materialized yet, just return. If outerplan' chgParam
+ * is not NULL then it will be re-scanned by ExecProcNode, else - no
* reason to re-scan it at all.
*/
if (!matstate->tuplestorestate)
@@ -309,8 +309,8 @@ ExecMaterialReScan(Material *node, ExprContext *exprCtxt, Plan *parent)
ExecClearTuple(matstate->csstate.cstate.cs_ResultTupleSlot);
/*
- * If subnode is to be rescanned then we forget previous stored results;
- * we have to re-read the subplan and re-store.
+ * If subnode is to be rescanned then we forget previous stored
+ * results; we have to re-read the subplan and re-store.
*
* Otherwise we can just rewind and rescan the stored output.
*/
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index fd8868a4a54..e3617c032b0 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.42 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.43 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -169,7 +169,7 @@ MJFormSkipQual(List *qualList, char *replaceopname)
CharGetDatum('b'));
if (!HeapTupleIsValid(optup))
elog(ERROR,
- "MJFormSkipQual: mergejoin operator %u has no matching %s op",
+ "MJFormSkipQual: mergejoin operator %u has no matching %s op",
op->opno, replaceopname);
opform = (Form_pg_operator) GETSTRUCT(optup);
@@ -258,7 +258,7 @@ MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext)
&isNull,
NULL);
- if (! DatumGetBool(const_value) || isNull)
+ if (!DatumGetBool(const_value) || isNull)
break; /* return false */
eqclause = lnext(eqclause);
@@ -439,7 +439,7 @@ ExecMergeJoin(MergeJoin *node)
default:
elog(ERROR, "ExecMergeJoin: unsupported join type %d",
(int) node->join.jointype);
- doFillOuter = false; /* keep compiler quiet */
+ doFillOuter = false;/* keep compiler quiet */
doFillInner = false;
break;
}
@@ -464,7 +464,7 @@ ExecMergeJoin(MergeJoin *node)
if (mergestate->jstate.cs_TupFromTlist)
{
TupleTableSlot *result;
- ExprDoneCond isDone;
+ ExprDoneCond isDone;
result = ExecProject(mergestate->jstate.cs_ProjInfo, &isDone);
if (isDone == ExprMultipleResult)
@@ -475,7 +475,7 @@ ExecMergeJoin(MergeJoin *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* ----------------
*/
@@ -500,9 +500,9 @@ ExecMergeJoin(MergeJoin *node)
/*
* EXEC_MJ_INITIALIZE means that this is the first time
- * ExecMergeJoin() has been called and so we have to
- * fetch the first tuple for both outer and inner subplans.
- * If we fail to get a tuple here, then that subplan is
+ * ExecMergeJoin() has been called and so we have to fetch
+ * the first tuple for both outer and inner subplans. If
+ * we fail to get a tuple here, then that subplan is
* empty, and we either end the join or go to one of the
* fill-remaining-tuples states.
*/
@@ -516,6 +516,7 @@ ExecMergeJoin(MergeJoin *node)
MJ_printf("ExecMergeJoin: outer subplan is empty\n");
if (doFillInner)
{
+
/*
* Need to emit right-join tuples for remaining
* inner tuples. We set MatchedInner = true to
@@ -536,11 +537,13 @@ ExecMergeJoin(MergeJoin *node)
MJ_printf("ExecMergeJoin: inner subplan is empty\n");
if (doFillOuter)
{
+
/*
- * Need to emit left-join tuples for all outer tuples,
- * including the one we just fetched. We set
- * MatchedOuter = false to force the ENDINNER state
- * to emit this tuple before advancing outer.
+ * Need to emit left-join tuples for all outer
+ * tuples, including the one we just fetched. We
+ * set MatchedOuter = false to force the ENDINNER
+ * state to emit this tuple before advancing
+ * outer.
*/
mergestate->mj_JoinState = EXEC_MJ_ENDINNER;
mergestate->mj_MatchedOuter = false;
@@ -614,17 +617,17 @@ ExecMergeJoin(MergeJoin *node)
/*
* Check the extra qual conditions to see if we actually
- * want to return this join tuple. If not, can proceed with
- * merge. We must distinguish the additional joinquals
- * (which must pass to consider the tuples "matched" for
- * outer-join logic) from the otherquals (which must pass
- * before we actually return the tuple).
+ * want to return this join tuple. If not, can proceed
+ * with merge. We must distinguish the additional
+ * joinquals (which must pass to consider the tuples
+ * "matched" for outer-join logic) from the otherquals
+ * (which must pass before we actually return the tuple).
*
* We don't bother with a ResetExprContext here, on the
- * assumption that we just did one before checking the merge
- * qual. One per tuple should be sufficient. Also, the
- * econtext's tuple pointers were set up before checking
- * the merge qual, so we needn't do it again.
+ * assumption that we just did one before checking the
+ * merge qual. One per tuple should be sufficient. Also,
+ * the econtext's tuple pointers were set up before
+ * checking the merge qual, so we needn't do it again.
*/
qualResult = (joinqual == NIL ||
ExecQual(joinqual, econtext, false));
@@ -677,11 +680,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillInner && !mergestate->mj_MatchedInner)
{
+
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedInner = true; /* do it only once */
+ mergestate->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -753,11 +758,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillOuter && !mergestate->mj_MatchedOuter)
{
+
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedOuter = true; /* do it only once */
+ mergestate->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -810,6 +817,7 @@ ExecMergeJoin(MergeJoin *node)
innerTupleSlot = mergestate->mj_InnerTupleSlot;
if (doFillInner && !TupIsNull(innerTupleSlot))
{
+
/*
* Need to emit right-join tuples for remaining
* inner tuples.
@@ -879,19 +887,20 @@ ExecMergeJoin(MergeJoin *node)
{
/*
- * the merge clause matched so now we restore the inner
- * scan position to the first mark, and loop back to
- * JOINTEST. Actually, since we know the mergeclause
- * matches, we can skip JOINTEST and go straight to
- * JOINTUPLES.
+ * the merge clause matched so now we restore the
+ * inner scan position to the first mark, and loop
+ * back to JOINTEST. Actually, since we know the
+ * mergeclause matches, we can skip JOINTEST and go
+ * straight to JOINTUPLES.
*
* NOTE: we do not need to worry about the MatchedInner
* state for the rescanned inner tuples. We know all
- * of them will match this new outer tuple and therefore
- * won't be emitted as fill tuples. This works *only*
- * because we require the extra joinquals to be nil when
- * doing a right or full join --- otherwise some of the
- * rescanned tuples might fail the extra joinquals.
+ * of them will match this new outer tuple and
+ * therefore won't be emitted as fill tuples. This
+ * works *only* because we require the extra joinquals
+ * to be nil when doing a right or full join ---
+ * otherwise some of the rescanned tuples might fail
+ * the extra joinquals.
*/
ExecRestrPos(innerPlan);
mergestate->mj_JoinState = EXEC_MJ_JOINTUPLES;
@@ -918,6 +927,7 @@ ExecMergeJoin(MergeJoin *node)
{
if (doFillOuter)
{
+
/*
* Need to emit left-join tuples for remaining
* outer tuples.
@@ -1044,11 +1054,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillOuter && !mergestate->mj_MatchedOuter)
{
+
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedOuter = true; /* do it only once */
+ mergestate->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1101,6 +1113,7 @@ ExecMergeJoin(MergeJoin *node)
innerTupleSlot = mergestate->mj_InnerTupleSlot;
if (doFillInner && !TupIsNull(innerTupleSlot))
{
+
/*
* Need to emit right-join tuples for remaining
* inner tuples.
@@ -1229,11 +1242,13 @@ ExecMergeJoin(MergeJoin *node)
if (doFillInner && !mergestate->mj_MatchedInner)
{
+
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedInner = true; /* do it only once */
+ mergestate->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1286,6 +1301,7 @@ ExecMergeJoin(MergeJoin *node)
outerTupleSlot = mergestate->mj_OuterTupleSlot;
if (doFillOuter && !TupIsNull(outerTupleSlot))
{
+
/*
* Need to emit left-join tuples for remaining
* outer tuples.
@@ -1306,8 +1322,8 @@ ExecMergeJoin(MergeJoin *node)
/*
* EXEC_MJ_ENDOUTER means we have run out of outer tuples,
- * but are doing a right/full join and therefore must null-
- * fill any remaing unmatched inner tuples.
+ * but are doing a right/full join and therefore must
+ * null- fill any remaing unmatched inner tuples.
*/
case EXEC_MJ_ENDOUTER:
MJ_printf("ExecMergeJoin: EXEC_MJ_ENDOUTER\n");
@@ -1316,11 +1332,13 @@ ExecMergeJoin(MergeJoin *node)
if (!mergestate->mj_MatchedInner)
{
+
/*
* Generate a fake join tuple with nulls for the outer
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedInner = true; /* do it only once */
+ mergestate->mj_MatchedInner = true; /* do it only once */
ResetExprContext(econtext);
@@ -1383,11 +1401,13 @@ ExecMergeJoin(MergeJoin *node)
if (!mergestate->mj_MatchedOuter)
{
+
/*
* Generate a fake join tuple with nulls for the inner
- * tuple, and return it if it passes the non-join quals.
+ * tuple, and return it if it passes the non-join
+ * quals.
*/
- mergestate->mj_MatchedOuter = true; /* do it only once */
+ mergestate->mj_MatchedOuter = true; /* do it only once */
ResetExprContext(econtext);
@@ -1515,14 +1535,16 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
case JOIN_LEFT:
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(innerPlan((Plan*) node)));
+ ExecGetTupType(innerPlan((Plan *) node)));
break;
case JOIN_RIGHT:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(outerPlan((Plan*) node)));
+ ExecGetTupType(outerPlan((Plan *) node)));
+
/*
- * Can't handle right or full join with non-nil extra joinclauses.
+ * Can't handle right or full join with non-nil extra
+ * joinclauses.
*/
if (node->join.joinqual != NIL)
elog(ERROR, "RIGHT JOIN is only supported with mergejoinable join conditions");
@@ -1530,12 +1552,14 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent)
case JOIN_FULL:
mergestate->mj_NullOuterTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(outerPlan((Plan*) node)));
+ ExecGetTupType(outerPlan((Plan *) node)));
mergestate->mj_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(innerPlan((Plan*) node)));
+ ExecGetTupType(innerPlan((Plan *) node)));
+
/*
- * Can't handle right or full join with non-nil extra joinclauses.
+ * Can't handle right or full join with non-nil extra
+ * joinclauses.
*/
if (node->join.joinqual != NIL)
elog(ERROR, "FULL JOIN is only supported with mergejoinable join conditions");
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index f514b03851f..9c01ee4a1fb 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.22 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.23 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,7 +97,7 @@ ExecNestLoop(NestLoop *node)
if (nlstate->jstate.cs_TupFromTlist)
{
TupleTableSlot *result;
- ExprDoneCond isDone;
+ ExprDoneCond isDone;
result = ExecProject(nlstate->jstate.cs_ProjInfo, &isDone);
if (isDone == ExprMultipleResult)
@@ -108,7 +108,7 @@ ExecNestLoop(NestLoop *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a join tuple.
* ----------------
*/
@@ -179,14 +179,15 @@ ExecNestLoop(NestLoop *node)
nlstate->nl_NeedNewOuter = true;
- if (! nlstate->nl_MatchedOuter &&
+ if (!nlstate->nl_MatchedOuter &&
node->join.jointype == JOIN_LEFT)
{
+
/*
- * We are doing an outer join and there were no join matches
- * for this outer tuple. Generate a fake join tuple with
- * nulls for the inner tuple, and return it if it passes
- * the non-join quals.
+ * We are doing an outer join and there were no join
+ * matches for this outer tuple. Generate a fake join
+ * tuple with nulls for the inner tuple, and return it if
+ * it passes the non-join quals.
*/
econtext->ecxt_innertuple = nlstate->nl_NullInnerTupleSlot;
@@ -215,6 +216,7 @@ ExecNestLoop(NestLoop *node)
}
}
}
+
/*
* Otherwise just return to top of loop for a new outer tuple.
*/
@@ -328,7 +330,7 @@ ExecInitNestLoop(NestLoop *node, EState *estate, Plan *parent)
case JOIN_LEFT:
nlstate->nl_NullInnerTupleSlot =
ExecInitNullTupleSlot(estate,
- ExecGetTupType(innerPlan((Plan*) node)));
+ ExecGetTupType(innerPlan((Plan *) node)));
break;
default:
elog(ERROR, "ExecInitNestLoop: unsupported join type %d",
diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c
index fa401e20b24..863d4a4a56e 100644
--- a/src/backend/executor/nodeResult.c
+++ b/src/backend/executor/nodeResult.c
@@ -34,7 +34,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.17 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.18 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -111,7 +111,7 @@ ExecResult(Result *node)
/* ----------------
* Reset per-tuple memory context to free any expression evaluation
- * storage allocated in the previous tuple cycle. Note this can't
+ * storage allocated in the previous tuple cycle. Note this can't
* happen until we're done projecting out tuples from a scan tuple.
* ----------------
*/
diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c
index a39128ff2f0..d879cee7a75 100644
--- a/src/backend/executor/nodeSeqscan.c
+++ b/src/backend/executor/nodeSeqscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.27 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.28 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
#include "parser/parsetree.h"
static Oid InitScanRelation(SeqScan *node, EState *estate,
- CommonScanState *scanstate);
+ CommonScanState *scanstate);
static TupleTableSlot *SeqNext(SeqScan *node);
/* ----------------------------------------------------------------
@@ -174,8 +174,8 @@ InitScanRelation(SeqScan *node, EState *estate,
0, /* is index */
direction, /* scan direction */
estate->es_snapshot,
- &currentRelation, /* return: rel desc */
- (Pointer *) &currentScanDesc); /* return: scan desc */
+ &currentRelation, /* return: rel desc */
+ (Pointer *) &currentScanDesc); /* return: scan desc */
scanstate->css_currentRelation = currentRelation;
scanstate->css_currentScanDesc = currentScanDesc;
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index ad35a46d636..00c79992039 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -5,7 +5,7 @@
*
* The input of a SetOp node consists of tuples from two relations,
* which have been combined into one dataset and sorted on all the nonjunk
- * attributes. In addition there is a junk attribute that shows which
+ * attributes. In addition there is a junk attribute that shows which
* relation each tuple came from. The SetOp node scans each group of
* identical tuples to determine how many came from each input relation.
* Then it is a simple matter to emit the output demanded by the SQL spec
@@ -21,7 +21,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSetOp.c,v 1.2 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSetOp.c,v 1.3 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ ExecSetOp(SetOp *node)
* ----------------
*/
if (setopstate->cstate.cs_OuterTupleSlot == NULL &&
- ! setopstate->subplan_done)
+ !setopstate->subplan_done)
{
setopstate->cstate.cs_OuterTupleSlot =
ExecProcNode(outerPlan, (Plan *) node);
@@ -104,6 +104,7 @@ ExecSetOp(SetOp *node)
if (TupIsNull(resultTupleSlot))
{
+
/*
* First of group: save a copy in result slot, and reset
* duplicate-counters for new group.
@@ -113,13 +114,15 @@ ExecSetOp(SetOp *node)
ExecStoreTuple(heap_copytuple(inputTupleSlot->val),
resultTupleSlot,
InvalidBuffer,
- true); /* free copied tuple at ExecClearTuple */
+ true); /* free copied tuple at
+ * ExecClearTuple */
setopstate->numLeft = 0;
setopstate->numRight = 0;
endOfGroup = false;
}
else if (setopstate->subplan_done)
{
+
/*
* Reached end of input, so finish processing final group
*/
@@ -127,8 +130,10 @@ ExecSetOp(SetOp *node)
}
else
{
+
/*
- * Else test if the new tuple and the previously saved tuple match.
+ * Else test if the new tuple and the previously saved tuple
+ * match.
*/
if (execTuplesMatch(inputTupleSlot->val,
resultTupleSlot->val,
@@ -143,6 +148,7 @@ ExecSetOp(SetOp *node)
if (endOfGroup)
{
+
/*
* We've reached the end of the group containing resultTuple.
* Decide how many copies (if any) to emit. This logic is
@@ -185,12 +191,13 @@ ExecSetOp(SetOp *node)
}
else
{
+
/*
- * Current tuple is member of same group as resultTuple.
- * Count it in the appropriate counter.
+ * Current tuple is member of same group as resultTuple. Count
+ * it in the appropriate counter.
*/
- int flag;
- bool isNull;
+ int flag;
+ bool isNull;
flag = DatumGetInt32(heap_getattr(inputTupleSlot->val,
node->flagColIdx,
@@ -207,8 +214,8 @@ ExecSetOp(SetOp *node)
}
/*
- * If we fall out of loop, then we need to emit at least one copy
- * of resultTuple.
+ * If we fall out of loop, then we need to emit at least one copy of
+ * resultTuple.
*/
Assert(setopstate->numOutput > 0);
setopstate->numOutput--;
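Per the header comment above, the SetOp input is one sorted stream in which a junk flag column records which input relation each tuple came from; each group of identical tuples is scanned, numLeft and numRight are counted, and the number of output copies then follows the SQL spec. A standalone sketch of that per-group counting, using INTERSECT ALL (the minimum of the two counts) as one assumed example of the emission rule:

#include <stdio.h>

struct row { int key; int flag; };	/* flag: 0 = left input, 1 = right input */

int
main(void)
{
	struct row	in[] = {{1, 0}, {1, 0}, {1, 1}, {2, 1}, {3, 0}, {3, 1}, {3, 1}};
	int			n = 7;
	int			i = 0;

	while (i < n)
	{
		int		key = in[i].key;
		int		numLeft = 0, numRight = 0;

		/* scan one group of identical keys, counting each side */
		for (; i < n && in[i].key == key; i++)
		{
			if (in[i].flag == 0)
				numLeft++;
			else
				numRight++;
		}
		int		emit = numLeft < numRight ? numLeft : numRight;
		for (int k = 0; k < emit; k++)
			printf("%d\n", key);	/* key 1 once, key 3 once */
	}
	return 0;
}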
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index d75d9a6f052..a8df4940ae4 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.29 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubplan.c,v 1.30 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
{
HeapTuple tup = slot->val;
TupleDesc tdesc = slot->ttc_tupleDescriptor;
- Datum rowresult = BoolGetDatum(! useor);
+ Datum rowresult = BoolGetDatum(!useor);
bool rownull = false;
int col = 1;
@@ -213,7 +213,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
/* combine within row per AND semantics */
if (expnull)
rownull = true;
- else if (! DatumGetBool(expresult))
+ else if (!DatumGetBool(expresult))
{
rowresult = BoolGetDatum(false);
rownull = false;
@@ -240,7 +240,7 @@ ExecSubPlan(SubPlan *node, List *pvar, ExprContext *econtext, bool *isNull)
/* combine across rows per AND semantics */
if (rownull)
*isNull = true;
- else if (! DatumGetBool(rowresult))
+ else if (!DatumGetBool(rowresult))
{
result = BoolGetDatum(false);
*isNull = false;
@@ -332,7 +332,7 @@ ExecInitSubPlan(SubPlan *node, EState *estate, Plan *parent)
*
* This is called from ExecEvalParam() when the value of a PARAM_EXEC
* parameter is requested and the param's execPlan field is set (indicating
- * that the param has not yet been evaluated). This allows lazy evaluation
+ * that the param has not yet been evaluated). This allows lazy evaluation
* of initplans: we don't run the subplan until/unless we need its output.
* Note that this routine MUST clear the execPlan fields of the plan's
* output parameters after evaluating them!
diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c
index 4c9144bc3a8..9b8711c9914 100644
--- a/src/backend/executor/nodeSubqueryscan.c
+++ b/src/backend/executor/nodeSubqueryscan.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.4 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.5 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,8 +67,8 @@ SubqueryNext(SubqueryScan *node)
/*
* Check if we are evaluating PlanQual for tuple of this relation.
* Additional checking is not good, but no other way for now. We could
- * introduce new nodes for this case and handle SubqueryScan --> NewNode
- * switching in Init/ReScan plan...
+ * introduce new nodes for this case and handle SubqueryScan -->
+ * NewNode switching in Init/ReScan plan...
*/
if (estate->es_evTuple != NULL &&
estate->es_evTuple[node->scan.scanrelid - 1] != NULL)
@@ -202,6 +202,7 @@ ExecInitSubqueryScan(SubqueryScan *node, EState *estate, Plan *parent)
int
ExecCountSlotsSubqueryScan(SubqueryScan *node)
{
+
/*
* The subplan has its own tuple table and must not be counted here!
*/
diff --git a/src/backend/executor/nodeTidscan.c b/src/backend/executor/nodeTidscan.c
index a5c0299d289..04c9efc4b0a 100644
--- a/src/backend/executor/nodeTidscan.c
+++ b/src/backend/executor/nodeTidscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.14 2001/01/29 00:39:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeTidscan.c,v 1.15 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -371,6 +371,7 @@ ExecTidRestrPos(TidScan *node)
tidstate = node->tidstate;
tidstate->tss_TidPtr = tidstate->tss_MarkTidPtr;
}
+
#endif
/* ----------------------------------------------------------------
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 4c4da6c3034..4aa8c475c30 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -3,17 +3,17 @@
* spi.c
* Server Programming Interface
*
- * $Id: spi.c,v 1.52 2001/02/19 19:49:52 tgl Exp $
+ * $Id: spi.c,v 1.53 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "executor/spi_priv.h"
#include "access/printtup.h"
-uint32 SPI_processed = 0;
-Oid SPI_lastoid = InvalidOid;
+uint32 SPI_processed = 0;
+Oid SPI_lastoid = InvalidOid;
SPITupleTable *SPI_tuptable = NULL;
-int SPI_result;
+int SPI_result;
static _SPI_connection *_SPI_stack = NULL;
static _SPI_connection *_SPI_current = NULL;
@@ -46,6 +46,7 @@ extern void ShowUsage(void);
int
SPI_connect()
{
+
/*
* When procedure called by Executor _SPI_curid expected to be equal
* to _SPI_connected
@@ -80,14 +81,14 @@ SPI_connect()
/* Create memory contexts for this procedure */
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Proc",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
_SPI_current->execCxt = AllocSetContextCreate(TopTransactionContext,
"SPI Exec",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
/* ... and switch to procedure's context */
_SPI_current->savedcxt = MemoryContextSwitchTo(_SPI_current->procCxt);
@@ -146,6 +147,7 @@ SPI_finish()
void
AtEOXact_SPI(void)
{
+
/*
* Note that memory contexts belonging to SPI stack entries will be
* freed automatically, so we can ignore them here. We just need to
@@ -425,8 +427,8 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
}
/*
- * If we have a toasted datum, forcibly detoast it here to avoid memory
- * leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (typisvarlena)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
@@ -436,7 +438,7 @@ SPI_getvalue(HeapTuple tuple, TupleDesc tupdesc, int fnumber)
result = OidFunctionCall3(foutoid,
val,
ObjectIdGetDatum(typelem),
- Int32GetDatum(tupdesc->attrs[fnumber - 1]->atttypmod));
+ Int32GetDatum(tupdesc->attrs[fnumber - 1]->atttypmod));
/* Clean up detoasted copy, if any */
if (val != origval)
@@ -833,14 +835,13 @@ _SPI_pquery(QueryDesc *queryDesc, EState *state, int tcount)
#endif
tupdesc = ExecutorStart(queryDesc, state);
- /* Don't work currently --- need to rearrange callers so that
- * we prepare the portal before doing CreateExecutorState() etc.
- * See pquery.c for the correct order of operations.
+ /*
+ * Don't work currently --- need to rearrange callers so that we
+ * prepare the portal before doing CreateExecutorState() etc. See
+ * pquery.c for the correct order of operations.
*/
if (isRetrieveIntoPortal)
- {
elog(FATAL, "SPI_select: retrieve into portal not implemented");
- }
ExecutorRun(queryDesc, state, EXEC_FOR, (long) tcount);
@@ -901,9 +902,7 @@ _SPI_begin_call(bool execmem)
elog(FATAL, "SPI: stack corrupted");
if (execmem) /* switch to the Executor memory context */
- {
_SPI_execmem();
- }
return 0;
}
diff --git a/src/backend/lib/bit.c b/src/backend/lib/bit.c
index 1b54292a464..57da3522c46 100644
--- a/src/backend/lib/bit.c
+++ b/src/backend/lib/bit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/lib/Attic/bit.c,v 1.12 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/lib/Attic/bit.c,v 1.13 2001/03/22 03:59:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,14 +22,14 @@ void
BitArraySetBit(BitArray bitArray, BitIndex bitIndex)
{
bitArray[bitIndex / BITS_PER_BYTE] |=
- (1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
+ (1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
}
void
BitArrayClearBit(BitArray bitArray, BitIndex bitIndex)
{
bitArray[bitIndex / BITS_PER_BYTE] &=
- ~(1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
+ ~(1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)));
}
bool
@@ -37,5 +37,5 @@ BitArrayBitIsSet(BitArray bitArray, BitIndex bitIndex)
{
return ((bitArray[bitIndex / BITS_PER_BYTE] &
(1 << (BITS_PER_BYTE - 1 - (bitIndex % BITS_PER_BYTE)))
- ) != 0);
+ ) != 0);
}
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index d91fa9a8220..dcb702e9596 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.51 2001/01/24 19:42:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.52 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ static int map_old_to_new(Port *port, UserAuth old, int status);
static void auth_failed(Port *port);
-char * pg_krb_server_keyfile;
+char *pg_krb_server_keyfile;
#ifdef KRB4
@@ -177,7 +177,7 @@ pg_an_to_ln(char *aname)
* Various krb5 state which is not connection specific, and a flag to
* indicate whether we have initialised it yet.
*/
-static int pg_krb5_initialised;
+static int pg_krb5_initialised;
static krb5_context pg_krb5_context;
static krb5_keytab pg_krb5_keytab;
static krb5_principal pg_krb5_server;
@@ -192,7 +192,8 @@ pg_krb5_init(void)
return STATUS_OK;
retval = krb5_init_context(&pg_krb5_context);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_init: krb5_init_context returned"
" Kerberos error %d\n", retval);
@@ -201,23 +202,25 @@ pg_krb5_init(void)
}
retval = krb5_kt_resolve(pg_krb5_context, pg_krb_server_keyfile, &pg_krb5_keytab);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_init: krb5_kt_resolve returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval, "while resolving keytab file %s",
+ com_err("postgres", retval, "while resolving keytab file %s",
pg_krb_server_keyfile);
krb5_free_context(pg_krb5_context);
return STATUS_ERROR;
}
- retval = krb5_sname_to_principal(pg_krb5_context, NULL, PG_KRB_SRVNAM,
+ retval = krb5_sname_to_principal(pg_krb5_context, NULL, PG_KRB_SRVNAM,
KRB5_NT_SRV_HST, &pg_krb5_server);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_init: krb5_sname_to_principal returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval,
+ com_err("postgres", retval,
"while getting server principal for service %s",
pg_krb_server_keyfile);
krb5_kt_close(pg_krb5_context, pg_krb5_keytab);
@@ -245,25 +248,26 @@ static int
pg_krb5_recvauth(Port *port)
{
krb5_error_code retval;
- int ret;
+ int ret;
krb5_auth_context auth_context = NULL;
krb5_ticket *ticket;
- char *kusername;
+ char *kusername;
ret = pg_krb5_init();
if (ret != STATUS_OK)
return ret;
retval = krb5_recvauth(pg_krb5_context, &auth_context,
- (krb5_pointer)&port->sock, PG_KRB_SRVNAM,
+ (krb5_pointer) & port->sock, PG_KRB_SRVNAM,
pg_krb5_server, 0, pg_krb5_keytab, &ticket);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_recvauth: krb5_recvauth returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval, "from krb5_recvauth");
+ com_err("postgres", retval, "from krb5_recvauth");
return STATUS_ERROR;
- }
+ }
/*
* The "client" structure comes out of the ticket and is therefore
@@ -272,13 +276,14 @@ pg_krb5_recvauth(Port *port)
*
* I have no idea why this is considered necessary.
*/
- retval = krb5_unparse_name(pg_krb5_context,
+ retval = krb5_unparse_name(pg_krb5_context,
ticket->enc_part2->client, &kusername);
- if (retval) {
+ if (retval)
+ {
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
"pg_krb5_recvauth: krb5_unparse_name returned"
" Kerberos error %d\n", retval);
- com_err("postgres", retval, "while unparsing client name");
+ com_err("postgres", retval, "while unparsing client name");
krb5_free_ticket(pg_krb5_context, ticket);
krb5_auth_con_free(pg_krb5_context, auth_context);
return STATUS_ERROR;
@@ -288,13 +293,13 @@ pg_krb5_recvauth(Port *port)
if (strncmp(port->user, kusername, SM_USER))
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "pg_krb5_recvauth: user name \"%s\" != krb5 name \"%s\"\n",
+ "pg_krb5_recvauth: user name \"%s\" != krb5 name \"%s\"\n",
port->user, kusername);
ret = STATUS_ERROR;
}
else
ret = STATUS_OK;
-
+
krb5_free_ticket(pg_krb5_context, ticket);
krb5_auth_con_free(pg_krb5_context, auth_context);
free(kusername);
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 1d5870e93f4..4d50ee1ae12 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.57 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.58 2001/03/22 03:59:30 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -60,7 +60,7 @@
* entries, of which any unused entries will be NULL.
*/
static LargeObjectDesc **cookies = NULL;
-static int cookies_size = 0;
+static int cookies_size = 0;
static MemoryContext fscxt = NULL;
@@ -329,10 +329,10 @@ loread(PG_FUNCTION_ARGS)
Datum
lowrite(PG_FUNCTION_ARGS)
{
- int32 fd = PG_GETARG_INT32(0);
+ int32 fd = PG_GETARG_INT32(0);
struct varlena *wbuf = PG_GETARG_VARLENA_P(1);
- int bytestowrite;
- int totalwritten;
+ int bytestowrite;
+ int totalwritten;
bytestowrite = VARSIZE(wbuf) - VARHDRSZ;
totalwritten = lo_write(fd, VARDATA(wbuf), bytestowrite);
@@ -371,7 +371,7 @@ lo_import(PG_FUNCTION_ARGS)
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
- nbytes = MAXPGPATH-1;
+ nbytes = MAXPGPATH - 1;
memcpy(fnamebuf, VARDATA(filename), nbytes);
fnamebuf[nbytes] = '\0';
fd = PathNameOpenFile(fnamebuf, O_RDONLY | PG_BINARY, 0666);
@@ -445,7 +445,7 @@ lo_export(PG_FUNCTION_ARGS)
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
- nbytes = MAXPGPATH-1;
+ nbytes = MAXPGPATH - 1;
memcpy(fnamebuf, VARDATA(filename), nbytes);
fnamebuf[nbytes] = '\0';
oumask = umask((mode_t) 0022);
diff --git a/src/backend/libpq/crypt.c b/src/backend/libpq/crypt.c
index 325056ab1ef..59aef0d514d 100644
--- a/src/backend/libpq/crypt.c
+++ b/src/backend/libpq/crypt.c
@@ -9,7 +9,7 @@
* Dec 17, 1997 - Todd A. Brandys
* Original Version Completed.
*
- * $Id: crypt.c,v 1.30 2001/02/07 23:31:38 tgl Exp $
+ * $Id: crypt.c,v 1.31 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -295,7 +295,7 @@ crypt_verify(const Port *port, const char *user, const char *pgpass)
vuntil = INVALID_ABSTIME;
else
vuntil = DatumGetAbsoluteTime(DirectFunctionCall1(nabstimein,
- CStringGetDatum(valuntil)));
+ CStringGetDatum(valuntil)));
current = GetCurrentAbsoluteTime();
if (vuntil != INVALID_ABSTIME && vuntil < current)
retval = STATUS_ERROR;
diff --git a/src/backend/libpq/password.c b/src/backend/libpq/password.c
index 856c3028800..77b09be18a4 100644
--- a/src/backend/libpq/password.c
+++ b/src/backend/libpq/password.c
@@ -2,7 +2,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: password.c,v 1.35 2001/01/24 19:42:56 momjian Exp $
+ * $Id: password.c,v 1.36 2001/03/22 03:59:30 momjian Exp $
*
*/
@@ -37,7 +37,7 @@ verify_password(const Port *port, const char *user, const char *password)
if (!pw_file)
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "verify_password: Unable to open password file \"%s\": %s\n",
+ "verify_password: Unable to open password file \"%s\": %s\n",
pw_file_fullname, strerror(errno));
fputs(PQerrormsg, stderr);
pqdebug("%s", PQerrormsg);
@@ -77,12 +77,12 @@ verify_password(const Port *port, const char *user, const char *password)
/*
* If the password is empty or "+" then we use the regular
- * pg_shadow passwords. If we use crypt then we have to
- * use pg_shadow passwords no matter what.
+ * pg_shadow passwords. If we use crypt then we have to use
+ * pg_shadow passwords no matter what.
*/
if (port->auth_method == uaCrypt
|| test_pw == NULL || test_pw[0] == '\0'
- || strcmp(test_pw, "+")==0)
+ || strcmp(test_pw, "+") == 0)
return crypt_verify(port, user, password);
if (strcmp(crypt(password, test_pw), test_pw) == 0)
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 7a20d66f7e1..e3250862363 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -29,7 +29,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: pqcomm.c,v 1.116 2001/01/24 19:42:56 momjian Exp $
+ * $Id: pqcomm.c,v 1.117 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,7 +71,7 @@
#include <netdb.h>
#include <netinet/in.h>
#ifdef HAVE_NETINET_TCP_H
-# include <netinet/tcp.h>
+#include <netinet/tcp.h>
#endif
#include <arpa/inet.h>
#include <sys/file.h>
@@ -91,8 +91,8 @@ static void pq_close(void);
/*
* Configuration options
*/
-int Unix_socket_permissions;
-char * Unix_socket_group;
+int Unix_socket_permissions;
+char *Unix_socket_group;
/*
@@ -223,47 +223,49 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
UNIXSOCK_PATH(saddr.un, portNumber, unixSocketName);
len = UNIXSOCK_LEN(saddr.un);
strcpy(sock_path, saddr.un.sun_path);
+
/*
* Grab an interlock file associated with the socket file.
*/
- if (! CreateSocketLockFile(sock_path, true))
+ if (!CreateSocketLockFile(sock_path, true))
return STATUS_ERROR;
+
/*
- * Once we have the interlock, we can safely delete any pre-existing
- * socket file to avoid failure at bind() time.
+ * Once we have the interlock, we can safely delete any
+ * pre-existing socket file to avoid failure at bind() time.
*/
unlink(sock_path);
}
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
- if (family == AF_INET)
- {
+ if (family == AF_INET)
+ {
/* TCP/IP socket */
if (hostName[0] == '\0')
- saddr.in.sin_addr.s_addr = htonl(INADDR_ANY);
+ saddr.in.sin_addr.s_addr = htonl(INADDR_ANY);
else
- {
+ {
struct hostent *hp;
-
+
hp = gethostbyname(hostName);
if ((hp == NULL) || (hp->h_addrtype != AF_INET))
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
- "FATAL: StreamServerPort: gethostbyname(%s) failed\n",
- hostName);
- fputs(PQerrormsg, stderr);
- pqdebug("%s", PQerrormsg);
+ "FATAL: StreamServerPort: gethostbyname(%s) failed\n",
+ hostName);
+ fputs(PQerrormsg, stderr);
+ pqdebug("%s", PQerrormsg);
return STATUS_ERROR;
}
memmove((char *) &(saddr.in.sin_addr), (char *) hp->h_addr,
hp->h_length);
}
-
+
saddr.in.sin_port = htons(portNumber);
len = sizeof(struct sockaddr_in);
}
- err = bind(fd, (struct sockaddr *)&saddr.sa, len);
+ err = bind(fd, (struct sockaddr *) & saddr.sa, len);
if (err < 0)
{
snprintf(PQerrormsg, PQERRORMSG_LENGTH,
@@ -291,16 +293,16 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
on_proc_exit(StreamDoUnlink, 0);
/*
- * Fix socket ownership/permission if requested. Note we must
- * do this before we listen() to avoid a window where unwanted
+ * Fix socket ownership/permission if requested. Note we must do
+ * this before we listen() to avoid a window where unwanted
* connections could get accepted.
*/
Assert(Unix_socket_group);
if (Unix_socket_group[0] != '\0')
{
- char *endptr;
+ char *endptr;
unsigned long int val;
- gid_t gid;
+ gid_t gid;
val = strtoul(Unix_socket_group, &endptr, 10);
if (*endptr == '\0')
@@ -346,7 +348,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber,
return STATUS_ERROR;
}
}
-#endif /* HAVE_UNIX_SOCKETS */
+#endif /* HAVE_UNIX_SOCKETS */
listen(fd, SOMAXCONN);
@@ -385,9 +387,10 @@ StreamConnection(int server_fd, Port *port)
}
#ifdef SCO_ACCEPT_BUG
+
/*
- * UnixWare 7+ and OpenServer 5.0.4 are known to have this bug,
- * but it shouldn't hurt it catch if for all of them.
+ * UnixWare 7+ and OpenServer 5.0.4 are known to have this bug, but it
+ * shouldn't hurt to catch it for all of them.
*/
if (port->raddr.sa.sa_family == 0)
port->raddr.sa.sa_family = AF_UNIX;
diff --git a/src/backend/libpq/pqpacket.c b/src/backend/libpq/pqpacket.c
index 5e99b148f8e..5f9d3cdb48f 100644
--- a/src/backend/libpq/pqpacket.c
+++ b/src/backend/libpq/pqpacket.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.28 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.29 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,8 @@ PacketReceiveFragment(Port *port)
#ifndef __BEOS__
got = read(port->sock, pkt->ptr, pkt->nrtodo);
#else
- got = recv(port->sock, pkt->ptr, pkt->nrtodo, 0);
-#endif /* __BEOS__ */
+ got = recv(port->sock, pkt->ptr, pkt->nrtodo, 0);
+#endif /* __BEOS__ */
if (got > 0)
{
pkt->nrtodo -= got;
diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c
index 668d5f996dd..8cc8f140ace 100644
--- a/src/backend/libpq/pqsignal.c
+++ b/src/backend/libpq/pqsignal.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/pqsignal.c,v 1.19 2001/02/10 02:31:26 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/pqsignal.c,v 1.20 2001/03/22 03:59:30 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
@@ -61,10 +61,11 @@ pqinitmask(void)
#ifdef HAVE_SIGPROCMASK
sigemptyset(&UnBlockSig);
sigfillset(&BlockSig);
+
/*
- * Unmark those signals that should never be blocked.
- * Some of these signal names don't exist on all platforms. Most do,
- * but might as well ifdef them all for consistency...
+ * Unmark those signals that should never be blocked. Some of these
+ * signal names don't exist on all platforms. Most do, but might as
+ * well ifdef them all for consistency...
*/
#ifdef SIGTRAP
sigdelset(&BlockSig, SIGTRAP);
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index b8c6cffdf64..46e03d8e0ab 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.41 2001/02/06 17:00:01 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.42 2001/03/22 03:59:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -53,17 +53,17 @@ main(int argc, char *argv[])
{
int len;
struct passwd *pw;
- char * pw_name_persist;
+ char *pw_name_persist;
/*
- * Place platform-specific startup hacks here. This is the right
- * place to put code that must be executed early in launch of either
- * a postmaster, a standalone backend, or a standalone bootstrap run.
+ * Place platform-specific startup hacks here. This is the right
+ * place to put code that must be executed early in launch of either a
+ * postmaster, a standalone backend, or a standalone bootstrap run.
* Note that this code will NOT be executed when a backend or
* sub-bootstrap run is forked by the postmaster.
*
- * XXX The need for code here is proof that the platform in question
- * is too brain-dead to provide a standard C execution environment
+ * XXX The need for code here is proof that the platform in question is
+ * too brain-dead to provide a standard C execution environment
* without help. Avoid adding more here, if you can.
*/
@@ -76,7 +76,7 @@ main(int argc, char *argv[])
int buffer[] = {SSIN_UACPROC, UAC_NOPRINT};
#endif /* NOPRINTADE */
-#endif /* __alpha */
+#endif /* __alpha */
#if defined(NOFIXADE) || defined(NOPRINTADE)
@@ -93,38 +93,39 @@ main(int argc, char *argv[])
#endif /* NOFIXADE || NOPRINTADE */
#ifdef __BEOS__
- /* BeOS-specific actions on startup */
- beos_startup(argc,argv);
+ /* BeOS-specific actions on startup */
+ beos_startup(argc, argv);
#endif
/*
- * Not-quite-so-platform-specific startup environment checks.
- * Still best to minimize these.
+ * Not-quite-so-platform-specific startup environment checks. Still
+ * best to minimize these.
*/
/*
* Make sure we are not running as root.
*
- * BeOS currently runs everything as root :-(, so this check must
- * be temporarily disabled there...
- */
+ * BeOS currently runs everything as root :-(, so this check must be
+ * temporarily disabled there...
+ */
#ifndef __BEOS__
if (!(argc > 1
- && ( strcmp(argv[1], "--help")==0 || strcmp(argv[1], "-?")==0
- || strcmp(argv[1], "--version")==0 || strcmp(argv[1], "-V")==0 ))
- && (geteuid() == 0) )
+ && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0
+ || strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0))
+ && (geteuid() == 0))
{
fprintf(stderr, "%s", NOROOTEXEC);
exit(1);
}
-#endif /* __BEOS__ */
+#endif /* __BEOS__ */
/*
* Set up locale information from environment, in only the categories
* needed by Postgres; leave other categories set to default "C".
- * (Note that CTYPE and COLLATE will be overridden later from pg_control
- * if we are in an already-initialized database. We set them here so
- * that they will be available to fill pg_control during initdb.)
+ * (Note that CTYPE and COLLATE will be overridden later from
+ * pg_control if we are in an already-initialized database. We set
+ * them here so that they will be available to fill pg_control during
+ * initdb.)
*/
#ifdef USE_LOCALE
setlocale(LC_CTYPE, "");
@@ -133,9 +134,10 @@ main(int argc, char *argv[])
#endif
/*
- * Now dispatch to one of PostmasterMain, PostgresMain, or BootstrapMain
- * depending on the program name (and possibly first argument) we
- * were called with. The lack of consistency here is historical.
+ * Now dispatch to one of PostmasterMain, PostgresMain, or
+ * BootstrapMain depending on the program name (and possibly first
+ * argument) we were called with. The lack of consistency here is
+ * historical.
*/
len = strlen(argv[0]);
@@ -146,15 +148,16 @@ main(int argc, char *argv[])
}
/*
- * If the first argument is "-boot", then invoke bootstrap mode.
- * Note we remove "-boot" from the arguments passed on to BootstrapMain.
+ * If the first argument is "-boot", then invoke bootstrap mode. Note
+ * we remove "-boot" from the arguments passed on to BootstrapMain.
*/
if (argc > 1 && strcmp(argv[1], "-boot") == 0)
exit(BootstrapMain(argc - 1, argv + 1));
/*
* Otherwise we're a standalone backend. Invoke PostgresMain,
- * specifying current userid as the "authenticated" Postgres user name.
+ * specifying current userid as the "authenticated" Postgres user
+ * name.
*/
pw = getpwuid(geteuid());
if (pw == NULL)
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index f7127443082..3dc2bf0373a 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -6,7 +6,7 @@
* NOTE: a general convention when copying or comparing plan nodes is
* that we ignore the executor state subnode. We do not need to look
* at it because no current uses of copyObject() or equal() need to
- * deal with already-executing plan trees. By leaving the state subnodes
+ * deal with already-executing plan trees. By leaving the state subnodes
* out, we avoid needing to write copy/compare routines for all the
* different executor state node types.
*
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.138 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.139 2001/03/22 03:59:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -305,7 +305,7 @@ _copyTidScan(TidScan *from)
static SubqueryScan *
_copySubqueryScan(SubqueryScan *from)
{
- SubqueryScan *newnode = makeNode(SubqueryScan);
+ SubqueryScan *newnode = makeNode(SubqueryScan);
/* ----------------
* copy node superclass fields
@@ -339,7 +339,7 @@ CopyJoinFields(Join *from, Join *newnode)
/* subPlan list must point to subplans in the new subtree, not the old */
if (from->plan.subPlan != NIL)
newnode->plan.subPlan = nconc(newnode->plan.subPlan,
- pull_subplans((Node *) newnode->joinqual));
+ pull_subplans((Node *) newnode->joinqual));
}
@@ -991,7 +991,7 @@ _copyRangeTblRef(RangeTblRef *from)
static FromExpr *
_copyFromExpr(FromExpr *from)
{
- FromExpr *newnode = makeNode(FromExpr);
+ FromExpr *newnode = makeNode(FromExpr);
Node_Copy(from, newnode, fromlist);
Node_Copy(from, newnode, quals);
@@ -1002,7 +1002,7 @@ _copyFromExpr(FromExpr *from)
static JoinExpr *
_copyJoinExpr(JoinExpr *from)
{
- JoinExpr *newnode = makeNode(JoinExpr);
+ JoinExpr *newnode = makeNode(JoinExpr);
newnode->jointype = from->jointype;
newnode->isNatural = from->isNatural;
@@ -1281,7 +1281,7 @@ _copyTidPath(TidPath *from)
static AppendPath *
_copyAppendPath(AppendPath *from)
{
- AppendPath *newnode = makeNode(AppendPath);
+ AppendPath *newnode = makeNode(AppendPath);
/* ----------------
* copy the node superclass fields
@@ -1424,7 +1424,11 @@ _copyRestrictInfo(RestrictInfo *from)
newnode->mergejoinoperator = from->mergejoinoperator;
newnode->left_sortop = from->left_sortop;
newnode->right_sortop = from->right_sortop;
- /* Do not copy pathkeys, since they'd not be canonical in a copied query */
+
+ /*
+ * Do not copy pathkeys, since they'd not be canonical in a copied
+ * query
+ */
newnode->left_pathkey = NIL;
newnode->right_pathkey = NIL;
newnode->hashjoinoperator = from->hashjoinoperator;
@@ -1525,7 +1529,7 @@ _copyRangeTblEntry(RangeTblEntry *from)
static FkConstraint *
_copyFkConstraint(FkConstraint *from)
{
- FkConstraint *newnode = makeNode(FkConstraint);
+ FkConstraint *newnode = makeNode(FkConstraint);
if (from->constr_name)
newnode->constr_name = pstrdup(from->constr_name);
@@ -1538,7 +1542,7 @@ _copyFkConstraint(FkConstraint *from)
newnode->actions = from->actions;
newnode->deferrable = from->deferrable;
newnode->initdeferred = from->initdeferred;
-
+
return newnode;
}
@@ -1556,7 +1560,7 @@ _copySortClause(SortClause *from)
static A_Expr *
_copyAExpr(A_Expr *from)
{
- A_Expr *newnode = makeNode(A_Expr);
+ A_Expr *newnode = makeNode(A_Expr);
newnode->oper = from->oper;
if (from->opname)
@@ -1593,7 +1597,7 @@ _copyParamNo(ParamNo *from)
static Ident *
_copyIdent(Ident *from)
{
- Ident *newnode = makeNode(Ident);
+ Ident *newnode = makeNode(Ident);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -1606,7 +1610,7 @@ _copyIdent(Ident *from)
static FuncCall *
_copyFuncCall(FuncCall *from)
{
- FuncCall *newnode = makeNode(FuncCall);
+ FuncCall *newnode = makeNode(FuncCall);
if (from->funcname)
newnode->funcname = pstrdup(from->funcname);
@@ -1620,7 +1624,7 @@ _copyFuncCall(FuncCall *from)
static A_Indices *
_copyAIndices(A_Indices *from)
{
- A_Indices *newnode = makeNode(A_Indices);
+ A_Indices *newnode = makeNode(A_Indices);
Node_Copy(from, newnode, lidx);
Node_Copy(from, newnode, uidx);
@@ -1631,7 +1635,7 @@ _copyAIndices(A_Indices *from)
static ResTarget *
_copyResTarget(ResTarget *from)
{
- ResTarget *newnode = makeNode(ResTarget);
+ ResTarget *newnode = makeNode(ResTarget);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -1659,7 +1663,7 @@ _copyTypeName(TypeName *from)
static SortGroupBy *
_copySortGroupBy(SortGroupBy *from)
{
- SortGroupBy *newnode = makeNode(SortGroupBy);
+ SortGroupBy *newnode = makeNode(SortGroupBy);
if (from->useOp)
newnode->useOp = pstrdup(from->useOp);
@@ -1684,7 +1688,7 @@ _copyRangeVar(RangeVar *from)
static RangeSubselect *
_copyRangeSubselect(RangeSubselect *from)
{
- RangeSubselect *newnode = makeNode(RangeSubselect);
+ RangeSubselect *newnode = makeNode(RangeSubselect);
Node_Copy(from, newnode, subquery);
Node_Copy(from, newnode, name);
@@ -1706,7 +1710,7 @@ _copyTypeCast(TypeCast *from)
static IndexElem *
_copyIndexElem(IndexElem *from)
{
- IndexElem *newnode = makeNode(IndexElem);
+ IndexElem *newnode = makeNode(IndexElem);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -1720,7 +1724,7 @@ _copyIndexElem(IndexElem *from)
static ColumnDef *
_copyColumnDef(ColumnDef *from)
{
- ColumnDef *newnode = makeNode(ColumnDef);
+ ColumnDef *newnode = makeNode(ColumnDef);
if (from->colname)
newnode->colname = pstrdup(from->colname);
@@ -1738,7 +1742,7 @@ _copyColumnDef(ColumnDef *from)
static Constraint *
_copyConstraint(Constraint *from)
{
- Constraint *newnode = makeNode(Constraint);
+ Constraint *newnode = makeNode(Constraint);
newnode->contype = from->contype;
if (from->name)
@@ -1754,7 +1758,7 @@ _copyConstraint(Constraint *from)
static DefElem *
_copyDefElem(DefElem *from)
{
- DefElem *newnode = makeNode(DefElem);
+ DefElem *newnode = makeNode(DefElem);
if (from->defname)
newnode->defname = pstrdup(from->defname);
@@ -1811,7 +1815,7 @@ static InsertStmt *
_copyInsertStmt(InsertStmt *from)
{
InsertStmt *newnode = makeNode(InsertStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, cols);
@@ -1825,7 +1829,7 @@ static DeleteStmt *
_copyDeleteStmt(DeleteStmt *from)
{
DeleteStmt *newnode = makeNode(DeleteStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, whereClause);
@@ -1838,7 +1842,7 @@ static UpdateStmt *
_copyUpdateStmt(UpdateStmt *from)
{
UpdateStmt *newnode = makeNode(UpdateStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, targetList);
@@ -1853,7 +1857,7 @@ static SelectStmt *
_copySelectStmt(SelectStmt *from)
{
SelectStmt *newnode = makeNode(SelectStmt);
-
+
Node_Copy(from, newnode, distinctClause);
if (from->into)
newnode->into = pstrdup(from->into);
@@ -1882,7 +1886,7 @@ static SetOperationStmt *
_copySetOperationStmt(SetOperationStmt *from)
{
SetOperationStmt *newnode = makeNode(SetOperationStmt);
-
+
newnode->op = from->op;
newnode->all = from->all;
Node_Copy(from, newnode, larg);
@@ -1896,7 +1900,7 @@ static AlterTableStmt *
_copyAlterTableStmt(AlterTableStmt *from)
{
AlterTableStmt *newnode = makeNode(AlterTableStmt);
-
+
newnode->subtype = from->subtype;
if (from->relname)
newnode->relname = pstrdup(from->relname);
@@ -1913,7 +1917,7 @@ static ChangeACLStmt *
_copyChangeACLStmt(ChangeACLStmt *from)
{
ChangeACLStmt *newnode = makeNode(ChangeACLStmt);
-
+
Node_Copy(from, newnode, relNames);
if (from->aclString)
newnode->aclString = pstrdup(from->aclString);
@@ -1936,7 +1940,7 @@ static ClusterStmt *
_copyClusterStmt(ClusterStmt *from)
{
ClusterStmt *newnode = makeNode(ClusterStmt);
-
+
if (from->relname)
newnode->relname = pstrdup(from->relname);
if (from->indexname)
@@ -1948,8 +1952,8 @@ _copyClusterStmt(ClusterStmt *from)
static CopyStmt *
_copyCopyStmt(CopyStmt *from)
{
- CopyStmt *newnode = makeNode(CopyStmt);
-
+ CopyStmt *newnode = makeNode(CopyStmt);
+
newnode->binary = from->binary;
if (from->relname)
newnode->relname = pstrdup(from->relname);
@@ -1969,7 +1973,7 @@ static CreateStmt *
_copyCreateStmt(CreateStmt *from)
{
CreateStmt *newnode = makeNode(CreateStmt);
-
+
newnode->istemp = from->istemp;
newnode->relname = pstrdup(from->relname);
Node_Copy(from, newnode, tableElts);
@@ -1983,7 +1987,7 @@ static VersionStmt *
_copyVersionStmt(VersionStmt *from)
{
VersionStmt *newnode = makeNode(VersionStmt);
-
+
newnode->relname = pstrdup(from->relname);
newnode->direction = from->direction;
newnode->fromRelname = pstrdup(from->fromRelname);
@@ -1996,7 +2000,7 @@ static DefineStmt *
_copyDefineStmt(DefineStmt *from)
{
DefineStmt *newnode = makeNode(DefineStmt);
-
+
newnode->defType = from->defType;
newnode->defname = pstrdup(from->defname);
Node_Copy(from, newnode, definition);
@@ -2007,8 +2011,8 @@ _copyDefineStmt(DefineStmt *from)
static DropStmt *
_copyDropStmt(DropStmt *from)
{
- DropStmt *newnode = makeNode(DropStmt);
-
+ DropStmt *newnode = makeNode(DropStmt);
+
Node_Copy(from, newnode, names);
newnode->removeType = from->removeType;
@@ -2029,11 +2033,11 @@ static CommentStmt *
_copyCommentStmt(CommentStmt *from)
{
CommentStmt *newnode = makeNode(CommentStmt);
-
+
newnode->objtype = from->objtype;
newnode->objname = pstrdup(from->objname);
if (from->objproperty)
- newnode->objproperty = pstrdup(from->objproperty);
+ newnode->objproperty = pstrdup(from->objproperty);
Node_Copy(from, newnode, objlist);
newnode->comment = pstrdup(from->comment);
@@ -2044,7 +2048,7 @@ static ExtendStmt *
_copyExtendStmt(ExtendStmt *from)
{
ExtendStmt *newnode = makeNode(ExtendStmt);
-
+
newnode->idxname = pstrdup(from->idxname);
Node_Copy(from, newnode, whereClause);
Node_Copy(from, newnode, rangetable);
@@ -2055,8 +2059,8 @@ _copyExtendStmt(ExtendStmt *from)
static FetchStmt *
_copyFetchStmt(FetchStmt *from)
{
- FetchStmt *newnode = makeNode(FetchStmt);
-
+ FetchStmt *newnode = makeNode(FetchStmt);
+
newnode->direction = from->direction;
newnode->howMany = from->howMany;
newnode->portalname = pstrdup(from->portalname);
@@ -2068,8 +2072,8 @@ _copyFetchStmt(FetchStmt *from)
static IndexStmt *
_copyIndexStmt(IndexStmt *from)
{
- IndexStmt *newnode = makeNode(IndexStmt);
-
+ IndexStmt *newnode = makeNode(IndexStmt);
+
newnode->idxname = pstrdup(from->idxname);
newnode->relname = pstrdup(from->relname);
newnode->accessMethod = pstrdup(from->accessMethod);
@@ -2087,7 +2091,7 @@ static ProcedureStmt *
_copyProcedureStmt(ProcedureStmt *from)
{
ProcedureStmt *newnode = makeNode(ProcedureStmt);
-
+
newnode->funcname = pstrdup(from->funcname);
Node_Copy(from, newnode, argTypes);
Node_Copy(from, newnode, returnType);
@@ -2102,7 +2106,7 @@ static RemoveAggrStmt *
_copyRemoveAggrStmt(RemoveAggrStmt *from)
{
RemoveAggrStmt *newnode = makeNode(RemoveAggrStmt);
-
+
newnode->aggname = pstrdup(from->aggname);
Node_Copy(from, newnode, aggtype);
@@ -2113,7 +2117,7 @@ static RemoveFuncStmt *
_copyRemoveFuncStmt(RemoveFuncStmt *from)
{
RemoveFuncStmt *newnode = makeNode(RemoveFuncStmt);
-
+
newnode->funcname = pstrdup(from->funcname);
Node_Copy(from, newnode, args);
@@ -2124,7 +2128,7 @@ static RemoveOperStmt *
_copyRemoveOperStmt(RemoveOperStmt *from)
{
RemoveOperStmt *newnode = makeNode(RemoveOperStmt);
-
+
newnode->opname = pstrdup(from->opname);
Node_Copy(from, newnode, args);
@@ -2135,7 +2139,7 @@ static RenameStmt *
_copyRenameStmt(RenameStmt *from)
{
RenameStmt *newnode = makeNode(RenameStmt);
-
+
newnode->relname = pstrdup(from->relname);
newnode->inhOpt = from->inhOpt;
if (from->column)
@@ -2149,8 +2153,8 @@ _copyRenameStmt(RenameStmt *from)
static RuleStmt *
_copyRuleStmt(RuleStmt *from)
{
- RuleStmt *newnode = makeNode(RuleStmt);
-
+ RuleStmt *newnode = makeNode(RuleStmt);
+
newnode->rulename = pstrdup(from->rulename);
Node_Copy(from, newnode, whereClause);
newnode->event = from->event;
@@ -2231,7 +2235,7 @@ _copyLoadStmt(LoadStmt *from)
static CreatedbStmt *
_copyCreatedbStmt(CreatedbStmt *from)
{
- CreatedbStmt *newnode = makeNode(CreatedbStmt);
+ CreatedbStmt *newnode = makeNode(CreatedbStmt);
if (from->dbname)
newnode->dbname = pstrdup(from->dbname);
@@ -2247,7 +2251,7 @@ _copyCreatedbStmt(CreatedbStmt *from)
static DropdbStmt *
_copyDropdbStmt(DropdbStmt *from)
{
- DropdbStmt *newnode = makeNode(DropdbStmt);
+ DropdbStmt *newnode = makeNode(DropdbStmt);
if (from->dbname)
newnode->dbname = pstrdup(from->dbname);
@@ -2258,7 +2262,7 @@ _copyDropdbStmt(DropdbStmt *from)
static VacuumStmt *
_copyVacuumStmt(VacuumStmt *from)
{
- VacuumStmt *newnode = makeNode(VacuumStmt);
+ VacuumStmt *newnode = makeNode(VacuumStmt);
newnode->verbose = from->verbose;
newnode->analyze = from->analyze;
@@ -2272,7 +2276,7 @@ _copyVacuumStmt(VacuumStmt *from)
static ExplainStmt *
_copyExplainStmt(ExplainStmt *from)
{
- ExplainStmt *newnode = makeNode(ExplainStmt);
+ ExplainStmt *newnode = makeNode(ExplainStmt);
Node_Copy(from, newnode, query);
newnode->verbose = from->verbose;
@@ -2283,7 +2287,7 @@ _copyExplainStmt(ExplainStmt *from)
static CreateSeqStmt *
_copyCreateSeqStmt(CreateSeqStmt *from)
{
- CreateSeqStmt *newnode = makeNode(CreateSeqStmt);
+ CreateSeqStmt *newnode = makeNode(CreateSeqStmt);
if (from->seqname)
newnode->seqname = pstrdup(from->seqname);
@@ -2346,6 +2350,7 @@ _copyCreateTrigStmt(CreateTrigStmt *from)
newnode->lang = pstrdup(from->lang);
if (from->text)
newnode->text = pstrdup(from->text);
+
Node_Copy(from, newnode, attr);
if (from->when)
newnode->when = pstrdup(from->when);
@@ -2459,7 +2464,7 @@ _copyLockStmt(LockStmt *from)
static ConstraintsSetStmt *
_copyConstraintsSetStmt(ConstraintsSetStmt *from)
{
- ConstraintsSetStmt *newnode = makeNode(ConstraintsSetStmt);
+ ConstraintsSetStmt *newnode = makeNode(ConstraintsSetStmt);
Node_Copy(from, newnode, constraints);
newnode->deferred = from->deferred;
@@ -2470,7 +2475,7 @@ _copyConstraintsSetStmt(ConstraintsSetStmt *from)
static CreateGroupStmt *
_copyCreateGroupStmt(CreateGroupStmt *from)
{
- CreateGroupStmt *newnode = makeNode(CreateGroupStmt);
+ CreateGroupStmt *newnode = makeNode(CreateGroupStmt);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -2483,7 +2488,7 @@ _copyCreateGroupStmt(CreateGroupStmt *from)
static AlterGroupStmt *
_copyAlterGroupStmt(AlterGroupStmt *from)
{
- AlterGroupStmt *newnode = makeNode(AlterGroupStmt);
+ AlterGroupStmt *newnode = makeNode(AlterGroupStmt);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -2497,7 +2502,7 @@ _copyAlterGroupStmt(AlterGroupStmt *from)
static DropGroupStmt *
_copyDropGroupStmt(DropGroupStmt *from)
{
- DropGroupStmt *newnode = makeNode(DropGroupStmt);
+ DropGroupStmt *newnode = makeNode(DropGroupStmt);
if (from->name)
newnode->name = pstrdup(from->name);
@@ -2508,7 +2513,7 @@ _copyDropGroupStmt(DropGroupStmt *from)
static ReindexStmt *
_copyReindexStmt(ReindexStmt *from)
{
- ReindexStmt *newnode = makeNode(ReindexStmt);
+ ReindexStmt *newnode = makeNode(ReindexStmt);
newnode->reindexType = from->reindexType;
if (from->name)
@@ -2919,7 +2924,7 @@ copyObject(void *from)
retval = _copyReindexStmt(from);
break;
case T_CheckPointStmt:
- retval = (void*)makeNode(CheckPointStmt);
+ retval = (void *) makeNode(CheckPointStmt);
break;
case T_A_Expr:
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9e16b79c6cc..06ee63bbacd 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -6,7 +6,7 @@
* NOTE: a general convention when copying or comparing plan nodes is
* that we ignore the executor state subnode. We do not need to look
* at it because no current uses of copyObject() or equal() need to
- * deal with already-executing plan trees. By leaving the state subnodes
+ * deal with already-executing plan trees. By leaving the state subnodes
* out, we avoid needing to write copy/compare routines for all the
* different executor state node types.
*
@@ -20,7 +20,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.87 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.88 2001/03/22 03:59:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
/* Macro for comparing string fields that might be NULL */
-#define equalstr(a, b) \
+#define equalstr(a, b) \
(((a) != NULL && (b) != NULL) ? (strcmp(a, b) == 0) : (a) == (b))
@@ -134,9 +134,9 @@ _equalOper(Oper *a, Oper *b)
return false;
/*
- * We do not examine opid or op_fcache, since these are
- * logically derived from opno, and they may not be set yet depending
- * on how far along the node is in the parse/plan pipeline.
+ * We do not examine opid or op_fcache, since these are logically
+ * derived from opno, and they may not be set yet depending on how far
+ * along the node is in the parse/plan pipeline.
*
* (Besides, op_fcache is executor state, which we don't check --- see
* notes at head of file.)
@@ -514,6 +514,7 @@ _equalRestrictInfo(RestrictInfo *a, RestrictInfo *b)
{
if (!equal(a->clause, b->clause))
return false;
+
/*
* ignore eval_cost, left/right_pathkey, and left/right_dispersion,
* since they may not be set yet, and should be derivable from the
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index aa83006c840..9b588150fda 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.38 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.39 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* XXX a few of the following functions are duplicated to handle
@@ -557,13 +557,12 @@ set_differencei(List *l1, List *l2)
List *
lreverse(List *l)
{
- List *result = NIL;
- List *i;
- foreach(i, l)
- {
- result = lcons(lfirst(i), result);
- }
- return result;
+ List *result = NIL;
+ List *i;
+
+ foreach(i, l)
+ result = lcons(lfirst(i), result);
+ return result;
}
/*
diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c
index f8bbb117291..d8f8310c5b7 100644
--- a/src/backend/nodes/makefuncs.c
+++ b/src/backend/nodes/makefuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.25 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.26 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* Creator functions in POSTGRES 4.2 are generated automatically. Most of
@@ -109,9 +109,10 @@ makeResdom(AttrNumber resno,
resdom->resname = resname;
/*
- * We always set the sorting/grouping fields to 0. If the caller wants
- * to change them he must do so explicitly. Few if any callers should
- * be doing that, so omitting these arguments reduces the chance of error.
+ * We always set the sorting/grouping fields to 0. If the caller
+ * wants to change them he must do so explicitly. Few if any callers
+ * should be doing that, so omitting these arguments reduces the
+ * chance of error.
*/
resdom->ressortgroupref = 0;
resdom->reskey = 0;
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index c8baef5fbfa..42dd9a2dab8 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/nodeFuncs.c,v 1.13 2001/01/24 19:42:56 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/nodeFuncs.c,v 1.14 2001/03/22 03:59:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -114,5 +114,5 @@ non_null(Expr *c)
else
return false;
}
-#endif
+#endif
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 97f931f5cc6..2c0cfed7ee4 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.139 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.140 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* Every (plan) node in POSTGRES has an associated "out" routine which
@@ -224,6 +224,7 @@ _outQuery(StringInfo str, Query *node)
if (node->utilityStmt)
{
+
/*
* Hack to make up for lack of outfuncs for utility-stmt nodes
*/
@@ -602,7 +603,7 @@ _outGroup(StringInfo str, Group *node)
static void
_outUnique(StringInfo str, Unique *node)
{
- int i;
+ int i;
appendStringInfo(str, " UNIQUE ");
_outPlanInfo(str, (Plan *) node);
@@ -616,7 +617,7 @@ _outUnique(StringInfo str, Unique *node)
static void
_outSetOp(StringInfo str, SetOp *node)
{
- int i;
+ int i;
appendStringInfo(str, " SETOP ");
_outPlanInfo(str, (Plan *) node);
@@ -889,7 +890,7 @@ _outFieldSelect(StringInfo str, FieldSelect *node)
_outNode(str, node->arg);
appendStringInfo(str, " :fieldnum %d :resulttype %u :resulttypmod %d ",
- node->fieldnum, node->resulttype, node->resulttypmod);
+ node->fieldnum, node->resulttype, node->resulttypmod);
}
/*
@@ -1279,14 +1280,14 @@ static void
_outStream(StringInfo str, Stream *node)
{
appendStringInfo(str,
- " STREAM :pathptr @ %p :cinfo @ %p :clausetype %p :upstream @ %p ",
+ " STREAM :pathptr @ %p :cinfo @ %p :clausetype %p :upstream @ %p ",
node->pathptr,
node->cinfo,
node->clausetype,
node->upstream);
appendStringInfo(str,
- " :downstream @ %p :groupup %d :groupcost %f :groupsel %f ",
+ " :downstream @ %p :groupup %d :groupcost %f :groupsel %f ",
node->downstream,
node->groupup,
node->groupcost,
@@ -1332,7 +1333,7 @@ _outValue(StringInfo str, Value *value)
{
switch (value->type)
{
- case T_Integer:
+ case T_Integer:
appendStringInfo(str, " %ld ", value->val.ival);
break;
case T_Float:
diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c
index 4f57a365c45..932f55ab885 100644
--- a/src/backend/nodes/print.c
+++ b/src/backend/nodes/print.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.46 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.47 2001/03/22 03:59:32 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -290,7 +290,7 @@ plannode_type(Plan *p)
{
switch (nodeTag(p))
{
- case T_Plan:
+ case T_Plan:
return "PLAN";
case T_Result:
return "RESULT";
diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c
index beebe262f9f..1f41cc85718 100644
--- a/src/backend/nodes/read.c
+++ b/src/backend/nodes/read.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.28 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.29 2001/03/22 03:59:32 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -41,9 +41,9 @@ stringToNode(char *str)
void *retval;
/*
- * We save and restore the pre-existing state of pg_strtok.
- * This makes the world safe for re-entrant invocation of stringToNode,
- * without incurring a lot of notational overhead by having to pass the
+ * We save and restore the pre-existing state of pg_strtok. This makes
+ * the world safe for re-entrant invocation of stringToNode, without
+ * incurring a lot of notational overhead by having to pass the
* next-character pointer around through all the readfuncs.c code.
*/
save_strtok = pg_strtok_ptr;
@@ -213,7 +213,7 @@ nodeTokenType(char *token, int length)
if (*numptr == '+' || *numptr == '-')
numptr++, numlen--;
if ((numlen > 0 && isdigit((unsigned char) *numptr)) ||
- (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
+ (numlen > 1 && *numptr == '.' && isdigit((unsigned char) numptr[1])))
{
/*
@@ -357,14 +357,15 @@ nodeRead(bool read_car_only)
make_dotted_pair_cell = true;
break;
case T_BitString:
- {
- char * val = palloc(tok_len);
- /* skip leading 'b'*/
- strncpy(val, token + 1, tok_len - 1);
- val[tok_len - 1] = '\0';
- this_value = (Node *) makeBitString(val);
- break;
- }
+ {
+ char *val = palloc(tok_len);
+
+ /* skip leading 'b' */
+ strncpy(val, token + 1, tok_len - 1);
+ val[tok_len - 1] = '\0';
+ this_value = (Node *) makeBitString(val);
+ break;
+ }
default:
elog(ERROR, "nodeRead: Bad type %d", type);
this_value = NULL; /* keep compiler happy */
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index a6eb9de4dce..9a071e7a250 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.106 2001/02/12 21:03:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.107 2001/03/22 03:59:32 momjian Exp $
*
* NOTES
* Most of the read functions for plan nodes are tested. (In fact, they
@@ -35,7 +35,7 @@
/*
* NOTE: use atoi() to read values written with %d, or atoui() to read
* values written with %u in outfuncs.c. An exception is OID values,
- * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
+ * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u,
* but this will probably change in the future.)
*/
#define atoui(x) ((unsigned int) strtoul((x), NULL, 10))
@@ -64,7 +64,7 @@ toIntList(List *list)
foreach(l, list)
{
- Value *v = (Value *) lfirst(l);
+ Value *v = (Value *) lfirst(l);
if (!IsA(v, Integer))
elog(ERROR, "toIntList: unexpected datatype");
@@ -82,12 +82,12 @@ toOidList(List *list)
foreach(l, list)
{
- Value *v = (Value *) lfirst(l);
+ Value *v = (Value *) lfirst(l);
/*
* This is a bit tricky because OID is unsigned, and so nodeRead
- * might have concluded the value doesn't fit in an integer.
- * Must cope with T_Float as well.
+ * might have concluded the value doesn't fit in an integer. Must
+ * cope with T_Float as well.
*/
if (IsA(v, Integer))
{
@@ -119,16 +119,17 @@ _readQuery(void)
local_node = makeNode(Query);
- token = pg_strtok(&length); /* skip the :command */
- token = pg_strtok(&length); /* get the commandType */
+ token = pg_strtok(&length); /* skip the :command */
+ token = pg_strtok(&length); /* get the commandType */
local_node->commandType = atoi(token);
- token = pg_strtok(&length); /* skip :utility */
+ token = pg_strtok(&length); /* skip :utility */
token = pg_strtok(&length);
if (length == 0)
local_node->utilityStmt = NULL;
else
{
+
/*
* Hack to make up for lack of readfuncs for utility-stmt nodes
*
@@ -140,68 +141,68 @@ _readQuery(void)
local_node->utilityStmt = (Node *) n;
}
- token = pg_strtok(&length); /* skip the :resultRelation */
- token = pg_strtok(&length); /* get the resultRelation */
+ token = pg_strtok(&length); /* skip the :resultRelation */
+ token = pg_strtok(&length); /* get the resultRelation */
local_node->resultRelation = atoi(token);
- token = pg_strtok(&length); /* skip :into */
- token = pg_strtok(&length); /* get into */
+ token = pg_strtok(&length); /* skip :into */
+ token = pg_strtok(&length); /* get into */
local_node->into = nullable_string(token, length);
- token = pg_strtok(&length); /* skip :isPortal */
- token = pg_strtok(&length); /* get isPortal */
+ token = pg_strtok(&length); /* skip :isPortal */
+ token = pg_strtok(&length); /* get isPortal */
local_node->isPortal = strtobool(token);
- token = pg_strtok(&length); /* skip :isBinary */
- token = pg_strtok(&length); /* get isBinary */
+ token = pg_strtok(&length); /* skip :isBinary */
+ token = pg_strtok(&length); /* get isBinary */
local_node->isBinary = strtobool(token);
- token = pg_strtok(&length); /* skip :isTemp */
- token = pg_strtok(&length); /* get isTemp */
+ token = pg_strtok(&length); /* skip :isTemp */
+ token = pg_strtok(&length); /* get isTemp */
local_node->isTemp = strtobool(token);
- token = pg_strtok(&length); /* skip the :hasAggs */
- token = pg_strtok(&length); /* get hasAggs */
+ token = pg_strtok(&length); /* skip the :hasAggs */
+ token = pg_strtok(&length); /* get hasAggs */
local_node->hasAggs = strtobool(token);
- token = pg_strtok(&length); /* skip the :hasSubLinks */
- token = pg_strtok(&length); /* get hasSubLinks */
+ token = pg_strtok(&length); /* skip the :hasSubLinks */
+ token = pg_strtok(&length); /* get hasSubLinks */
local_node->hasSubLinks = strtobool(token);
- token = pg_strtok(&length); /* skip :rtable */
+ token = pg_strtok(&length); /* skip :rtable */
local_node->rtable = nodeRead(true);
- token = pg_strtok(&length); /* skip :jointree */
+ token = pg_strtok(&length); /* skip :jointree */
local_node->jointree = nodeRead(true);
- token = pg_strtok(&length); /* skip :rowMarks */
+ token = pg_strtok(&length); /* skip :rowMarks */
local_node->rowMarks = toIntList(nodeRead(true));
- token = pg_strtok(&length); /* skip :targetlist */
+ token = pg_strtok(&length); /* skip :targetlist */
local_node->targetList = nodeRead(true);
- token = pg_strtok(&length); /* skip :groupClause */
+ token = pg_strtok(&length); /* skip :groupClause */
local_node->groupClause = nodeRead(true);
- token = pg_strtok(&length); /* skip :havingQual */
+ token = pg_strtok(&length); /* skip :havingQual */
local_node->havingQual = nodeRead(true);
- token = pg_strtok(&length); /* skip :distinctClause */
+ token = pg_strtok(&length); /* skip :distinctClause */
local_node->distinctClause = nodeRead(true);
- token = pg_strtok(&length); /* skip :sortClause */
+ token = pg_strtok(&length); /* skip :sortClause */
local_node->sortClause = nodeRead(true);
- token = pg_strtok(&length); /* skip :limitOffset */
+ token = pg_strtok(&length); /* skip :limitOffset */
local_node->limitOffset = nodeRead(true);
- token = pg_strtok(&length); /* skip :limitCount */
+ token = pg_strtok(&length); /* skip :limitCount */
local_node->limitCount = nodeRead(true);
- token = pg_strtok(&length); /* skip :setOperations */
+ token = pg_strtok(&length); /* skip :setOperations */
local_node->setOperations = nodeRead(true);
- token = pg_strtok(&length); /* skip :resultRelations */
+ token = pg_strtok(&length); /* skip :resultRelations */
local_node->resultRelations = toIntList(nodeRead(true));
return local_node;
@@ -220,12 +221,12 @@ _readSortClause(void)
local_node = makeNode(SortClause);
- token = pg_strtok(&length); /* skip :tleSortGroupRef */
- token = pg_strtok(&length); /* get tleSortGroupRef */
+ token = pg_strtok(&length); /* skip :tleSortGroupRef */
+ token = pg_strtok(&length); /* get tleSortGroupRef */
local_node->tleSortGroupRef = atoui(token);
- token = pg_strtok(&length); /* skip :sortop */
- token = pg_strtok(&length); /* get sortop */
+ token = pg_strtok(&length); /* skip :sortop */
+ token = pg_strtok(&length); /* get sortop */
local_node->sortop = atooid(token);
return local_node;
@@ -244,12 +245,12 @@ _readGroupClause(void)
local_node = makeNode(GroupClause);
- token = pg_strtok(&length); /* skip :tleSortGroupRef */
- token = pg_strtok(&length); /* get tleSortGroupRef */
+ token = pg_strtok(&length); /* skip :tleSortGroupRef */
+ token = pg_strtok(&length); /* get tleSortGroupRef */
local_node->tleSortGroupRef = atoui(token);
- token = pg_strtok(&length); /* skip :sortop */
- token = pg_strtok(&length); /* get sortop */
+ token = pg_strtok(&length); /* skip :sortop */
+ token = pg_strtok(&length); /* get sortop */
local_node->sortop = atooid(token);
return local_node;
@@ -268,21 +269,21 @@ _readSetOperationStmt(void)
local_node = makeNode(SetOperationStmt);
- token = pg_strtok(&length); /* eat :op */
- token = pg_strtok(&length); /* get op */
+ token = pg_strtok(&length); /* eat :op */
+ token = pg_strtok(&length); /* get op */
local_node->op = (SetOperation) atoi(token);
- token = pg_strtok(&length); /* eat :all */
- token = pg_strtok(&length); /* get all */
+ token = pg_strtok(&length); /* eat :all */
+ token = pg_strtok(&length); /* get all */
local_node->all = strtobool(token);
- token = pg_strtok(&length); /* eat :larg */
+ token = pg_strtok(&length); /* eat :larg */
local_node->larg = nodeRead(true); /* get larg */
- token = pg_strtok(&length); /* eat :rarg */
+ token = pg_strtok(&length); /* eat :rarg */
local_node->rarg = nodeRead(true); /* get rarg */
- token = pg_strtok(&length); /* eat :colTypes */
+ token = pg_strtok(&length); /* eat :colTypes */
local_node->colTypes = toOidList(nodeRead(true));
return local_node;
@@ -298,32 +299,32 @@ _getPlan(Plan *node)
char *token;
int length;
- token = pg_strtok(&length); /* first token is :startup_cost */
- token = pg_strtok(&length); /* next is the actual cost */
+ token = pg_strtok(&length); /* first token is :startup_cost */
+ token = pg_strtok(&length); /* next is the actual cost */
node->startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* skip the :total_cost */
- token = pg_strtok(&length); /* next is the actual cost */
+ token = pg_strtok(&length); /* skip the :total_cost */
+ token = pg_strtok(&length); /* next is the actual cost */
node->total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* skip the :rows */
- token = pg_strtok(&length); /* get the plan_rows */
+ token = pg_strtok(&length); /* skip the :rows */
+ token = pg_strtok(&length); /* get the plan_rows */
node->plan_rows = atof(token);
- token = pg_strtok(&length); /* skip the :width */
- token = pg_strtok(&length); /* get the plan_width */
+ token = pg_strtok(&length); /* skip the :width */
+ token = pg_strtok(&length); /* get the plan_width */
node->plan_width = atoi(token);
- token = pg_strtok(&length); /* eat :qptargetlist */
+ token = pg_strtok(&length); /* eat :qptargetlist */
node->targetlist = nodeRead(true);
- token = pg_strtok(&length); /* eat :qpqual */
+ token = pg_strtok(&length); /* eat :qpqual */
node->qual = nodeRead(true);
- token = pg_strtok(&length); /* eat :lefttree */
+ token = pg_strtok(&length); /* eat :lefttree */
node->lefttree = (Plan *) nodeRead(true);
- token = pg_strtok(&length); /* eat :righttree */
+ token = pg_strtok(&length); /* eat :righttree */
node->righttree = (Plan *) nodeRead(true);
node->state = (EState *) NULL; /* never read in */
@@ -366,7 +367,7 @@ _readResult(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :resconstantqual */
+ token = pg_strtok(&length); /* eat :resconstantqual */
local_node->resconstantqual = nodeRead(true); /* now read it */
return local_node;
@@ -390,11 +391,11 @@ _readAppend(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :appendplans */
+ token = pg_strtok(&length); /* eat :appendplans */
local_node->appendplans = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :isTarget */
- token = pg_strtok(&length); /* get isTarget */
+ token = pg_strtok(&length); /* eat :isTarget */
+ token = pg_strtok(&length); /* get isTarget */
local_node->isTarget = strtobool(token);
return local_node;
@@ -412,11 +413,11 @@ _getJoin(Join *node)
_getPlan((Plan *) node);
- token = pg_strtok(&length); /* skip the :jointype */
- token = pg_strtok(&length); /* get the jointype */
+ token = pg_strtok(&length); /* skip the :jointype */
+ token = pg_strtok(&length); /* get the jointype */
node->jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* skip the :joinqual */
+ token = pg_strtok(&length); /* skip the :joinqual */
node->joinqual = nodeRead(true); /* get the joinqual */
}
@@ -475,7 +476,7 @@ _readMergeJoin(void)
_getJoin((Join *) local_node);
- token = pg_strtok(&length); /* eat :mergeclauses */
+ token = pg_strtok(&length); /* eat :mergeclauses */
local_node->mergeclauses = nodeRead(true); /* now read it */
return local_node;
@@ -498,11 +499,11 @@ _readHashJoin(void)
_getJoin((Join *) local_node);
- token = pg_strtok(&length); /* eat :hashclauses */
+ token = pg_strtok(&length); /* eat :hashclauses */
local_node->hashclauses = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :hashjoinop */
- token = pg_strtok(&length); /* get hashjoinop */
+ token = pg_strtok(&length); /* eat :hashjoinop */
+ token = pg_strtok(&length); /* get hashjoinop */
local_node->hashjoinop = atooid(token);
return local_node;
@@ -524,8 +525,8 @@ _getScan(Scan *node)
_getPlan((Plan *) node);
- token = pg_strtok(&length); /* eat :scanrelid */
- token = pg_strtok(&length); /* get scanrelid */
+ token = pg_strtok(&length); /* eat :scanrelid */
+ token = pg_strtok(&length); /* get scanrelid */
node->scanrelid = atoui(token);
}
@@ -582,17 +583,17 @@ _readIndexScan(void)
_getScan((Scan *) local_node);
- token = pg_strtok(&length); /* eat :indxid */
+ token = pg_strtok(&length); /* eat :indxid */
local_node->indxid = toOidList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* eat :indxqual */
+ token = pg_strtok(&length); /* eat :indxqual */
local_node->indxqual = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :indxqualorig */
+ token = pg_strtok(&length); /* eat :indxqualorig */
local_node->indxqualorig = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :indxorderdir */
- token = pg_strtok(&length); /* get indxorderdir */
+ token = pg_strtok(&length); /* eat :indxorderdir */
+ token = pg_strtok(&length); /* get indxorderdir */
local_node->indxorderdir = atoi(token);
return local_node;
@@ -615,11 +616,11 @@ _readTidScan(void)
_getScan((Scan *) local_node);
- token = pg_strtok(&length); /* eat :needrescan */
- token = pg_strtok(&length); /* get needrescan */
+ token = pg_strtok(&length); /* eat :needrescan */
+ token = pg_strtok(&length); /* get needrescan */
local_node->needRescan = atoi(token);
- token = pg_strtok(&length); /* eat :tideval */
+ token = pg_strtok(&length); /* eat :tideval */
local_node->tideval = nodeRead(true); /* now read it */
return local_node;
@@ -634,7 +635,7 @@ _readTidScan(void)
static SubqueryScan *
_readSubqueryScan(void)
{
- SubqueryScan *local_node;
+ SubqueryScan *local_node;
char *token;
int length;
@@ -642,8 +643,8 @@ _readSubqueryScan(void)
_getScan((Scan *) local_node);
- token = pg_strtok(&length); /* eat :subplan */
- local_node->subplan = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :subplan */
+ local_node->subplan = nodeRead(true); /* now read it */
return local_node;
}
@@ -665,8 +666,8 @@ _readSort(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :keycount */
- token = pg_strtok(&length); /* get keycount */
+ token = pg_strtok(&length); /* eat :keycount */
+ token = pg_strtok(&length); /* get keycount */
local_node->keycount = atoi(token);
return local_node;
@@ -700,7 +701,7 @@ _readHash(void)
_getPlan((Plan *) local_node);
- token = pg_strtok(&length); /* eat :hashkey */
+ token = pg_strtok(&length); /* eat :hashkey */
local_node->hashkey = nodeRead(true);
return local_node;
@@ -725,36 +726,36 @@ _readResdom(void)
local_node = makeNode(Resdom);
- token = pg_strtok(&length); /* eat :resno */
- token = pg_strtok(&length); /* get resno */
+ token = pg_strtok(&length); /* eat :resno */
+ token = pg_strtok(&length); /* get resno */
local_node->resno = atoi(token);
- token = pg_strtok(&length); /* eat :restype */
- token = pg_strtok(&length); /* get restype */
+ token = pg_strtok(&length); /* eat :restype */
+ token = pg_strtok(&length); /* get restype */
local_node->restype = atooid(token);
- token = pg_strtok(&length); /* eat :restypmod */
- token = pg_strtok(&length); /* get restypmod */
+ token = pg_strtok(&length); /* eat :restypmod */
+ token = pg_strtok(&length); /* get restypmod */
local_node->restypmod = atoi(token);
- token = pg_strtok(&length); /* eat :resname */
- token = pg_strtok(&length); /* get the name */
+ token = pg_strtok(&length); /* eat :resname */
+ token = pg_strtok(&length); /* get the name */
local_node->resname = nullable_string(token, length);
- token = pg_strtok(&length); /* eat :reskey */
- token = pg_strtok(&length); /* get reskey */
+ token = pg_strtok(&length); /* eat :reskey */
+ token = pg_strtok(&length); /* get reskey */
local_node->reskey = atoui(token);
- token = pg_strtok(&length); /* eat :reskeyop */
- token = pg_strtok(&length); /* get reskeyop */
+ token = pg_strtok(&length); /* eat :reskeyop */
+ token = pg_strtok(&length); /* get reskeyop */
local_node->reskeyop = atooid(token);
- token = pg_strtok(&length); /* eat :ressortgroupref */
- token = pg_strtok(&length); /* get ressortgroupref */
+ token = pg_strtok(&length); /* eat :ressortgroupref */
+ token = pg_strtok(&length); /* get ressortgroupref */
local_node->ressortgroupref = atoui(token);
- token = pg_strtok(&length); /* eat :resjunk */
- token = pg_strtok(&length); /* get resjunk */
+ token = pg_strtok(&length); /* eat :resjunk */
+ token = pg_strtok(&length); /* get resjunk */
local_node->resjunk = strtobool(token);
return local_node;
@@ -775,12 +776,12 @@ _readExpr(void)
local_node = makeNode(Expr);
- token = pg_strtok(&length); /* eat :typeOid */
- token = pg_strtok(&length); /* get typeOid */
+ token = pg_strtok(&length); /* eat :typeOid */
+ token = pg_strtok(&length); /* get typeOid */
local_node->typeOid = atooid(token);
- token = pg_strtok(&length); /* eat :opType */
- token = pg_strtok(&length); /* get opType */
+ token = pg_strtok(&length); /* eat :opType */
+ token = pg_strtok(&length); /* get opType */
if (strncmp(token, "op", 2) == 0)
local_node->opType = OP_EXPR;
else if (strncmp(token, "func", 4) == 0)
@@ -796,10 +797,10 @@ _readExpr(void)
else
elog(ERROR, "_readExpr: unknown opType \"%.*s\"", length, token);
- token = pg_strtok(&length); /* eat :oper */
+ token = pg_strtok(&length); /* eat :oper */
local_node->oper = nodeRead(true);
- token = pg_strtok(&length); /* eat :args */
+ token = pg_strtok(&length); /* eat :args */
local_node->args = nodeRead(true); /* now read it */
return local_node;
@@ -820,17 +821,17 @@ _readCaseExpr(void)
local_node = makeNode(CaseExpr);
- token = pg_strtok(&length); /* eat :casetype */
- token = pg_strtok(&length); /* get casetype */
+ token = pg_strtok(&length); /* eat :casetype */
+ token = pg_strtok(&length); /* get casetype */
local_node->casetype = atooid(token);
- token = pg_strtok(&length); /* eat :arg */
+ token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true);
- token = pg_strtok(&length); /* eat :args */
+ token = pg_strtok(&length); /* eat :args */
local_node->args = nodeRead(true);
- token = pg_strtok(&length); /* eat :defresult */
+ token = pg_strtok(&length); /* eat :defresult */
local_node->defresult = nodeRead(true);
return local_node;
@@ -852,7 +853,7 @@ _readCaseWhen(void)
local_node = makeNode(CaseWhen);
local_node->expr = nodeRead(true);
- token = pg_strtok(&length); /* eat :then */
+ token = pg_strtok(&length); /* eat :then */
local_node->result = nodeRead(true);
return local_node;
@@ -873,32 +874,32 @@ _readVar(void)
local_node = makeNode(Var);
- token = pg_strtok(&length); /* eat :varno */
- token = pg_strtok(&length); /* get varno */
+ token = pg_strtok(&length); /* eat :varno */
+ token = pg_strtok(&length); /* get varno */
local_node->varno = atoui(token);
- token = pg_strtok(&length); /* eat :varattno */
- token = pg_strtok(&length); /* get varattno */
+ token = pg_strtok(&length); /* eat :varattno */
+ token = pg_strtok(&length); /* get varattno */
local_node->varattno = atoi(token);
- token = pg_strtok(&length); /* eat :vartype */
- token = pg_strtok(&length); /* get vartype */
+ token = pg_strtok(&length); /* eat :vartype */
+ token = pg_strtok(&length); /* get vartype */
local_node->vartype = atooid(token);
- token = pg_strtok(&length); /* eat :vartypmod */
- token = pg_strtok(&length); /* get vartypmod */
+ token = pg_strtok(&length); /* eat :vartypmod */
+ token = pg_strtok(&length); /* get vartypmod */
local_node->vartypmod = atoi(token);
- token = pg_strtok(&length); /* eat :varlevelsup */
- token = pg_strtok(&length); /* get varlevelsup */
+ token = pg_strtok(&length); /* eat :varlevelsup */
+ token = pg_strtok(&length); /* get varlevelsup */
local_node->varlevelsup = atoui(token);
- token = pg_strtok(&length); /* eat :varnoold */
- token = pg_strtok(&length); /* get varnoold */
+ token = pg_strtok(&length); /* eat :varnoold */
+ token = pg_strtok(&length); /* get varnoold */
local_node->varnoold = atoui(token);
- token = pg_strtok(&length); /* eat :varoattno */
- token = pg_strtok(&length); /* eat :varoattno */
+ token = pg_strtok(&length); /* eat :varoattno */
+ token = pg_strtok(&length); /* eat :varoattno */
local_node->varoattno = atoi(token);
return local_node;
@@ -919,32 +920,32 @@ _readArrayRef(void)
local_node = makeNode(ArrayRef);
- token = pg_strtok(&length); /* eat :refelemtype */
- token = pg_strtok(&length); /* get refelemtype */
+ token = pg_strtok(&length); /* eat :refelemtype */
+ token = pg_strtok(&length); /* get refelemtype */
local_node->refelemtype = atooid(token);
- token = pg_strtok(&length); /* eat :refattrlength */
- token = pg_strtok(&length); /* get refattrlength */
+ token = pg_strtok(&length); /* eat :refattrlength */
+ token = pg_strtok(&length); /* get refattrlength */
local_node->refattrlength = atoi(token);
- token = pg_strtok(&length); /* eat :refelemlength */
- token = pg_strtok(&length); /* get refelemlength */
+ token = pg_strtok(&length); /* eat :refelemlength */
+ token = pg_strtok(&length); /* get refelemlength */
local_node->refelemlength = atoi(token);
- token = pg_strtok(&length); /* eat :refelembyval */
- token = pg_strtok(&length); /* get refelembyval */
+ token = pg_strtok(&length); /* eat :refelembyval */
+ token = pg_strtok(&length); /* get refelembyval */
local_node->refelembyval = strtobool(token);
- token = pg_strtok(&length); /* eat :refupperindex */
+ token = pg_strtok(&length); /* eat :refupperindex */
local_node->refupperindexpr = nodeRead(true);
- token = pg_strtok(&length); /* eat :reflowerindex */
+ token = pg_strtok(&length); /* eat :reflowerindex */
local_node->reflowerindexpr = nodeRead(true);
- token = pg_strtok(&length); /* eat :refexpr */
+ token = pg_strtok(&length); /* eat :refexpr */
local_node->refexpr = nodeRead(true);
- token = pg_strtok(&length); /* eat :refassgnexpr */
+ token = pg_strtok(&length); /* eat :refassgnexpr */
local_node->refassgnexpr = nodeRead(true);
return local_node;
@@ -965,32 +966,30 @@ _readConst(void)
local_node = makeNode(Const);
- token = pg_strtok(&length); /* get :consttype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :consttype */
+ token = pg_strtok(&length); /* now read it */
local_node->consttype = atooid(token);
- token = pg_strtok(&length); /* get :constlen */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :constlen */
+ token = pg_strtok(&length); /* now read it */
local_node->constlen = atoi(token);
- token = pg_strtok(&length); /* get :constbyval */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :constbyval */
+ token = pg_strtok(&length); /* now read it */
local_node->constbyval = strtobool(token);
- token = pg_strtok(&length); /* get :constisnull */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :constisnull */
+ token = pg_strtok(&length); /* now read it */
local_node->constisnull = strtobool(token);
- token = pg_strtok(&length); /* get :constvalue */
+ token = pg_strtok(&length); /* get :constvalue */
if (local_node->constisnull)
{
- token = pg_strtok(&length); /* skip "NIL" */
+ token = pg_strtok(&length); /* skip "NIL" */
}
else
- {
local_node->constvalue = readDatum(local_node->constbyval);
- }
return local_node;
}
@@ -1010,12 +1009,12 @@ _readFunc(void)
local_node = makeNode(Func);
- token = pg_strtok(&length); /* get :funcid */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :funcid */
+ token = pg_strtok(&length); /* now read it */
local_node->funcid = atooid(token);
- token = pg_strtok(&length); /* get :functype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :functype */
+ token = pg_strtok(&length); /* now read it */
local_node->functype = atooid(token);
local_node->func_fcache = NULL;
@@ -1038,16 +1037,16 @@ _readOper(void)
local_node = makeNode(Oper);
- token = pg_strtok(&length); /* get :opno */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :opno */
+ token = pg_strtok(&length); /* now read it */
local_node->opno = atooid(token);
- token = pg_strtok(&length); /* get :opid */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :opid */
+ token = pg_strtok(&length); /* now read it */
local_node->opid = atooid(token);
- token = pg_strtok(&length); /* get :opresulttype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :opresulttype */
+ token = pg_strtok(&length); /* now read it */
local_node->opresulttype = atooid(token);
local_node->op_fcache = NULL;
@@ -1070,20 +1069,20 @@ _readParam(void)
local_node = makeNode(Param);
- token = pg_strtok(&length); /* get :paramkind */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramkind */
+ token = pg_strtok(&length); /* now read it */
local_node->paramkind = atoi(token);
- token = pg_strtok(&length); /* get :paramid */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramid */
+ token = pg_strtok(&length); /* now read it */
local_node->paramid = atoi(token);
- token = pg_strtok(&length); /* get :paramname */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramname */
+ token = pg_strtok(&length); /* now read it */
local_node->paramname = nullable_string(token, length);
- token = pg_strtok(&length); /* get :paramtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :paramtype */
+ token = pg_strtok(&length); /* now read it */
local_node->paramtype = atooid(token);
return local_node;
@@ -1104,27 +1103,27 @@ _readAggref(void)
local_node = makeNode(Aggref);
- token = pg_strtok(&length); /* eat :aggname */
- token = pg_strtok(&length); /* get aggname */
+ token = pg_strtok(&length); /* eat :aggname */
+ token = pg_strtok(&length); /* get aggname */
local_node->aggname = debackslash(token, length);
- token = pg_strtok(&length); /* eat :basetype */
- token = pg_strtok(&length); /* get basetype */
+ token = pg_strtok(&length); /* eat :basetype */
+ token = pg_strtok(&length); /* get basetype */
local_node->basetype = atooid(token);
- token = pg_strtok(&length); /* eat :aggtype */
- token = pg_strtok(&length); /* get aggtype */
+ token = pg_strtok(&length); /* eat :aggtype */
+ token = pg_strtok(&length); /* get aggtype */
local_node->aggtype = atooid(token);
- token = pg_strtok(&length); /* eat :target */
+ token = pg_strtok(&length); /* eat :target */
local_node->target = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :aggstar */
- token = pg_strtok(&length); /* get aggstar */
+ token = pg_strtok(&length); /* eat :aggstar */
+ token = pg_strtok(&length); /* get aggstar */
local_node->aggstar = strtobool(token);
- token = pg_strtok(&length); /* eat :aggdistinct */
- token = pg_strtok(&length); /* get aggdistinct */
+ token = pg_strtok(&length); /* eat :aggdistinct */
+ token = pg_strtok(&length); /* get aggdistinct */
local_node->aggdistinct = strtobool(token);
return local_node;
@@ -1145,21 +1144,21 @@ _readSubLink(void)
local_node = makeNode(SubLink);
- token = pg_strtok(&length); /* eat :subLinkType */
- token = pg_strtok(&length); /* get subLinkType */
+ token = pg_strtok(&length); /* eat :subLinkType */
+ token = pg_strtok(&length); /* get subLinkType */
local_node->subLinkType = atoi(token);
- token = pg_strtok(&length); /* eat :useor */
- token = pg_strtok(&length); /* get useor */
+ token = pg_strtok(&length); /* eat :useor */
+ token = pg_strtok(&length); /* get useor */
local_node->useor = strtobool(token);
- token = pg_strtok(&length); /* eat :lefthand */
+ token = pg_strtok(&length); /* eat :lefthand */
local_node->lefthand = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :oper */
+ token = pg_strtok(&length); /* eat :oper */
local_node->oper = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :subselect */
+ token = pg_strtok(&length); /* eat :subselect */
local_node->subselect = nodeRead(true); /* now read it */
return local_node;
@@ -1180,19 +1179,19 @@ _readFieldSelect(void)
local_node = makeNode(FieldSelect);
- token = pg_strtok(&length); /* eat :arg */
+ token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :fieldnum */
- token = pg_strtok(&length); /* get fieldnum */
+ token = pg_strtok(&length); /* eat :fieldnum */
+ token = pg_strtok(&length); /* get fieldnum */
local_node->fieldnum = (AttrNumber) atoi(token);
- token = pg_strtok(&length); /* eat :resulttype */
- token = pg_strtok(&length); /* get resulttype */
+ token = pg_strtok(&length); /* eat :resulttype */
+ token = pg_strtok(&length); /* get resulttype */
local_node->resulttype = atooid(token);
- token = pg_strtok(&length); /* eat :resulttypmod */
- token = pg_strtok(&length); /* get resulttypmod */
+ token = pg_strtok(&length); /* eat :resulttypmod */
+ token = pg_strtok(&length); /* get resulttypmod */
local_node->resulttypmod = atoi(token);
return local_node;
@@ -1213,15 +1212,15 @@ _readRelabelType(void)
local_node = makeNode(RelabelType);
- token = pg_strtok(&length); /* eat :arg */
+ token = pg_strtok(&length); /* eat :arg */
local_node->arg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :resulttype */
- token = pg_strtok(&length); /* get resulttype */
+ token = pg_strtok(&length); /* eat :resulttype */
+ token = pg_strtok(&length); /* get resulttype */
local_node->resulttype = atooid(token);
- token = pg_strtok(&length); /* eat :resulttypmod */
- token = pg_strtok(&length); /* get resulttypmod */
+ token = pg_strtok(&length); /* eat :resulttypmod */
+ token = pg_strtok(&length); /* get resulttypmod */
local_node->resulttypmod = atoi(token);
return local_node;
@@ -1242,7 +1241,7 @@ _readRangeTblRef(void)
local_node = makeNode(RangeTblRef);
- token = pg_strtok(&length); /* get rtindex */
+ token = pg_strtok(&length); /* get rtindex */
local_node->rtindex = atoi(token);
return local_node;
@@ -1263,11 +1262,11 @@ _readFromExpr(void)
local_node = makeNode(FromExpr);
- token = pg_strtok(&length); /* eat :fromlist */
- local_node->fromlist = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :fromlist */
+ local_node->fromlist = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :quals */
- local_node->quals = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :quals */
+ local_node->quals = nodeRead(true); /* now read it */
return local_node;
}
@@ -1287,34 +1286,34 @@ _readJoinExpr(void)
local_node = makeNode(JoinExpr);
- token = pg_strtok(&length); /* eat :jointype */
- token = pg_strtok(&length); /* get jointype */
+ token = pg_strtok(&length); /* eat :jointype */
+ token = pg_strtok(&length); /* get jointype */
local_node->jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* eat :isNatural */
- token = pg_strtok(&length); /* get :isNatural */
+ token = pg_strtok(&length); /* eat :isNatural */
+ token = pg_strtok(&length); /* get :isNatural */
local_node->isNatural = strtobool(token);
- token = pg_strtok(&length); /* eat :larg */
+ token = pg_strtok(&length); /* eat :larg */
local_node->larg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :rarg */
+ token = pg_strtok(&length); /* eat :rarg */
local_node->rarg = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :using */
- local_node->using = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :using */
+ local_node->using = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :quals */
- local_node->quals = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :quals */
+ local_node->quals = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :alias */
- local_node->alias = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :alias */
+ local_node->alias = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :colnames */
- local_node->colnames = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :colnames */
+ local_node->colnames = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :colvars */
- local_node->colvars = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :colvars */
+ local_node->colvars = nodeRead(true); /* now read it */
return local_node;
}
@@ -1336,66 +1335,66 @@ _readRelOptInfo(void)
local_node = makeNode(RelOptInfo);
- token = pg_strtok(&length); /* get :relids */
+ token = pg_strtok(&length); /* get :relids */
local_node->relids = toIntList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* get :rows */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :rows */
+ token = pg_strtok(&length); /* now read it */
local_node->rows = atof(token);
- token = pg_strtok(&length); /* get :width */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :width */
+ token = pg_strtok(&length); /* now read it */
local_node->width = atoi(token);
- token = pg_strtok(&length); /* get :targetlist */
+ token = pg_strtok(&length); /* get :targetlist */
local_node->targetlist = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :pathlist */
+ token = pg_strtok(&length); /* get :pathlist */
local_node->pathlist = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :cheapest_startup_path */
+ token = pg_strtok(&length); /* get :cheapest_startup_path */
local_node->cheapest_startup_path = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :cheapest_total_path */
+ token = pg_strtok(&length); /* get :cheapest_total_path */
local_node->cheapest_total_path = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :pruneable */
- token = pg_strtok(&length); /* get :pruneable */
+ token = pg_strtok(&length); /* eat :pruneable */
+ token = pg_strtok(&length); /* get :pruneable */
local_node->pruneable = strtobool(token);
- token = pg_strtok(&length); /* get :issubquery */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :issubquery */
+ token = pg_strtok(&length); /* now read it */
local_node->issubquery = strtobool(token);
- token = pg_strtok(&length); /* get :indexed */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :indexed */
+ token = pg_strtok(&length); /* now read it */
local_node->indexed = strtobool(token);
- token = pg_strtok(&length); /* get :pages */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pages */
+ token = pg_strtok(&length); /* now read it */
local_node->pages = atol(token);
- token = pg_strtok(&length); /* get :tuples */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :tuples */
+ token = pg_strtok(&length); /* now read it */
local_node->tuples = atof(token);
- token = pg_strtok(&length); /* get :subplan */
- local_node->subplan = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :subplan */
+ local_node->subplan = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :baserestrictinfo */
- local_node->baserestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :baserestrictinfo */
+ local_node->baserestrictinfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :baserestrictcost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :baserestrictcost */
+ token = pg_strtok(&length); /* now read it */
local_node->baserestrictcost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :outerjoinset */
- local_node->outerjoinset = toIntList(nodeRead(true)); /* now read it */
+ token = pg_strtok(&length); /* get :outerjoinset */
+ local_node->outerjoinset = toIntList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* get :joininfo */
+ token = pg_strtok(&length); /* get :joininfo */
local_node->joininfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoin */
+ token = pg_strtok(&length); /* get :innerjoin */
local_node->innerjoin = nodeRead(true); /* now read it */
return local_node;
@@ -1414,10 +1413,10 @@ _readTargetEntry(void)
local_node = makeNode(TargetEntry);
- token = pg_strtok(&length); /* get :resdom */
+ token = pg_strtok(&length); /* get :resdom */
local_node->resdom = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :expr */
+ token = pg_strtok(&length); /* get :expr */
local_node->expr = nodeRead(true); /* now read it */
return local_node;
@@ -1432,11 +1431,11 @@ _readAttr(void)
local_node = makeNode(Attr);
- token = pg_strtok(&length); /* eat :relname */
- token = pg_strtok(&length); /* get relname */
+ token = pg_strtok(&length); /* eat :relname */
+ token = pg_strtok(&length); /* get relname */
local_node->relname = debackslash(token, length);
- token = pg_strtok(&length); /* eat :attrs */
+ token = pg_strtok(&length); /* eat :attrs */
local_node->attrs = nodeRead(true); /* now read it */
return local_node;
@@ -1455,41 +1454,41 @@ _readRangeTblEntry(void)
local_node = makeNode(RangeTblEntry);
- token = pg_strtok(&length); /* eat :relname */
- token = pg_strtok(&length); /* get :relname */
+ token = pg_strtok(&length); /* eat :relname */
+ token = pg_strtok(&length); /* get :relname */
local_node->relname = nullable_string(token, length);
- token = pg_strtok(&length); /* eat :relid */
- token = pg_strtok(&length); /* get :relid */
+ token = pg_strtok(&length); /* eat :relid */
+ token = pg_strtok(&length); /* get :relid */
local_node->relid = atooid(token);
- token = pg_strtok(&length); /* eat :subquery */
- local_node->subquery = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :subquery */
+ local_node->subquery = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :alias */
- local_node->alias = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* eat :alias */
+ local_node->alias = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :eref */
+ token = pg_strtok(&length); /* eat :eref */
local_node->eref = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* eat :inh */
- token = pg_strtok(&length); /* get :inh */
+ token = pg_strtok(&length); /* eat :inh */
+ token = pg_strtok(&length); /* get :inh */
local_node->inh = strtobool(token);
- token = pg_strtok(&length); /* eat :inFromCl */
- token = pg_strtok(&length); /* get :inFromCl */
+ token = pg_strtok(&length); /* eat :inFromCl */
+ token = pg_strtok(&length); /* get :inFromCl */
local_node->inFromCl = strtobool(token);
- token = pg_strtok(&length); /* eat :checkForRead */
- token = pg_strtok(&length); /* get :checkForRead */
+ token = pg_strtok(&length); /* eat :checkForRead */
+ token = pg_strtok(&length); /* get :checkForRead */
local_node->checkForRead = strtobool(token);
- token = pg_strtok(&length); /* eat :checkForWrite */
- token = pg_strtok(&length); /* get :checkForWrite */
+ token = pg_strtok(&length); /* eat :checkForWrite */
+ token = pg_strtok(&length); /* get :checkForWrite */
local_node->checkForWrite = strtobool(token);
- token = pg_strtok(&length); /* eat :checkAsUser */
- token = pg_strtok(&length); /* get :checkAsUser */
+ token = pg_strtok(&length); /* eat :checkAsUser */
+ token = pg_strtok(&length); /* get :checkAsUser */
local_node->checkAsUser = atooid(token);
return local_node;
@@ -1510,19 +1509,19 @@ _readPath(void)
local_node = makeNode(Path);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->pathkeys = nodeRead(true); /* now read it */
return local_node;
@@ -1543,40 +1542,40 @@ _readIndexPath(void)
local_node = makeNode(IndexPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :indexid */
+ token = pg_strtok(&length); /* get :indexid */
local_node->indexid = toOidList(nodeRead(true));
- token = pg_strtok(&length); /* get :indexqual */
+ token = pg_strtok(&length); /* get :indexqual */
local_node->indexqual = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :indexscandir */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :indexscandir */
+ token = pg_strtok(&length); /* now read it */
local_node->indexscandir = (ScanDirection) atoi(token);
- token = pg_strtok(&length); /* get :joinrelids */
+ token = pg_strtok(&length); /* get :joinrelids */
local_node->joinrelids = toIntList(nodeRead(true));
- token = pg_strtok(&length); /* get :alljoinquals */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :alljoinquals */
+ token = pg_strtok(&length); /* now read it */
local_node->alljoinquals = strtobool(token);
- token = pg_strtok(&length); /* get :rows */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :rows */
+ token = pg_strtok(&length); /* now read it */
local_node->rows = atof(token);
return local_node;
@@ -1597,25 +1596,25 @@ _readTidPath(void)
local_node = makeNode(TidPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :tideval */
+ token = pg_strtok(&length); /* get :tideval */
local_node->tideval = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :unjoined_relids */
+ token = pg_strtok(&length); /* get :unjoined_relids */
local_node->unjoined_relids = toIntList(nodeRead(true));
return local_node;
@@ -1636,22 +1635,22 @@ _readAppendPath(void)
local_node = makeNode(AppendPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :subpaths */
+ token = pg_strtok(&length); /* get :subpaths */
local_node->subpaths = nodeRead(true); /* now read it */
return local_node;
@@ -1672,33 +1671,33 @@ _readNestPath(void)
local_node = makeNode(NestPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :jointype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :jointype */
+ token = pg_strtok(&length); /* now read it */
local_node->jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* get :outerjoinpath */
+ token = pg_strtok(&length); /* get :outerjoinpath */
local_node->outerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoinpath */
+ token = pg_strtok(&length); /* get :innerjoinpath */
local_node->innerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :joinrestrictinfo */
- local_node->joinrestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :joinrestrictinfo */
+ local_node->joinrestrictinfo = nodeRead(true); /* now read it */
return local_node;
}
@@ -1718,41 +1717,41 @@ _readMergePath(void)
local_node = makeNode(MergePath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :jointype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :jointype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* get :outerjoinpath */
+ token = pg_strtok(&length); /* get :outerjoinpath */
local_node->jpath.outerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoinpath */
+ token = pg_strtok(&length); /* get :innerjoinpath */
local_node->jpath.innerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :joinrestrictinfo */
- local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :joinrestrictinfo */
+ local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :path_mergeclauses */
+ token = pg_strtok(&length); /* get :path_mergeclauses */
local_node->path_mergeclauses = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :outersortkeys */
+ token = pg_strtok(&length); /* get :outersortkeys */
local_node->outersortkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innersortkeys */
+ token = pg_strtok(&length); /* get :innersortkeys */
local_node->innersortkeys = nodeRead(true); /* now read it */
return local_node;
@@ -1773,35 +1772,35 @@ _readHashPath(void)
local_node = makeNode(HashPath);
- token = pg_strtok(&length); /* get :pathtype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :pathtype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.pathtype = atoi(token);
- token = pg_strtok(&length); /* get :startup_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :startup_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.startup_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :total_cost */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :total_cost */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.path.total_cost = (Cost) atof(token);
- token = pg_strtok(&length); /* get :pathkeys */
+ token = pg_strtok(&length); /* get :pathkeys */
local_node->jpath.path.pathkeys = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :jointype */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :jointype */
+ token = pg_strtok(&length); /* now read it */
local_node->jpath.jointype = (JoinType) atoi(token);
- token = pg_strtok(&length); /* get :outerjoinpath */
+ token = pg_strtok(&length); /* get :outerjoinpath */
local_node->jpath.outerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :innerjoinpath */
+ token = pg_strtok(&length); /* get :innerjoinpath */
local_node->jpath.innerjoinpath = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :joinrestrictinfo */
- local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
+ token = pg_strtok(&length); /* get :joinrestrictinfo */
+ local_node->jpath.joinrestrictinfo = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :path_hashclauses */
+ token = pg_strtok(&length); /* get :path_hashclauses */
local_node->path_hashclauses = nodeRead(true); /* now read it */
return local_node;
@@ -1822,11 +1821,11 @@ _readPathKeyItem(void)
local_node = makeNode(PathKeyItem);
- token = pg_strtok(&length); /* get :sortop */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :sortop */
+ token = pg_strtok(&length); /* now read it */
local_node->sortop = atooid(token);
- token = pg_strtok(&length); /* get :key */
+ token = pg_strtok(&length); /* get :key */
local_node->key = nodeRead(true); /* now read it */
return local_node;
@@ -1847,30 +1846,30 @@ _readRestrictInfo(void)
local_node = makeNode(RestrictInfo);
- token = pg_strtok(&length); /* get :clause */
+ token = pg_strtok(&length); /* get :clause */
local_node->clause = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :ispusheddown */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :ispusheddown */
+ token = pg_strtok(&length); /* now read it */
local_node->ispusheddown = strtobool(token);
- token = pg_strtok(&length); /* get :subclauseindices */
+ token = pg_strtok(&length); /* get :subclauseindices */
local_node->subclauseindices = nodeRead(true); /* now read it */
- token = pg_strtok(&length); /* get :mergejoinoperator */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :mergejoinoperator */
+ token = pg_strtok(&length); /* now read it */
local_node->mergejoinoperator = atooid(token);
- token = pg_strtok(&length); /* get :left_sortop */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :left_sortop */
+ token = pg_strtok(&length); /* now read it */
local_node->left_sortop = atooid(token);
- token = pg_strtok(&length); /* get :right_sortop */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :right_sortop */
+ token = pg_strtok(&length); /* now read it */
local_node->right_sortop = atooid(token);
- token = pg_strtok(&length); /* get :hashjoinoperator */
- token = pg_strtok(&length); /* now read it */
+ token = pg_strtok(&length); /* get :hashjoinoperator */
+ token = pg_strtok(&length); /* now read it */
local_node->hashjoinoperator = atooid(token);
/* eval_cost is not part of saved representation; compute on first use */
@@ -1899,10 +1898,10 @@ _readJoinInfo(void)
local_node = makeNode(JoinInfo);
- token = pg_strtok(&length); /* get :unjoined_relids */
+ token = pg_strtok(&length); /* get :unjoined_relids */
local_node->unjoined_relids = toIntList(nodeRead(true)); /* now read it */
- token = pg_strtok(&length); /* get :jinfo_restrictinfo */
+ token = pg_strtok(&length); /* get :jinfo_restrictinfo */
local_node->jinfo_restrictinfo = nodeRead(true); /* now read it */
return local_node;
@@ -1922,7 +1921,7 @@ _readIter(void)
local_node = makeNode(Iter);
- token = pg_strtok(&length); /* eat :iterexpr */
+ token = pg_strtok(&length); /* eat :iterexpr */
local_node->iterexpr = nodeRead(true); /* now read it */
return local_node;
@@ -2081,7 +2080,7 @@ readDatum(bool typbyval)
token = pg_strtok(&tokenLength);
length = atoui(token);
- token = pg_strtok(&tokenLength); /* skip the '[' */
+ token = pg_strtok(&tokenLength); /* skip the '[' */
if (typbyval)
{
@@ -2109,7 +2108,7 @@ readDatum(bool typbyval)
res = PointerGetDatum(s);
}
- token = pg_strtok(&tokenLength); /* skip the ']' */
+ token = pg_strtok(&tokenLength); /* skip the ']' */
if (token == NULL || token[0] != ']')
elog(ERROR, "readDatum: ']' expected, length = %lu",
(unsigned long) length);
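
All of the _readXxx()/_getXxx() hunks above are pure reindentation of one token-eating idiom: a first pg_strtok() call consumes the ":fieldname" label, a second call (for scalar fields) fetches the value token for conversion with atoi/atoui/atooid/strtobool, while node-valued fields go straight to nodeRead(true). A minimal sketch of that idiom for a hypothetical node type Foo (illustration only, not code from this commit):

	/* Hypothetical node reader showing the pg_strtok idiom used above */
	static Foo *
	_readFoo(void)
	{
		Foo		   *local_node;		/* Foo is a made-up node type */
		char	   *token;
		int			length;

		local_node = makeNode(Foo);

		token = pg_strtok(&length);		/* eat :fooval */
		token = pg_strtok(&length);		/* get fooval */
		local_node->fooval = atoi(token);

		token = pg_strtok(&length);		/* eat :sublist */
		local_node->sublist = nodeRead(true);	/* now read it */

		return local_node;
	}
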
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 4422e03cb40..dd3d6bd5372 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: geqo_eval.c,v 1.57 2001/01/24 19:42:57 momjian Exp $
+ * $Id: geqo_eval.c,v 1.58 2001/03/22 03:59:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -49,8 +49,8 @@ geqo_eval(Query *root, List *initial_rels, Gene *tour, int num_gene)
* allocated inside gimme_tree().
*
* Since geqo_eval() will be called many times, we can't afford to let
- * all that memory go unreclaimed until end of statement. Note we make
- * the temp context a child of TransactionCommandContext, so that
+ * all that memory go unreclaimed until end of statement. Note we
+ * make the temp context a child of TransactionCommandContext, so that
* it will be freed even if we abort via elog(ERROR).
*/
mycontext = AllocSetContextCreate(TransactionCommandContext,
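
The geqo_eval.c hunk only rewraps the comment, but the pattern it describes is worth spelling out: a throwaway memory context parented on TransactionCommandContext, so per-tour allocations are reclaimed even if we abort via elog(ERROR). A hedged sketch of that pattern (context name and size parameters are illustrative; the real call in geqo_eval() may differ):

	MemoryContext mycontext;
	MemoryContext oldcxt;

	/* child of TransactionCommandContext => freed automatically on abort */
	mycontext = AllocSetContextCreate(TransactionCommandContext,
									  "GEQO per-tour context",	/* illustrative name */
									  ALLOCSET_DEFAULT_MINSIZE,
									  ALLOCSET_DEFAULT_INITSIZE,
									  ALLOCSET_DEFAULT_MAXSIZE);
	oldcxt = MemoryContextSwitchTo(mycontext);
	/* ... build and cost one candidate join tree here ... */
	MemoryContextSwitchTo(oldcxt);
	MemoryContextDelete(mycontext);		/* reclaim everything allocated above */
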
diff --git a/src/backend/optimizer/geqo/geqo_main.c b/src/backend/optimizer/geqo/geqo_main.c
index 2300f8677c1..86b5b334097 100644
--- a/src/backend/optimizer/geqo/geqo_main.c
+++ b/src/backend/optimizer/geqo/geqo_main.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: geqo_main.c,v 1.26 2001/01/24 19:42:57 momjian Exp $
+ * $Id: geqo_main.c,v 1.27 2001/03/22 03:59:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,11 +36,11 @@
/*
* Configuration options
*/
-int Geqo_pool_size;
-int Geqo_effort;
-int Geqo_generations;
+int Geqo_pool_size;
+int Geqo_effort;
+int Geqo_generations;
double Geqo_selection_bias;
-int Geqo_random_seed;
+int Geqo_random_seed;
static int gimme_pool_size(int nr_rel);
@@ -101,10 +101,10 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
/* seed random number generator */
/* XXX why is this done every time around? */
- if (Geqo_random_seed >= 0)
- srandom((unsigned int) Geqo_random_seed);
- else
- srandom((unsigned int) time(NULL));
+ if (Geqo_random_seed >= 0)
+ srandom((unsigned int) Geqo_random_seed);
+ else
+ srandom((unsigned int) time(NULL));
/* allocate genetic pool memory */
pool = alloc_pool(pool_size, number_of_rels);
@@ -160,8 +160,8 @@ geqo(Query *root, int number_of_rels, List *initial_rels)
{
/* SELECTION */
- geqo_selection(momma, daddy, pool, Geqo_selection_bias);/* using linear bias
- * function */
+ geqo_selection(momma, daddy, pool, Geqo_selection_bias); /* using linear bias
+ * function */
@@ -293,15 +293,15 @@ gimme_pool_size(int nr_rel)
{
double size;
- if (Geqo_pool_size != 0)
- {
- if (Geqo_pool_size < MIN_GEQO_POOL_SIZE)
- return MIN_GEQO_POOL_SIZE;
- else if (Geqo_pool_size > MAX_GEQO_POOL_SIZE)
- return MAX_GEQO_POOL_SIZE;
- else
- return Geqo_pool_size;
- }
+ if (Geqo_pool_size != 0)
+ {
+ if (Geqo_pool_size < MIN_GEQO_POOL_SIZE)
+ return MIN_GEQO_POOL_SIZE;
+ else if (Geqo_pool_size > MAX_GEQO_POOL_SIZE)
+ return MAX_GEQO_POOL_SIZE;
+ else
+ return Geqo_pool_size;
+ }
size = pow(2.0, nr_rel + 1.0);
@@ -323,8 +323,8 @@ gimme_pool_size(int nr_rel)
static int
gimme_number_generations(int pool_size, int effort)
{
- if (Geqo_generations <= 0)
- return effort * (int) ceil(log((double) pool_size) / log(2.0));
- else
- return Geqo_generations;
+ if (Geqo_generations <= 0)
+ return effort * (int) ceil(log((double) pool_size) / log(2.0));
+ else
+ return Geqo_generations;
}
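
A worked example of the two defaults shown in the gimme_pool_size() and gimme_number_generations() hunks, assuming Geqo_pool_size is 0 and Geqo_generations is unset (<= 0): with 6 relations the pool defaults to pow(2.0, 6 + 1.0) = 128 individuals (still subject to the MIN/MAX_GEQO_POOL_SIZE clamp), and with an effort of 1 that yields 1 * ceil(log2(128)) = 7 generations.
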
diff --git a/src/backend/optimizer/path/_deadcode/predmig.c b/src/backend/optimizer/path/_deadcode/predmig.c
index bb73132240a..1781f43db1d 100644
--- a/src/backend/optimizer/path/_deadcode/predmig.c
+++ b/src/backend/optimizer/path/_deadcode/predmig.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.8 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/_deadcode/Attic/predmig.c,v 1.9 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -485,7 +485,7 @@ xfunc_form_groups(Query *queryInfo, Stream root, Stream bottom)
}
-/* ------------------- UTILITY FUNCTIONS ------------------------- */
+/* ------------------- UTILITY FUNCTIONS ------------------------- */
/*
** xfunc_free_stream
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 7d44c4dcfa0..1cf73dffff7 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.71 2001/02/03 21:17:52 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.72 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,12 +33,12 @@ int geqo_rels = DEFAULT_GEQO_RELS;
static void set_base_rel_pathlists(Query *root);
static void set_plain_rel_pathlist(Query *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_inherited_rel_pathlist(Query *root, RelOptInfo *rel,
- RangeTblEntry *rte,
- List *inheritlist);
+ RangeTblEntry *rte,
+ List *inheritlist);
static RelOptInfo *make_one_rel_by_joins(Query *root, int levels_needed,
- List *initial_rels);
+ List *initial_rels);
#ifdef OPTIMIZER_DEBUG
static void debug_print_rel(Query *root, RelOptInfo *rel);
@@ -94,7 +94,7 @@ set_base_rel_pathlists(Query *root)
RangeTblEntry *rte;
List *inheritlist;
- Assert(length(rel->relids) == 1); /* better be base rel */
+ Assert(length(rel->relids) == 1); /* better be base rel */
rti = lfirsti(rel->relids);
rte = rt_fetch(rti, root->rtable);
@@ -103,24 +103,25 @@ set_base_rel_pathlists(Query *root)
/* Subquery --- generate a separate plan for it */
/*
- * If there are any restriction clauses that have been attached
- * to the subquery relation, consider pushing them down to become
- * HAVING quals of the subquery itself. (Not WHERE clauses, since
- * they may refer to subquery outputs that are aggregate results.
- * But planner.c will transfer them into the subquery's WHERE if
- * they do not.) This transformation is useful because it may
- * allow us to generate a better plan for the subquery than
- * evaluating all the subquery output rows and then filtering
- * them.
+ * If there are any restriction clauses that have been
+ * attached to the subquery relation, consider pushing them
+ * down to become HAVING quals of the subquery itself. (Not
+ * WHERE clauses, since they may refer to subquery outputs
+ * that are aggregate results. But planner.c will transfer
+ * them into the subquery's WHERE if they do not.) This
+ * transformation is useful because it may allow us to
+ * generate a better plan for the subquery than evaluating all
+ * the subquery output rows and then filtering them.
*
- * Currently, we do not push down clauses that contain subselects,
- * mainly because I'm not sure it will work correctly (the
- * subplan hasn't yet transformed sublinks to subselects).
- * Also, if the subquery contains set ops (UNION/INTERSECT/EXCEPT)
- * we do not push down any qual clauses, since the planner doesn't
- * support quals at the top level of a setop. (With suitable
- * analysis we could try to push the quals down into the component
- * queries of the setop, but getting it right is not trivial.)
+ * Currently, we do not push down clauses that contain
+ * subselects, mainly because I'm not sure it will work
+ * correctly (the subplan hasn't yet transformed sublinks to
+ * subselects). Also, if the subquery contains set ops
+ * (UNION/INTERSECT/EXCEPT) we do not push down any qual
+ * clauses, since the planner doesn't support quals at the top
+ * level of a setop. (With suitable analysis we could try to
+ * push the quals down into the component queries of the
+ * setop, but getting it right is not trivial.)
* Non-pushed-down clauses will get evaluated as qpquals of
* the SubqueryScan node.
*
@@ -136,8 +137,8 @@ set_base_rel_pathlists(Query *root)
foreach(lst, rel->baserestrictinfo)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lst);
- Node *clause = (Node *) rinfo->clause;
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lst);
+ Node *clause = (Node *) rinfo->clause;
if (contain_subplans(clause))
{
@@ -146,13 +147,14 @@ set_base_rel_pathlists(Query *root)
}
else
{
+
/*
- * We need to replace Vars in the clause (which must
- * refer to outputs of the subquery) with copies of
- * the subquery's targetlist expressions. Note that
- * at this point, any uplevel Vars in the clause
- * should have been replaced with Params, so they
- * need no work.
+ * We need to replace Vars in the clause (which
+ * must refer to outputs of the subquery) with
+ * copies of the subquery's targetlist
+ * expressions. Note that at this point, any
+ * uplevel Vars in the clause should have been
+ * replaced with Params, so they need no work.
*/
clause = ResolveNew(clause, rti, 0,
rte->subquery->targetList,
@@ -160,11 +162,12 @@ set_base_rel_pathlists(Query *root)
rte->subquery->havingQual =
make_and_qual(rte->subquery->havingQual,
clause);
+
/*
* We need not change the subquery's hasAggs or
- * hasSublinks flags, since we can't be pushing down
- * any aggregates that weren't there before, and we
- * don't push down subselects at all.
+ * hasSublinks flags, since we can't be pushing
+ * down any aggregates that weren't there before,
+ * and we don't push down subselects at all.
*/
}
}
@@ -215,9 +218,9 @@ set_plain_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte)
/*
* Generate paths and add them to the rel's pathlist.
*
- * Note: add_path() will discard any paths that are dominated by
- * another available path, keeping only those paths that are
- * superior along at least one dimension of cost or sortedness.
+ * Note: add_path() will discard any paths that are dominated by another
+ * available path, keeping only those paths that are superior along at
+ * least one dimension of cost or sortedness.
*/
/* Consider sequential scan */
@@ -230,9 +233,9 @@ set_plain_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte)
create_index_paths(root, rel, indices);
/*
- * Note: create_or_index_paths depends on create_index_paths to
- * have marked OR restriction clauses with relevant indices; this
- * is why it doesn't need to be given the list of indices.
+ * Note: create_or_index_paths depends on create_index_paths to have
+ * marked OR restriction clauses with relevant indices; this is why it
+ * doesn't need to be given the list of indices.
*/
create_or_index_paths(root, rel, rel->baserestrictinfo);
@@ -258,8 +261,8 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
List *il;
/*
- * XXX for now, can't handle inherited expansion of FOR UPDATE;
- * can we do better?
+ * XXX for now, can't handle inherited expansion of FOR UPDATE; can we
+ * do better?
*/
if (intMember(parentRTindex, root->rowMarks))
elog(ERROR, "SELECT FOR UPDATE is not supported for inherit queries");
@@ -271,14 +274,14 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
rel->width = 0;
/*
- * Generate access paths for each table in the tree (parent AND children),
- * and pick the cheapest path for each table.
+ * Generate access paths for each table in the tree (parent AND
+ * children), and pick the cheapest path for each table.
*/
foreach(il, inheritlist)
{
- int childRTindex = lfirsti(il);
+ int childRTindex = lfirsti(il);
RangeTblEntry *childrte;
- Oid childOID;
+ Oid childOID;
RelOptInfo *childrel;
childrte = rt_fetch(childRTindex, root->rtable);
@@ -289,16 +292,18 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
* attach the RelOptInfo to the query's base_rel_list, however.
*
* NOTE: when childRTindex == parentRTindex, we create a second
- * RelOptInfo for the same relation. This RelOptInfo will represent
- * the parent table alone, whereas the original RelOptInfo represents
- * the union of the inheritance tree members.
+ * RelOptInfo for the same relation. This RelOptInfo will
+ * represent the parent table alone, whereas the original
+ * RelOptInfo represents the union of the inheritance tree
+ * members.
*/
childrel = make_base_rel(root, childRTindex);
/*
- * Copy the parent's targetlist and restriction quals to the child,
- * with attribute-number adjustment if needed. We don't bother
- * to copy the join quals, since we can't do any joining here.
+ * Copy the parent's targetlist and restriction quals to the
+ * child, with attribute-number adjustment if needed. We don't
+ * bother to copy the join quals, since we can't do any joining
+ * here.
*/
childrel->targetlist = (List *)
adjust_inherited_attrs((Node *) rel->targetlist,
@@ -328,8 +333,8 @@ set_inherited_rel_pathlist(Query *root, RelOptInfo *rel, RangeTblEntry *rte,
}
/*
- * Finally, build Append path and install it as the only access
- * path for the parent rel.
+ * Finally, build Append path and install it as the only access path
+ * for the parent rel.
*/
add_path(rel, (Path *) create_append_path(rel, subpaths));
@@ -350,9 +355,9 @@ make_fromexpr_rel(Query *root, FromExpr *from)
List *jt;
/*
- * Count the number of child jointree nodes. This is the depth
- * of the dynamic-programming algorithm we must employ to consider
- * all ways of joining the child nodes.
+ * Count the number of child jointree nodes. This is the depth of the
+ * dynamic-programming algorithm we must employ to consider all ways
+ * of joining the child nodes.
*/
levels_needed = length(from->fromlist);
@@ -374,6 +379,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
if (levels_needed == 1)
{
+
/*
* Single jointree node, so we're done.
*/
@@ -381,6 +387,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
}
else
{
+
/*
* Consider the different orders in which we could join the rels,
* using either GEQO or regular optimizer.
@@ -401,7 +408,7 @@ make_fromexpr_rel(Query *root, FromExpr *from)
* independent jointree items in the query. This is > 1.
*
* 'initial_rels' is a list of RelOptInfo nodes for each independent
- * jointree item. These are the components to be joined together.
+ * jointree item. These are the components to be joined together.
*
* Returns the final level of join relations, i.e., the relation that is
* the result of joining all the original relations together.
@@ -423,8 +430,8 @@ make_one_rel_by_joins(Query *root, int levels_needed, List *initial_rels)
* joinitems[j] is a list of all the j-item rels. Initially we set
* joinitems[1] to represent all the single-jointree-item relations.
*/
- joinitems = (List **) palloc((levels_needed+1) * sizeof(List *));
- MemSet(joinitems, 0, (levels_needed+1) * sizeof(List *));
+ joinitems = (List **) palloc((levels_needed + 1) * sizeof(List *));
+ MemSet(joinitems, 0, (levels_needed + 1) * sizeof(List *));
joinitems[1] = initial_rels;
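
The add_path() note rewrapped above ("keeping only those paths that are superior along at least one dimension of cost or sortedness") amounts to a dominance test. A hypothetical helper expressing that rule — a simplification, not the real add_path() logic:

	/* true if 'a' is at least as good as 'b' on every dimension add_path
	 * cares about: startup cost, total cost, and usefulness of sort order */
	static bool
	path_dominates(Path *a, Path *b)
	{
		return a->startup_cost <= b->startup_cost &&
			   a->total_cost <= b->total_cost &&
			   pathkeys_contained_in(b->pathkeys, a->pathkeys);
	}

In those terms, add_path() discards any existing path dominated by the newcomer and declines to add a newcomer that is itself dominated.
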
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index b404eabccdb..8493067f9a2 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.41 2001/01/24 19:42:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.42 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,7 +128,8 @@ clauselist_selectivity(Query *root,
* behave in the simple way we are expecting.)
*
* NB: for consistency of results, this fragment of code had better
- * match what clause_selectivity() would do in the cases it handles.
+ * match what clause_selectivity() would do in the cases it
+ * handles.
*/
if (varRelid != 0 || NumRelids(clause) == 1)
{
@@ -148,7 +149,7 @@ clauselist_selectivity(Query *root,
get_leftop((Expr *) clause);
if (is_pseudo_constant_clause((Node *) other))
{
- Oid opno = ((Oper *) ((Expr *) clause)->oper)->opno;
+ Oid opno = ((Oper *) ((Expr *) clause)->oper)->opno;
RegProcedure oprrest = get_oprrest(opno);
if (!oprrest)
@@ -156,15 +157,16 @@ clauselist_selectivity(Query *root,
else
s2 = restriction_selectivity(oprrest, opno,
getrelid(relidx,
- root->rtable),
+ root->rtable),
attno,
constval, flag);
/*
- * If we reach here, we have computed the same result that
- * clause_selectivity would, so we can just use s2 if it's
- * the wrong oprrest. But if it's the right oprrest, add
- * the clause to rqlist for later processing.
+ * If we reach here, we have computed the same result
+ * that clause_selectivity would, so we can just use
+ * s2 if it's the wrong oprrest. But if it's the
+ * right oprrest, add the clause to rqlist for later
+ * processing.
*/
switch (oprrest)
{
@@ -384,18 +386,20 @@ clause_selectivity(Query *root,
if (rte->subquery)
{
+
/*
- * XXX not smart about subquery references...
- * any way to do better?
+ * XXX not smart about subquery references... any way to
+ * do better?
*/
s1 = 0.5;
}
else
{
+
/*
- * A Var at the top of a clause must be a bool Var.
- * This is equivalent to the clause reln.attribute = 't',
- * so we compute the selectivity as if that is what we have.
+ * A Var at the top of a clause must be a bool Var. This
+ * is equivalent to the clause reln.attribute = 't', so we
+ * compute the selectivity as if that is what we have.
*/
s1 = restriction_selectivity(F_EQSEL,
BooleanEqualOperator,
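
The clausesel.c hunks above concern clauselist_selectivity(), which multiplies together the selectivities of clauses it cannot pair up as range queries, and which falls back to 0.5 for subquery references. The standalone sketch below only shows that multiplicative combination with made-up selectivity values; it is not the backend routine.

/* Standalone sketch of how independent restriction selectivities combine
 * multiplicatively in clauselist_selectivity().  The numbers are invented
 * for illustration. */
#include <stdio.h>

int
main(void)
{
    double s[] = {0.10,     /* an ordinary "col = const" estimate */
                  0.50,     /* default used for a subquery Var (see above) */
                  0.25};    /* another independent clause */
    double result = 1.0;

    for (int i = 0; i < 3; i++)
        result *= s[i];

    printf("combined selectivity = %g\n", result);   /* 0.0125 */
    return 0;
}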
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 8e88e46d571..d5b343a90cf 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -41,7 +41,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.68 2001/02/16 00:03:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.69 2001/03/22 03:59:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,11 +67,11 @@
#define LOG6(x) (log(x) / 1.79175946922805)
-double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
-double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
-double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
-double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
-double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
+double effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
+double random_page_cost = DEFAULT_RANDOM_PAGE_COST;
+double cpu_tuple_cost = DEFAULT_CPU_TUPLE_COST;
+double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
+double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
Cost disable_cost = 100000000.0;
@@ -117,14 +117,14 @@ cost_seqscan(Path *path, RelOptInfo *baserel)
/*
* disk costs
*
- * The cost of reading a page sequentially is 1.0, by definition.
- * Note that the Unix kernel will typically do some amount of
- * read-ahead optimization, so that this cost is less than the
- * true cost of reading a page from disk. We ignore that issue
- * here, but must take it into account when estimating the cost of
- * non-sequential accesses!
+ * The cost of reading a page sequentially is 1.0, by definition. Note
+ * that the Unix kernel will typically do some amount of read-ahead
+ * optimization, so that this cost is less than the true cost of
+ * reading a page from disk. We ignore that issue here, but must take
+ * it into account when estimating the cost of non-sequential
+ * accesses!
*/
- run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
+ run_cost += baserel->pages; /* sequential fetches with cost 1.0 */
/* CPU costs */
cpu_per_tuple = cpu_tuple_cost + baserel->baserestrictcost;
@@ -600,12 +600,12 @@ cost_hashjoin(Path *path,
/*
* The number of tuple comparisons needed is the number of outer
* tuples times the typical hash bucket size. nodeHash.c tries for
- * average bucket loading of NTUP_PER_BUCKET, but that goal will
- * be reached only if data values are uniformly distributed among
- * the buckets. To be conservative, we scale up the target bucket
- * size by the number of inner rows times inner dispersion, giving
- * an estimate of the typical number of duplicates of each value.
- * We then charge one cpu_operator_cost per tuple comparison.
+ * average bucket loading of NTUP_PER_BUCKET, but that goal will be
+ * reached only if data values are uniformly distributed among the
+ * buckets. To be conservative, we scale up the target bucket size by
+ * the number of inner rows times inner dispersion, giving an estimate
+ * of the typical number of duplicates of each value. We then charge
+ * one cpu_operator_cost per tuple comparison.
*/
run_cost += cpu_operator_cost * outer_path->parent->rows *
NTUP_PER_BUCKET * ceil(inner_path->parent->rows * innerdispersion);
@@ -672,7 +672,7 @@ cost_qual_eval(List *quals)
foreach(l, quals)
{
- Node *qual = (Node *) lfirst(l);
+ Node *qual = (Node *) lfirst(l);
/*
* RestrictInfo nodes contain an eval_cost field reserved for this
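
The costsize.c hunks above restate the sequential-scan cost model: each sequentially fetched page costs 1.0 by definition, plus a per-tuple CPU charge. A minimal standalone sketch of that arithmetic follows; it assumes the commonly cited default of 0.01 for cpu_tuple_cost and uses invented relation sizes and qual costs, so it is not the backend's cost_seqscan().

/* Standalone sketch of the seqscan cost arithmetic described above:
 * one unit per page plus a per-tuple CPU charge.  All values are assumed. */
#include <stdio.h>

int
main(void)
{
    double cpu_tuple_cost = 0.01;       /* assumed DEFAULT_CPU_TUPLE_COST */
    double baserestrictcost = 0.0025;   /* assumed qual-evaluation cost */
    double pages = 1000.0;
    double tuples = 100000.0;

    double run_cost = pages * 1.0;      /* sequential fetches cost 1.0 each */
    run_cost += (cpu_tuple_cost + baserestrictcost) * tuples;

    printf("estimated seqscan run cost = %.2f\n", run_cost);
    return 0;
}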
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index ed5a53db0b9..064a2fafa50 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.102 2001/02/16 03:16:57 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.103 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -183,8 +183,8 @@ create_index_paths(Query *root,
restrictinfo_list);
/*
- * 3. Compute pathkeys describing index's ordering, if any,
- * then see how many of them are actually useful for this query.
+ * 3. Compute pathkeys describing index's ordering, if any, then
+ * see how many of them are actually useful for this query.
*/
index_pathkeys = build_index_pathkeys(root, rel, index,
ForwardScanDirection);
@@ -207,8 +207,9 @@ create_index_paths(Query *root,
NoMovementScanDirection));
/*
- * 5. If the index is ordered, a backwards scan might be interesting.
- * Currently this is only possible for a DESC query result ordering.
+ * 5. If the index is ordered, a backwards scan might be
+ * interesting. Currently this is only possible for a DESC query
+ * result ordering.
*/
if (index_is_ordered)
{
@@ -422,10 +423,11 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
if (and_clause((Node *) orsubclause))
{
+
/*
- * Extract relevant sub-subclauses in indexkey order. This is just
- * like group_clauses_by_indexkey() except that the input and output
- * are lists of bare clauses, not of RestrictInfo nodes.
+ * Extract relevant sub-subclauses in indexkey order. This is
+ * just like group_clauses_by_indexkey() except that the input and
+ * output are lists of bare clauses, not of RestrictInfo nodes.
*/
int *indexkeys = index->indexkeys;
Oid *classes = index->classlist;
@@ -446,8 +448,8 @@ extract_or_indexqual_conditions(RelOptInfo *rel,
}
/*
- * If no clauses match this key, we're done; we don't want to look
- * at keys to its right.
+ * If no clauses match this key, we're done; we don't want to
+ * look at keys to its right.
*/
if (clausegroup == NIL)
break;
@@ -748,8 +750,8 @@ match_clause_to_indexkey(RelOptInfo *rel,
/*
* Check for an indexqual that could be handled by a nestloop
* join. We need the index key to be compared against an
- * expression that uses none of the indexed relation's vars
- * and contains no non-cachable functions.
+ * expression that uses none of the indexed relation's vars and
+ * contains no non-cachable functions.
*/
if (match_index_to_operand(indexkey, leftop, rel, index))
{
@@ -793,7 +795,7 @@ match_clause_to_indexkey(RelOptInfo *rel,
* recognizing binary-compatible datatypes. For example, if we have
* an expression like "oid = 123", the operator will be oideqint4,
* which we need to replace with oideq in order to recognize it as
- * matching an oid_ops index on the oid field. A variant case is where
+ * matching an oid_ops index on the oid field. A variant case is where
* the expression is like "oid::int4 = 123", where the given operator
* will be int4eq and again we need to intuit that we want to use oideq.
*
@@ -832,13 +834,13 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
/*
* Maybe the index uses a binary-compatible operator set.
*
- * Get the nominal input types of the given operator and the actual
- * type (before binary-compatible relabeling) of the index key.
+ * Get the nominal input types of the given operator and the actual type
+ * (before binary-compatible relabeling) of the index key.
*/
oldoptup = SearchSysCache(OPEROID,
ObjectIdGetDatum(expr_op),
0, 0, 0);
- if (! HeapTupleIsValid(oldoptup))
+ if (!HeapTupleIsValid(oldoptup))
return InvalidOid; /* probably can't happen */
oldopform = (Form_pg_operator) GETSTRUCT(oldoptup);
opname = pstrdup(NameStr(oldopform->oprname));
@@ -848,7 +850,7 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
if (indexkey_on_left)
{
- Node *leftop = (Node *) get_leftop(clause);
+ Node *leftop = (Node *) get_leftop(clause);
if (leftop && IsA(leftop, RelabelType))
leftop = ((RelabelType *) leftop)->arg;
@@ -856,7 +858,7 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
}
else
{
- Node *rightop = (Node *) get_rightop(clause);
+ Node *rightop = (Node *) get_rightop(clause);
if (rightop && IsA(rightop, RelabelType))
rightop = ((RelabelType *) rightop)->arg;
@@ -874,9 +876,10 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
return InvalidOid;
/*
- * OK, look for operator of the same name with the indexkey's data type.
- * (In theory this might find a non-semantically-comparable operator,
- * but in practice that seems pretty unlikely for binary-compatible types.)
+ * OK, look for operator of the same name with the indexkey's data
+ * type. (In theory this might find a non-semantically-comparable
+ * operator, but in practice that seems pretty unlikely for
+ * binary-compatible types.)
*/
new_op = compatible_oper_opid(opname, indexkeytype, indexkeytype, true);
@@ -886,8 +889,8 @@ indexable_operator(Expr *clause, Oid opclass, Oid relam,
{
/*
- * OK, we found a binary-compatible operator of the same
- * name; now does it match the index?
+ * OK, we found a binary-compatible operator of the same name;
+ * now does it match the index?
*/
if (indexkey_on_left)
commuted_op = new_op;
@@ -1491,8 +1494,9 @@ match_index_to_operand(int indexkey,
RelOptInfo *rel,
IndexOptInfo *index)
{
+
/*
- * Ignore any RelabelType node above the indexkey. This is needed to
+ * Ignore any RelabelType node above the indexkey. This is needed to
* be able to apply indexscanning in binary-compatible-operator cases.
* Note: we can assume there is at most one RelabelType node;
* eval_const_expressions() will have simplified if more than one.
@@ -1670,7 +1674,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1687,7 +1691,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Like_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1704,7 +1708,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1721,7 +1725,7 @@ match_special_index_operator(Expr *clause, Oid opclass, Oid relam,
patt = DatumGetCString(DirectFunctionCall1(textout,
constvalue));
isIndexable = pattern_fixed_prefix(patt, Pattern_Type_Regex_IC,
- &prefix, &rest) != Pattern_Prefix_None;
+ &prefix, &rest) != Pattern_Prefix_None;
if (prefix)
pfree(prefix);
pfree(patt);
@@ -1983,8 +1987,8 @@ prefix_quals(Var *leftop, Oid expr_op,
result = makeList1(expr);
/*
- * If we can create a string larger than the prefix, we can say
- * "x < greaterstr".
+ * If we can create a string larger than the prefix, we can say "x <
+ * greaterstr".
*/
greaterstr = make_greater_string(prefix, datatype);
if (greaterstr)
@@ -2025,6 +2029,7 @@ find_operator(const char *opname, Oid datatype)
static Datum
string_to_datum(const char *str, Oid datatype)
{
+
/*
* We cheat a little by assuming that textin() will do for bpchar and
* varchar constants too...
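
The indxpath.c hunks above touch prefix_quals() and make_greater_string(): a LIKE pattern with a fixed prefix can be rewritten as "x >= prefix", and, when a string larger than the prefix can be constructed, "x < greaterstr" as well. The standalone sketch below naively bumps the last byte of the prefix; the real make_greater_string() has to worry about encoding and multibyte issues that this ignores.

/* Standalone sketch of turning a fixed LIKE prefix into range quals,
 * as the prefix_quals() comment above describes.  Naive and illustrative:
 * it assumes single-byte characters and a simple pattern. */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *pattern = "abc%";
    char prefix[16];
    char greater[16];

    /* take the fixed prefix before the first wildcard */
    size_t n = strcspn(pattern, "%_");
    memcpy(prefix, pattern, n);
    prefix[n] = '\0';

    /* derive a string just larger than the prefix */
    strcpy(greater, prefix);
    if (n > 0)
        greater[n - 1]++;

    printf("col >= '%s' AND col < '%s'\n", prefix, greater);
    return 0;
}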
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index cfbfb56c902..bfd246388b4 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.61 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.62 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -25,32 +25,32 @@
#include "utils/lsyscache.h"
static void sort_inner_and_outer(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, List *mergeclause_list,
- JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, List *mergeclause_list,
+ JoinType jointype);
static void match_unsorted_outer(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, List *mergeclause_list,
- JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, List *mergeclause_list,
+ JoinType jointype);
#ifdef NOT_USED
static void match_unsorted_inner(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, List *mergeclause_list,
- JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, List *mergeclause_list,
+ JoinType jointype);
#endif
static void hash_inner_and_outer(Query *root, RelOptInfo *joinrel,
- RelOptInfo *outerrel, RelOptInfo *innerrel,
- List *restrictlist, JoinType jointype);
+ RelOptInfo *outerrel, RelOptInfo *innerrel,
+ List *restrictlist, JoinType jointype);
static Path *best_innerjoin(List *join_paths, List *outer_relid,
- JoinType jointype);
+ JoinType jointype);
static Selectivity estimate_dispersion(Query *root, Var *var);
static List *select_mergejoin_clauses(RelOptInfo *joinrel,
- RelOptInfo *outerrel,
- RelOptInfo *innerrel,
- List *restrictlist,
- JoinType jointype);
+ RelOptInfo *outerrel,
+ RelOptInfo *innerrel,
+ List *restrictlist,
+ JoinType jointype);
/*
@@ -160,26 +160,27 @@ sort_inner_and_outer(Query *root,
* generate a differently-sorted result path at essentially the same
* cost. We have no basis for choosing one over another at this level
* of joining, but some sort orders may be more useful than others for
- * higher-level mergejoins, so it's worth considering multiple orderings.
+ * higher-level mergejoins, so it's worth considering multiple
+ * orderings.
*
* Actually, it's not quite true that every mergeclause ordering will
* generate a different path order, because some of the clauses may be
- * redundant. Therefore, what we do is convert the mergeclause list to
- * a list of canonical pathkeys, and then consider different orderings
- * of the pathkeys.
+ * redundant. Therefore, what we do is convert the mergeclause list
+ * to a list of canonical pathkeys, and then consider different
+ * orderings of the pathkeys.
*
- * Generating a path for *every* permutation of the pathkeys doesn't
- * seem like a winning strategy; the cost in planning time is too high.
- * For now, we generate one path for each pathkey, listing that pathkey
- * first and the rest in random order. This should allow at
- * least a one-clause mergejoin without re-sorting against any other
- * possible mergejoin partner path. But if we've not guessed the
- * right ordering of secondary keys, we may end up evaluating
- * clauses as qpquals when they could have been done as mergeclauses.
- * We need to figure out a better way. (Two possible approaches: look
- * at all the relevant index relations to suggest plausible sort
- * orders, or make just one output path and somehow mark it as having
- * a sort-order that can be rearranged freely.)
+ * Generating a path for *every* permutation of the pathkeys doesn't seem
+ * like a winning strategy; the cost in planning time is too high. For
+ * now, we generate one path for each pathkey, listing that pathkey
+ * first and the rest in random order. This should allow at least a
+ * one-clause mergejoin without re-sorting against any other possible
+ * mergejoin partner path. But if we've not guessed the right
+ * ordering of secondary keys, we may end up evaluating clauses as
+ * qpquals when they could have been done as mergeclauses. We need to
+ * figure out a better way. (Two possible approaches: look at all the
+ * relevant index relations to suggest plausible sort orders, or make
+ * just one output path and somehow mark it as having a sort-order
+ * that can be rearranged freely.)
*/
all_pathkeys = make_pathkeys_for_mergeclauses(root,
mergeclause_list,
@@ -200,16 +201,17 @@ sort_inner_and_outer(Query *root,
lremove(front_pathkey,
listCopy(all_pathkeys)));
else
- cur_pathkeys = all_pathkeys; /* no work at first one... */
+ cur_pathkeys = all_pathkeys; /* no work at first one... */
/*
* Select mergeclause(s) that match this sort ordering. If we had
- * redundant merge clauses then we will get a subset of the original
- * clause list. There had better be some match, however...
+ * redundant merge clauses then we will get a subset of the
+ * original clause list. There had better be some match,
+ * however...
*/
cur_mergeclauses = find_mergeclauses_for_pathkeys(root,
cur_pathkeys,
- mergeclause_list);
+ mergeclause_list);
Assert(cur_mergeclauses != NIL);
/*
@@ -334,10 +336,12 @@ match_unsorted_outer(Query *root,
if (nestjoinOK)
{
+
/*
- * Always consider a nestloop join with this outer and cheapest-
- * total-cost inner. Consider nestloops using the cheapest-
- * startup-cost inner as well, and the best innerjoin indexpath.
+ * Always consider a nestloop join with this outer and
+ * cheapest- total-cost inner. Consider nestloops using the
+ * cheapest- startup-cost inner as well, and the best
+ * innerjoin indexpath.
*/
add_path(joinrel, (Path *)
create_nestloop_path(joinrel,
@@ -352,7 +356,7 @@ match_unsorted_outer(Query *root,
create_nestloop_path(joinrel,
jointype,
outerpath,
- innerrel->cheapest_startup_path,
+ innerrel->cheapest_startup_path,
restrictlist,
merge_pathkeys));
if (bestinnerjoin != NULL)
@@ -382,8 +386,8 @@ match_unsorted_outer(Query *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest
* inner. Since a sort will be needed, only cheapest total cost
- * matters. (But create_mergejoin_path will do the right thing
- * if innerrel->cheapest_total_path is already correctly sorted.)
+ * matters. (But create_mergejoin_path will do the right thing if
+ * innerrel->cheapest_total_path is already correctly sorted.)
*/
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
@@ -400,13 +404,14 @@ match_unsorted_outer(Query *root,
* Look for presorted inner paths that satisfy the innersortkey
* list or any truncation thereof. Here, we consider both cheap
* startup cost and cheap total cost. Ignore
- * innerrel->cheapest_total_path, since we already made a path with it.
+ * innerrel->cheapest_total_path, since we already made a path
+ * with it.
*/
num_sortkeys = length(innersortkeys);
if (num_sortkeys > 1)
- trialsortkeys = listCopy(innersortkeys); /* need modifiable copy */
+ trialsortkeys = listCopy(innersortkeys); /* need modifiable copy */
else
- trialsortkeys = innersortkeys; /* won't really truncate */
+ trialsortkeys = innersortkeys; /* won't really truncate */
cheapest_startup_inner = NULL;
cheapest_total_inner = NULL;
@@ -417,8 +422,8 @@ match_unsorted_outer(Query *root,
/*
* Look for an inner path ordered well enough for the first
- * 'sortkeycnt' innersortkeys. NB: trialsortkeys list
- * is modified destructively, which is why we made a copy...
+ * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is
+ * modified destructively, which is why we made a copy...
*/
trialsortkeys = ltruncate(sortkeycnt, trialsortkeys);
innerpath = get_cheapest_path_for_pathkeys(innerrel->pathlist,
@@ -478,8 +483,8 @@ match_unsorted_outer(Query *root,
{
newclauses =
find_mergeclauses_for_pathkeys(root,
- trialsortkeys,
- mergeclauses);
+ trialsortkeys,
+ mergeclauses);
Assert(newclauses != NIL);
}
else
@@ -601,7 +606,7 @@ match_unsorted_inner(Query *root,
if (startupouterpath != NULL && startupouterpath != totalouterpath)
{
merge_pathkeys = build_join_pathkeys(root, joinrel,
- startupouterpath->pathkeys);
+ startupouterpath->pathkeys);
add_path(joinrel, (Path *)
create_mergejoin_path(joinrel,
jointype,
@@ -696,8 +701,8 @@ hash_inner_and_outer(Query *root,
* estimate dispersion of inner var for costing purposes.
*
* Since we tend to visit the same clauses over and over when
- * planning a large query, we cache the dispersion estimates in the
- * RestrictInfo node to avoid repeated lookups of statistics.
+ * planning a large query, we cache the dispersion estimates in
+ * the RestrictInfo node to avoid repeated lookups of statistics.
*/
if (intMember(left->varno, outerrelids) &&
intMember(right->varno, innerrelids))
@@ -793,13 +798,13 @@ best_innerjoin(List *join_paths, Relids outer_relids, JoinType jointype)
foreach(join_path, join_paths)
{
- IndexPath *path = (IndexPath *) lfirst(join_path);
+ IndexPath *path = (IndexPath *) lfirst(join_path);
Assert(IsA(path, IndexPath));
/*
- * If processing an outer join, only use explicit join clauses in the
- * inner indexscan. For inner joins we need not be so picky.
+ * If processing an outer join, only use explicit join clauses in
+ * the inner indexscan. For inner joins we need not be so picky.
*/
if (isouterjoin && !path->alljoinquals)
continue;
@@ -879,15 +884,15 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
*right;
/*
- * If processing an outer join, only use its own join clauses in the
- * merge. For inner joins we need not be so picky.
+ * If processing an outer join, only use its own join clauses in
+ * the merge. For inner joins we need not be so picky.
*
* Furthermore, if it is a right/full join then *all* the explicit
- * join clauses must be mergejoinable, else the executor will fail.
- * If we are asked for a right join then just return NIL to indicate
- * no mergejoin is possible (we can handle it as a left join instead).
- * If we are asked for a full join then emit an error, because there
- * is no fallback.
+ * join clauses must be mergejoinable, else the executor will
+ * fail. If we are asked for a right join then just return NIL to
+ * indicate no mergejoin is possible (we can handle it as a left
+ * join instead). If we are asked for a full join then emit an
+ * error, because there is no fallback.
*/
if (isouterjoin)
{
@@ -897,7 +902,7 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
{
case JOIN_RIGHT:
if (restrictinfo->mergejoinoperator == InvalidOid)
- return NIL; /* not mergejoinable */
+ return NIL; /* not mergejoinable */
break;
case JOIN_FULL:
if (restrictinfo->mergejoinoperator == InvalidOid)
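
The joinpath.c hunks above explain how sort_inner_and_outer() avoids generating a path for every permutation of the merge pathkeys: it emits one ordering per pathkey, listing that key first and the remaining keys after it (the comment says "in random order"; this sketch simply keeps their original order). The key names below are placeholders, not planner data structures.

/* Standalone sketch of the one-ordering-per-pathkey strategy described in
 * sort_inner_and_outer().  Keys are just labels for illustration. */
#include <stdio.h>

int
main(void)
{
    const char *keys[] = {"A", "B", "C"};
    int nkeys = 3;

    for (int front = 0; front < nkeys; front++)
    {
        printf("ordering %d:", front + 1);
        printf(" %s", keys[front]);
        for (int i = 0; i < nkeys; i++)
            if (i != front)
                printf(" %s", keys[i]);
        printf("\n");
    }
    return 0;
}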
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 2492f17ea9b..929a977112d 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.51 2001/02/16 00:03:07 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.52 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
static RelOptInfo *make_join_rel(Query *root, RelOptInfo *rel1,
- RelOptInfo *rel2, JoinType jointype);
+ RelOptInfo *rel2, JoinType jointype);
/*
@@ -44,18 +44,19 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* First, consider left-sided and right-sided plans, in which rels of
- * exactly level-1 member relations are joined against initial relations.
- * We prefer to join using join clauses, but if we find a rel of level-1
- * members that has no join clauses, we will generate Cartesian-product
- * joins against all initial rels not already contained in it.
+ * exactly level-1 member relations are joined against initial
+ * relations. We prefer to join using join clauses, but if we find a
+ * rel of level-1 members that has no join clauses, we will generate
+ * Cartesian-product joins against all initial rels not already
+ * contained in it.
*
- * In the first pass (level == 2), we try to join each initial rel to each
- * initial rel that appears later in joinrels[1]. (The mirror-image
- * joins are handled automatically by make_join_rel.) In later
- * passes, we try to join rels of size level-1 from joinrels[level-1]
- * to each initial rel in joinrels[1].
+ * In the first pass (level == 2), we try to join each initial rel to
+ * each initial rel that appears later in joinrels[1]. (The
+ * mirror-image joins are handled automatically by make_join_rel.) In
+ * later passes, we try to join rels of size level-1 from
+ * joinrels[level-1] to each initial rel in joinrels[1].
*/
- foreach(r, joinrels[level-1])
+ foreach(r, joinrels[level - 1])
{
RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
List *other_rels;
@@ -73,9 +74,9 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
* Note that if all available join clauses for this rel
* require more than one other rel, we will fail to make any
* joins against it here. That's OK; it'll be considered by
- * "bushy plan" join code in a higher-level pass where we
- * have those other rels collected into a join rel. See also
- * the last-ditch case below.
+ * "bushy plan" join code in a higher-level pass where we have
+ * those other rels collected into a join rel. See also the
+ * last-ditch case below.
*/
new_rels = make_rels_by_clause_joins(root,
old_rel,
@@ -94,16 +95,16 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
}
/*
- * At levels above 2 we will generate the same joined relation
- * in multiple ways --- for example (a join b) join c is the same
+ * At levels above 2 we will generate the same joined relation in
+ * multiple ways --- for example (a join b) join c is the same
* RelOptInfo as (b join c) join a, though the second case will
- * add a different set of Paths to it. To avoid making extra work
- * for subsequent passes, do not enter the same RelOptInfo into our
- * output list multiple times.
+ * add a different set of Paths to it. To avoid making extra work
+ * for subsequent passes, do not enter the same RelOptInfo into
+ * our output list multiple times.
*/
foreach(nr, new_rels)
{
- RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
+ RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
@@ -111,20 +112,21 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
}
/*
- * Now, consider "bushy plans" in which relations of k initial rels are
- * joined to relations of level-k initial rels, for 2 <= k <= level-2.
+ * Now, consider "bushy plans" in which relations of k initial rels
+ * are joined to relations of level-k initial rels, for 2 <= k <=
+ * level-2.
*
* We only consider bushy-plan joins for pairs of rels where there is a
* suitable join clause, in order to avoid unreasonable growth of
* planning time.
*/
- for (k = 2; ; k++)
+ for (k = 2;; k++)
{
int other_level = level - k;
/*
- * Since make_join_rel(x, y) handles both x,y and y,x cases,
- * we only need to go as far as the halfway point.
+ * Since make_join_rel(x, y) handles both x,y and y,x cases, we
+ * only need to go as far as the halfway point.
*/
if (k > other_level)
break;
@@ -139,7 +141,7 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
continue; /* we ignore clauseless joins here */
if (k == other_level)
- other_rels = lnext(r); /* only consider remaining rels */
+ other_rels = lnext(r); /* only consider remaining rels */
else
other_rels = joinrels[other_level];
@@ -153,8 +155,8 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* OK, we can build a rel of the right level from this
- * pair of rels. Do so if there is at least one usable
- * join clause.
+ * pair of rels. Do so if there is at least one
+ * usable join clause.
*/
foreach(i, old_rel->joininfo)
{
@@ -170,7 +172,8 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/* Avoid making duplicate entries ... */
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
- break; /* need not consider more joininfos */
+ break; /* need not consider more
+ * joininfos */
}
}
}
@@ -180,31 +183,34 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
/*
* Last-ditch effort: if we failed to find any usable joins so far,
- * force a set of cartesian-product joins to be generated. This
+ * force a set of cartesian-product joins to be generated. This
* handles the special case where all the available rels have join
- * clauses but we cannot use any of the joins yet. An example is
+ * clauses but we cannot use any of the joins yet. An example is
*
* SELECT * FROM a,b,c WHERE (a.f1 + b.f2 + c.f3) = 0;
*
- * The join clause will be usable at level 3, but at level 2 we have
- * no choice but to make cartesian joins. We consider only left-sided
+ * The join clause will be usable at level 3, but at level 2 we have no
+ * choice but to make cartesian joins. We consider only left-sided
* and right-sided cartesian joins in this case (no bushy).
*/
if (result_rels == NIL)
{
- /* This loop is just like the first one, except we always call
+
+ /*
+ * This loop is just like the first one, except we always call
* make_rels_by_clauseless_joins().
*/
- foreach(r, joinrels[level-1])
+ foreach(r, joinrels[level - 1])
{
RelOptInfo *old_rel = (RelOptInfo *) lfirst(r);
List *other_rels;
if (level == 2)
- other_rels = lnext(r); /* only consider remaining initial
- * rels */
+ other_rels = lnext(r); /* only consider remaining initial
+ * rels */
else
- other_rels = joinrels[1]; /* consider all initial rels */
+ other_rels = joinrels[1]; /* consider all initial
+ * rels */
new_rels = make_rels_by_clauseless_joins(root,
old_rel,
@@ -212,7 +218,7 @@ make_rels_by_joins(Query *root, int level, List **joinrels)
foreach(nr, new_rels)
{
- RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
+ RelOptInfo *jrel = (RelOptInfo *) lfirst(nr);
if (!ptrMember(jrel, result_rels))
result_rels = lcons(jrel, result_rels);
@@ -266,6 +272,7 @@ make_rels_by_clause_joins(Query *root,
RelOptInfo *jrel;
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
+
/*
* Avoid entering same joinrel into our output list more
* than once. (make_rels_by_joins doesn't really care,
@@ -310,9 +317,10 @@ make_rels_by_clauseless_joins(Query *root,
RelOptInfo *jrel;
jrel = make_join_rel(root, old_rel, other_rel, JOIN_INNER);
+
/*
- * As long as given other_rels are distinct, don't need
- * to test to see if jrel is already part of output list.
+ * As long as given other_rels are distinct, don't need to
+ * test to see if jrel is already part of output list.
*/
result = lcons(jrel, result);
}
@@ -325,7 +333,7 @@ make_rels_by_clauseless_joins(Query *root,
/*
* make_jointree_rel
 * Find or build a RelOptInfo join rel representing a specific
- * jointree item. For JoinExprs, we only consider the construction
+ * jointree item. For JoinExprs, we only consider the construction
* path that corresponds exactly to what the user wrote.
*/
RelOptInfo *
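
The joinrels.c hunks above walk through make_rels_by_joins(): a target level is first built by joining (level-1)-item rels to the initial rels, then from bushy (k, level-k) pairs with k going only up to the halfway point, since make_join_rel() already covers each pair's mirror image. The standalone sketch below just enumerates those decompositions for one invented level; it is not the planner loop itself.

/* Standalone sketch of the level decomposition in make_rels_by_joins().
 * The target level is invented for illustration. */
#include <stdio.h>

int
main(void)
{
    int level = 6;

    printf("level %d from (%d,1) joins against the initial rels\n",
           level, level - 1);

    for (int k = 2;; k++)
    {
        int other_level = level - k;

        if (k > other_level)
            break;              /* mirror-image pairs already covered */
        printf("level %d bushy join: %d-item rels with %d-item rels\n",
               level, k, other_level);
    }
    return 0;
}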
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index e96a96f6deb..f93a027cd53 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.30 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/pathkeys.c,v 1.31 2001/03/22 03:59:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -31,7 +31,7 @@
static PathKeyItem *makePathKeyItem(Node *key, Oid sortop);
static List *make_canonical_pathkey(Query *root, PathKeyItem *item);
static Var *find_indexkey_var(Query *root, RelOptInfo *rel,
- AttrNumber varattno);
+ AttrNumber varattno);
/*
@@ -89,10 +89,10 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
* into our new set. When done, we add the new set to the front of
* equi_key_list.
*
- * It may well be that the two items we're given are already known to
- * be equijoin-equivalent, in which case we don't need to change our
- * data structure. If we find both of them in the same equivalence
- * set to start with, we can quit immediately.
+ * It may well be that the two items we're given are already known to be
+ * equijoin-equivalent, in which case we don't need to change our data
+ * structure. If we find both of them in the same equivalence set to
+ * start with, we can quit immediately.
*
* This is a standard UNION-FIND problem, for which there exist better
* data structures than simple lists. If this code ever proves to be
@@ -109,7 +109,11 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
if (item1here || item2here)
{
- /* If find both in same equivalence set, no need to do any more */
+
+ /*
+ * If find both in same equivalence set, no need to do any
+ * more
+ */
if (item1here && item2here)
{
/* Better not have seen only one in an earlier set... */
@@ -126,7 +130,8 @@ add_equijoined_keys(Query *root, RestrictInfo *restrictinfo)
/*
* Remove old set from equi_key_list. NOTE this does not
- * change lnext(cursetlink), so the foreach loop doesn't break.
+ * change lnext(cursetlink), so the foreach loop doesn't
+ * break.
*/
root->equi_key_list = lremove(curset, root->equi_key_list);
freeList(curset); /* might as well recycle old cons cells */
@@ -171,8 +176,8 @@ generate_implied_equalities(Query *root)
continue;
/*
- * Match each item in the set with all that appear after it
- * (it's sufficient to generate A=B, need not process B=A too).
+ * Match each item in the set with all that appear after it (it's
+ * sufficient to generate A=B, need not process B=A too).
*/
foreach(ptr1, curset)
{
@@ -246,11 +251,12 @@ canonicalize_pathkeys(Query *root, List *pathkeys)
Assert(pathkey != NIL);
item = (PathKeyItem *) lfirst(pathkey);
cpathkey = make_canonical_pathkey(root, item);
+
/*
- * Eliminate redundant ordering requests --- ORDER BY A,A
- * is the same as ORDER BY A. We want to check this only
- * after we have canonicalized the keys, so that equivalent-key
- * knowledge is used when deciding if an item is redundant.
+ * Eliminate redundant ordering requests --- ORDER BY A,A is the
+ * same as ORDER BY A. We want to check this only after we have
+ * canonicalized the keys, so that equivalent-key knowledge is
+ * used when deciding if an item is redundant.
*/
if (!ptrMember(cpathkey, new_pathkeys))
new_pathkeys = lappend(new_pathkeys, cpathkey);
@@ -285,8 +291,8 @@ compare_pathkeys(List *keys1, List *keys2)
List *subkey2 = lfirst(key2);
/*
- * XXX would like to check that we've been given canonicalized input,
- * but query root not accessible here...
+ * XXX would like to check that we've been given canonicalized
+ * input, but query root not accessible here...
*/
#ifdef NOT_USED
Assert(ptrMember(subkey1, root->equi_key_list));
@@ -295,7 +301,7 @@ compare_pathkeys(List *keys1, List *keys2)
/*
* We will never have two subkeys where one is a subset of the
- * other, because of the canonicalization process. Either they
+ * other, because of the canonicalization process. Either they
* are equal or they ain't. Furthermore, we only need pointer
* comparison to detect equality.
*/
@@ -555,9 +561,10 @@ build_index_pathkeys(Query *root,
/* OK, make a sublist for this sort key */
item = makePathKeyItem((Node *) relvar, sortop);
cpathkey = make_canonical_pathkey(root, item);
+
/*
- * Eliminate redundant ordering info; could happen if query
- * is such that index keys are equijoined...
+ * Eliminate redundant ordering info; could happen if query is
+ * such that index keys are equijoined...
*/
if (!ptrMember(cpathkey, retval))
retval = lappend(retval, cpathkey);
@@ -693,7 +700,7 @@ make_pathkeys_for_sortclauses(List *sortclauses,
*
* RestrictInfo contains fields in which we may cache the result
* of looking up the canonical pathkeys for the left and right sides
- * of the mergeclause. (Note that in normal cases they will be the
+ * of the mergeclause. (Note that in normal cases they will be the
* same, but not if the mergeclause appears above an OUTER JOIN.)
* This is a worthwhile savings because these routines will be invoked
* many times when dealing with a many-relation query.
@@ -756,8 +763,8 @@ find_mergeclauses_for_pathkeys(Query *root,
/*
* We can match a pathkey against either left or right side of any
* mergejoin clause we haven't used yet. For the moment we use a
- * dumb "greedy" algorithm with no backtracking. Is it worth being
- * any smarter to make a longer list of usable mergeclauses?
+ * dumb "greedy" algorithm with no backtracking. Is it worth
+ * being any smarter to make a longer list of usable mergeclauses?
* Probably not.
*/
foreach(j, restrictinfos)
@@ -765,9 +772,10 @@ find_mergeclauses_for_pathkeys(Query *root,
RestrictInfo *restrictinfo = lfirst(j);
cache_mergeclause_pathkeys(root, restrictinfo);
+
/*
- * We can compare canonical pathkey sublists by simple
- * pointer equality; see compare_pathkeys.
+ * We can compare canonical pathkey sublists by simple pointer
+ * equality; see compare_pathkeys.
*/
if ((pathkey == restrictinfo->left_pathkey ||
pathkey == restrictinfo->right_pathkey) &&
@@ -830,7 +838,7 @@ make_pathkeys_for_mergeclauses(Query *root,
cache_mergeclause_pathkeys(root, restrictinfo);
key = (Node *) get_leftop(restrictinfo->clause);
- if (IsA(key, Var) && intMember(((Var *) key)->varno, rel->relids))
+ if (IsA(key, Var) &&intMember(((Var *) key)->varno, rel->relids))
{
/* Rel is left side of mergeclause */
pathkey = restrictinfo->left_pathkey;
@@ -838,7 +846,7 @@ make_pathkeys_for_mergeclauses(Query *root,
else
{
key = (Node *) get_rightop(restrictinfo->clause);
- if (IsA(key, Var) && intMember(((Var *) key)->varno, rel->relids))
+ if (IsA(key, Var) &&intMember(((Var *) key)->varno, rel->relids))
{
/* Rel is right side of mergeclause */
pathkey = restrictinfo->right_pathkey;
@@ -851,13 +859,14 @@ make_pathkeys_for_mergeclauses(Query *root,
}
/*
- * When we are given multiple merge clauses, it's possible that some
- * clauses refer to the same vars as earlier clauses. There's no
- * reason for us to specify sort keys like (A,B,A) when (A,B) will
- * do --- and adding redundant sort keys makes add_path think that
- * this sort order is different from ones that are really the same,
- * so don't do it. Since we now have a canonicalized pathkey,
- * a simple ptrMember test is sufficient to detect redundant keys.
+ * When we are given multiple merge clauses, it's possible that
+ * some clauses refer to the same vars as earlier clauses.
+ * There's no reason for us to specify sort keys like (A,B,A) when
+ * (A,B) will do --- and adding redundant sort keys makes add_path
+ * think that this sort order is different from ones that are
+ * really the same, so don't do it. Since we now have a
+ * canonicalized pathkey, a simple ptrMember test is sufficient to
+ * detect redundant keys.
*/
if (!ptrMember(pathkey, pathkeys))
pathkeys = lappend(pathkeys, pathkey);
@@ -911,6 +920,7 @@ pathkeys_useful_for_merging(Query *root, RelOptInfo *rel, List *pathkeys)
if (restrictinfo->mergejoinoperator == InvalidOid)
continue;
cache_mergeclause_pathkeys(root, restrictinfo);
+
/*
* We can compare canonical pathkey sublists by simple
* pointer equality; see compare_pathkeys.
@@ -984,7 +994,9 @@ truncate_useless_pathkeys(Query *root,
nuseful2 = pathkeys_useful_for_ordering(root, pathkeys);
if (nuseful2 > nuseful)
nuseful = nuseful2;
- /* Note: not safe to modify input list destructively, but we can avoid
+
+ /*
+ * Note: not safe to modify input list destructively, but we can avoid
* copying the list if we're not actually going to change it
*/
if (nuseful == length(pathkeys))
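
The pathkeys.c hunks above note that maintaining the equijoin equivalence sets is a standard UNION-FIND problem, for which better data structures than simple lists exist. The standalone sketch below shows the idea with a parent-array union-find rather than the planner's lists of PathKeyItems; the key numbers and the clauses in the comments are invented.

/* Standalone sketch of equivalence-set maintenance along the lines of
 * add_equijoined_keys(), using a parent-array union-find instead of the
 * simple lists the planner keeps in equi_key_list. */
#include <stdio.h>

#define NKEYS 5

static int parent[NKEYS];

static int
find(int x)
{
    while (parent[x] != x)
        x = parent[x] = parent[parent[x]];      /* path halving */
    return x;
}

static void
equate(int a, int b)            /* record "a = b" from an equijoin clause */
{
    parent[find(a)] = find(b);
}

int
main(void)
{
    for (int i = 0; i < NKEYS; i++)
        parent[i] = i;

    equate(0, 1);               /* a.x = b.y */
    equate(2, 3);               /* c.u = d.v */
    equate(1, 2);               /* b.y = c.u : merges the two sets */

    printf("keys 0 and 3 equivalent? %s\n",
           find(0) == find(3) ? "yes" : "no");
    return 0;
}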
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 738b696306a..8c3b00289d3 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.103 2001/01/24 19:42:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.104 2001/03/22 03:59:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -42,19 +42,19 @@ static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
static TidScan *create_tidscan_plan(TidPath *best_path, List *tlist,
List *scan_clauses);
static SubqueryScan *create_subqueryscan_plan(Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static NestLoop *create_nestloop_plan(NestPath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
+ List *joinclauses, List *otherclauses,
+ Plan *outer_plan, List *outer_tlist,
+ Plan *inner_plan, List *inner_tlist);
static MergeJoin *create_mergejoin_plan(MergePath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
+ List *joinclauses, List *otherclauses,
+ Plan *outer_plan, List *outer_tlist,
+ Plan *inner_plan, List *inner_tlist);
static HashJoin *create_hashjoin_plan(HashPath *best_path, List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *outer_plan, List *outer_tlist,
- Plan *inner_plan, List *inner_tlist);
+ List *joinclauses, List *otherclauses,
+ Plan *outer_plan, List *outer_tlist,
+ Plan *inner_plan, List *inner_tlist);
static List *fix_indxqual_references(List *indexquals, IndexPath *index_path);
static List *fix_indxqual_sublist(List *indexqual, int baserelid, Oid relam,
Form_pg_index index);
@@ -72,20 +72,20 @@ static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid,
static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
List *tideval);
static NestLoop *make_nestloop(List *tlist,
- List *joinclauses, List *otherclauses,
- Plan *lefttree, Plan *righttree,
- JoinType jointype);
+ List *joinclauses, List *otherclauses,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype);
static HashJoin *make_hashjoin(List *tlist,
- List *joinclauses, List *otherclauses,
- List *hashclauses,
- Plan *lefttree, Plan *righttree,
- JoinType jointype);
+ List *joinclauses, List *otherclauses,
+ List *hashclauses,
+ Plan *lefttree, Plan *righttree,
+ JoinType jointype);
static Hash *make_hash(List *tlist, Node *hashkey, Plan *lefttree);
static MergeJoin *make_mergejoin(List *tlist,
-