author      Bruce Momjian   2000-04-12 17:17:23 +0000
committer   Bruce Momjian   2000-04-12 17:17:23 +0000
commit      52f77df613cea1803ce86321c37229626d9f213c (patch)
tree        bd9ac9f667f295cb65f4c448a5bb5a062d656b27 /src/backend/access
parent      db4518729d85da83eafdacbcebaeb12618517595 (diff)
Ye-old pgindent run. Same 4-space tabs.
Diffstat (limited to 'src/backend/access')
-rw-r--r--  src/backend/access/common/heaptuple.c     24
-rw-r--r--  src/backend/access/common/indextuple.c     6
-rw-r--r--  src/backend/access/common/tupdesc.c       31
-rw-r--r--  src/backend/access/gist/gist.c            26
-rw-r--r--  src/backend/access/gist/gistscan.c         2
-rw-r--r--  src/backend/access/hash/hash.c            22
-rw-r--r--  src/backend/access/hash/hashfunc.c         8
-rw-r--r--  src/backend/access/hash/hashscan.c         4
-rw-r--r--  src/backend/access/hash/hashsearch.c       4
-rw-r--r--  src/backend/access/heap/heapam.c          84
-rw-r--r--  src/backend/access/heap/hio.c             26
-rw-r--r--  src/backend/access/heap/tuptoaster.c      12
-rw-r--r--  src/backend/access/index/genam.c           9
-rw-r--r--  src/backend/access/index/indexam.c        12
-rw-r--r--  src/backend/access/index/istrat.c          8
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c    14
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c     41
-rw-r--r--  src/backend/access/nbtree/nbtpage.c        4
-rw-r--r--  src/backend/access/nbtree/nbtree.c        39
-rw-r--r--  src/backend/access/nbtree/nbtscan.c       17
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c     75
-rw-r--r--  src/backend/access/nbtree/nbtsort.c       36
-rw-r--r--  src/backend/access/nbtree/nbtutils.c       6
-rw-r--r--  src/backend/access/rtree/rtree.c          22
-rw-r--r--  src/backend/access/rtree/rtscan.c          4
-rw-r--r--  src/backend/access/transam/rmgr.c          2
-rw-r--r--  src/backend/access/transam/transam.c       3
-rw-r--r--  src/backend/access/transam/varsup.c        6
-rw-r--r--  src/backend/access/transam/xact.c         55
-rw-r--r--  src/backend/access/transam/xlog.c        712
30 files changed, 672 insertions, 642 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 4eec6ed6d07..d30e4c7fe64 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.61 2000/01/26 05:55:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.62 2000/04/12 17:14:36 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -137,9 +137,9 @@ DataFill(char *data,
*((int32 *) value[i]));
break;
default:
- Assert(att[i]->attlen >= 0);
- memmove(data, DatumGetPointer(value[i]),
- (size_t)(att[i]->attlen));
+ Assert(att[i]->attlen >= 0);
+ memmove(data, DatumGetPointer(value[i]),
+ (size_t) (att[i]->attlen));
break;
}
data = (char *) att_addlength((long) data, att[i]->attlen, value[i]);
@@ -326,7 +326,7 @@ nocachegetattr(HeapTuple tuple,
Form_pg_attribute *att = tupleDesc->attrs;
int slow = 0; /* do we have to walk nulls? */
- (void)isnull; /*not used*/
+ (void) isnull; /* not used */
#ifdef IN_MACRO
/* This is handled in the macro */
Assert(attnum > 0);
@@ -681,7 +681,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
len += bitmaplen;
}
- hoff = len = MAXALIGN(len); /* be conservative here */
+ hoff = len = MAXALIGN(len); /* be conservative here */
len += ComputeDataSize(tupleDescriptor, value, nulls);
@@ -806,11 +806,9 @@ void
heap_freetuple(HeapTuple htup)
{
if (htup->t_data != NULL)
- if (htup->t_datamcxt != NULL && (char *)(htup->t_data) !=
- ((char *) htup + HEAPTUPLESIZE))
- {
+ if (htup->t_datamcxt != NULL && (char *) (htup->t_data) !=
+ ((char *) htup + HEAPTUPLESIZE))
elog(NOTICE, "TELL Jan Wieck: heap_freetuple() found separate t_data");
- }
pfree(htup);
}
@@ -835,7 +833,7 @@ heap_addheader(uint32 natts, /* max domain index */
len = offsetof(HeapTupleHeaderData, t_bits);
- hoff = len = MAXALIGN(len); /* be conservative */
+ hoff = len = MAXALIGN(len); /* be conservative */
len += structlen;
tuple = (HeapTuple) palloc(HEAPTUPLESIZE + len);
tuple->t_datamcxt = CurrentMemoryContext;
@@ -850,8 +848,8 @@ heap_addheader(uint32 natts, /* max domain index */
td->t_infomask = 0;
td->t_infomask |= HEAP_XMAX_INVALID;
- if (structlen > 0)
- memmove((char *) td + hoff, structure, (size_t)structlen);
+ if (structlen > 0)
+ memmove((char *) td + hoff, structure, (size_t) structlen);
return tuple;
}
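
Several hunks above only re-indent the "hoff = len = MAXALIGN(len)" idiom, which rounds a header length up so tuple data starts on an aligned boundary. A minimal stand-alone sketch of that rounding, assuming an 8-byte maximum alignment for illustration (the real MAXALIGN in PostgreSQL's c.h is platform-dependent):

#include <stdio.h>
#include <stddef.h>

#define MAX_ALIGN 8                      /* assumed platform maximum alignment */
#define MY_MAXALIGN(len) \
    (((size_t) (len) + (MAX_ALIGN - 1)) & ~((size_t) (MAX_ALIGN - 1)))

int main(void)
{
    /* round lengths up to the next multiple of MAX_ALIGN */
    printf("%zu %zu %zu\n",
           MY_MAXALIGN(1), MY_MAXALIGN(8), MY_MAXALIGN(13));   /* prints 8 8 16 */
    return 0;
}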
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index 438318747d2..e11ddbc1dc9 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.42 2000/01/26 05:55:53 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.43 2000/04/12 17:14:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ index_formtuple(TupleDesc tupleDescriptor,
hoff = IndexInfoFindDataOffset(infomask);
size = hoff + ComputeDataSize(tupleDescriptor, value, null);
- size = MAXALIGN(size); /* be conservative */
+ size = MAXALIGN(size); /* be conservative */
tp = (char *) palloc(size);
tuple = (IndexTuple) tp;
@@ -134,7 +134,7 @@ nocache_index_getattr(IndexTuple tup,
int data_off; /* tuple data offset */
Form_pg_attribute *att = tupleDesc->attrs;
- (void)isnull;
+ (void) isnull;
/* ----------------
* sanity checks
* ----------------
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index b2af4ff932e..1e8d2ae0034 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.61 2000/01/31 04:35:48 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.62 2000/04/12 17:14:37 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -229,17 +229,19 @@ FreeTupleDesc(TupleDesc tupdesc)
bool
equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
{
- int i;
+ int i;
if (tupdesc1->natts != tupdesc2->natts)
return false;
for (i = 0; i < tupdesc1->natts; i++)
{
- Form_pg_attribute attr1 = tupdesc1->attrs[i];
- Form_pg_attribute attr2 = tupdesc2->attrs[i];
+ Form_pg_attribute attr1 = tupdesc1->attrs[i];
+ Form_pg_attribute attr2 = tupdesc2->attrs[i];
- /* We do not need to check every single field here, and in fact
- * some fields such as attdisbursion probably shouldn't be compared.
+ /*
+ * We do not need to check every single field here, and in fact
+ * some fields such as attdisbursion probably shouldn't be
+ * compared.
*/
if (strcmp(NameStr(attr1->attname), NameStr(attr2->attname)) != 0)
return false;
@@ -254,8 +256,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
}
if (tupdesc1->constr != NULL)
{
- TupleConstr *constr1 = tupdesc1->constr;
- TupleConstr *constr2 = tupdesc2->constr;
+ TupleConstr *constr1 = tupdesc1->constr;
+ TupleConstr *constr2 = tupdesc2->constr;
if (constr2 == NULL)
return false;
@@ -263,8 +265,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
return false;
for (i = 0; i < (int) constr1->num_defval; i++)
{
- AttrDefault *defval1 = constr1->defval + i;
- AttrDefault *defval2 = constr2->defval + i;
+ AttrDefault *defval1 = constr1->defval + i;
+ AttrDefault *defval2 = constr2->defval + i;
if (defval1->adnum != defval2->adnum)
return false;
@@ -275,8 +277,8 @@ equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2)
return false;
for (i = 0; i < (int) constr1->num_check; i++)
{
- ConstrCheck *check1 = constr1->check + i;
- ConstrCheck *check2 = constr2->check + i;
+ ConstrCheck *check1 = constr1->check + i;
+ ConstrCheck *check2 = constr2->check + i;
if (strcmp(check1->ccname, check2->ccname) != 0)
return false;
@@ -585,8 +587,9 @@ BuildDescForRelation(List *schema, char *relname)
constr->has_not_null = true;
desc->attrs[attnum - 1]->attnotnull = entry->is_not_null;
- /* Note we copy only pre-cooked default expressions.
- * Digestion of raw ones is someone else's problem.
+ /*
+ * Note we copy only pre-cooked default expressions. Digestion of
+ * raw ones is someone else's problem.
*/
if (entry->cooked_default != NULL)
{
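
The equalTupleDescs() hunks above adjust only whitespace and comment wrapping; the function itself walks the two attribute arrays and compares a chosen subset of fields. A self-contained sketch of that comparison style, using hypothetical types rather than the catalog structs:

#include <string.h>
#include <stdbool.h>

typedef struct
{
    char name[64];
    int  typid;
    int  len;
} Attr;                                  /* hypothetical stand-in for Form_pg_attribute */

static bool
equal_attr_arrays(const Attr *a, const Attr *b, int natts1, int natts2)
{
    int i;

    if (natts1 != natts2)
        return false;
    for (i = 0; i < natts1; i++)
    {
        /* compare only the fields that matter for descriptor equality */
        if (strcmp(a[i].name, b[i].name) != 0)
            return false;
        if (a[i].typid != b[i].typid || a[i].len != b[i].len)
            return false;
    }
    return true;
}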
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 9e93f1d652d..66a3702e438 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -6,7 +6,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.52 2000/03/17 02:36:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.53 2000/04/12 17:14:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,8 +52,10 @@ void gistdelete(Relation r, ItemPointer tid);
static IndexTuple gist_tuple_replacekey(Relation r, GISTENTRY entry, IndexTuple t);
static void gistcentryinit(GISTSTATE *giststate, GISTENTRY *e, char *pr,
Relation r, Page pg, OffsetNumber o, int b, bool l);
+
#ifdef GISTDEBUG
static char *int_range_out(INTRANGE *r);
+
#endif
/*
@@ -98,7 +100,7 @@ gistbuild(Relation heap,
/* no locking is needed */
- CommandCounterIncrement(); /* so we can see the new pg_index tuple */
+ CommandCounterIncrement(); /* so we can see the new pg_index tuple */
initGISTstate(&giststate, index);
@@ -186,7 +188,7 @@ gistbuild(Relation heap,
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
- if (! ExecQual((List *) pred, econtext, false))
+ if (!ExecQual((List *) pred, econtext, false))
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
@@ -272,18 +274,18 @@ gistbuild(Relation heap,
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
- * index we just created. But, only update statistics during
- * normal index definitions, not for indices on system catalogs
- * created during bootstrap processing. We must close the relations
- * before updating statistics to guarantee that the relcache entries
- * are flushed when we increment the command counter in UpdateStats().
- * But we do not release any locks on the relations; those will be
- * held until end of transaction.
+ * index we just created. But, only update statistics during normal
+ * index definitions, not for indices on system catalogs created
+ * during bootstrap processing. We must close the relations before
+ * updating statistics to guarantee that the relcache entries are
+ * flushed when we increment the command counter in UpdateStats(). But
+ * we do not release any locks on the relations; those will be held
+ * until end of transaction.
*/
if (IsNormalProcessingMode())
{
- Oid hrelid = RelationGetRelid(heap);
- Oid irelid = RelationGetRelid(index);
+ Oid hrelid = RelationGetRelid(heap);
+ Oid irelid = RelationGetRelid(index);
bool inplace = IsReindexProcessing();
heap_close(heap, NoLock);
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index a7d1faf43db..2196e897e4a 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -266,7 +266,7 @@ gistdropscan(IndexScanDesc s)
prev = l;
if (l == (GISTScanList) NULL)
- elog(ERROR, "GiST scan list corrupted -- cannot find 0x%p", (void*)s);
+ elog(ERROR, "GiST scan list corrupted -- cannot find 0x%p", (void *) s);
if (prev == (GISTScanList) NULL)
GISTScans = l->gsl_next;
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index d116aa1a499..1e5bc15bf2f 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.36 2000/03/01 05:39:22 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.37 2000/04/12 17:14:43 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
@@ -149,7 +149,7 @@ hashbuild(Relation heap,
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
- if (! ExecQual((List *) pred, econtext, false))
+ if (!ExecQual((List *) pred, econtext, false))
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
@@ -230,18 +230,18 @@ hashbuild(Relation heap,
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
- * index we just created. But, only update statistics during
- * normal index definitions, not for indices on system catalogs
- * created during bootstrap processing. We must close the relations
- * before updating statistics to guarantee that the relcache entries
- * are flushed when we increment the command counter in UpdateStats().
- * But we do not release any locks on the relations; those will be
- * held until end of transaction.
+ * index we just created. But, only update statistics during normal
+ * index definitions, not for indices on system catalogs created
+ * during bootstrap processing. We must close the relations before
+ * updating statistics to guarantee that the relcache entries are
+ * flushed when we increment the command counter in UpdateStats(). But
+ * we do not release any locks on the relations; those will be held
+ * until end of transaction.
*/
if (IsNormalProcessingMode())
{
- Oid hrelid = RelationGetRelid(heap);
- Oid irelid = RelationGetRelid(index);
+ Oid hrelid = RelationGetRelid(heap);
+ Oid irelid = RelationGetRelid(index);
bool inplace = IsReindexProcessing();
heap_close(heap, NoLock);
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index 78af6353f0b..80d9ac3741c 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.24 2000/02/21 03:36:46 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.25 2000/04/12 17:14:44 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -146,14 +146,14 @@ hashoidvector(Oid *key)
int i;
uint32 result = 0;
- for (i = INDEX_MAX_KEYS; --i >= 0; )
+ for (i = INDEX_MAX_KEYS; --i >= 0;)
result = (result << 1) ^ (~(uint32) key[i]);
return result;
}
/*
* Note: hashint2vector currently can't be used as a user hash table
- * hash function, because it has no pg_proc entry. We only need it
+ * hash function, because it has no pg_proc entry. We only need it
* for catcache indexing.
*/
uint32
@@ -162,7 +162,7 @@ hashint2vector(int16 *key)
int i;
uint32 result = 0;
- for (i = INDEX_MAX_KEYS; --i >= 0; )
+ for (i = INDEX_MAX_KEYS; --i >= 0;)
result = (result << 1) ^ (~(uint32) key[i]);
return result;
}
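
hashoidvector() and hashint2vector() above combine a fixed-length key vector with a shift-and-XOR loop. A stand-alone sketch of that combining step over a plain uint32_t array; the length 8 below is purely illustrative and stands in for whatever INDEX_MAX_KEYS the build defines:

#include <stdint.h>
#include <stdio.h>

#define NKEYS 8                          /* assumed vector length */

static uint32_t
hash_key_vector(const uint32_t *key)
{
    uint32_t result = 0;
    int      i;

    /* same shape as the loop above: walk backwards, shift, XOR the complement */
    for (i = NKEYS; --i >= 0;)
        result = (result << 1) ^ (~key[i]);
    return result;
}

int main(void)
{
    uint32_t key[NKEYS] = {1, 2, 3, 4, 5, 6, 7, 8};

    printf("%u\n", (unsigned) hash_key_vector(key));
    return 0;
}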
diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c
index d44ba69b9bd..979baf8d70c 100644
--- a/src/backend/access/hash/hashscan.c
+++ b/src/backend/access/hash/hashscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.22 2000/01/26 05:55:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.23 2000/04/12 17:14:44 momjian Exp $
*
* NOTES
* Because we can be doing an index scan on a relation while we
@@ -75,7 +75,7 @@ _hash_dropscan(IndexScanDesc scan)
last = chk;
if (chk == (HashScanList) NULL)
- elog(ERROR, "hash scan list trashed; can't find 0x%p", (void*)scan);
+ elog(ERROR, "hash scan list trashed; can't find 0x%p", (void *) scan);
if (last == (HashScanList) NULL)
HashScans = chk->hashsl_next;
diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c
index d6ae498d19d..21f45f47137 100644
--- a/src/backend/access/hash/hashsearch.c
+++ b/src/backend/access/hash/hashsearch.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.23 2000/03/17 02:36:02 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.24 2000/04/12 17:14:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -351,7 +351,7 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf)
opaque = (HashPageOpaque) PageGetSpecialPointer(page);
Assert(opaque->hasho_bucket == bucket);
while (PageIsEmpty(page) &&
- BlockNumberIsValid(opaque->hasho_nextblkno))
+ BlockNumberIsValid(opaque->hasho_nextblkno))
_hash_readnext(rel, &buf, &page, &opaque);
maxoff = PageGetMaxOffsetNumber(page);
offnum = FirstOffsetNumber;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index d51705fa472..29d6c9e0f9e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.66 2000/02/09 03:49:47 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.67 2000/04/12 17:14:45 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -23,7 +23,7 @@
* heap_fetch - retrive tuple with tid
* heap_insert - insert tuple into a relation
* heap_delete - delete a tuple from a relation
- * heap_update - replace a tuple in a relation with another tuple
+ * heap_update - replace a tuple in a relation with another tuple
* heap_markpos - mark scan position
* heap_restrpos - restore position to marked location
*
@@ -120,9 +120,9 @@ initscan(HeapScanDesc scan,
* ----------------
*/
scan->rs_ntup.t_datamcxt = scan->rs_ctup.t_datamcxt =
- scan->rs_ptup.t_datamcxt = NULL;
+ scan->rs_ptup.t_datamcxt = NULL;
scan->rs_ntup.t_data = scan->rs_ctup.t_data =
- scan->rs_ptup.t_data = NULL;
+ scan->rs_ptup.t_data = NULL;
scan->rs_nbuf = scan->rs_cbuf = scan->rs_pbuf = InvalidBuffer;
}
else if (atend)
@@ -188,8 +188,9 @@ unpinscan(HeapScanDesc scan)
if (BufferIsValid(scan->rs_nbuf))
ReleaseBuffer(scan->rs_nbuf);
- /* we don't bother to clear rs_pbuf etc --- caller must
- * reinitialize them if scan descriptor is not being deleted.
+ /*
+ * we don't bother to clear rs_pbuf etc --- caller must reinitialize
+ * them if scan descriptor is not being deleted.
*/
}
@@ -544,7 +545,7 @@ heap_open(Oid relationId, LOCKMODE lockmode)
if (lockmode == NoLock)
return r; /* caller must check RelationIsValid! */
- if (! RelationIsValid(r))
+ if (!RelationIsValid(r))
elog(ERROR, "Relation %u does not exist", relationId);
LockRelation(r, lockmode);
@@ -586,7 +587,7 @@ heap_openr(const char *relationName, LOCKMODE lockmode)
if (lockmode == NoLock)
return r; /* caller must check RelationIsValid! */
- if (! RelationIsValid(r))
+ if (!RelationIsValid(r))
elog(ERROR, "Relation '%s' does not exist", relationName);
LockRelation(r, lockmode);
@@ -646,7 +647,7 @@ heap_beginscan(Relation relation,
* sanity checks
* ----------------
*/
- if (! RelationIsValid(relation))
+ if (!RelationIsValid(relation))
elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
/* ----------------
@@ -659,7 +660,7 @@ heap_beginscan(Relation relation,
* Acquire AccessShareLock for the duration of the scan
*
* Note: we could get an SI inval message here and consequently have
- * to rebuild the relcache entry. The refcount increment above
+ * to rebuild the relcache entry. The refcount increment above
* ensures that we will rebuild it and not just flush it...
* ----------------
*/
@@ -681,6 +682,7 @@ heap_beginscan(Relation relation,
scan->rs_nkeys = (short) nkeys;
if (nkeys)
+
/*
* we do this here instead of in initscan() because heap_rescan
* also calls initscan() and we don't want to allocate memory
@@ -847,9 +849,7 @@ heap_getnext(HeapScanDesc scandesc, int backw)
if (scan->rs_ptup.t_data == scan->rs_ctup.t_data &&
BufferIsInvalid(scan->rs_pbuf))
- {
return NULL;
- }
/*
* Copy the "current" tuple/buffer to "next". Pin/unpin the
@@ -1095,8 +1095,10 @@ heap_fetch(Relation relation,
}
else
{
- /* All checks passed, so return the tuple as valid.
- * Caller is now responsible for releasing the buffer.
+
+ /*
+ * All checks passed, so return the tuple as valid. Caller is now
+ * responsible for releasing the buffer.
*/
*userbuf = buffer;
}
@@ -1109,17 +1111,18 @@ heap_fetch(Relation relation,
*/
ItemPointer
heap_get_latest_tid(Relation relation,
- Snapshot snapshot,
- ItemPointer tid)
+ Snapshot snapshot,
+ ItemPointer tid)
{
ItemId lp = NULL;
Buffer buffer;
PageHeader dp;
- OffsetNumber offnum;
- HeapTupleData tp;
- HeapTupleHeader t_data;
- ItemPointerData ctid;
- bool invalidBlock,linkend;
+ OffsetNumber offnum;
+ HeapTupleData tp;
+ HeapTupleHeader t_data;
+ ItemPointerData ctid;
+ bool invalidBlock,
+ linkend;
/* ----------------
* get the buffer from the relation descriptor
@@ -1149,11 +1152,11 @@ heap_get_latest_tid(Relation relation,
invalidBlock = false;
}
if (invalidBlock)
- {
+ {
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
return NULL;
- }
+ }
/* ----------------
* more sanity checks
@@ -1175,7 +1178,7 @@ heap_get_latest_tid(Relation relation,
snapshot, 0, (ScanKey) NULL);
linkend = true;
- if ((t_data->t_infomask & HEAP_XMAX_COMMITTED) &&
+ if ((t_data->t_infomask & HEAP_XMAX_COMMITTED) &&
!ItemPointerEquals(tid, &ctid))
linkend = false;
@@ -1186,7 +1189,7 @@ heap_get_latest_tid(Relation relation,
{
if (linkend)
return NULL;
- return heap_get_latest_tid(relation, snapshot, &ctid);
+ return heap_get_latest_tid(relation, snapshot, &ctid);
}
return tid;
@@ -1300,10 +1303,11 @@ l1:
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (TransactionIdDidAbort(xwait))
goto l1;
- /*
- * xwait is committed but if xwait had just marked
- * the tuple for update then some other xaction could
- * update this tuple before we got to this point.
+
+ /*
+ * xwait is committed but if xwait had just marked the tuple for
+ * update then some other xaction could update this tuple before
+ * we got to this point.
*/
if (tp.t_data->t_xmax != xwait)
goto l1;
@@ -1345,11 +1349,11 @@ l1:
}
/*
- * heap_update - replace a tuple
+ * heap_update - replace a tuple
*/
int
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
- ItemPointer ctid)
+ ItemPointer ctid)
{
ItemId lp;
HeapTupleData oldtup;
@@ -1396,10 +1400,11 @@ l2:
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
if (TransactionIdDidAbort(xwait))
goto l2;
- /*
- * xwait is committed but if xwait had just marked
- * the tuple for update then some other xaction could
- * update this tuple before we got to this point.
+
+ /*
+ * xwait is committed but if xwait had just marked the tuple for
+ * update then some other xaction could update this tuple before
+ * we got to this point.
*/
if (oldtup.t_data->t_xmax != xwait)
goto l2;
@@ -1521,10 +1526,11 @@ l3:
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
if (TransactionIdDidAbort(xwait))
goto l3;
- /*
- * xwait is committed but if xwait had just marked
- * the tuple for update then some other xaction could
- * update this tuple before we got to this point.
+
+ /*
+ * xwait is committed but if xwait had just marked the tuple for
+ * update then some other xaction could update this tuple before
+ * we got to this point.
*/
if (tuple->t_data->t_xmax != xwait)
goto l3;
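
heap_get_latest_tid() in the hunks above follows t_ctid links, recursing on the ctid of each committed update until the chain ends ("linkend"). The toy sketch below shows only the chain-walking idea with hypothetical types; the real routine also checks transaction status and snapshot visibility before following a link:

#include <stdio.h>

typedef struct TupleVersion
{
    int                  id;
    struct TupleVersion *ctid;           /* newer version, or itself at chain end */
} TupleVersion;

/* Return the newest version reachable from tv by following ctid links. */
static TupleVersion *
latest_version(TupleVersion *tv)
{
    while (tv->ctid != tv)               /* chain ends when a version points at itself */
        tv = tv->ctid;
    return tv;
}

int main(void)
{
    TupleVersion v3 = {3, &v3};
    TupleVersion v2 = {2, &v3};
    TupleVersion v1 = {1, &v2};

    printf("latest version: %d\n", latest_version(&v1)->id);   /* prints 3 */
    return 0;
}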
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index b269ca90170..3fc2a69df1a 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.30 2000/03/17 02:36:02 tgl Exp $
+ * $Id: hio.c,v 1.31 2000/04/12 17:14:45 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -51,7 +51,7 @@ RelationPutHeapTuple(Relation relation,
IncrHeapAccessStat(global_RelationPutHeapTuple);
pageHeader = (Page) BufferGetPage(buffer);
- len = MAXALIGN(tuple->t_len); /* be conservative */
+ len = MAXALIGN(tuple->t_len); /* be conservative */
Assert(len <= PageGetFreeSpace(pageHeader));
offnum = PageAddItem((Page) pageHeader, (Item) tuple->t_data,
@@ -108,11 +108,11 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
ItemId itemId;
Item item;
- len = MAXALIGN(tuple->t_len); /* be conservative */
+ len = MAXALIGN(tuple->t_len); /* be conservative */
/*
- * If we're gonna fail for oversize tuple, do it right away...
- * this code should go away eventually.
+ * If we're gonna fail for oversize tuple, do it right away... this
+ * code should go away eventually.
*/
if (len > MaxTupleSize)
elog(ERROR, "Tuple is too big: size %u, max size %ld",
@@ -136,8 +136,8 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
lastblock = RelationGetNumberOfBlocks(relation);
/*
- * Get the last existing page --- may need to create the first one
- * if this is a virgin relation.
+ * Get the last existing page --- may need to create the first one if
+ * this is a virgin relation.
*/
if (lastblock == 0)
{
@@ -168,12 +168,14 @@ RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple)
if (len > PageGetFreeSpace(pageHeader))
{
+
/*
- * BUG: by elog'ing here, we leave the new buffer locked and not
- * marked dirty, which may result in an invalid page header
- * being left on disk. But we should not get here given the
- * test at the top of the routine, and the whole deal should
- * go away when we implement tuple splitting anyway...
+ * BUG: by elog'ing here, we leave the new buffer locked and
+ * not marked dirty, which may result in an invalid page
+ * header being left on disk. But we should not get here
+ * given the test at the top of the routine, and the whole
+ * deal should go away when we implement tuple splitting
+ * anyway...
*/
elog(ERROR, "Tuple is too big: size %u", len);
}
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 9176521a5fc..3aadfb2cde0 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -2,13 +2,13 @@
*
* tuptoaster.c
* Support routines for external and compressed storage of
- * variable size attributes.
+ * variable size attributes.
*
* Copyright (c) 2000, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.2 2000/01/20 21:50:59 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.3 2000/04/12 17:14:45 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -30,17 +30,17 @@
#ifdef TUPLE_TOASTER_ACTIVE
void
-heap_tuple_toast_attrs (Relation rel, HeapTuple newtup, HeapTuple oldtup)
+heap_tuple_toast_attrs(Relation rel, HeapTuple newtup, HeapTuple oldtup)
{
return;
}
-varattrib *
-heap_tuple_untoast_attr (varattrib *attr)
+varattrib *
+heap_tuple_untoast_attr(varattrib * attr)
{
elog(ERROR, "heap_tuple_untoast_attr() called");
}
-#endif /* TUPLE_TOASTER_ACTIVE */
+#endif /* TUPLE_TOASTER_ACTIVE */
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index 1bd02f839ad..3530320637b 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.24 2000/03/14 23:52:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.25 2000/04/12 17:14:47 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -62,7 +62,7 @@
*
* At the end of a scan, the AM's endscan routine undoes the locking,
* but does *not* call IndexScanEnd --- the higher-level index_endscan
- * routine does that. (We can't do it in the AM because index_endscan
+ * routine does that. (We can't do it in the AM because index_endscan
* still needs to touch the IndexScanDesc after calling the AM.)
*
* Because of this, the AM does not have a choice whether to call
@@ -114,7 +114,10 @@ RelationGetIndexScan(Relation relation,
ItemPointerSetInvalid(&scan->currentMarkData);
ItemPointerSetInvalid(&scan->nextMarkData);
- /* mark cached function lookup data invalid; it will be set on first use */
+ /*
+ * mark cached function lookup data invalid; it will be set on first
+ * use
+ */
scan->fn_getnext.fn_oid = InvalidOid;
if (numberOfKeys > 0)
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 23356931198..e0672667c7f 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.41 2000/03/14 23:52:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.42 2000/04/12 17:14:47 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relationId
@@ -115,10 +115,10 @@
* index_open - open an index relation by relationId
*
* presently the relcache routines do all the work we need
- * to open/close index relations. However, callers of index_open
+ * to open/close index relations. However, callers of index_open
* expect it to succeed, so we need to check for a failure return.
*
- * Note: we acquire no lock on the index. An AccessShareLock is
+ * Note: we acquire no lock on the index. An AccessShareLock is
* acquired by index_beginscan (and released by index_endscan).
* ----------------
*/
@@ -129,7 +129,7 @@ index_open(Oid relationId)
r = RelationIdGetRelation(relationId);
- if (! RelationIsValid(r))
+ if (!RelationIsValid(r))
elog(ERROR, "Index %u does not exist", relationId);
if (r->rd_rel->relkind != RELKIND_INDEX)
@@ -151,7 +151,7 @@ index_openr(char *relationName)
r = RelationNameGetRelation(relationName);
- if (! RelationIsValid(r))
+ if (!RelationIsValid(r))
elog(ERROR, "Index '%s' does not exist", relationName);
if (r->rd_rel->relkind != RELKIND_INDEX)
@@ -238,7 +238,7 @@ index_beginscan(Relation relation,
* Acquire AccessShareLock for the duration of the scan
*
* Note: we could get an SI inval message here and consequently have
- * to rebuild the relcache entry. The refcount increment above
+ * to rebuild the relcache entry. The refcount increment above
* ensures that we will rebuild it and not just flush it...
* ----------------
*/
diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c
index fe956ead378..b0864e505fa 100644
--- a/src/backend/access/index/istrat.c
+++ b/src/backend/access/index/istrat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.41 2000/02/18 09:29:16 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.42 2000/04/12 17:14:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -477,7 +477,7 @@ OperatorRelationFillScanKeyEntry(Relation operatorRelation,
{
HeapTuple tuple;
HeapScanDesc scan = NULL;
- bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
+ bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
if (cachesearch)
{
@@ -547,7 +547,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
AttrNumber attributeNumber;
int attributeIndex;
Oid operatorClassObjectId[INDEX_MAX_KEYS];
- bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
+ bool cachesearch = (!IsBootstrapProcessingMode()) && IsCacheInitialized();
if (cachesearch)
{
@@ -674,7 +674,7 @@ IndexSupportInitialize(IndexStrategy indexStrategy,
aform = (Form_pg_amop) GETSTRUCT(tuple);
OperatorRelationFillScanKeyEntry(operatorRelation,
aform->amopopr,
- StrategyMapGetScanKeyEntry(map, aform->amopstrategy));
+ StrategyMapGetScanKeyEntry(map, aform->amopstrategy));
}
heap_endscan(scan);
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index f66b9a0e8e2..3e58c677a00 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.33 2000/02/10 19:51:38 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.34 2000/04/12 17:14:49 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -35,12 +35,12 @@ btint2cmp(int16 a, int16 b)
int32
btint4cmp(int32 a, int32 b)
{
- if (a > b)
- return 1;
- else if (a == b)
- return 0;
- else
- return -1;
+ if (a > b)
+ return 1;
+ else if (a == b)
+ return 0;
+ else
+ return -1;
}
int32
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index b891716a895..975b53d658a 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.56 2000/03/17 02:36:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.57 2000/04/12 17:14:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,10 +21,10 @@
static InsertIndexResult _bt_insertonpg(Relation rel, Buffer buf, BTStack stack, int keysz, ScanKey scankey, BTItem btitem, BTItem afteritem);
static Buffer _bt_split(Relation rel, Size keysz, ScanKey scankey,
- Buffer buf, OffsetNumber firstright);
+ Buffer buf, OffsetNumber firstright);
static OffsetNumber _bt_findsplitloc(Relation rel, Size keysz, ScanKey scankey,
- Page page, OffsetNumber start,
- OffsetNumber maxoff, Size llimit);
+ Page page, OffsetNumber start,
+ OffsetNumber maxoff, Size llimit);
static void _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf);
static OffsetNumber _bt_pgaddtup(Relation rel, Buffer buf, int keysz, ScanKey itup_scankey, Size itemsize, BTItem btitem, BTItem afteritem);
static bool _bt_goesonpg(Relation rel, Buffer buf, Size keysz, ScanKey scankey, BTItem afteritem);
@@ -267,21 +267,20 @@ _bt_insertonpg(Relation rel,
itemsz = IndexTupleDSize(btitem->bti_itup)
+ (sizeof(BTItemData) - sizeof(IndexTupleData));
- itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do
- * this but we need to be
- * consistent */
+ itemsz = MAXALIGN(itemsz); /* be safe, PageAddItem will do this but
+ * we need to be consistent */
/*
- * Check whether the item can fit on a btree page at all.
- * (Eventually, we ought to try to apply TOAST methods if not.)
- * We actually need to be able to fit three items on every page,
- * so restrict any one item to 1/3 the per-page available space.
- * Note that at this point, itemsz doesn't include the ItemId.
+ * Check whether the item can fit on a btree page at all. (Eventually,
+ * we ought to try to apply TOAST methods if not.) We actually need to
+ * be able to fit three items on every page, so restrict any one item
+ * to 1/3 the per-page available space. Note that at this point,
+ * itemsz doesn't include the ItemId.
*/
- if (itemsz > (PageGetPageSize(page)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData))
+ if (itemsz > (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %u exceeds maximum %lu",
itemsz,
- (PageGetPageSize(page)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData));
+ (PageGetPageSize(page) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
/*
* If we have to insert item on the leftmost page which is the first
@@ -415,8 +414,8 @@ _bt_insertonpg(Relation rel,
bool is_root = lpageop->btpo_flags & BTP_ROOT;
/*
- * Instead of splitting leaf page in the chain of duplicates
- * by new duplicate, insert it into some right page.
+ * Instead of splitting leaf page in the chain of duplicates by
+ * new duplicate, insert it into some right page.
*/
if ((lpageop->btpo_flags & BTP_CHAIN) &&
(lpageop->btpo_flags & BTP_LEAF) && keys_equal)
@@ -424,8 +423,9 @@ _bt_insertonpg(Relation rel,
rbuf = _bt_getbuf(rel, lpageop->btpo_next, BT_WRITE);
rpage = BufferGetPage(rbuf);
rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage);
- /*
- * some checks
+
+ /*
+ * some checks
*/
if (!P_RIGHTMOST(rpageop)) /* non-rightmost page */
{ /* If we have the same hikey here then
@@ -442,6 +442,7 @@ _bt_insertonpg(Relation rel,
BTGreaterStrategyNumber))
elog(FATAL, "btree: hikey is out of order");
else if (rpageop->btpo_flags & BTP_CHAIN)
+
/*
* If hikey > scankey then it's last page in chain and
* BTP_CHAIN must be OFF
@@ -450,9 +451,7 @@ _bt_insertonpg(Relation rel,
}
else
/* rightmost page */
- {
Assert(!(rpageop->btpo_flags & BTP_CHAIN));
- }
_bt_relbuf(rel, buf, BT_WRITE);
return (_bt_insertonpg(rel, rbuf, stack, keysz,
scankey, btitem, afteritem));
@@ -708,7 +707,7 @@ l_spl: ;
*/
if (!parent_chained &&
MAXALIGN(IndexTupleDSize(lowLeftItem->bti_itup)) ==
- MAXALIGN(IndexTupleDSize(stack->bts_btitem->bti_itup)))
+ MAXALIGN(IndexTupleDSize(stack->bts_btitem->bti_itup)))
{
_bt_updateitem(rel, keysz, pbuf,
stack->bts_btitem, lowLeftItem);
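
The reflowed comment in _bt_insertonpg() above explains the limit it enforces: every page must be able to hold three items, so a single item may use at most 1/3 of the usable page space, excluding its ItemId. A small stand-alone calculation of that limit with assumed sizes (8192-byte pages; the header, opaque, and ItemId sizes below are placeholders, not the real sizeof values):

#include <stdio.h>

int main(void)
{
    unsigned page_size   = 8192;         /* assumed BLCKSZ */
    unsigned page_header = 24;           /* placeholder for sizeof(PageHeaderData) */
    unsigned special     = 16;           /* placeholder for MAXALIGN(sizeof(BTPageOpaqueData)) */
    unsigned item_id     = 4;            /* placeholder for sizeof(ItemIdData) */

    /* same arithmetic as the elog() check: usable space / 3, minus the line pointer */
    unsigned limit = (page_size - page_header - special) / 3 - item_id;

    printf("max btree item size: %u bytes\n", limit);
    return 0;
}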
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 60eaf3a9d23..1a623698f57 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.35 2000/01/26 05:55:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.36 2000/04/12 17:14:49 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -257,7 +257,7 @@ _bt_getroot(Relation rel, int access)
else
{
rootblkno = metad->btm_root;
- _bt_relbuf(rel, metabuf, BT_READ); /* done with the meta page */
+ _bt_relbuf(rel, metabuf, BT_READ); /* done with the meta page */
rootbuf = _bt_getbuf(rel, rootblkno, access);
}
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index da5dd70332e..72969d4d1b7 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.53 2000/02/18 09:29:54 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.54 2000/04/12 17:14:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,7 +77,7 @@ btbuild(Relation heap,
#endif
Node *pred,
*oldPred;
- BTSpool *spool = NULL;
+ BTSpool *spool = NULL;
bool isunique;
bool usefast;
@@ -185,7 +185,7 @@ btbuild(Relation heap,
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
- if (! ExecQual((List *) pred, econtext, false))
+ if (!ExecQual((List *) pred, econtext, false))
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
@@ -276,9 +276,9 @@ btbuild(Relation heap,
}
/*
- * if we are doing bottom-up btree build, finish the build by
- * (1) completing the sort of the spool file, (2) inserting the
- * sorted tuples into btree pages and (3) building the upper levels.
+ * if we are doing bottom-up btree build, finish the build by (1)
+ * completing the sort of the spool file, (2) inserting the sorted
+ * tuples into btree pages and (3) building the upper levels.
*/
if (usefast)
{
@@ -298,26 +298,27 @@ btbuild(Relation heap,
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
- * index we just created. But, only update statistics during
- * normal index definitions, not for indices on system catalogs
- * created during bootstrap processing. We must close the relations
- * before updating statistics to guarantee that the relcache entries
- * are flushed when we increment the command counter in UpdateStats().
- * But we do not release any locks on the relations; those will be
- * held until end of transaction.
+ * index we just created. But, only update statistics during normal
+ * index definitions, not for indices on system catalogs created
+ * during bootstrap processing. We must close the relations before
+ * updating statistics to guarantee that the relcache entries are
+ * flushed when we increment the command counter in UpdateStats(). But
+ * we do not release any locks on the relations; those will be held
+ * until end of transaction.
*/
if (IsNormalProcessingMode())
{
- Oid hrelid = RelationGetRelid(heap);
- Oid irelid = RelationGetRelid(index);
+ Oid hrelid = RelationGetRelid(heap);
+ Oid irelid = RelationGetRelid(index);
bool inplace = IsReindexProcessing();
heap_close(heap, NoLock);
index_close(index);
+
/*
- UpdateStats(hrelid, nhtups, true);
- UpdateStats(irelid, nitups, false);
- */
+ * UpdateStats(hrelid, nhtups, true); UpdateStats(irelid, nitups,
+ * false);
+ */
UpdateStats(hrelid, nhtups, inplace);
UpdateStats(irelid, nitups, inplace);
if (oldPred != NULL)
@@ -623,7 +624,7 @@ _bt_restscan(IndexScanDesc scan)
BTItem item;
BlockNumber blkno;
- LockBuffer(buf, BT_READ); /* lock buffer first! */
+ LockBuffer(buf, BT_READ); /* lock buffer first! */
page = BufferGetPage(buf);
maxoff = PageGetMaxOffsetNumber(page);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
diff --git a/src/backend/access/nbtree/nbtscan.c b/src/backend/access/nbtree/nbtscan.c
index 95b1bf53ecb..37469365bcd 100644
--- a/src/backend/access/nbtree/nbtscan.c
+++ b/src/backend/access/nbtree/nbtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.30 2000/01/26 05:55:58 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.31 2000/04/12 17:14:49 momjian Exp $
*
*
* NOTES
@@ -52,13 +52,16 @@ static void _bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offn
void
AtEOXact_nbtree(void)
{
- /* Note: these actions should only be necessary during xact abort;
- * but they can't hurt during a commit.
+
+ /*
+ * Note: these actions should only be necessary during xact abort; but
+ * they can't hurt during a commit.
*/
- /* Reset the active-scans list to empty.
- * We do not need to free the list elements, because they're all
- * palloc()'d, so they'll go away at end of transaction anyway.
+ /*
+ * Reset the active-scans list to empty. We do not need to free the
+ * list elements, because they're all palloc()'d, so they'll go away
+ * at end of transaction anyway.
*/
BTScans = NULL;
@@ -96,7 +99,7 @@ _bt_dropscan(IndexScanDesc scan)
last = chk;
if (chk == (BTScanList) NULL)
- elog(ERROR, "btree scan list trashed; can't find 0x%p", (void*)scan);
+ elog(ERROR, "btree scan list trashed; can't find 0x%p", (void *) scan);
if (last == (BTScanList) NULL)
BTScans = chk->btsl_next;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index f36da9743f8..cad117e5e61 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.58 2000/03/17 02:36:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.59 2000/04/12 17:14:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -299,9 +299,7 @@ _bt_skeycmp(Relation rel,
compare = -1; /* not-NULL key "<" NULL datum */
}
else
- {
compare = (int32) FMGR_PTR2(&entry->sk_func, keyDatum, attrDatum);
- }
if (compare != 0)
break; /* done when we find unequal attributes */
@@ -368,26 +366,26 @@ _bt_binsrch(Relation rel,
/*
* If there are no keys on the page, return the first available slot.
- * Note this covers two cases: the page is really empty (no keys),
- * or it contains only a high key. The latter case is possible after
+ * Note this covers two cases: the page is really empty (no keys), or
+ * it contains only a high key. The latter case is possible after
* vacuuming.
*/
if (high < low)
return low;
/*
- * Binary search to find the first key on the page >= scan key.
- * Loop invariant: all slots before 'low' are < scan key, all slots
- * at or after 'high' are >= scan key. Also, haveEq is true if the
- * tuple at 'high' is == scan key.
- * We can fall out when high == low.
+ * Binary search to find the first key on the page >= scan key. Loop
+ * invariant: all slots before 'low' are < scan key, all slots at or
+ * after 'high' are >= scan key. Also, haveEq is true if the tuple at
+ * 'high' is == scan key. We can fall out when high == low.
*/
high++; /* establish the loop invariant for high */
haveEq = false;
while (high > low)
{
- OffsetNumber mid = low + ((high - low) / 2);
+ OffsetNumber mid = low + ((high - low) / 2);
+
/* We have low <= mid < high, so mid points at a real slot */
result = _bt_compare(rel, itupdesc, page, keysz, scankey, mid);
@@ -403,7 +401,7 @@ _bt_binsrch(Relation rel,
/*--------------------
* At this point we have high == low, but be careful: they could point
- * past the last slot on the page. We also know that haveEq is true
+ * past the last slot on the page. We also know that haveEq is true
* if and only if there is an equal key (in which case high&low point
* at the first equal key).
*
@@ -443,18 +441,20 @@ _bt_binsrch(Relation rel,
if (haveEq)
{
+
/*
* There is an equal key. We return either the first equal key
* (which we just found), or the last lesser key.
*
- * We need not check srchtype != BT_DESCENT here, since if that
- * is true then natts == keysz by assumption.
+ * We need not check srchtype != BT_DESCENT here, since if that is
+ * true then natts == keysz by assumption.
*/
if (natts == keysz)
return low; /* return first equal key */
}
else
{
+
/*
* There is no equal key. We return either the first greater key
* (which we just found), or the last lesser key.
@@ -524,6 +524,7 @@ _bt_compare(Relation rel,
&& P_LEFTMOST(opaque)
&& offnum == P_HIKEY)
{
+
/*
* we just have to believe that this will only be called with
* offnum == P_HIKEY when P_HIKEY is the OffsetNumber of the first
@@ -702,11 +703,12 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
bool strategyCheck;
ScanKey scankeys = 0;
- int keysCount = 0;
- int *nKeyIs = 0;
- int i, j;
- StrategyNumber strat_total;
-
+ int keysCount = 0;
+ int *nKeyIs = 0;
+ int i,
+ j;
+ StrategyNumber strat_total;
+
rel = scan->relation;
so = (BTScanOpaque) scan->opaque;
@@ -723,15 +725,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
_bt_orderkeys(rel, so);
if (so->qual_ok)
- strategyCheck = true;
- }
+ strategyCheck = true;
+ }
strat_total = BTEqualStrategyNumber;
if (strategyCheck)
{
AttrNumber attno;
- nKeyIs = (int *)palloc(so->numberOfKeys*sizeof(int));
- for (i=0; i < so->numberOfKeys; i++)
+ nKeyIs = (int *) palloc(so->numberOfKeys * sizeof(int));
+ for (i = 0; i < so->numberOfKeys; i++)
{
attno = so->keyData[i].sk_attno;
if (attno == keysCount)
@@ -739,16 +741,16 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (attno > keysCount + 1)
break;
strat = _bt_getstrat(rel, attno,
- so->keyData[i].sk_procedure);
+ so->keyData[i].sk_procedure);
if (strat == strat_total ||
- strat == BTEqualStrategyNumber)
+ strat == BTEqualStrategyNumber)
{
nKeyIs[keysCount++] = i;
continue;
}
if (ScanDirectionIsBackward(dir) &&
- (strat == BTLessStrategyNumber ||
- strat == BTLessEqualStrategyNumber) )
+ (strat == BTLessStrategyNumber ||
+ strat == BTLessEqualStrategyNumber))
{
nKeyIs[keysCount++] = i;
strat_total = strat;
@@ -757,8 +759,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
continue;
}
if (ScanDirectionIsForward(dir) &&
- (strat == BTGreaterStrategyNumber ||
- strat == BTGreaterEqualStrategyNumber) )
+ (strat == BTGreaterStrategyNumber ||
+ strat == BTGreaterEqualStrategyNumber))
{
nKeyIs[keysCount++] = i;
strat_total = strat;
@@ -794,8 +796,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* at the right place in the scan.
*/
/* _bt_orderkeys disallows it, but it's place to add some code latter */
- scankeys = (ScanKey)palloc(keysCount*sizeof(ScanKeyData));
- for (i=0; i < keysCount; i++)
+ scankeys = (ScanKey) palloc(keysCount * sizeof(ScanKeyData));
+ for (i = 0; i < keysCount; i++)
{
j = nKeyIs[i];
if (so->keyData[j].sk_flags & SK_ISNULL)
@@ -804,12 +806,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
pfree(scankeys);
elog(ERROR, "_bt_first: btree doesn't support is(not)null, yet");
return ((RetrieveIndexResult) NULL);
- }
- proc = index_getprocid(rel, i+1, BTORDER_PROC);
- ScanKeyEntryInitialize(scankeys+i, so->keyData[j].sk_flags,
- i+1, proc, so->keyData[j].sk_argument);
+ }
+ proc = index_getprocid(rel, i + 1, BTORDER_PROC);
+ ScanKeyEntryInitialize(scankeys + i, so->keyData[j].sk_flags,
+ i + 1, proc, so->keyData[j].sk_argument);
}
- if (nKeyIs) pfree(nKeyIs);
+ if (nKeyIs)
+ pfree(nKeyIs);
stack = _bt_search(rel, keysCount, scankeys, &buf);
_bt_freestack(stack);
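
The rewrapped comment in _bt_binsrch() above states the loop invariant: all slots before 'low' are less than the scan key, all slots at or after 'high' are greater than or equal, and the loop falls out when high == low. A generic lower-bound binary search with the same shape, over a plain int array (purely illustrative, not the backend routine):

#include <stdio.h>

/* Return the index of the first slot >= key, or nslots if there is none. */
static int
lower_bound(const int *slots, int nslots, int key)
{
    int low = 0;
    int high = nslots;                   /* one past the last slot */

    while (high > low)
    {
        int mid = low + (high - low) / 2;    /* low <= mid < high */

        if (slots[mid] >= key)
            high = mid;                  /* invariant: slots[high..] >= key */
        else
            low = mid + 1;               /* invariant: slots[..low-1] < key */
    }
    return low;                          /* high == low: first slot >= key */
}

int main(void)
{
    int slots[] = {10, 20, 20, 30, 40};

    printf("%d\n", lower_bound(slots, 5, 20));   /* prints 1 */
    return 0;
}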
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index f9cbf7121ff..65e757e42b3 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -6,7 +6,7 @@
*
* We use tuplesort.c to sort the given index tuples into order.
* Then we scan the index tuples in order and build the btree pages
- * for each level. When we have only one page on a level, it must be the
+ * for each level. When we have only one page on a level, it must be the
* root -- it can be attached to the btree metapage and we are done.
*
* this code is moderately slow (~10% slower) compared to the regular
@@ -28,7 +28,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.51 2000/02/18 06:32:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.52 2000/04/12 17:14:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -70,12 +70,12 @@ struct BTSpool
static void _bt_load(Relation index, BTSpool *btspool);
static BTItem _bt_buildadd(Relation index, Size keysz, ScanKey scankey,
- BTPageState *state, BTItem bti, int flags);
+ BTPageState *state, BTItem bti, int flags);
static BTItem _bt_minitem(Page opage, BlockNumber oblkno, int atend);
static BTPageState *_bt_pagestate(Relation index, int flags,
- int level, bool doupper);
+ int level, bool doupper);
static void _bt_uppershutdown(Relation index, Size keysz, ScanKey scankey,
- BTPageState *state);
+ BTPageState *state);
/*
@@ -86,7 +86,7 @@ static void _bt_uppershutdown(Relation index, Size keysz, ScanKey scankey,
/*
* create and initialize a spool structure
*/
-BTSpool *
+BTSpool *
_bt_spoolinit(Relation index, bool isunique)
{
BTSpool *btspool = (BTSpool *) palloc(sizeof(BTSpool));
@@ -99,9 +99,9 @@ _bt_spoolinit(Relation index, bool isunique)
btspool->sortstate = tuplesort_begin_index(index, isunique, false);
/*
- * Currently, tuplesort provides sort functions on IndexTuples.
- * If we kept anything in a BTItem other than a regular IndexTuple,
- * we'd need to modify tuplesort to understand BTItems as such.
+ * Currently, tuplesort provides sort functions on IndexTuples. If we
+ * kept anything in a BTItem other than a regular IndexTuple, we'd
+ * need to modify tuplesort to understand BTItems as such.
*/
Assert(sizeof(BTItemData) == sizeof(IndexTupleData));
@@ -306,20 +306,20 @@ _bt_buildadd(Relation index, Size keysz, ScanKey scankey,
btisz = MAXALIGN(btisz);
/*
- * Check whether the item can fit on a btree page at all.
- * (Eventually, we ought to try to apply TOAST methods if not.)
- * We actually need to be able to fit three items on every page,
- * so restrict any one item to 1/3 the per-page available space.
- * Note that at this point, btisz doesn't include the ItemId.
+ * Check whether the item can fit on a btree page at all. (Eventually,
+ * we ought to try to apply TOAST methods if not.) We actually need to
+ * be able to fit three items on every page, so restrict any one item
+ * to 1/3 the per-page available space. Note that at this point, btisz
+ * doesn't include the ItemId.
*
* NOTE: similar code appears in _bt_insertonpg() to defend against
- * oversize items being inserted into an already-existing index.
- * But during creation of an index, we don't go through there.
+ * oversize items being inserted into an already-existing index. But
+ * during creation of an index, we don't go through there.
*/
- if (btisz > (PageGetPageSize(npage)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData))
+ if (btisz > (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) / 3 - sizeof(ItemIdData))
elog(ERROR, "btree: index item size %d exceeds maximum %ld",
btisz,
- (PageGetPageSize(npage)-sizeof(PageHeaderData)-MAXALIGN(sizeof(BTPageOpaqueData)))/3 - sizeof(ItemIdData));
+ (PageGetPageSize(npage) - sizeof(PageHeaderData) - MAXALIGN(sizeof(BTPageOpaqueData))) /3 - sizeof(ItemIdData));
if (pgspc < btisz)
{
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 9d1cc7b10d0..38b152e61b2 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.35 2000/02/18 06:32:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.36 2000/04/12 17:14:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -141,7 +141,8 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
uint16 numberOfKeys = so->numberOfKeys;
uint16 new_numberOfKeys = 0;
AttrNumber attno = 1;
- bool equalStrategyEnd, underEqualStrategy;
+ bool equalStrategyEnd,
+ underEqualStrategy;
if (numberOfKeys < 1)
return;
@@ -194,6 +195,7 @@ _bt_orderkeys(Relation relation, BTScanOpaque so)
elog(ERROR, "_bt_orderkeys: key(s) for attribute %d missed", attno + 1);
underEqualStrategy = (!equalStrategyEnd);
+
/*
* If = has been specified, no other key will be used. In case
* of key < 2 && key == 1 and so on we have to set qual_ok to
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index 34cf0b6c937..53e5345a55d 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.44 2000/03/01 05:39:23 inoue Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.45 2000/04/12 17:14:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -181,7 +181,7 @@ rtbuild(Relation heap,
#ifndef OMIT_PARTIAL_INDEX
/* SetSlotContents(slot, htup); */
slot->val = htup;
- if (! ExecQual((List *) pred, econtext, false))
+ if (!ExecQual((List *) pred, econtext, false))
continue;
#endif /* OMIT_PARTIAL_INDEX */
}
@@ -249,18 +249,18 @@ rtbuild(Relation heap,
/*
* Since we just counted the tuples in the heap, we update its stats
* in pg_class to guarantee that the planner takes advantage of the
- * index we just created. But, only update statistics during
- * normal index definitions, not for indices on system catalogs
- * created during bootstrap processing. We must close the relations
- * before updating statistics to guarantee that the relcache entries
- * are flushed when we increment the command counter in UpdateStats().
- * But we do not release any locks on the relations; those will be
- * held until end of transaction.
+ * index we just created. But, only update statistics during normal
+ * index definitions, not for indices on system catalogs created
+ * during bootstrap processing. We must close the relations before
+ * updating statistics to guarantee that the relcache entries are
+ * flushed when we increment the command counter in UpdateStats(). But
+ * we do not release any locks on the relations; those will be held
+ * until end of transaction.
*/
if (IsNormalProcessingMode())
{
- Oid hrelid = RelationGetRelid(heap);
- Oid irelid = RelationGetRelid(index);
+ Oid hrelid = RelationGetRelid(heap);
+ Oid irelid = RelationGetRelid(index);
bool inplace = IsReindexProcessing();
heap_close(heap, NoLock);
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index ada38612778..71e2acf5f88 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.31 2000/01/26 05:56:00 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.32 2000/04/12 17:14:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -268,7 +268,7 @@ rtdropscan(IndexScanDesc s)
prev = l;
if (l == (RTScanList) NULL)
- elog(ERROR, "rtree scan list corrupted -- cannot find 0x%p", (void*)s);
+ elog(ERROR, "rtree scan list corrupted -- cannot find 0x%p", (void *) s);
if (prev == (RTScanList) NULL)
RTScans = l->rtsl_next;
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index 36d50f08c0c..4efb53ea08c 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -1,4 +1,4 @@
#include "postgres.h"
#include "access/rmgr.h"
-RmgrData *RmgrTable = NULL;
+RmgrData *RmgrTable = NULL;
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index a5af2bdc533..acca4a901a2 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.33 2000/01/26 05:56:03 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.34 2000/04/12 17:14:52 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -162,6 +162,7 @@ TransactionLogTest(TransactionId transactionId, /* transaction id to test */
if (!fail)
{
+
/*
* DO NOT cache status for transactions in unknown state !!!
*/
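
The comment above warns against caching a transaction status that is not yet final. A tiny illustrative sketch of that rule, with hypothetical names rather than the backend's:

typedef enum
{
    XSTAT_UNKNOWN,
    XSTAT_IN_PROGRESS,
    XSTAT_COMMITTED,
    XSTAT_ABORTED
} XStatus;

static unsigned int cached_xid    = 0;
static XStatus      cached_status = XSTAT_UNKNOWN;

static void
maybe_cache_status(unsigned int xid, XStatus status)
{
    /* DO NOT cache status for transactions in unknown/in-progress state */
    if (status == XSTAT_COMMITTED || status == XSTAT_ABORTED)
    {
        cached_xid    = xid;
        cached_status = status;
    }
}

int
main(void)
{
    maybe_cache_status(100, XSTAT_IN_PROGRESS);     /* not cached */
    maybe_cache_status(100, XSTAT_COMMITTED);       /* cached     */
    return (cached_xid == 100 && cached_status == XSTAT_COMMITTED) ? 0 : 1;
}
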
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 6a31bfe429e..5ce334aa299 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.27 2000/03/31 02:43:31 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.28 2000/04/12 17:14:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -331,8 +331,8 @@ ReadNewTransactionId(TransactionId *xid)
SpinAcquire(OidGenLockId); /* not good for concurrency... */
/*
- * Note that we don't check is ShmemVariableCache->xid_count equal
- * to 0 or not. This will work as long as we don't call
+ * Note that we don't check is ShmemVariableCache->xid_count equal to
+ * 0 or not. This will work as long as we don't call
* ReadNewTransactionId() before GetNewTransactionId().
*/
if (ShmemVariableCache->nextXid == 0)
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 2522cca46c1..688741511fb 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.63 2000/04/09 04:43:16 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.64 2000/04/12 17:14:53 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -160,7 +160,7 @@
#include "utils/portal.h"
#include "utils/relcache.h"
-extern bool SharedBufferChanged;
+extern bool SharedBufferChanged;
static void AbortTransaction(void);
static void AtAbort_Cache(void);
@@ -517,8 +517,8 @@ CommandCounterIncrement()
CurrentTransactionStateData.scanCommandId = CurrentTransactionStateData.commandId;
/*
- * make cache changes visible to me. AtCommit_LocalCache()
- * instead of AtCommit_Cache() is called here.
+ * make cache changes visible to me. AtCommit_LocalCache() instead of
+ * AtCommit_Cache() is called here.
*/
AtCommit_LocalCache();
AtStart_Cache();
@@ -627,16 +627,15 @@ RecordTransactionCommit()
*/
xid = GetCurrentTransactionId();
- /*
- * flush the buffer manager pages. Note: if we have stable
- * main memory, dirty shared buffers are not flushed
- * plai 8/7/90
+ /*
+ * flush the buffer manager pages. Note: if we have stable main
+ * memory, dirty shared buffers are not flushed plai 8/7/90
*/
leak = BufferPoolCheckLeak();
/*
- * If no one shared buffer was changed by this transaction then
- * we don't flush shared buffers and don't record commit status.
+ * If no one shared buffer was changed by this transaction then we
+ * don't flush shared buffers and don't record commit status.
*/
if (SharedBufferChanged)
{
@@ -645,13 +644,13 @@ RecordTransactionCommit()
ResetBufferPool(true);
/*
- * have the transaction access methods record the status
- * of this transaction id in the pg_log relation.
+ * have the transaction access methods record the status of this
+ * transaction id in the pg_log relation.
*/
TransactionIdCommit(xid);
/*
- * Now write the log info to the disk too.
+ * Now write the log info to the disk too.
*/
leak = BufferPoolCheckLeak();
FlushBufferPool();
@@ -751,10 +750,10 @@ RecordTransactionAbort()
*/
xid = GetCurrentTransactionId();
- /*
- * Have the transaction access methods record the status of
- * this transaction id in the pg_log relation. We skip it
- * if no one shared buffer was changed by this transaction.
+ /*
+ * Have the transaction access methods record the status of this
+ * transaction id in the pg_log relation. We skip it if no one shared
+ * buffer was changed by this transaction.
*/
if (SharedBufferChanged && !TransactionIdDidCommit(xid))
TransactionIdAbort(xid);
@@ -936,7 +935,7 @@ CommitTransaction()
/* ----------------
* Tell the trigger manager that this transaction is about to be
* committed. He'll invoke all trigger deferred until XACT before
- * we really start on committing the transaction.
+ * we really start on committing the transaction.
* ----------------
*/
DeferredTriggerEndXact();
@@ -965,13 +964,13 @@ CommitTransaction()
RecordTransactionCommit();
/*
- * Let others know about no transaction in progress by me.
- * Note that this must be done _before_ releasing locks we hold
- * and SpinAcquire(SInvalLock) is required: UPDATE with xid 0 is
- * blocked by xid 1' UPDATE, xid 1 is doing commit while xid 2
- * gets snapshot - if xid 2' GetSnapshotData sees xid 1 as running
- * then it must see xid 0 as running as well or it will see two
- * tuple versions - one deleted by xid 1 and one inserted by xid 0.
+ * Let others know about no transaction in progress by me. Note that
+ * this must be done _before_ releasing locks we hold and
+ * SpinAcquire(SInvalLock) is required: UPDATE with xid 0 is blocked
+ * by xid 1' UPDATE, xid 1 is doing commit while xid 2 gets snapshot -
+ * if xid 2' GetSnapshotData sees xid 1 as running then it must see
+ * xid 0 as running as well or it will see two tuple versions - one
+ * deleted by xid 1 and one inserted by xid 0.
*/
if (MyProc != (PROC *) NULL)
{
@@ -995,7 +994,7 @@ CommitTransaction()
* ----------------
*/
s->state = TRANS_DEFAULT;
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
}
@@ -1031,7 +1030,7 @@ AbortTransaction()
/* ----------------
* Tell the trigger manager that this transaction is about to be
- * aborted.
+ * aborted.
* ----------------
*/
DeferredTriggerAbortXact();
@@ -1070,7 +1069,7 @@ AbortTransaction()
* ----------------
*/
s->state = TRANS_DEFAULT;
- SharedBufferChanged = false; /* safest place to do it */
+ SharedBufferChanged = false;/* safest place to do it */
}
/* --------------------------------
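
The comments in the xact.c hunks above spell out the commit path: skip the whole thing when no shared buffer was changed, otherwise flush the dirty buffers, record the commit in pg_log, and then force the log to disk. A stubbed control-flow sketch of that ordering follows; the functions are stand-ins for illustration, not the backend's.

#include <stdbool.h>
#include <stdio.h>

static bool SharedBufferChanged = false;    /* set whenever a shared buffer is dirtied */

static void flush_buffer_pool(void)       { puts("flush dirty shared buffers"); }
static void mark_committed(unsigned xid)  { printf("pg_log: xid %u -> committed\n", xid); }
static void flush_log(void)               { puts("force the log to disk"); }

void
record_commit(unsigned xid)
{
    /* read-only transaction: nothing was changed, nothing to flush or record */
    if (!SharedBufferChanged)
        return;

    flush_buffer_pool();    /* data pages first                 */
    mark_committed(xid);    /* then the commit status in pg_log */
    flush_log();            /* then make that status durable    */
}
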
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 7d197cb5a3a..b672cf2c7a9 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -1,4 +1,4 @@
-/*-------------------------------------------------------------------------
+/*-------------------------------------------------------------------------
*
* xlog.c
*
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2000, PostgreSQL, Inc
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.12 2000/03/20 07:25:39 vadim Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.13 2000/04/12 17:14:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,64 +44,64 @@ bool StopIfError = false;
SPINLOCK ControlFileLockId;
SPINLOCK XidGenLockId;
-extern bool ReleaseDataFile(void);
+extern bool ReleaseDataFile(void);
-extern VariableCache ShmemVariableCache;
+extern VariableCache ShmemVariableCache;
-#define MinXLOGbuffers 4
+#define MinXLOGbuffers 4
typedef struct XLgwrRqst
{
- XLogRecPtr Write; /* byte (1-based) to write out */
- XLogRecPtr Flush; /* byte (1-based) to flush */
+ XLogRecPtr Write; /* byte (1-based) to write out */
+ XLogRecPtr Flush; /* byte (1-based) to flush */
} XLgwrRqst;
typedef struct XLgwrResult
{
- XLogRecPtr Write; /* bytes written out */
- XLogRecPtr Flush; /* bytes flushed */
+ XLogRecPtr Write; /* bytes written out */
+ XLogRecPtr Flush; /* bytes flushed */
} XLgwrResult;
typedef struct XLogCtlInsert
{
- XLgwrResult LgwrResult;
- XLogRecPtr PrevRecord;
- uint16 curridx; /* current block index in cache */
- XLogPageHeader currpage;
- char *currpos;
+ XLgwrResult LgwrResult;
+ XLogRecPtr PrevRecord;
+ uint16 curridx; /* current block index in cache */
+ XLogPageHeader currpage;
+ char *currpos;
} XLogCtlInsert;
typedef struct XLogCtlWrite
{
- XLgwrResult LgwrResult;
- uint16 curridx; /* index of next block to write */
+ XLgwrResult LgwrResult;
+ uint16 curridx; /* index of next block to write */
} XLogCtlWrite;
#ifndef HAS_TEST_AND_SET
-#define TAS(lck) 0
-#define S_UNLOCK(lck)
-#define S_INIT_LOCK(lck)
+#define TAS(lck) 0
+#define S_UNLOCK(lck)
+#define S_INIT_LOCK(lck)
#endif
typedef struct XLogCtlData
{
- XLogCtlInsert Insert;
- XLgwrRqst LgwrRqst;
- XLgwrResult LgwrResult;
- XLogCtlWrite Write;
- char *pages;
- XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
- uint32 XLogCacheByte;
- uint32 XLogCacheBlck;
+ XLogCtlInsert Insert;
+ XLgwrRqst LgwrRqst;
+ XLgwrResult LgwrResult;
+ XLogCtlWrite Write;
+ char *pages;
+ XLogRecPtr *xlblocks; /* 1st byte ptr-s + BLCKSZ */
+ uint32 XLogCacheByte;
+ uint32 XLogCacheBlck;
#ifdef HAS_TEST_AND_SET
- slock_t insert_lck;
- slock_t info_lck;
- slock_t lgwr_lck;
+ slock_t insert_lck;
+ slock_t info_lck;
+ slock_t lgwr_lck;
#endif
} XLogCtlData;
-static XLogCtlData *XLogCtl = NULL;
+static XLogCtlData *XLogCtl = NULL;
typedef enum DBState
{
@@ -114,69 +114,69 @@ typedef enum DBState
typedef struct ControlFileData
{
- uint32 logId; /* current log file id */
- uint32 logSeg; /* current log file segment (1-based) */
- XLogRecPtr checkPoint; /* last check point record ptr */
- time_t time; /* time stamp of last modification */
- DBState state; /* */
+ uint32 logId; /* current log file id */
+ uint32 logSeg; /* current log file segment (1-based) */
+ XLogRecPtr checkPoint; /* last check point record ptr */
+ time_t time; /* time stamp of last modification */
+ DBState state; /* */
/*
- * this data is used to make sure that configuration of this DB
- * is compatible with the current backend
+ * this data is used to make sure that configuration of this DB is
+ * compatible with the current backend
*/
- uint32 blcksz; /* block size for this DB */
- uint32 relseg_size; /* blocks per segment of large relation */
- uint32 catalog_version_no; /* internal version number */
+ uint32 blcksz; /* block size for this DB */
+ uint32 relseg_size; /* blocks per segment of large relation */
+ uint32 catalog_version_no; /* internal version number */
/*
- * MORE DATA FOLLOWS AT THE END OF THIS STRUCTURE
- * - locations of data dirs
+ * MORE DATA FOLLOWS AT THE END OF THIS STRUCTURE - locations of data
+ * dirs
*/
} ControlFileData;
-static ControlFileData *ControlFile = NULL;
+static ControlFileData *ControlFile = NULL;
typedef struct CheckPoint
{
- XLogRecPtr redo; /* next RecPtr available when we */
- /* began to create CheckPoint */
- /* (i.e. REDO start point) */
- XLogRecPtr undo; /* first record of oldest in-progress */
- /* transaction when we started */
- /* (i.e. UNDO end point) */
- TransactionId nextXid;
- Oid nextOid;
+ XLogRecPtr redo; /* next RecPtr available when we */
+ /* began to create CheckPoint */
+ /* (i.e. REDO start point) */
+ XLogRecPtr undo; /* first record of oldest in-progress */
+ /* transaction when we started */
+ /* (i.e. UNDO end point) */
+ TransactionId nextXid;
+ Oid nextOid;
} CheckPoint;
-/*
- * We break each log file in 16Mb segments
+/*
+ * We break each log file in 16Mb segments
*/
#define XLogSegSize (16*1024*1024)
-#define XLogLastSeg (0xffffffff / XLogSegSize)
-#define XLogFileSize (XLogLastSeg * XLogSegSize)
+#define XLogLastSeg (0xffffffff / XLogSegSize)
+#define XLogFileSize (XLogLastSeg * XLogSegSize)
-#define XLogFileName(path, log, seg) \
+#define XLogFileName(path, log, seg) \
snprintf(path, MAXPGPATH, "%s%c%08X%08X", \
XLogDir, SEP_CHAR, log, seg)
-#define PrevBufIdx(curridx) \
+#define PrevBufIdx(curridx) \
((curridx == 0) ? XLogCtl->XLogCacheBlck : (curridx - 1))
-#define NextBufIdx(curridx) \
+#define NextBufIdx(curridx) \
((curridx == XLogCtl->XLogCacheBlck) ? 0 : (curridx + 1))
-#define XLByteLT(left, right) \
+#define XLByteLT(left, right) \
(right.xlogid > left.xlogid || \
(right.xlogid == left.xlogid && right.xrecoff > left.xrecoff))
-#define XLByteLE(left, right) \
+#define XLByteLE(left, right) \
(right.xlogid > left.xlogid || \
(right.xlogid == left.xlogid && right.xrecoff >= left.xrecoff))
-#define XLByteEQ(left, right) \
+#define XLByteEQ(left, right) \
(right.xlogid == left.xlogid && right.xrecoff == left.xrecoff)
-#define InitXLBuffer(curridx) (\
+#define InitXLBuffer(curridx) (\
XLogCtl->xlblocks[curridx].xrecoff = \
(XLogCtl->xlblocks[Insert->curridx].xrecoff == XLogFileSize) ? \
BLCKSZ : (XLogCtl->xlblocks[Insert->curridx].xrecoff + BLCKSZ), \
@@ -192,46 +192,46 @@ typedef struct CheckPoint
Insert->currpage->xlp_info = 0 \
)
-#define XRecOffIsValid(xrecoff) \
+#define XRecOffIsValid(xrecoff) \
(xrecoff % BLCKSZ >= SizeOfXLogPHD && \
(BLCKSZ - xrecoff % BLCKSZ) >= SizeOfXLogRecord)
-static void GetFreeXLBuffer(void);
-static void XLogWrite(char *buffer);
-static int XLogFileInit(uint32 log, uint32 seg);
-static int XLogFileOpen(uint32 log, uint32 seg, bool econt);
-static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, char *buffer);
-static char *str_time(time_t tnow);
-
-static XLgwrResult LgwrResult = {{0, 0}, {0, 0}};
-static XLgwrRqst LgwrRqst = {{0, 0}, {0, 0}};
-
-static int logFile = -1;
-static uint32 logId = 0;
-static uint32 logSeg = 0;
-static uint32 logOff = 0;
-
-static XLogRecPtr ReadRecPtr;
-static XLogRecPtr EndRecPtr;
-static int readFile = -1;
-static uint32 readId = 0;
-static uint32 readSeg = 0;
-static uint32 readOff = 0;
-static char readBuf[BLCKSZ];
-static XLogRecord *nextRecord = NULL;
+static void GetFreeXLBuffer(void);
+static void XLogWrite(char *buffer);
+static int XLogFileInit(uint32 log, uint32 seg);
+static int XLogFileOpen(uint32 log, uint32 seg, bool econt);
+static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, char *buffer);
+static char *str_time(time_t tnow);
+
+static XLgwrResult LgwrResult = {{0, 0}, {0, 0}};
+static XLgwrRqst LgwrRqst = {{0, 0}, {0, 0}};
+
+static int logFile = -1;
+static uint32 logId = 0;
+static uint32 logSeg = 0;
+static uint32 logOff = 0;
+
+static XLogRecPtr ReadRecPtr;
+static XLogRecPtr EndRecPtr;
+static int readFile = -1;
+static uint32 readId = 0;
+static uint32 readSeg = 0;
+static uint32 readOff = 0;
+static char readBuf[BLCKSZ];
+static XLogRecord *nextRecord = NULL;
XLogRecPtr
XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
{
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogRecord *record;
- XLogSubRecord *subrecord;
- XLogRecPtr RecPtr;
- uint32 len = hdrlen + buflen,
- freespace,
- wlen;
- uint16 curridx;
- bool updrqst = false;
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ XLogRecord *record;
+ XLogSubRecord *subrecord;
+ XLogRecPtr RecPtr;
+ uint32 len = hdrlen + buflen,
+ freespace,
+ wlen;
+ uint16 curridx;
+ bool updrqst = false;
if (len == 0 || len > MAXLOGRECSZ)
elog(STOP, "XLogInsert: invalid record len %u", len);
@@ -242,7 +242,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
bool do_lgwr = true;
unsigned i = 0;
- for ( ; ; )
+ for (;;)
{
/* try to read LgwrResult while waiting for insert lock */
if (!TAS(&(XLogCtl->info_lck)))
@@ -250,14 +250,15 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
LgwrRqst = XLogCtl->LgwrRqst;
LgwrResult = XLogCtl->LgwrResult;
S_UNLOCK(&(XLogCtl->info_lck));
+
/*
* If cache is half filled then try to acquire lgwr lock
* and do LGWR work, but only once.
*/
- if (do_lgwr &&
- (LgwrRqst.Write.xlogid != LgwrResult.Write.xlogid ||
- (LgwrRqst.Write.xrecoff - LgwrResult.Write.xrecoff >=
- XLogCtl->XLogCacheByte / 2)))
+ if (do_lgwr &&
+ (LgwrRqst.Write.xlogid != LgwrResult.Write.xlogid ||
+ (LgwrRqst.Write.xrecoff - LgwrResult.Write.xrecoff >=
+ XLogCtl->XLogCacheByte / 2)))
{
if (!TAS(&(XLogCtl->lgwr_lck)))
{
@@ -282,13 +283,13 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
}
}
- freespace = ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
+ freespace = ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;
if (freespace < SizeOfXLogRecord)
{
curridx = NextBufIdx(Insert->curridx);
if (XLByteLE(XLogCtl->xlblocks[curridx], LgwrResult.Write))
InitXLBuffer(curridx);
- else
+ else
GetFreeXLBuffer();
freespace = BLCKSZ - SizeOfXLogPHD;
}
@@ -296,7 +297,7 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
curridx = Insert->curridx;
freespace -= SizeOfXLogRecord;
- record = (XLogRecord*) Insert->currpos;
+ record = (XLogRecord *) Insert->currpos;
record->xl_prev = Insert->PrevRecord;
if (rmid != RM_XLOG_ID)
record->xl_xact_prev = MyLastRecPtr;
@@ -310,9 +311,9 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
record->xl_info = (len > freespace) ? XLR_TO_BE_CONTINUED : 0;
record->xl_rmid = rmid;
RecPtr.xlogid = XLogCtl->xlblocks[curridx].xlogid;
- RecPtr.xrecoff =
- XLogCtl->xlblocks[curridx].xrecoff - BLCKSZ +
- Insert->currpos - ((char*) Insert->currpage);
+ RecPtr.xrecoff =
+ XLogCtl->xlblocks[curridx].xrecoff - BLCKSZ +
+ Insert->currpos - ((char *) Insert->currpage);
if (MyLastRecPtr.xrecoff == 0 && rmid != RM_XLOG_ID)
{
SpinAcquire(SInvalLock);
@@ -339,8 +340,8 @@ XLogInsert(RmgrId rmid, char *hdr, uint32 hdrlen, char *buf, uint32 buflen)
buf += wlen;
Insert->currpos += wlen;
}
- Insert->currpos = ((char*)Insert->currpage) +
- DOUBLEALIGN(Insert->currpos - ((char*)Insert->currpage));
+ Insert->currpos = ((char *) Insert->currpage) +
+ DOUBLEALIGN(Insert->currpos - ((char *) Insert->currpage));
len = hdrlen + buflen;
}
@@ -360,7 +361,7 @@ nbuf:
}
freespace = BLCKSZ - SizeOfXLogPHD - SizeOfXLogSubRecord;
Insert->currpage->xlp_info |= XLP_FIRST_IS_SUBRECORD;
- subrecord = (XLogSubRecord*) Insert->currpos;
+ subrecord = (XLogSubRecord *) Insert->currpos;
Insert->currpos += SizeOfXLogSubRecord;
if (hdrlen > freespace)
{
@@ -398,17 +399,19 @@ nbuf:
}
subrecord->xl_info = 0;
RecPtr.xlogid = XLogCtl->xlblocks[curridx].xlogid;
- RecPtr.xrecoff = XLogCtl->xlblocks[curridx].xrecoff -
- BLCKSZ + SizeOfXLogPHD + subrecord->xl_len;
- Insert->currpos = ((char*)Insert->currpage) +
- DOUBLEALIGN(Insert->currpos - ((char*)Insert->currpage));
+ RecPtr.xrecoff = XLogCtl->xlblocks[curridx].xrecoff -
+ BLCKSZ + SizeOfXLogPHD + subrecord->xl_len;
+ Insert->currpos = ((char *) Insert->currpage) +
+ DOUBLEALIGN(Insert->currpos - ((char *) Insert->currpage));
}
- freespace = ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
+ freespace = ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;
+
/*
* All done! Update global LgwrRqst if some block was filled up.
*/
if (freespace < SizeOfXLogRecord)
- updrqst = true; /* curridx is filled and available for writing out */
+ updrqst = true; /* curridx is filled and available for
+ * writing out */
else
curridx = PrevBufIdx(curridx);
LgwrRqst.Write = XLogCtl->xlblocks[curridx];
@@ -419,7 +422,7 @@ nbuf:
{
unsigned i = 0;
- for ( ; ; )
+ for (;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@@ -433,21 +436,21 @@ nbuf:
}
return (RecPtr);
-}
+}
void
XLogFlush(XLogRecPtr record)
{
- XLogRecPtr WriteRqst;
- char buffer[BLCKSZ];
- char *usebuf = NULL;
- unsigned i = 0;
- bool force_lgwr = false;
+ XLogRecPtr WriteRqst;
+ char buffer[BLCKSZ];
+ char *usebuf = NULL;
+ unsigned i = 0;
+ bool force_lgwr = false;
if (XLByteLE(record, LgwrResult.Flush))
return;
WriteRqst = LgwrRqst.Write;
- for ( ; ; )
+ for (;;)
{
/* try to read LgwrResult */
if (!TAS(&(XLogCtl->info_lck)))
@@ -470,9 +473,9 @@ XLogFlush(XLogRecPtr record)
/* if something was added to log cache then try to flush this too */
if (!TAS(&(XLogCtl->insert_lck)))
{
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- uint32 freespace =
- ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ uint32 freespace =
+ ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;
if (freespace < SizeOfXLogRecord) /* buffer is full */
{
@@ -485,14 +488,14 @@ XLogFlush(XLogRecPtr record)
memcpy(usebuf, Insert->currpage, BLCKSZ - freespace);
memset(usebuf + BLCKSZ - freespace, 0, freespace);
WriteRqst = XLogCtl->xlblocks[Insert->curridx];
- WriteRqst.xrecoff = WriteRqst.xrecoff - BLCKSZ +
- Insert->currpos - ((char*) Insert->currpage);
+ WriteRqst.xrecoff = WriteRqst.xrecoff - BLCKSZ +
+ Insert->currpos - ((char *) Insert->currpage);
}
S_UNLOCK(&(XLogCtl->insert_lck));
force_lgwr = true;
}
- if (force_lgwr || WriteRqst.xlogid > record.xlogid ||
- (WriteRqst.xlogid == record.xlogid &&
+ if (force_lgwr || WriteRqst.xlogid > record.xlogid ||
+ (WriteRqst.xlogid == record.xlogid &&
WriteRqst.xrecoff >= record.xrecoff + BLCKSZ))
{
if (!TAS(&(XLogCtl->lgwr_lck)))
@@ -518,12 +521,12 @@ XLogFlush(XLogRecPtr record)
s_lock_sleep(i++);
}
- if (logFile >= 0 && (LgwrResult.Write.xlogid != logId ||
- (LgwrResult.Write.xrecoff - 1) / XLogSegSize != logSeg))
+ if (logFile >= 0 && (LgwrResult.Write.xlogid != logId ||
+ (LgwrResult.Write.xrecoff - 1) / XLogSegSize != logSeg))
{
if (close(logFile) != 0)
- elog(STOP, "Close(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Close(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
logFile = -1;
}
@@ -536,11 +539,11 @@ XLogFlush(XLogRecPtr record)
}
if (fsync(logFile) != 0)
- elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
LgwrResult.Flush = LgwrResult.Write;
- for (i = 0; ; )
+ for (i = 0;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@@ -562,12 +565,12 @@ XLogFlush(XLogRecPtr record)
static void
GetFreeXLBuffer()
{
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- XLogCtlWrite *Write = &XLogCtl->Write;
- uint16 curridx = NextBufIdx(Insert->curridx);
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ XLogCtlWrite *Write = &XLogCtl->Write;
+ uint16 curridx = NextBufIdx(Insert->curridx);
LgwrRqst.Write = XLogCtl->xlblocks[Insert->curridx];
- for ( ; ; )
+ for (;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@@ -581,6 +584,7 @@ GetFreeXLBuffer()
return;
}
}
+
/*
* LgwrResult lock is busy or un-updated. Try to acquire lgwr lock
* and write full blocks.
@@ -595,9 +599,10 @@ GetFreeXLBuffer()
InitXLBuffer(curridx);
return;
}
- /*
- * Have to write buffers while holding insert lock -
- * not good...
+
+ /*
+ * Have to write buffers while holding insert lock - not
+ * good...
*/
XLogWrite(NULL);
S_UNLOCK(&(XLogCtl->lgwr_lck));
@@ -613,22 +618,22 @@ GetFreeXLBuffer()
static void
XLogWrite(char *buffer)
{
- XLogCtlWrite *Write = &XLogCtl->Write;
- char *from;
- uint32 wcnt = 0;
- int i = 0;
+ XLogCtlWrite *Write = &XLogCtl->Write;
+ char *from;
+ uint32 wcnt = 0;
+ int i = 0;
- for ( ; XLByteLT(LgwrResult.Write, LgwrRqst.Write); )
+ for (; XLByteLT(LgwrResult.Write, LgwrRqst.Write);)
{
LgwrResult.Write = XLogCtl->xlblocks[Write->curridx];
- if (LgwrResult.Write.xlogid != logId ||
+ if (LgwrResult.Write.xlogid != logId ||
(LgwrResult.Write.xrecoff - 1) / XLogSegSize != logSeg)
{
if (wcnt > 0)
{
if (fsync(logFile) != 0)
- elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
if (LgwrResult.Write.xlogid != logId)
LgwrResult.Flush.xrecoff = XLogFileSize;
else
@@ -648,8 +653,8 @@ XLogWrite(char *buffer)
if (logFile >= 0)
{
if (close(logFile) != 0)
- elog(STOP, "Close(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Close(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
logFile = -1;
}
logId = LgwrResult.Write.xlogid;
@@ -675,9 +680,9 @@ XLogWrite(char *buffer)
if (logOff != (LgwrResult.Write.xrecoff - BLCKSZ) % XLogSegSize)
{
logOff = (LgwrResult.Write.xrecoff - BLCKSZ) % XLogSegSize;
- if (lseek(logFile, (off_t)logOff, SEEK_SET) < 0)
- elog(STOP, "Lseek(logfile %u seg %u off %u) failed: %d",
- logId, logSeg, logOff, errno);
+ if (lseek(logFile, (off_t) logOff, SEEK_SET) < 0)
+ elog(STOP, "Lseek(logfile %u seg %u off %u) failed: %d",
+ logId, logSeg, logOff, errno);
}
if (buffer != NULL && XLByteLT(LgwrRqst.Write, LgwrResult.Write))
@@ -686,8 +691,8 @@ XLogWrite(char *buffer)
from = XLogCtl->pages + Write->curridx * BLCKSZ;
if (write(logFile, from, BLCKSZ) != BLCKSZ)
- elog(STOP, "Write(logfile %u seg %u off %u) failed: %d",
- logId, logSeg, logOff, errno);
+ elog(STOP, "Write(logfile %u seg %u off %u) failed: %d",
+ logId, logSeg, logOff, errno);
wcnt++;
logOff += BLCKSZ;
@@ -700,16 +705,16 @@ XLogWrite(char *buffer)
if (wcnt == 0)
elog(STOP, "XLogWrite: nothing written");
- if (XLByteLT(LgwrResult.Flush, LgwrRqst.Flush) &&
+ if (XLByteLT(LgwrResult.Flush, LgwrRqst.Flush) &&
XLByteLE(LgwrRqst.Flush, LgwrResult.Write))
{
if (fsync(logFile) != 0)
- elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
LgwrResult.Flush = LgwrResult.Write;
}
- for ( ; ; )
+ for (;;)
{
if (!TAS(&(XLogCtl->info_lck)))
{
@@ -727,54 +732,54 @@ XLogWrite(char *buffer)
static int
XLogFileInit(uint32 log, uint32 seg)
{
- char path[MAXPGPATH];
- int fd;
+ char path[MAXPGPATH];
+ int fd;
XLogFileName(path, log, seg);
unlink(path);
tryAgain:
#ifndef __CYGWIN__
- fd = open(path, O_RDWR|O_CREAT|O_EXCL, S_IRUSR|S_IWUSR);
+ fd = open(path, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
#else
- fd = open(path, O_RDWR|O_CREAT|O_EXCL|O_BINARY, S_IRUSR|S_IWUSR);
+ fd = open(path, O_RDWR | O_CREAT | O_EXCL | O_BINARY, S_IRUSR | S_IWUSR);
#endif
if (fd < 0 && (errno == EMFILE || errno == ENFILE))
{
fd = errno;
if (!ReleaseDataFile())
- elog(STOP, "Create(logfile %u seg %u) failed: %d (and no one data file can be closed)",
- logId, logSeg, fd);
+ elog(STOP, "Create(logfile %u seg %u) failed: %d (and no one data file can be closed)",
+ logId, logSeg, fd);
goto tryAgain;
}
if (fd < 0)
- elog(STOP, "Init(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Init(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
if (lseek(fd, XLogSegSize - 1, SEEK_SET) != (off_t) (XLogSegSize - 1))
- elog(STOP, "Lseek(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Lseek(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
if (write(fd, "", 1) != 1)
- elog(STOP, "Init(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Init(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
if (fsync(fd) != 0)
- elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Fsync(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
if (lseek(fd, 0, SEEK_SET) < 0)
- elog(STOP, "Lseek(logfile %u seg %u off %u) failed: %d",
- log, seg, 0, errno);
+ elog(STOP, "Lseek(logfile %u seg %u off %u) failed: %d",
+ log, seg, 0, errno);
- return(fd);
+ return (fd);
}
static int
XLogFileOpen(uint32 log, uint32 seg, bool econt)
{
- char path[MAXPGPATH];
- int fd;
+ char path[MAXPGPATH];
+ int fd;
XLogFileName(path, log, seg);
@@ -788,8 +793,8 @@ tryAgain:
{
fd = errno;
if (!ReleaseDataFile())
- elog(STOP, "Open(logfile %u seg %u) failed: %d (and no one data file can be closed)",
- logId, logSeg, fd);
+ elog(STOP, "Open(logfile %u seg %u) failed: %d (and no one data file can be closed)",
+ logId, logSeg, fd);
goto tryAgain;
}
if (fd < 0)
@@ -797,24 +802,24 @@ tryAgain:
if (econt && errno == ENOENT)
{
elog(LOG, "Open(logfile %u seg %u) failed: file doesn't exist",
- logId, logSeg);
+ logId, logSeg);
return (fd);
}
- elog(STOP, "Open(logfile %u seg %u) failed: %d",
- logId, logSeg, errno);
+ elog(STOP, "Open(logfile %u seg %u) failed: %d",
+ logId, logSeg, errno);
}
- return(fd);
+ return (fd);
}
-static XLogRecord*
+static XLogRecord *
ReadRecord(XLogRecPtr *RecPtr, char *buffer)
{
- XLogRecord *record;
- XLogRecPtr tmpRecPtr = EndRecPtr;
- bool nextmode = (RecPtr == NULL);
- int emode = (nextmode) ? LOG : STOP;
- bool noBlck = false;
+ XLogRecord *record;
+ XLogRecPtr tmpRecPtr = EndRecPtr;
+ bool nextmode = (RecPtr == NULL);
+ int emode = (nextmode) ? LOG : STOP;
+ bool noBlck = false;
if (nextmode)
{
@@ -835,10 +840,10 @@ ReadRecord(XLogRecPtr *RecPtr, char *buffer)
}
else if (!XRecOffIsValid(RecPtr->xrecoff))
elog(STOP, "ReadRecord: invalid record offset in (%u, %u)",
- RecPtr->xlogid, RecPtr->xrecoff);
+ RecPtr->xlogid, RecPtr->xrecoff);
- if (readFile >= 0 && (RecPtr->xlogid != readId ||
- RecPtr->xrecoff / XLogSegSize != readSeg))
+ if (readFile >= 0 && (RecPtr->xlogid != readId ||
+ RecPtr->xrecoff / XLogSegSize != readSeg))
{
close(readFile);
readFile = -1;
@@ -856,59 +861,59 @@ ReadRecord(XLogRecPtr *RecPtr, char *buffer)
if (noBlck || readOff != (RecPtr->xrecoff % XLogSegSize) / BLCKSZ)
{
readOff = (RecPtr->xrecoff % XLogSegSize) / BLCKSZ;
- if (lseek(readFile, (off_t)(readOff * BLCKSZ), SEEK_SET) < 0)
- elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
+ if (lseek(readFile, (off_t) (readOff * BLCKSZ), SEEK_SET) < 0)
+ elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
- elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
- if (((XLogPageHeader)readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
+ elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
+ if (((XLogPageHeader) readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
{
elog(emode, "ReadRecord: invalid magic number %u in logfile %u seg %u off %u",
- ((XLogPageHeader)readBuf)->xlp_magic,
- readId, readSeg, readOff);
+ ((XLogPageHeader) readBuf)->xlp_magic,
+ readId, readSeg, readOff);
goto next_record_is_invalid;
}
}
- if ((((XLogPageHeader)readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD) &&
+ if ((((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD) &&
RecPtr->xrecoff % BLCKSZ == SizeOfXLogPHD)
{
elog(emode, "ReadRecord: subrecord is requested by (%u, %u)",
- RecPtr->xlogid, RecPtr->xrecoff);
+ RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
- record = (XLogRecord*)((char*) readBuf + RecPtr->xrecoff % BLCKSZ);
+ record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
got_record:;
- if (record->xl_len == 0 || record->xl_len >
+ if (record->xl_len == 0 || record->xl_len >
(BLCKSZ - RecPtr->xrecoff % BLCKSZ - SizeOfXLogRecord))
{
elog(emode, "ReadRecord: invalid record len %u in (%u, %u)",
- record->xl_len, RecPtr->xlogid, RecPtr->xrecoff);
+ record->xl_len, RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
if (record->xl_rmid > RM_MAX_ID)
{
elog(emode, "ReadRecord: invalid resource managed id %u in (%u, %u)",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff);
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
nextRecord = NULL;
if (record->xl_info & XLR_TO_BE_CONTINUED)
{
- XLogSubRecord *subrecord;
- uint32 len = record->xl_len;
+ XLogSubRecord *subrecord;
+ uint32 len = record->xl_len;
if (record->xl_len + RecPtr->xrecoff % BLCKSZ + SizeOfXLogRecord != BLCKSZ)
{
elog(emode, "ReadRecord: invalid fragmented record len %u in (%u, %u)",
- record->xl_len, RecPtr->xlogid, RecPtr->xrecoff);
+ record->xl_len, RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
memcpy(buffer, record, record->xl_len + SizeOfXLogRecord);
- record = (XLogRecord*) buffer;
+ record = (XLogRecord *) buffer;
buffer += record->xl_len + SizeOfXLogRecord;
- for ( ; ; )
+ for (;;)
{
readOff++;
if (readOff == XLogSegSize / BLCKSZ)
@@ -926,114 +931,113 @@ got_record:;
goto next_record_is_invalid;
}
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
- elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
- if (((XLogPageHeader)readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
+ elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
+ if (((XLogPageHeader) readBuf)->xlp_magic != XLOG_PAGE_MAGIC)
{
elog(emode, "ReadRecord: invalid magic number %u in logfile %u seg %u off %u",
- ((XLogPageHeader)readBuf)->xlp_magic,
- readId, readSeg, readOff);
+ ((XLogPageHeader) readBuf)->xlp_magic,
+ readId, readSeg, readOff);
goto next_record_is_invalid;
}
- if (!(((XLogPageHeader)readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD))
+ if (!(((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_SUBRECORD))
{
elog(emode, "ReadRecord: there is no subrecord flag in logfile %u seg %u off %u",
- readId, readSeg, readOff);
+ readId, readSeg, readOff);
goto next_record_is_invalid;
}
- subrecord = (XLogSubRecord*)((char*) readBuf + SizeOfXLogPHD);
- if (subrecord->xl_len == 0 || subrecord->xl_len >
+ subrecord = (XLogSubRecord *) ((char *) readBuf + SizeOfXLogPHD);
+ if (subrecord->xl_len == 0 || subrecord->xl_len >
(BLCKSZ - SizeOfXLogPHD - SizeOfXLogSubRecord))
{
elog(emode, "ReadRecord: invalid subrecord len %u in logfile %u seg %u off %u",
- subrecord->xl_len, readId, readSeg, readOff);
+ subrecord->xl_len, readId, readSeg, readOff);
goto next_record_is_invalid;
}
len += subrecord->xl_len;
if (len > MAXLOGRECSZ)
{
elog(emode, "ReadRecord: too long record len %u in (%u, %u)",
- len, RecPtr->xlogid, RecPtr->xrecoff);
+ len, RecPtr->xlogid, RecPtr->xrecoff);
goto next_record_is_invalid;
}
- memcpy(buffer, (char*)subrecord + SizeOfXLogSubRecord, subrecord->xl_len);
+ memcpy(buffer, (char *) subrecord + SizeOfXLogSubRecord, subrecord->xl_len);
buffer += subrecord->xl_len;
if (subrecord->xl_info & XLR_TO_BE_CONTINUED)
{
- if (subrecord->xl_len +
+ if (subrecord->xl_len +
SizeOfXLogPHD + SizeOfXLogSubRecord != BLCKSZ)
{
elog(emode, "ReadRecord: invalid fragmented subrecord len %u in logfile %u seg %u off %u",
- subrecord->xl_len, readId, readSeg, readOff);
+ subrecord->xl_len, readId, readSeg, readOff);
goto next_record_is_invalid;
}
continue;
}
break;
}
- if (BLCKSZ - SizeOfXLogRecord >=
+ if (BLCKSZ - SizeOfXLogRecord >=
subrecord->xl_len + SizeOfXLogPHD + SizeOfXLogSubRecord)
{
- nextRecord = (XLogRecord*)
- ((char*)subrecord + subrecord->xl_len + SizeOfXLogSubRecord);
+ nextRecord = (XLogRecord *)
+ ((char *) subrecord + subrecord->xl_len + SizeOfXLogSubRecord);
}
EndRecPtr.xlogid = readId;
- EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff * BLCKSZ +
+ EndRecPtr.xrecoff = readSeg * XLogSegSize + readOff * BLCKSZ +
SizeOfXLogPHD + SizeOfXLogSubRecord + subrecord->xl_len;
ReadRecPtr = *RecPtr;
- return(record);
+ return (record);
}
- if (BLCKSZ - SizeOfXLogRecord >=
+ if (BLCKSZ - SizeOfXLogRecord >=
record->xl_len + RecPtr->xrecoff % BLCKSZ + SizeOfXLogRecord)
- {
- nextRecord = (XLogRecord*)((char*)record + record->xl_len + SizeOfXLogRecord);
- }
+ nextRecord = (XLogRecord *) ((char *) record + record->xl_len + SizeOfXLogRecord);
EndRecPtr.xlogid = RecPtr->xlogid;
EndRecPtr.xrecoff = RecPtr->xrecoff + record->xl_len + SizeOfXLogRecord;
ReadRecPtr = *RecPtr;
- return(record);
+ return (record);
next_record_is_invalid:;
close(readFile);
readFile = -1;
nextRecord = NULL;
memset(buffer, 0, SizeOfXLogRecord);
- record = (XLogRecord*) buffer;
+ record = (XLogRecord *) buffer;
+
/*
* If we assumed that next record began on the same page where
* previous one ended - zero end of page.
*/
if (XLByteEQ(tmpRecPtr, EndRecPtr))
{
- Assert (EndRecPtr.xrecoff % BLCKSZ > (SizeOfXLogPHD + SizeOfXLogSubRecord) &&
- BLCKSZ - EndRecPtr.xrecoff % BLCKSZ >= SizeOfXLogRecord);
+ Assert(EndRecPtr.xrecoff % BLCKSZ > (SizeOfXLogPHD + SizeOfXLogSubRecord) &&
+ BLCKSZ - EndRecPtr.xrecoff % BLCKSZ >= SizeOfXLogRecord);
readId = EndRecPtr.xlogid;
readSeg = EndRecPtr.xrecoff / XLogSegSize;
readOff = (EndRecPtr.xrecoff % XLogSegSize) / BLCKSZ;
elog(LOG, "Formatting logfile %u seg %u block %u at offset %u",
- readId, readSeg, readOff, EndRecPtr.xrecoff % BLCKSZ);
+ readId, readSeg, readOff, EndRecPtr.xrecoff % BLCKSZ);
readFile = XLogFileOpen(readId, readSeg, false);
- if (lseek(readFile, (off_t)(readOff * BLCKSZ), SEEK_SET) < 0)
- elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
+ if (lseek(readFile, (off_t) (readOff * BLCKSZ), SEEK_SET) < 0)
+ elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
- elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
- memset(readBuf + EndRecPtr.xrecoff % BLCKSZ, 0,
- BLCKSZ - EndRecPtr.xrecoff % BLCKSZ);
- if (lseek(readFile, (off_t)(readOff * BLCKSZ), SEEK_SET) < 0)
- elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
+ elog(STOP, "ReadRecord: read(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
+ memset(readBuf + EndRecPtr.xrecoff % BLCKSZ, 0,
+ BLCKSZ - EndRecPtr.xrecoff % BLCKSZ);
+ if (lseek(readFile, (off_t) (readOff * BLCKSZ), SEEK_SET) < 0)
+ elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
if (write(readFile, readBuf, BLCKSZ) != BLCKSZ)
- elog(STOP, "ReadRecord: write(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
+ elog(STOP, "ReadRecord: write(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
readOff++;
}
else
{
- Assert (EndRecPtr.xrecoff % BLCKSZ == 0 ||
- BLCKSZ - EndRecPtr.xrecoff % BLCKSZ < SizeOfXLogRecord);
+ Assert(EndRecPtr.xrecoff % BLCKSZ == 0 ||
+ BLCKSZ - EndRecPtr.xrecoff % BLCKSZ < SizeOfXLogRecord);
readId = tmpRecPtr.xlogid;
readSeg = tmpRecPtr.xrecoff / XLogSegSize;
readOff = (tmpRecPtr.xrecoff % XLogSegSize) / BLCKSZ;
@@ -1043,26 +1047,26 @@ next_record_is_invalid:;
{
if (!XLByteEQ(tmpRecPtr, EndRecPtr))
elog(LOG, "Formatting logfile %u seg %u block %u at offset 0",
- readId, readSeg, readOff);
+ readId, readSeg, readOff);
readOff *= BLCKSZ;
memset(readBuf, 0, BLCKSZ);
readFile = XLogFileOpen(readId, readSeg, false);
- if (lseek(readFile, (off_t)readOff, SEEK_SET) < 0)
- elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
+ if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
+ elog(STOP, "ReadRecord: lseek(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
while (readOff < XLogSegSize)
{
if (write(readFile, readBuf, BLCKSZ) != BLCKSZ)
- elog(STOP, "ReadRecord: write(logfile %u seg %u off %u) failed: %d",
- readId, readSeg, readOff, errno);
+ elog(STOP, "ReadRecord: write(logfile %u seg %u off %u) failed: %d",
+ readId, readSeg, readOff, errno);
readOff += BLCKSZ;
}
}
if (readFile >= 0)
{
if (fsync(readFile) < 0)
- elog(STOP, "ReadRecord: fsync(logfile %u seg %u) failed: %d",
- readId, readSeg, errno);
+ elog(STOP, "ReadRecord: fsync(logfile %u seg %u) failed: %d",
+ readId, readSeg, errno);
close(readFile);
readFile = -1;
}
@@ -1084,19 +1088,19 @@ next_record_is_invalid:;
readId++;
}
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
XLogFileName(path, readId, readSeg);
unlink(path);
}
- return(record);
+ return (record);
}
void
UpdateControlFile()
{
- int fd;
+ int fd;
tryAgain:
#ifndef __CYGWIN__
@@ -1108,8 +1112,8 @@ tryAgain:
{
fd = errno;
if (!ReleaseDataFile())
- elog(STOP, "Open(cntlfile) failed: %d (and no one data file can be closed)",
- fd);
+ elog(STOP, "Open(cntlfile) failed: %d (and no one data file can be closed)",
+ fd);
goto tryAgain;
}
if (fd < 0)
@@ -1132,23 +1136,23 @@ XLOGShmemSize()
if (XLOGbuffers < MinXLOGbuffers)
XLOGbuffers = MinXLOGbuffers;
- return(sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
+ return (sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
sizeof(XLogRecPtr) * XLOGbuffers + BLCKSZ);
}
void
XLOGShmemInit(void)
{
- bool found;
+ bool found;
if (XLOGbuffers < MinXLOGbuffers)
XLOGbuffers = MinXLOGbuffers;
- ControlFile = (ControlFileData*)
+ ControlFile = (ControlFileData *)
ShmemInitStruct("Control File", BLCKSZ, &found);
Assert(!found);
- XLogCtl = (XLogCtlData*)
- ShmemInitStruct("XLOG Ctl", sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
+ XLogCtl = (XLogCtlData *)
+ ShmemInitStruct("XLOG Ctl", sizeof(XLogCtlData) + BLCKSZ * XLOGbuffers +
sizeof(XLogRecPtr) * XLOGbuffers, &found);
Assert(!found);
}
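
For scale, here is the XLOGShmemSize() arithmetic above worked through with the MinXLOGbuffers floor defined earlier in this file and an assumed BLCKSZ of 8192; the two sizeof() values are placeholders, since the real struct layouts are not part of this diff.

#include <stdio.h>

#define BLCKSZ          8192    /* assumed block size (not stated in this diff) */
#define MinXLOGbuffers  4       /* from the #define earlier in this file        */

int
main(void)
{
    int    XLOGbuffers = 2;     /* a too-small request ...                */
    size_t ctl_size    = 128;   /* placeholder for sizeof(XLogCtlData)    */
    size_t recptr_size = 8;     /* placeholder for sizeof(XLogRecPtr)     */
    size_t total;

    if (XLOGbuffers < MinXLOGbuffers)
        XLOGbuffers = MinXLOGbuffers;   /* ... is raised to the 4-buffer floor */

    /* ctl struct + one page per buffer + one XLogRecPtr per buffer
     * + one extra BLCKSZ for the separately allocated "Control File" block */
    total = ctl_size + (size_t) BLCKSZ * XLOGbuffers
          + recptr_size * XLOGbuffers + BLCKSZ;

    printf("XLOG shmem request: %zu bytes\n", total);   /* 128 + 32768 + 32 + 8192 = 41120 */
    return 0;
}

With the 4-buffer floor and an 8K block size this comes to roughly 41 kB of shared memory under the stated assumptions.
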
@@ -1159,43 +1163,45 @@ XLOGShmemInit(void)
void
BootStrapXLOG()
{
- int fd;
- char buffer[BLCKSZ];
- CheckPoint checkPoint;
+ int fd;
+ char buffer[BLCKSZ];
+ CheckPoint checkPoint;
#ifdef NOT_USED
- XLogPageHeader page = (XLogPageHeader)buffer;
- XLogRecord *record;
+ XLogPageHeader page = (XLogPageHeader) buffer;
+ XLogRecord *record;
+
#endif
#ifndef __CYGWIN__
- fd = open(ControlFilePath, O_RDWR|O_CREAT|O_EXCL, S_IRUSR|S_IWUSR);
+ fd = open(ControlFilePath, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR);
#else
- fd = open(ControlFilePath, O_RDWR|O_CREAT|O_EXCL|O_BINARY, S_IRUSR|S_IWUSR);
+ fd = open(ControlFilePath, O_RDWR | O_CREAT | O_EXCL | O_BINARY, S_IRUSR | S_IWUSR);
#endif
if (fd < 0)
- elog(STOP, "BootStrapXLOG failed to create control file (%s): %d",
- ControlFilePath, errno);
+ elog(STOP, "BootStrapXLOG failed to create control file (%s): %d",
+ ControlFilePath, errno);
checkPoint.redo.xlogid = 0;
checkPoint.redo.xrecoff = SizeOfXLogPHD;
checkPoint.undo = checkPoint.redo;
checkPoint.nextXid = FirstTransactionId;
- checkPoint.nextOid = BootstrapObjectIdData;
+ checkPoint.nextOid = BootstrapObjectIdData;
#ifdef NOT_USED
memset(buffer, 0, BLCKSZ);
page->xlp_magic = XLOG_PAGE_MAGIC;
page->xlp_info = 0;
- record = (XLogRecord*) ((char*)page + SizeOfXLogPHD);
- record->xl_prev.xlogid = 0; record->xl_prev.xrecoff = 0;
+ record = (XLogRecord *) ((char *) page + SizeOfXLogPHD);
+ record->xl_prev.xlogid = 0;
+ record->xl_prev.xrecoff = 0;
record->xl_xact_prev = record->xl_prev;
record->xl_xid = InvalidTransactionId;
record->xl_len = sizeof(checkPoint);
record->xl_info = 0;
record->xl_rmid = RM_XLOG_ID;
- memcpy((char*)record + SizeOfXLogRecord, &checkPoint, sizeof(checkPoint));
+ memcpy((char *) record + SizeOfXLogRecord, &checkPoint, sizeof(checkPoint));
logFile = XLogFileInit(0, 0);
@@ -1211,7 +1217,7 @@ BootStrapXLOG()
#endif
memset(buffer, 0, BLCKSZ);
- ControlFile = (ControlFileData*) buffer;
+ ControlFile = (ControlFileData *) buffer;
ControlFile->logId = 0;
ControlFile->logSeg = 1;
ControlFile->checkPoint = checkPoint.redo;
@@ -1230,16 +1236,16 @@ BootStrapXLOG()
close(fd);
}
-static char*
+static char *
str_time(time_t tnow)
{
- char *result = ctime(&tnow);
- char *p = strchr(result, '\n');
+ char *result = ctime(&tnow);
+ char *p = strchr(result, '\n');
if (p != NULL)
*p = 0;
- return(result);
+ return (result);
}
/*
@@ -1249,21 +1255,22 @@ void
StartupXLOG()
{
#ifdef NOT_USED
- XLogCtlInsert *Insert;
- CheckPoint checkPoint;
- XLogRecPtr RecPtr,
- LastRec;
- XLogRecord *record;
- char buffer[MAXLOGRECSZ+SizeOfXLogRecord];
- int recovery = 0;
- bool sie_saved = false;
+ XLogCtlInsert *Insert;
+ CheckPoint checkPoint;
+ XLogRecPtr RecPtr,
+ LastRec;
+ XLogRecord *record;
+ char buffer[MAXLOGRECSZ + SizeOfXLogRecord];
+ int recovery = 0;
+ bool sie_saved = false;
+
#endif
- int fd;
+ int fd;
elog(LOG, "Data Base System is starting up at %s", str_time(time(NULL)));
- XLogCtl->xlblocks = (XLogRecPtr*) (((char *)XLogCtl) + sizeof(XLogCtlData));
- XLogCtl->pages = ((char *)XLogCtl->xlblocks + sizeof(XLogRecPtr) * XLOGbuffers);
+ XLogCtl->xlblocks = (XLogRecPtr *) (((char *) XLogCtl) + sizeof(XLogCtlData));
+ XLogCtl->pages = ((char *) XLogCtl->xlblocks + sizeof(XLogRecPtr) * XLOGbuffers);
XLogCtl->XLogCacheByte = BLCKSZ * XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
@@ -1291,8 +1298,8 @@ tryAgain:
{
fd = errno;
if (!ReleaseDataFile())
- elog(STOP, "Open(\"%s\") failed: %d (and no one data file can be closed)",
- ControlFilePath, fd);
+ elog(STOP, "Open(\"%s\") failed: %d (and no one data file can be closed)",
+ ControlFilePath, fd);
goto tryAgain;
}
if (fd < 0)
@@ -1303,10 +1310,10 @@ tryAgain:
close(fd);
- if (ControlFile->logSeg == 0 ||
- ControlFile->time <= 0 ||
- ControlFile->state < DB_SHUTDOWNED ||
- ControlFile->state > DB_IN_PRODUCTION ||
+ if (ControlFile->logSeg == 0 ||
+ ControlFile->time <= 0 ||
+ ControlFile->state < DB_SHUTDOWNED ||
+ ControlFile->state > DB_IN_PRODUCTION ||
!XRecOffIsValid(ControlFile->checkPoint.xrecoff))
elog(STOP, "Control file context is broken");
@@ -1323,20 +1330,20 @@ tryAgain:
if (ControlFile->state == DB_SHUTDOWNED)
elog(LOG, "Data Base System was shut down at %s",
- str_time(ControlFile->time));
+ str_time(ControlFile->time));
else if (ControlFile->state == DB_SHUTDOWNING)
elog(LOG, "Data Base System was interrupted when shutting down at %s",
- str_time(ControlFile->time));
+ str_time(ControlFile->time));
else if (ControlFile->state == DB_IN_RECOVERY)
{
elog(LOG, "Data Base System was interrupted being in recovery at %s\n"
- "\tThis propably means that some data blocks are corrupted\n"
- "\tAnd you will have to use last backup for recovery",
- str_time(ControlFile->time));
+ "\tThis propably means that some data blocks are corrupted\n"
+ "\tAnd you will have to use last backup for recovery",
+ str_time(ControlFile->time));
}
else if (ControlFile->state == DB_IN_PRODUCTION)
elog(LOG, "Data Base System was interrupted being in production at %s",
- str_time(ControlFile->time));
+ str_time(ControlFile->time));
#ifdef NOT_USED
@@ -1350,14 +1357,14 @@ tryAgain:
elog(STOP, "Invalid RMID in checkPoint record");
if (record->xl_len != sizeof(checkPoint))
elog(STOP, "Invalid length of checkPoint record");
- checkPoint = *((CheckPoint*)((char*)record + SizeOfXLogRecord));
+ checkPoint = *((CheckPoint *) ((char *) record + SizeOfXLogRecord));
elog(LOG, "Redo record at (%u, %u); Undo record at (%u, %u)",
- checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
- checkPoint.undo.xlogid, checkPoint.undo.xrecoff);
+ checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
+ checkPoint.undo.xlogid, checkPoint.undo.xrecoff);
elog(LOG, "NextTransactionId: %u; NextOid: %u",
- checkPoint.nextXid, checkPoint.nextOid);
- if (checkPoint.nextXid < FirstTransactionId ||
+ checkPoint.nextXid, checkPoint.nextOid);
+ if (checkPoint.nextXid < FirstTransactionId ||
checkPoint.nextOid < BootstrapObjectIdData)
#ifdef XLOG
elog(STOP, "Invalid NextTransactionId/NextOid");
@@ -1389,7 +1396,7 @@ tryAgain:
if (recovery > 0)
{
elog(LOG, "The DataBase system was not properly shut down\n"
- "\tAutomatic recovery is in progress...");
+ "\tAutomatic recovery is in progress...");
ControlFile->state = DB_IN_RECOVERY;
ControlFile->time = time(NULL);
UpdateControlFile();
@@ -1400,14 +1407,15 @@ tryAgain:
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
record = ReadRecord(&(checkPoint.redo), buffer);
- else /* read past CheckPoint record */
+ else
+/* read past CheckPoint record */
record = ReadRecord(NULL, buffer);
/* REDO */
if (record->xl_len != 0)
{
- elog(LOG, "Redo starts at (%u, %u)",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+ elog(LOG, "Redo starts at (%u, %u)",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
do
{
#ifdef XLOG
@@ -1417,8 +1425,8 @@ tryAgain:
RmgrTable[record->xl_rmid].rm_redo(EndRecPtr, record);
record = ReadRecord(NULL, buffer);
} while (record->xl_len != 0);
- elog(LOG, "Redo done at (%u, %u)",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+ elog(LOG, "Redo done at (%u, %u)",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
LastRec = ReadRecPtr;
}
else
@@ -1431,18 +1439,18 @@ tryAgain:
RecPtr = ReadRecPtr;
if (XLByteLT(checkPoint.undo, RecPtr))
{
- elog(LOG, "Undo starts at (%u, %u)",
- RecPtr.xlogid, RecPtr.xrecoff);
+ elog(LOG, "Undo starts at (%u, %u)",
+ RecPtr.xlogid, RecPtr.xrecoff);
do
{
record = ReadRecord(&RecPtr, buffer);
- if (TransactionIdIsValid(record->xl_xid) &&
+ if (TransactionIdIsValid(record->xl_xid) &&
!TransactionIdDidCommit(record->xl_xid))
RmgrTable[record->xl_rmid].rm_undo(record);
RecPtr = record->xl_prev;
} while (XLByteLE(checkPoint.undo, RecPtr));
- elog(LOG, "Undo done at (%u, %u)",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+ elog(LOG, "Undo done at (%u, %u)",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
}
else
{
@@ -1458,19 +1466,19 @@ tryAgain:
logOff = 0;
logFile = XLogFileOpen(logId, logSeg, false);
XLogCtl->xlblocks[0].xlogid = logId;
- XLogCtl->xlblocks[0].xrecoff =
- ((EndRecPtr.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
+ XLogCtl->xlblocks[0].xrecoff =
+ ((EndRecPtr.xrecoff - 1) / BLCKSZ + 1) * BLCKSZ;
Insert = &XLogCtl->Insert;
- memcpy((char*)(Insert->currpage), readBuf, BLCKSZ);
- Insert->currpos = ((char*) Insert->currpage) +
+ memcpy((char *) (Insert->currpage), readBuf, BLCKSZ);
+ Insert->currpos = ((char *) Insert->currpage) +
(EndRecPtr.xrecoff + BLCKSZ - XLogCtl->xlblocks[0].xrecoff);
Insert->PrevRecord = ControlFile->checkPoint;
if (recovery > 0)
{
- int i;
+ int i;
- /*
+ /*
* Let resource managers know that recovery is done
*/
for (i = 0; i <= RM_MAX_ID; i++)
@@ -1479,7 +1487,7 @@ tryAgain:
StopIfError = sie_saved;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
ControlFile->state = DB_IN_PRODUCTION;
ControlFile->time = time(NULL);
@@ -1508,11 +1516,11 @@ void
CreateCheckPoint(bool shutdown)
{
#ifdef NOT_USED
- CheckPoint checkPoint;
- XLogRecPtr recptr;
- XLogCtlInsert *Insert = &XLogCtl->Insert;
- uint32 freespace;
- uint16 curridx;
+ CheckPoint checkPoint;
+ XLogRecPtr recptr;
+ XLogCtlInsert *Insert = &XLogCtl->Insert;
+ uint32 freespace;
+ uint16 curridx;
memset(&checkPoint, 0, sizeof(checkPoint));
if (shutdown)
@@ -1531,21 +1539,21 @@ CreateCheckPoint(bool shutdown)
elog(STOP, "XLog insert lock is busy while data base is shutting down");
(void) select(0, NULL, NULL, NULL, &delay);
}
- freespace = ((char*) Insert->currpage) + BLCKSZ - Insert->currpos;
+ freespace = ((char *) Insert->currpage) + BLCKSZ - Insert->currpos;
if (freespace < SizeOfXLogRecord)
{
curridx = NextBufIdx(Insert->curridx);
if (XLByteLE(XLogCtl->xlblocks[curridx], LgwrResult.Write))
InitXLBuffer(curridx);
- else
+ else
GetFreeXLBuffer();
freespace = BLCKSZ - SizeOfXLogPHD;
}
else
curridx = Insert->curridx;
checkPoint.redo.xlogid = XLogCtl->xlblocks[curridx].xlogid;
- checkPoint.redo.xrecoff = XLogCtl->xlblocks[curridx].xrecoff - BLCKSZ +
- Insert->currpos - ((char*) Insert->currpage);
+ checkPoint.redo.xrecoff = XLogCtl->xlblocks[curridx].xrecoff - BLCKSZ +
+ Insert->currpos - ((char *) Insert->currpage);
S_UNLOCK(&(XLogCtl->insert_lck));
SpinAcquire(XidGenLockId);
@@ -1563,14 +1571,14 @@ CreateCheckPoint(bool shutdown)
if (shutdown && checkPoint.undo.xrecoff != 0)
elog(STOP, "Active transaction while data base is shutting down");
- recptr = XLogInsert(RM_XLOG_ID, (char*)&checkPoint, sizeof(checkPoint), NULL, 0);
+ recptr = XLogInsert(RM_XLOG_ID, (char *) &checkPoint, sizeof(checkPoint), NULL, 0);
if (shutdown && !XLByteEQ(checkPoint.redo, MyLastRecPtr))
elog(STOP, "XLog concurrent activity while data base is shutting down");
XLogFlush(recptr);
-#endif /* NOT_USED */
+#endif /* NOT_USED */
SpinAcquire(ControlFileLockId);
if (shutdown)