author     Bruce Momjian    2014-05-06 16:12:18 +0000
committer  Bruce Momjian    2014-05-06 16:12:18 +0000
commit     0a7832005792fa6dad171f9cadb8d587fe0dd800 (patch)
tree       365cfc42c521a52607e41394b08ef44d338d8fc1 /src/backend
parent     fb85cd4320414c3f6e9c8bc69ec944200ae1e493 (diff)
pgindent run for 9.4
This includes removing tabs after periods in C comments. The same change was
applied to the back branches, so it should not affect backpatching.
Diffstat (limited to 'src/backend')
423 files changed, 5241 insertions, 5051 deletions
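
As an illustration (ours, not part of the commit), most of the comment hunks below amount to pgindent replacing a tab that follows a sentence-ending period in a C comment with two spaces; <TAB> stands in for a literal tab character:

    /* before: ...put into a tuple but never sent to disk.<TAB>Hopefully there */
    /* after:  ...put into a tuple but never sent to disk.  Hopefully there */
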
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index c64ede9dac5..009ebe7a1cb 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -21,7 +21,7 @@ * tuptoaster.c. * * This change will break any code that assumes it needn't detoast values - * that have been put into a tuple but never sent to disk. Hopefully there + * that have been put into a tuple but never sent to disk. Hopefully there * are few such places. * * Varlenas still have alignment 'i' (or 'd') in pg_type/pg_attribute, since @@ -387,7 +387,7 @@ nocachegetattr(HeapTuple tuple, /* * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the + * target. If there aren't any, it's safe to cheaply initialize the * cached offsets for these attrs. */ if (HeapTupleHasVarWidth(tuple)) @@ -454,7 +454,7 @@ nocachegetattr(HeapTuple tuple, * * Note - This loop is a little tricky. For each non-null attribute, * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no + * then advance over the attr based on its length. Nulls have no * storage and no alignment padding either. We can use/set * attcacheoff until we reach either a null or a var-width attribute. */ @@ -549,7 +549,7 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) /* * cmin and cmax are now both aliases for the same field, which - * can in fact also be a combo command id. XXX perhaps we should + * can in fact also be a combo command id. XXX perhaps we should * return the "real" cmin or cmax if possible, that is if we are * inside the originating transaction? */ @@ -709,7 +709,7 @@ heap_form_tuple(TupleDesc tupleDescriptor, len += data_len; /* - * Allocate and zero the space needed. Note that the tuple body and + * Allocate and zero the space needed. Note that the tuple body and * HeapTupleData management structure are allocated in one chunk. */ tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len); diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index 7da10e9a74a..5fd400990b7 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -71,7 +71,7 @@ index_form_tuple(TupleDesc tupleDescriptor, /* * If value is stored EXTERNAL, must fetch it so we are not depending - * on outside storage. This should be improved someday. + * on outside storage. This should be improved someday. */ if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i]))) { @@ -280,7 +280,7 @@ nocache_index_getattr(IndexTuple tup, /* * Otherwise, check for non-fixed-length attrs up to and including - * target. If there aren't any, it's safe to cheaply initialize the + * target. If there aren't any, it's safe to cheaply initialize the * cached offsets for these attrs. */ if (IndexTupleHasVarwidths(tup)) @@ -347,7 +347,7 @@ nocache_index_getattr(IndexTuple tup, * * Note - This loop is a little tricky. For each non-null attribute, * we have to first account for alignment padding before the attr, - * then advance over the attr based on its length. Nulls have no + * then advance over the attr based on its length. Nulls have no * storage and no alignment padding either. We can use/set * attcacheoff until we reach either a null or a var-width attribute. 
*/ diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index af59aa1a406..c7fa727485c 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -182,7 +182,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo) * or some similar function; it does not contain a full set of fields. * The targetlist will be NIL when executing a utility function that does * not have a plan. If the targetlist isn't NIL then it is a Query node's - * targetlist; it is up to us to ignore resjunk columns in it. The formats[] + * targetlist; it is up to us to ignore resjunk columns in it. The formats[] * array pointer might be NULL (if we are doing Describe on a prepared stmt); * send zeroes for the format codes in that case. */ diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 530a1aee7bb..522b671993e 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -540,7 +540,7 @@ add_real_reloption(bits32 kinds, char *name, char *desc, double default_val, * Add a new string reloption * * "validator" is an optional function pointer that can be used to test the - * validity of the values. It must elog(ERROR) when the argument string is + * validity of the values. It must elog(ERROR) when the argument string is * not acceptable for the variable. Note that the default value must pass * the validation. */ @@ -868,7 +868,7 @@ extractRelOptions(HeapTuple tuple, TupleDesc tupdesc, Oid amoptions) * is returned. * * Note: values of type int, bool and real are allocated as part of the - * returned array. Values of type string are allocated separately and must + * returned array. Values of type string are allocated separately and must * be freed by the caller. */ relopt_value * @@ -1205,7 +1205,7 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) {"check_option", RELOPT_TYPE_STRING, offsetof(StdRdOptions, check_option_offset)}, {"user_catalog_table", RELOPT_TYPE_BOOL, - offsetof(StdRdOptions, user_catalog_table)} + offsetof(StdRdOptions, user_catalog_table)} }; options = parseRelOptions(reloptions, validate, kind, &numoptions); diff --git a/src/backend/access/common/tupconvert.c b/src/backend/access/common/tupconvert.c index 1b6c6d957c9..2e48b32ba3b 100644 --- a/src/backend/access/common/tupconvert.c +++ b/src/backend/access/common/tupconvert.c @@ -5,7 +5,7 @@ * * These functions provide conversion between rowtypes that are logically * equivalent but might have columns in a different order or different sets - * of dropped columns. There is some overlap of functionality with the + * of dropped columns. There is some overlap of functionality with the * executor's "junkfilter" routines, but these functions work on bare * HeapTuples rather than TupleTableSlots. * diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 74cfb6499a5..f3b36893f78 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -581,7 +581,7 @@ TupleDescInitEntryCollation(TupleDesc desc, * Given a relation schema (list of ColumnDef nodes), build a TupleDesc. * * Note: the default assumption is no OIDs; caller may modify the returned - * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in + * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in * later on. 
*/ TupleDesc diff --git a/src/backend/access/gin/ginarrayproc.c b/src/backend/access/gin/ginarrayproc.c index 32dbed68c77..66cea28113a 100644 --- a/src/backend/access/gin/ginarrayproc.c +++ b/src/backend/access/gin/ginarrayproc.c @@ -197,7 +197,7 @@ ginarrayconsistent(PG_FUNCTION_ARGS) /* * Must have all elements in check[] true; no discrimination - * against nulls here. This is because array_contain_compare and + * against nulls here. This is because array_contain_compare and * array_eq handle nulls differently ... */ res = true; @@ -279,9 +279,10 @@ ginarraytriconsistent(PG_FUNCTION_ARGS) res = GIN_MAYBE; break; case GinEqualStrategy: + /* * Must have all elements in check[] true; no discrimination - * against nulls here. This is because array_contain_compare and + * against nulls here. This is because array_contain_compare and * array_eq handle nulls differently ... */ res = GIN_MAYBE; diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c index 9b0f82fc904..27f88e0eb21 100644 --- a/src/backend/access/gin/ginbtree.c +++ b/src/backend/access/gin/ginbtree.c @@ -251,6 +251,7 @@ ginFindParents(GinBtree btree, GinBtreeStack *stack) Assert(blkno != btree->rootBlkno); ptr->blkno = blkno; ptr->buffer = buffer; + /* * parent may be wrong, but if so, the ginFinishSplit call will * recurse to call ginFindParents again to fix it. @@ -328,7 +329,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, GinPlaceToPageRC rc; uint16 xlflags = 0; Page childpage = NULL; - Page newlpage = NULL, newrpage = NULL; + Page newlpage = NULL, + newrpage = NULL; if (GinPageIsData(page)) xlflags |= GIN_INSERT_ISDATA; @@ -346,8 +348,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, } /* - * Try to put the incoming tuple on the page. placeToPage will decide - * if the page needs to be split. + * Try to put the incoming tuple on the page. placeToPage will decide if + * the page needs to be split. */ rc = btree->placeToPage(btree, stack->buffer, stack, insertdata, updateblkno, @@ -371,7 +373,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, XLogRecPtr recptr; XLogRecData rdata[3]; ginxlogInsert xlrec; - BlockIdData childblknos[2]; + BlockIdData childblknos[2]; xlrec.node = btree->index->rd_node; xlrec.blkno = BufferGetBlockNumber(stack->buffer); @@ -449,7 +451,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, data.flags = xlflags; if (childbuf != InvalidBuffer) { - Page childpage = BufferGetPage(childbuf); + Page childpage = BufferGetPage(childbuf); + GinPageGetOpaque(childpage)->flags &= ~GIN_INCOMPLETE_SPLIT; data.leftChildBlkno = BufferGetBlockNumber(childbuf); @@ -505,8 +508,8 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, /* * Construct a new root page containing downlinks to the new left - * and right pages. (do this in a temporary copy first rather - * than overwriting the original page directly, so that we can still + * and right pages. (do this in a temporary copy first rather than + * overwriting the original page directly, so that we can still * abort gracefully if this fails.) 
*/ newrootpg = PageGetTempPage(newrpage); @@ -604,7 +607,7 @@ ginPlaceToPage(GinBtree btree, GinBtreeStack *stack, else { elog(ERROR, "unknown return code from GIN placeToPage method: %d", rc); - return false; /* keep compiler quiet */ + return false; /* keep compiler quiet */ } } @@ -627,8 +630,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack, bool first = true; /* - * freestack == false when we encounter an incompletely split page during a - * scan, while freestack == true is used in the normal scenario that a + * freestack == false when we encounter an incompletely split page during + * a scan, while freestack == true is used in the normal scenario that a * split is finished right after the initial insert. */ if (!freestack) @@ -650,8 +653,8 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack, * then continue with the current one. * * Note: we have to finish *all* incomplete splits we encounter, even - * if we have to move right. Otherwise we might choose as the target - * a page that has no downlink in the parent, and splitting it further + * if we have to move right. Otherwise we might choose as the target a + * page that has no downlink in the parent, and splitting it further * would fail. */ if (GinPageIsIncompleteSplit(BufferGetPage(parent->buffer))) diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c index 9f3009b5894..3af027187ac 100644 --- a/src/backend/access/gin/ginbulk.c +++ b/src/backend/access/gin/ginbulk.c @@ -187,7 +187,7 @@ ginInsertBAEntry(BuildAccumulator *accum, * Since the entries are being inserted into a balanced binary tree, you * might think that the order of insertion wouldn't be critical, but it turns * out that inserting the entries in sorted order results in a lot of - * rebalancing operations and is slow. To prevent this, we attempt to insert + * rebalancing operations and is slow. To prevent this, we attempt to insert * the nodes in an order that will produce a nearly-balanced tree if the input * is in fact sorted. * diff --git a/src/backend/access/gin/gindatapage.c b/src/backend/access/gin/gindatapage.c index c11ed858833..272a9ca7c09 100644 --- a/src/backend/access/gin/gindatapage.c +++ b/src/backend/access/gin/gindatapage.c @@ -49,8 +49,8 @@ typedef struct dlist_head segments; /* a list of leafSegmentInfos */ /* - * The following fields represent how the segments are split across - * pages, if a page split is required. Filled in by leafRepackItems. + * The following fields represent how the segments are split across pages, + * if a page split is required. Filled in by leafRepackItems. */ dlist_node *lastleft; /* last segment on left page */ int lsize; /* total size on left page */ @@ -61,7 +61,7 @@ typedef struct typedef struct { - dlist_node node; /* linked list pointers */ + dlist_node node; /* linked list pointers */ /*------------- * 'action' indicates the status of this in-memory segment, compared to @@ -83,9 +83,9 @@ typedef struct int nmodifieditems; /* - * The following fields represent the items in this segment. If 'items' - * is not NULL, it contains a palloc'd array of the itemsin this segment. - * If 'seg' is not NULL, it contains the items in an already-compressed + * The following fields represent the items in this segment. If 'items' is + * not NULL, it contains a palloc'd array of the itemsin this segment. If + * 'seg' is not NULL, it contains the items in an already-compressed * format. It can point to an on-disk page (!modified), or a palloc'd * segment in memory. 
If both are set, they must represent the same items. */ @@ -386,7 +386,7 @@ GinDataPageAddPostingItem(Page page, PostingItem *data, OffsetNumber offset) if (offset != maxoff + 1) memmove(ptr + sizeof(PostingItem), ptr, - (maxoff - offset + 1) * sizeof(PostingItem)); + (maxoff - offset + 1) *sizeof(PostingItem)); } memcpy(ptr, data, sizeof(PostingItem)); @@ -436,8 +436,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, int maxitems = items->nitem - items->curitem; Page page = BufferGetPage(buf); int i; - ItemPointerData rbound; - ItemPointerData lbound; + ItemPointerData rbound; + ItemPointerData lbound; bool needsplit; bool append; int segsize; @@ -451,7 +451,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, Assert(GinPageIsData(page)); - rbound = *GinDataPageGetRightBound(page); + rbound = *GinDataPageGetRightBound(page); /* * Count how many of the new items belong to this page. @@ -464,8 +464,8 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, { /* * This needs to go to some other location in the tree. (The - * caller should've chosen the insert location so that at least - * the first item goes here.) + * caller should've chosen the insert location so that at + * least the first item goes here.) */ Assert(i > 0); break; @@ -553,7 +553,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, /* Add the new items to the segments */ if (!addItemsToLeaf(leaf, newItems, maxitems)) { - /* all items were duplicates, we have nothing to do */ + /* all items were duplicates, we have nothing to do */ items->curitem += maxitems; MemoryContextSwitchTo(oldCxt); @@ -680,7 +680,7 @@ dataPlaceToPageLeaf(GinBtree btree, Buffer buf, GinBtreeStack *stack, Assert(GinPageRightMost(page) || ginCompareItemPointers(GinDataPageGetRightBound(*newlpage), - GinDataPageGetRightBound(*newrpage)) < 0); + GinDataPageGetRightBound(*newrpage)) < 0); if (append) elog(DEBUG2, "appended %d items to block %u; split %d/%d (%d to go)", @@ -769,16 +769,16 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) * We don't try to re-encode the segments here, even though some of them * might be really small now that we've removed some items from them. It * seems like a waste of effort, as there isn't really any benefit from - * larger segments per se; larger segments only help to pack more items - * in the same space. We might as well delay doing that until the next + * larger segments per se; larger segments only help to pack more items in + * the same space. We might as well delay doing that until the next * insertion, which will need to re-encode at least part of the page * anyway. * - * Also note if the page was in uncompressed, pre-9.4 format before, it - * is now represented as one huge segment that contains all the items. - * It might make sense to split that, to speed up random access, but we - * don't bother. You'll have to REINDEX anyway if you want the full gain - * of the new tighter index format. + * Also note if the page was in uncompressed, pre-9.4 format before, it is + * now represented as one huge segment that contains all the items. It + * might make sense to split that, to speed up random access, but we don't + * bother. You'll have to REINDEX anyway if you want the full gain of the + * new tighter index format. 
*/ if (removedsomething) { @@ -795,6 +795,7 @@ ginVacuumPostingTreeLeaf(Relation indexrel, Buffer buffer, GinVacuumState *gvs) { leafSegmentInfo *seginfo = dlist_container(leafSegmentInfo, node, iter.cur); + if (seginfo->action != GIN_SEGMENT_UNMODIFIED) modified = true; if (modified && seginfo->action != GIN_SEGMENT_DELETE) @@ -862,10 +863,11 @@ constructLeafRecompressWALData(Buffer buf, disassembledLeaf *leaf) } walbufbegin = palloc( - sizeof(ginxlogRecompressDataLeaf) + - BLCKSZ + /* max size needed to hold the segment data */ - nmodified * 2 + /* (segno + action) per action */ - sizeof(XLogRecData)); + sizeof(ginxlogRecompressDataLeaf) + + BLCKSZ + /* max size needed to hold the segment + * data */ + nmodified * 2 + /* (segno + action) per action */ + sizeof(XLogRecData)); walbufend = walbufbegin; recompress_xlog = (ginxlogRecompressDataLeaf *) walbufend; @@ -965,9 +967,9 @@ dataPlaceToPageLeafRecompress(Buffer buf, disassembledLeaf *leaf) int segsize; /* - * If the page was in pre-9.4 format before, convert the header, and - * force all segments to be copied to the page whether they were modified - * or not. + * If the page was in pre-9.4 format before, convert the header, and force + * all segments to be copied to the page whether they were modified or + * not. */ if (!GinPageIsCompressed(page)) { @@ -1022,6 +1024,7 @@ dataPlaceToPageLeafSplit(Buffer buf, disassembledLeaf *leaf, dlist_node *node; dlist_node *firstright; leafSegmentInfo *seginfo; + /* these must be static so they can be returned to caller */ static ginxlogSplitDataLeaf split_xlog; static XLogRecData rdata[3]; @@ -1121,6 +1124,7 @@ dataPlaceToPageInternal(GinBtree btree, Buffer buf, GinBtreeStack *stack, Page page = BufferGetPage(buf); OffsetNumber off = stack->off; PostingItem *pitem; + /* these must be static so they can be returned to caller */ static XLogRecData rdata; static ginxlogInsertDataInternal data; @@ -1198,7 +1202,7 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, int nrightitems; Size pageSize = PageGetPageSize(oldpage); ItemPointerData oldbound = *GinDataPageGetRightBound(oldpage); - ItemPointer bound; + ItemPointer bound; Page lpage; Page rpage; OffsetNumber separator; @@ -1216,8 +1220,8 @@ dataSplitPageInternal(GinBtree btree, Buffer origbuf, *prdata = rdata; /* - * First construct a new list of PostingItems, which includes all the - * old items, and the new item. + * First construct a new list of PostingItems, which includes all the old + * items, and the new item. */ memcpy(allitems, GinDataPageGetPostingItem(oldpage, FirstOffsetNumber), (off - 1) * sizeof(PostingItem)); @@ -1402,8 +1406,8 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems) leafSegmentInfo *newseg; /* - * If the page is completely empty, just construct one new segment to - * hold all the new items. + * If the page is completely empty, just construct one new segment to hold + * all the new items. 
*/ if (dlist_is_empty(&leaf->segments)) { @@ -1418,9 +1422,9 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems) dlist_foreach(iter, &leaf->segments) { - leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur); + leafSegmentInfo *cur = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, iter.cur); int nthis; - ItemPointer tmpitems; + ItemPointer tmpitems; int ntmpitems; /* @@ -1434,7 +1438,7 @@ addItemsToLeaf(disassembledLeaf *leaf, ItemPointer newItems, int nNewItems) ItemPointerData next_first; next = (leafSegmentInfo *) dlist_container(leafSegmentInfo, node, - dlist_next_node(&leaf->segments, iter.cur)); + dlist_next_node(&leaf->segments, iter.cur)); if (next->items) next_first = next->items[0]; else @@ -1556,27 +1560,27 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) if (seginfo->seg == NULL) { if (seginfo->nitems > GinPostingListSegmentMaxSize) - npacked = 0; /* no chance that it would fit. */ + npacked = 0; /* no chance that it would fit. */ else { seginfo->seg = ginCompressPostingList(seginfo->items, seginfo->nitems, - GinPostingListSegmentMaxSize, + GinPostingListSegmentMaxSize, &npacked); } if (npacked != seginfo->nitems) { /* - * Too large. Compress again to the target size, and create - * a new segment to represent the remaining items. The new - * segment is inserted after this one, so it will be - * processed in the next iteration of this loop. + * Too large. Compress again to the target size, and + * create a new segment to represent the remaining items. + * The new segment is inserted after this one, so it will + * be processed in the next iteration of this loop. */ if (seginfo->seg) pfree(seginfo->seg); seginfo->seg = ginCompressPostingList(seginfo->items, seginfo->nitems, - GinPostingListSegmentTargetSize, + GinPostingListSegmentTargetSize, &npacked); if (seginfo->action != GIN_SEGMENT_INSERT) seginfo->action = GIN_SEGMENT_REPLACE; @@ -1596,7 +1600,7 @@ leafRepackItems(disassembledLeaf *leaf, ItemPointer remaining) */ if (SizeOfGinPostingList(seginfo->seg) < GinPostingListSegmentMinSize && next_node) { - int nmerged; + int nmerged; nextseg = dlist_container(leafSegmentInfo, node, next_node); @@ -1741,8 +1745,8 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems, GinPageGetOpaque(tmppage)->rightlink = InvalidBlockNumber; /* - * Write as many of the items to the root page as fit. In segments - * of max GinPostingListSegmentMaxSize bytes each. + * Write as many of the items to the root page as fit. In segments of max + * GinPostingListSegmentMaxSize bytes each. */ nrootitems = 0; rootsize = 0; diff --git a/src/backend/access/gin/ginentrypage.c b/src/backend/access/gin/ginentrypage.c index 4291bab63be..412f90da4db 100644 --- a/src/backend/access/gin/ginentrypage.c +++ b/src/backend/access/gin/ginentrypage.c @@ -135,7 +135,8 @@ GinFormTuple(GinState *ginstate, */ if (data) { - char *ptr = GinGetPosting(itup); + char *ptr = GinGetPosting(itup); + memcpy(ptr, data, dataSize); } @@ -162,7 +163,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup, { Pointer ptr = GinGetPosting(itup); int nipd = GinGetNPosting(itup); - ItemPointer ipd; + ItemPointer ipd; int ndecoded; if (GinItupIsCompressed(itup)) @@ -192,7 +193,7 @@ ginReadTuple(GinState *ginstate, OffsetNumber attnum, IndexTuple itup, * Form a non-leaf entry tuple by copying the key data from the given tuple, * which can be either a leaf or non-leaf entry tuple. 
* - * Any posting list in the source tuple is not copied. The specified child + * Any posting list in the source tuple is not copied. The specified child * block number is inserted into t_tid. */ static IndexTuple diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c index a16c2140c22..09c3e39bf3b 100644 --- a/src/backend/access/gin/ginfast.c +++ b/src/backend/access/gin/ginfast.c @@ -440,7 +440,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector) * Create temporary index tuples for a single indexable item (one index column * for the heap tuple specified by ht_ctid), and append them to the array * in *collector. They will subsequently be written out using - * ginHeapTupleFastInsert. Note that to guarantee consistent state, all + * ginHeapTupleFastInsert. Note that to guarantee consistent state, all * temp tuples for a given heap tuple must be written in one call to * ginHeapTupleFastInsert. */ @@ -707,7 +707,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka, * * This can be called concurrently by multiple backends, so it must cope. * On first glance it looks completely not concurrent-safe and not crash-safe - * either. The reason it's okay is that multiple insertion of the same entry + * either. The reason it's okay is that multiple insertion of the same entry * is detected and treated as a no-op by gininsert.c. If we crash after * posting entries to the main index and before removing them from the * pending list, it's okay because when we redo the posting later on, nothing @@ -761,7 +761,7 @@ ginInsertCleanup(GinState *ginstate, LockBuffer(metabuffer, GIN_UNLOCK); /* - * Initialize. All temporary space will be in opCtx + * Initialize. All temporary space will be in opCtx */ opCtx = AllocSetContextCreate(CurrentMemoryContext, "GIN insert cleanup temporary context", @@ -855,7 +855,7 @@ ginInsertCleanup(GinState *ginstate, /* * While we left the page unlocked, more stuff might have gotten - * added to it. If so, process those entries immediately. There + * added to it. If so, process those entries immediately. There * shouldn't be very many, so we don't worry about the fact that * we're doing this with exclusive lock. Insertion algorithm * guarantees that inserted row(s) will not continue on next page. diff --git a/src/backend/access/gin/ginget.c b/src/backend/access/gin/ginget.c index fda19cf4e69..271f09901b9 100644 --- a/src/backend/access/gin/ginget.c +++ b/src/backend/access/gin/ginget.c @@ -85,7 +85,8 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, page = BufferGetPage(buffer); if ((GinPageGetOpaque(page)->flags & GIN_DELETED) == 0) { - int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap); + int n = GinDataLeafPageGetItemsToTbm(page, scanEntry->matchBitmap); + scanEntry->predictNumberResult += n; } @@ -100,7 +101,7 @@ scanPostingTree(Relation index, GinScanEntry scanEntry, /* * Collects TIDs into scanEntry->matchBitmap for all heap tuples that - * match the search entry. This supports three different match modes: + * match the search entry. This supports three different match modes: * * 1. Partial-match support: scan from current point until the * comparePartialFn says we're done. @@ -196,7 +197,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack, /* * In ALL mode, we are not interested in null items, so we can * stop if we get to a null-item placeholder (which will be the - * last entry for a given attnum). We do want to include NULL_KEY + * last entry for a given attnum). 
We do want to include NULL_KEY * and EMPTY_ITEM entries, though. */ if (icategory == GIN_CAT_NULL_ITEM) @@ -407,7 +408,7 @@ restartScanEntry: else if (GinGetNPosting(itup) > 0) { entry->list = ginReadTuple(ginstate, entry->attnum, itup, - &entry->nlist); + &entry->nlist); entry->predictNumberResult = entry->nlist; entry->isFinished = FALSE; @@ -463,11 +464,11 @@ startScanKey(GinState *ginstate, GinScanOpaque so, GinScanKey key) * considerably, if the frequent term can be put in the additional set. * * There can be many legal ways to divide them entries into these two - * sets. A conservative division is to just put everything in the - * required set, but the more you can put in the additional set, the more - * you can skip during the scan. To maximize skipping, we try to put as - * many frequent items as possible into additional, and less frequent - * ones into required. To do that, sort the entries by frequency + * sets. A conservative division is to just put everything in the required + * set, but the more you can put in the additional set, the more you can + * skip during the scan. To maximize skipping, we try to put as many + * frequent items as possible into additional, and less frequent ones into + * required. To do that, sort the entries by frequency * (predictNumberResult), and put entries into the required set in that * order, until the consistent function says that none of the remaining * entries can form a match, without any items from the required set. The @@ -635,8 +636,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan if (stepright) { /* - * We've processed all the entries on this page. If it was the last - * page in the tree, we're done. + * We've processed all the entries on this page. If it was the + * last page in the tree, we're done. */ if (GinPageRightMost(page)) { @@ -647,8 +648,8 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan } /* - * Step to next page, following the right link. then find the first - * ItemPointer greater than advancePast. + * Step to next page, following the right link. then find the + * first ItemPointer greater than advancePast. */ entry->buffer = ginStepRight(entry->buffer, ginstate->index, @@ -658,7 +659,7 @@ entryLoadMoreItems(GinState *ginstate, GinScanEntry entry, ItemPointerData advan stepright = true; if (GinPageGetOpaque(page)->flags & GIN_DELETED) - continue; /* page was deleted by concurrent vacuum */ + continue; /* page was deleted by concurrent vacuum */ /* * The first item > advancePast might not be on this page, but @@ -781,6 +782,7 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, gotitem = true; break; } + /* * Not a lossy page. Skip over any offsets <= advancePast, and * return that. @@ -788,8 +790,9 @@ entryGetItem(GinState *ginstate, GinScanEntry entry, if (entry->matchResult->blockno == advancePastBlk) { /* - * First, do a quick check against the last offset on the page. - * If that's > advancePast, so are all the other offsets. + * First, do a quick check against the last offset on the + * page. If that's > advancePast, so are all the other + * offsets. */ if (entry->matchResult->offsets[entry->matchResult->ntuples - 1] <= advancePastOff) { @@ -890,8 +893,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, /* * We might have already tested this item; if so, no need to repeat work. - * (Note: the ">" case can happen, if advancePast is exact but we previously - * had to set curItem to a lossy-page pointer.) 
+ * (Note: the ">" case can happen, if advancePast is exact but we + * previously had to set curItem to a lossy-page pointer.) */ if (ginCompareItemPointers(&key->curItem, &advancePast) > 0) return; @@ -942,8 +945,8 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, /* * Ok, we now know that there are no matches < minItem. * - * If minItem is lossy, it means that there were no exact items on - * the page among requiredEntries, because lossy pointers sort after exact + * If minItem is lossy, it means that there were no exact items on the + * page among requiredEntries, because lossy pointers sort after exact * items. However, there might be exact items for the same page among * additionalEntries, so we mustn't advance past them. */ @@ -1085,6 +1088,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, if (entry->isFinished) key->entryRes[i] = GIN_FALSE; #if 0 + /* * This case can't currently happen, because we loaded all the entries * for this item earlier. @@ -1119,6 +1123,7 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, break; default: + /* * the 'default' case shouldn't happen, but if the consistent * function returns something bogus, this is the safe result @@ -1129,11 +1134,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key, } /* - * We have a tuple, and we know if it matches or not. If it's a - * non-match, we could continue to find the next matching tuple, but - * let's break out and give scanGetItem a chance to advance the other - * keys. They might be able to skip past to a much higher TID, allowing - * us to save work. + * We have a tuple, and we know if it matches or not. If it's a non-match, + * we could continue to find the next matching tuple, but let's break out + * and give scanGetItem a chance to advance the other keys. They might be + * able to skip past to a much higher TID, allowing us to save work. */ /* clean up after consistentFn calls */ @@ -1165,14 +1169,14 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, * matching item. * * This logic works only if a keyGetItem stream can never contain both - * exact and lossy pointers for the same page. Else we could have a + * exact and lossy pointers for the same page. Else we could have a * case like * * stream 1 stream 2 - * ... ... + * ... ... * 42/6 42/7 * 50/1 42/0xffff - * ... ... + * ... ... * * We would conclude that 42/6 is not a match and advance stream 1, * thus never detecting the match to the lossy pointer in stream 2. @@ -1205,12 +1209,11 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, } /* - * It's a match. We can conclude that nothing < matches, so - * the other key streams can skip to this item. + * It's a match. We can conclude that nothing < matches, so the + * other key streams can skip to this item. * - * Beware of lossy pointers, though; from a lossy pointer, we - * can only conclude that nothing smaller than this *block* - * matches. + * Beware of lossy pointers, though; from a lossy pointer, we can + * only conclude that nothing smaller than this *block* matches. */ if (ItemPointerIsLossyPage(&key->curItem)) { @@ -1229,8 +1232,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, } /* - * If this is the first key, remember this location as a - * potential match, and proceed to check the rest of the keys. + * If this is the first key, remember this location as a potential + * match, and proceed to check the rest of the keys. 
* * Otherwise, check if this is the same item that we checked the * previous keys for (or a lossy pointer for the same page). If @@ -1247,7 +1250,7 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, if (ItemPointerIsLossyPage(&key->curItem) || ItemPointerIsLossyPage(item)) { - Assert (GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item)); + Assert(GinItemPointerGetBlockNumber(&key->curItem) >= GinItemPointerGetBlockNumber(item)); match = (GinItemPointerGetBlockNumber(&key->curItem) == GinItemPointerGetBlockNumber(item)); } @@ -1264,8 +1267,8 @@ scanGetItem(IndexScanDesc scan, ItemPointerData advancePast, /* * Now *item contains the first ItemPointer after previous result that - * satisfied all the keys for that exact TID, or a lossy reference - * to the same page. + * satisfied all the keys for that exact TID, or a lossy reference to the + * same page. * * We must return recheck = true if any of the keys are marked recheck. */ @@ -1776,10 +1779,10 @@ gingetbitmap(PG_FUNCTION_ARGS) /* * First, scan the pending list and collect any matching entries into the - * bitmap. After we scan a pending item, some other backend could post it + * bitmap. After we scan a pending item, some other backend could post it * into the main index, and so we might visit it a second time during the * main scan. This is okay because we'll just re-set the same bit in the - * bitmap. (The possibility of duplicate visits is a major reason why GIN + * bitmap. (The possibility of duplicate visits is a major reason why GIN * can't support the amgettuple API, however.) Note that it would not do * to scan the main index before the pending list, since concurrent * cleanup could then make us miss entries entirely. diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c index 3bafb6471b3..b27cae3aab2 100644 --- a/src/backend/access/gin/gininsert.c +++ b/src/backend/access/gin/gininsert.c @@ -40,7 +40,7 @@ typedef struct * Adds array of item pointers to tuple's posting list, or * creates posting tree and tuple pointing to tree in case * of not enough space. Max size of tuple is defined in - * GinFormTuple(). Returns a new, modified index tuple. + * GinFormTuple(). Returns a new, modified index tuple. * items[] must be in sorted order with no duplicates. */ static IndexTuple diff --git a/src/backend/access/gin/ginlogic.c b/src/backend/access/gin/ginlogic.c index 167d25ea5c7..052abd2bd8e 100644 --- a/src/backend/access/gin/ginlogic.c +++ b/src/backend/access/gin/ginlogic.c @@ -47,7 +47,7 @@ * Maximum number of MAYBE inputs that shimTriConsistentFn will try to * resolve by calling all combinations. */ -#define MAX_MAYBE_ENTRIES 4 +#define MAX_MAYBE_ENTRIES 4 /* * Dummy consistent functions for an EVERYTHING key. Just claim it matches. 
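The ginlogic.c hunks just below revolve around 9.4's split between boolean and ternary ("tri-state") consistent functions: directTriConsistentFn calls an opclass-supplied ternary function directly, while shimTriConsistentFn emulates one on top of a boolean-only opclass by trying every TRUE/FALSE combination for the MAYBE inputs, capped at MAX_MAYBE_ENTRIES (4). A minimal sketch of that enumeration idea, using hypothetical names (resolve_maybe, bool_consistent) rather than PostgreSQL's actual signatures:

    #include <stdbool.h>

    typedef enum { TV_FALSE, TV_TRUE, TV_MAYBE } TernaryValue;

    #define MAX_MAYBE 4         /* mirrors MAX_MAYBE_ENTRIES in ginlogic.c */

    /* Resolve MAYBE inputs by enumerating every TRUE/FALSE assignment and
     * combining the boolean results; report MAYBE as soon as two
     * assignments disagree, or when there are too many MAYBEs to try.
     * Assumes n <= 32 entries. */
    static TernaryValue
    resolve_maybe(const TernaryValue *in, int n,
                  bool (*bool_consistent) (const bool *vals, int n))
    {
        bool        vals[32];
        int         maybe_idx[MAX_MAYBE];
        int         nmaybe = 0;
        int         first = -1;
        unsigned    mask;
        int         i;

        for (i = 0; i < n; i++)
        {
            if (in[i] == TV_MAYBE)
            {
                if (nmaybe == MAX_MAYBE)
                    return TV_MAYBE;    /* too expensive to enumerate */
                maybe_idx[nmaybe++] = i;
            }
            vals[i] = (in[i] == TV_TRUE);
        }

        /* try all 2^nmaybe assignments (runs once when nmaybe == 0) */
        for (mask = 0; mask < (1u << nmaybe); mask++)
        {
            int         res;

            for (i = 0; i < nmaybe; i++)
                vals[maybe_idx[i]] = ((mask >> i) & 1) != 0;
            res = bool_consistent(vals, n) ? 1 : 0;
            if (first < 0)
                first = res;
            else if (res != first)
                return TV_MAYBE;
        }
        return first ? TV_TRUE : TV_FALSE;
    }

The 2^n blow-up in calls is exactly why the real shim gives up and returns MAYBE beyond four unresolved inputs.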
@@ -95,14 +95,14 @@ static GinTernaryValue directTriConsistentFn(GinScanKey key) { return DatumGetGinTernaryValue(FunctionCall7Coll( - key->triConsistentFmgrInfo, - key->collation, - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), - PointerGetDatum(key->queryValues), + key->triConsistentFmgrInfo, + key->collation, + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(key->queryValues), PointerGetDatum(key->queryCategories))); } @@ -115,15 +115,16 @@ static bool shimBoolConsistentFn(GinScanKey key) { GinTernaryValue result; + result = DatumGetGinTernaryValue(FunctionCall7Coll( - key->triConsistentFmgrInfo, - key->collation, - PointerGetDatum(key->entryRes), - UInt16GetDatum(key->strategy), - key->query, - UInt32GetDatum(key->nuserentries), - PointerGetDatum(key->extra_data), - PointerGetDatum(key->queryValues), + key->triConsistentFmgrInfo, + key->collation, + PointerGetDatum(key->entryRes), + UInt16GetDatum(key->strategy), + key->query, + UInt32GetDatum(key->nuserentries), + PointerGetDatum(key->extra_data), + PointerGetDatum(key->queryValues), PointerGetDatum(key->queryCategories))); if (result == GIN_MAYBE) { @@ -240,8 +241,8 @@ ginInitConsistentFunction(GinState *ginstate, GinScanKey key) key->boolConsistentFn = shimBoolConsistentFn; if (OidIsValid(ginstate->triConsistentFn[key->attnum - 1].fn_oid)) - key->triConsistentFn = directTriConsistentFn; + key->triConsistentFn = directTriConsistentFn; else - key->triConsistentFn = shimTriConsistentFn; + key->triConsistentFn = shimTriConsistentFn; } } diff --git a/src/backend/access/gin/ginpostinglist.c b/src/backend/access/gin/ginpostinglist.c index 81bbb09c244..606a824f125 100644 --- a/src/backend/access/gin/ginpostinglist.c +++ b/src/backend/access/gin/ginpostinglist.c @@ -126,9 +126,9 @@ encode_varbyte(uint64 val, unsigned char **ptr) static uint64 decode_varbyte(unsigned char **ptr) { - uint64 val; + uint64 val; unsigned char *p = *ptr; - uint64 c; + uint64 c; c = *(p++); val = c & 0x7F; @@ -210,7 +210,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize, uint64 val = itemptr_to_uint64(&ipd[totalpacked]); uint64 delta = val - prev; - Assert (val > prev); + Assert(val > prev); if (endptr - ptr >= 6) encode_varbyte(delta, &ptr); @@ -225,7 +225,7 @@ ginCompressPostingList(const ItemPointer ipd, int nipd, int maxsize, encode_varbyte(delta, &p); if (p - buf > (endptr - ptr)) - break; /* output is full */ + break; /* output is full */ memcpy(ptr, buf, p - buf); ptr += (p - buf); @@ -286,7 +286,7 @@ ginPostingListDecode(GinPostingList *plist, int *ndecoded) ItemPointer ginPostingListDecodeAllSegments(GinPostingList *segment, int len, int *ndecoded_out) { - ItemPointer result; + ItemPointer result; int nallocated; uint64 val; char *endseg = ((char *) segment) + len; @@ -349,7 +349,7 @@ ginPostingListDecodeAllSegmentsToTbm(GinPostingList *ptr, int len, TIDBitmap *tbm) { int ndecoded; - ItemPointer items; + ItemPointer items; items = ginPostingListDecodeAllSegments(ptr, len, &ndecoded); tbm_add_tuples(tbm, items, ndecoded, false); @@ -374,8 +374,8 @@ ginMergeItemPointers(ItemPointerData *a, uint32 na, dst = (ItemPointer) palloc((na + nb) * sizeof(ItemPointerData)); /* - * If the argument arrays don't overlap, we can just append them to - * each other. 
+ * If the argument arrays don't overlap, we can just append them to each + * other. */ if (na == 0 || nb == 0 || ginCompareItemPointers(&a[na - 1], &b[0]) < 0) { diff --git a/src/backend/access/gin/ginscan.c b/src/backend/access/gin/ginscan.c index b19386e19ad..66c62b2e32a 100644 --- a/src/backend/access/gin/ginscan.c +++ b/src/backend/access/gin/ginscan.c @@ -389,7 +389,7 @@ ginNewScanKey(IndexScanDesc scan) /* * If the index is version 0, it may be missing null and placeholder * entries, which would render searches for nulls and full-index scans - * unreliable. Throw an error if so. + * unreliable. Throw an error if so. */ if (hasNullQuery && !so->isVoidRes) { diff --git a/src/backend/access/gin/ginutil.c b/src/backend/access/gin/ginutil.c index 4dadb50dcaa..3ca0b68434b 100644 --- a/src/backend/access/gin/ginutil.c +++ b/src/backend/access/gin/ginutil.c @@ -67,6 +67,7 @@ initGinState(GinState *state, Relation index) fmgr_info_copy(&(state->extractQueryFn[i]), index_getprocinfo(index, i + 1, GIN_EXTRACTQUERY_PROC), CurrentMemoryContext); + /* * Check opclass capability to do tri-state or binary logic consistent * check. @@ -74,14 +75,14 @@ initGinState(GinState *state, Relation index) if (index_getprocid(index, i + 1, GIN_TRICONSISTENT_PROC) != InvalidOid) { fmgr_info_copy(&(state->triConsistentFn[i]), - index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC), + index_getprocinfo(index, i + 1, GIN_TRICONSISTENT_PROC), CurrentMemoryContext); } if (index_getprocid(index, i + 1, GIN_CONSISTENT_PROC) != InvalidOid) { fmgr_info_copy(&(state->consistentFn[i]), - index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC), + index_getprocinfo(index, i + 1, GIN_CONSISTENT_PROC), CurrentMemoryContext); } @@ -458,7 +459,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum, * If there's more than one key, sort and unique-ify. * * XXX Using qsort here is notationally painful, and the overhead is - * pretty bad too. For small numbers of keys it'd likely be better to use + * pretty bad too. For small numbers of keys it'd likely be better to use * a simple insertion sort. */ if (*nentries > 1) diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index 72f734caf8d..af4d2714b5f 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -47,7 +47,7 @@ ginVacuumItemPointers(GinVacuumState *gvs, ItemPointerData *items, { int i, remaining = 0; - ItemPointer tmpitems = NULL; + ItemPointer tmpitems = NULL; /* * Iterate over TIDs array @@ -208,8 +208,8 @@ ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, } /* - * if we have root and there are empty pages in tree, then we don't release - * lock to go further processing and guarantee that tree is unused + * if we have root and there are empty pages in tree, then we don't + * release lock to go further processing and guarantee that tree is unused */ if (!(isRoot && hasVoidPage)) { @@ -236,7 +236,7 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn Buffer pBuffer; Page page, parentPage; - BlockNumber rightlink; + BlockNumber rightlink; /* * Lock the pages in the same order as an insertion would, to avoid @@ -302,11 +302,11 @@ ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkn data.rightLink = GinPageGetOpaque(page)->rightlink; /* - * We can't pass buffer_std = TRUE, because we didn't set pd_lower - * on pre-9.4 versions. 
The page might've been binary-upgraded from - * an older version, and hence not have pd_lower set correctly. - * Ditto for the left page, but removing the item from the parent - * updated its pd_lower, so we know that's OK at this point. + * We can't pass buffer_std = TRUE, because we didn't set pd_lower on + * pre-9.4 versions. The page might've been binary-upgraded from an + * older version, and hence not have pd_lower set correctly. Ditto for + * the left page, but removing the item from the parent updated its + * pd_lower, so we know that's OK at this point. */ rdata[0].buffer = dBuffer; rdata[0].buffer_std = FALSE; @@ -538,7 +538,8 @@ ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint3 } /* - * if we already created a temporary page, make changes in place + * if we already created a temporary page, make changes in + * place */ if (tmppage == origpage) { diff --git a/src/backend/access/gin/ginxlog.c b/src/backend/access/gin/ginxlog.c index d19389330c5..a8a917a9d0e 100644 --- a/src/backend/access/gin/ginxlog.c +++ b/src/backend/access/gin/ginxlog.c @@ -133,7 +133,7 @@ ginRedoInsertEntry(Buffer buffer, bool isLeaf, BlockNumber rightblkno, void *rda if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber) { RelFileNode node; - ForkNumber forknum; + ForkNumber forknum; BlockNumber blknum; BufferGetTag(buffer, &node, &forknum, &blknum); @@ -341,8 +341,8 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record) payload = XLogRecGetData(record) + sizeof(ginxlogInsert); /* - * First clear incomplete-split flag on child page if this finishes - * a split. + * First clear incomplete-split flag on child page if this finishes a + * split. */ if (!isLeaf) { @@ -472,8 +472,8 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) payload = XLogRecGetData(record) + sizeof(ginxlogSplit); /* - * First clear incomplete-split flag on child page if this finishes - * a split + * First clear incomplete-split flag on child page if this finishes a + * split */ if (!isLeaf) { @@ -522,7 +522,7 @@ ginRedoSplit(XLogRecPtr lsn, XLogRecord *record) if (isRoot) { - BlockNumber rootBlkno = data->rrlink; + BlockNumber rootBlkno = data->rrlink; Buffer rootBuf = XLogReadBuffer(data->node, rootBlkno, true); Page rootPage = BufferGetPage(rootBuf); @@ -711,9 +711,9 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record) Buffer buffer; /* - * Restore the metapage. This is essentially the same as a full-page image, - * so restore the metapage unconditionally without looking at the LSN, to - * avoid torn page hazards. + * Restore the metapage. This is essentially the same as a full-page + * image, so restore the metapage unconditionally without looking at the + * LSN, to avoid torn page hazards. */ metabuffer = XLogReadBuffer(data->node, GIN_METAPAGE_BLKNO, false); if (!BufferIsValid(metabuffer)) @@ -877,7 +877,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record) /* * In normal operation, shiftList() takes exclusive lock on all the - * pages-to-be-deleted simultaneously. During replay, however, it should + * pages-to-be-deleted simultaneously. During replay, however, it should * be all right to lock them one at a time. This is dependent on the fact * that we are deleting pages from the head of the list, and that readers * share-lock the next page before releasing the one they are on. 
So we diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index 64125d51953..e6f06c29e51 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -1382,7 +1382,7 @@ initGISTstate(Relation index) /* * If the index column has a specified collation, we should honor that * while doing comparisons. However, we may have a collatable storage - * type for a noncollatable indexed data type. If there's no index + * type for a noncollatable indexed data type. If there's no index * collation then specify default collation in case the support * functions need collation. This is harmless if the support * functions don't care about collation, so we just do it diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index 92a9dce8e61..7a8692b5087 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -31,7 +31,7 @@ * * On success return for a heap tuple, *recheck_p is set to indicate * whether recheck is needed. We recheck if any of the consistent() functions - * request it. recheck is not interesting when examining a non-leaf entry, + * request it. recheck is not interesting when examining a non-leaf entry, * since we must visit the lower index page if there's any doubt. * * If we are doing an ordered scan, so->distances[] is filled with distance @@ -62,7 +62,7 @@ gistindex_keytest(IndexScanDesc scan, /* * If it's a leftover invalid tuple from pre-9.1, treat it as a match with - * minimum possible distances. This means we'll always follow it to the + * minimum possible distances. This means we'll always follow it to the * referenced page. */ if (GistTupleIsInvalid(tuple)) @@ -224,7 +224,7 @@ gistindex_keytest(IndexScanDesc scan, * ntids: if not NULL, gistgetbitmap's output tuple counter * * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap - * tuples should be reported directly into the bitmap. If they are NULL, + * tuples should be reported directly into the bitmap. If they are NULL, * we're doing a plain or ordered indexscan. For a plain indexscan, heap * tuple TIDs are returned into so->pageData[]. For an ordered indexscan, * heap tuple TIDs are pushed into individual search queue items. diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 5194fe08ab7..8360b16ae50 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -56,7 +56,7 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg) /* * If new item is heap tuple, it goes to front of chain; otherwise insert * it before the first index-page item, so that index pages are visited in - * LIFO order, ensuring depth-first search of index pages. See comments + * LIFO order, ensuring depth-first search of index pages. See comments * in gist_private.h. */ if (GISTSearchItemIsHeap(*newitem)) diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c index 2dd26de0982..e1994bf04b5 100644 --- a/src/backend/access/gist/gistsplit.c +++ b/src/backend/access/gist/gistsplit.c @@ -71,7 +71,7 @@ gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec, * Recompute unions of left- and right-side subkeys after a page split, * ignoring any tuples that are marked in spl->spl_dontcare[]. * - * Note: we always recompute union keys for all index columns. In some cases + * Note: we always recompute union keys for all index columns. 
In some cases * this might represent duplicate work for the leftmost column(s), but it's * not safe to assume that "zero penalty to move a tuple" means "the union * key doesn't change at all". Penalty functions aren't 100% accurate. @@ -160,7 +160,7 @@ findDontCares(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, /* * Remove tuples that are marked don't-cares from the tuple index array a[] - * of length *len. This is applied separately to the spl_left and spl_right + * of length *len. This is applied separately to the spl_left and spl_right * arrays. */ static void @@ -193,7 +193,7 @@ removeDontCares(OffsetNumber *a, int *len, const bool *dontcare) /* * Place a single don't-care tuple into either the left or right side of the * split, according to which has least penalty for merging the tuple into - * the previously-computed union keys. We need consider only columns starting + * the previously-computed union keys. We need consider only columns starting * at attno. */ static void @@ -291,7 +291,7 @@ supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, /* * There is only one previously defined union, so we just choose swap - * or not by lowest penalty for that side. We can only get here if a + * or not by lowest penalty for that side. We can only get here if a * secondary split happened to have all NULLs in its column in the * tuples that the outer recursion level had assigned to one side. * (Note that the null checks in gistSplitByKey don't prevent the @@ -427,7 +427,7 @@ gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVec sv->spl_rdatum = v->spl_rattr[attno]; /* - * Let the opclass-specific PickSplit method do its thing. Note that at + * Let the opclass-specific PickSplit method do its thing. Note that at * this point we know there are no null keys in the entryvec. */ FunctionCall2Coll(&giststate->picksplitFn[attno], diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c index fbccdb800bc..f32e35ad159 100644 --- a/src/backend/access/gist/gistutil.c +++ b/src/backend/access/gist/gistutil.c @@ -414,7 +414,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ * some inserts to go to other equally-good subtrees. * * keep_current_best is -1 if we haven't yet had to make a random choice - * whether to keep the current best tuple. If we have done so, and + * whether to keep the current best tuple. If we have done so, and * decided to keep it, keep_current_best is 1; if we've decided to * replace, keep_current_best is 0. (This state will be reset to -1 as * soon as we've made the replacement, but sometimes we make the choice in @@ -456,7 +456,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ { /* * New best penalty for column. Tentatively select this tuple - * as the target, and record the best penalty. Then reset the + * as the target, and record the best penalty. Then reset the * next column's penalty to "unknown" (and indirectly, the * same for all the ones to its right). This will force us to * adopt this tuple's penalty values as the best for all the @@ -475,7 +475,7 @@ gistchoose(Relation r, Page p, IndexTuple it, /* it has compressed entry */ { /* * The current tuple is exactly as good for this column as the - * best tuple seen so far. The next iteration of this loop + * best tuple seen so far. The next iteration of this loop * will compare the next column. 
*/ } @@ -681,7 +681,7 @@ gistcheckpage(Relation rel, Buffer buf) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) diff --git a/src/backend/access/gist/gistvacuum.c b/src/backend/access/gist/gistvacuum.c index 215806be12f..278d386a7cd 100644 --- a/src/backend/access/gist/gistvacuum.c +++ b/src/backend/access/gist/gistvacuum.c @@ -49,7 +49,7 @@ gistvacuumcleanup(PG_FUNCTION_ARGS) stats->estimated_count = info->estimated_count; /* - * XXX the above is wrong if index is partial. Would it be OK to just + * XXX the above is wrong if index is partial. Would it be OK to just * return NULL, or is there work we must do below? */ } diff --git a/src/backend/access/gist/gistxlog.c b/src/backend/access/gist/gistxlog.c index e12b9c66dc1..7d36b2ab6a3 100644 --- a/src/backend/access/gist/gistxlog.c +++ b/src/backend/access/gist/gistxlog.c @@ -38,7 +38,7 @@ static MemoryContext opCtx; /* working memory for operations */ * follow-right flag, because that change is not included in the full-page * image. To be sure that the intermediate state with the wrong flag value is * not visible to concurrent Hot Standby queries, this function handles - * restoring the full-page image as well as updating the flag. (Note that + * restoring the full-page image as well as updating the flag. (Note that * we never need to do anything else to the child page in the current WAL * action.) */ @@ -89,7 +89,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record) /* * We need to acquire and hold lock on target page while updating the left - * child page. If we have a full-page image of target page, getting the + * child page. If we have a full-page image of target page, getting the * lock is a side-effect of restoring that image. Note that even if the * target page no longer exists, we'll still attempt to replay the change * on the child page. @@ -387,6 +387,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf, for (ptr = dist; ptr; ptr = ptr->next) npage++; + /* * the caller should've checked this already, but doesn't hurt to check * again. diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index 502fc31dd19..7abb7a47fc2 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -78,7 +78,7 @@ hashbuild(PG_FUNCTION_ARGS) * (assuming their hash codes are pretty random) there will be no locality * of access to the index, and if the index is bigger than available RAM * then we'll thrash horribly. To prevent that scenario, we can sort the - * tuples by (expected) bucket number. However, such a sort is useless + * tuples by (expected) bucket number. However, such a sort is useless * overhead when the index does fit in RAM. We choose to sort if the * initial index size exceeds NBuffers. * @@ -248,7 +248,7 @@ hashgettuple(PG_FUNCTION_ARGS) /* * An insertion into the current index page could have happened while * we didn't have read lock on it. Re-find our position by looking - * for the TID we previously returned. (Because we hold share lock on + * for the TID we previously returned. (Because we hold share lock on * the bucket, no deletions or splits could have occurred; therefore * we can expect that the TID still exists in the current index page, * at an offset >= where we were.) 
@@ -524,7 +524,7 @@ hashbulkdelete(PG_FUNCTION_ARGS) /* * Read the metapage to fetch original bucket and tuple counts. Also, we * keep a copy of the last-seen metapage so that we can use its - * hashm_spares[] values to compute bucket page addresses. This is a bit + * hashm_spares[] values to compute bucket page addresses. This is a bit * hokey but perfectly safe, since the interesting entries in the spares * array cannot change under us; and it beats rereading the metapage for * each bucket. @@ -655,7 +655,7 @@ loop_top: { /* * Otherwise, our count is untrustworthy since we may have - * double-scanned tuples in split buckets. Proceed by dead-reckoning. + * double-scanned tuples in split buckets. Proceed by dead-reckoning. * (Note: we still return estimated_count = false, because using this * count is better than not updating reltuples at all.) */ diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c index 6d351da5b0a..c61fec6b84f 100644 --- a/src/backend/access/hash/hashfunc.c +++ b/src/backend/access/hash/hashfunc.c @@ -11,7 +11,7 @@ * src/backend/access/hash/hashfunc.c * * NOTES - * These functions are stored in pg_amproc. For each operator class + * These functions are stored in pg_amproc. For each operator class * defined for hash indexes, they compute the hash value of the argument. * * Additional hash functions appear in /utils/adt/ files for various @@ -158,7 +158,7 @@ hashtext(PG_FUNCTION_ARGS) /* * Note: this is currently identical in behavior to hashvarlena, but keep * it as a separate function in case we someday want to do something - * different in non-C locales. (See also hashbpchar, if so.) + * different in non-C locales. (See also hashbpchar, if so.) */ result = hash_any((unsigned char *) VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key)); @@ -236,7 +236,7 @@ hashvarlena(PG_FUNCTION_ARGS) * * This allows some parallelism. Read-after-writes are good at doubling * the number of bits affected, so the goal of mixing pulls in the opposite - * direction from the goal of parallelism. I did what I could. Rotates + * direction from the goal of parallelism. I did what I could. Rotates * seem to cost as much as shifts on every machine I could lay my hands on, * and rotates are much kinder to the top and bottom bits, so I used rotates. *---------- @@ -270,7 +270,7 @@ hashvarlena(PG_FUNCTION_ARGS) * substantial performance increase since final() does not need to * do well in reverse, but is does need to affect all output bits. * mix(), on the other hand, does not need to affect all output - * bits (affecting 32 bits is enough). The original hash function had + * bits (affecting 32 bits is enough). The original hash function had * a single mixing operation that had to satisfy both sets of requirements * and was slower as a result. *---------- @@ -291,7 +291,7 @@ hashvarlena(PG_FUNCTION_ARGS) * k : the key (the unaligned variable-length array of bytes) * len : the length of the key, counting by bytes * - * Returns a uint32 value. Every bit of the key affects every bit of + * Returns a uint32 value. Every bit of the key affects every bit of * the return value. Every 1-bit and 2-bit delta achieves avalanche. * About 6*len+35 instructions. The best hash table sizes are powers * of 2. There is no need to do mod a prime (mod is sooo slow!). 
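For reference alongside the hashfunc.c comments above: the mixing they describe comes from Bob Jenkins' lookup3 hash, whose final() step uses rotates precisely because they are kinder to the top and bottom bits than shifts. A self-contained rendering of that step, assuming the published lookup3 rotation constants:

#include <stdint.h>

#define rot(x,k) (((x) << (k)) | ((x) >> (32 - (k))))

/*
 * final() mixing, per lookup3: each line is a read-after-write on a
 * previously mixed word, which is what doubles the number of affected
 * bits at every step.  It must affect all output bits, but it does not
 * need to mix well in reverse, which is why it is cheaper than mix().
 */
static void
final_mix(uint32_t *a, uint32_t *b, uint32_t *c)
{
	*c ^= *b; *c -= rot(*b, 14);
	*a ^= *c; *a -= rot(*c, 11);
	*b ^= *a; *b -= rot(*a, 25);
	*c ^= *b; *c -= rot(*b, 16);
	*a ^= *c; *a -= rot(*c, 4);
	*b ^= *a; *b -= rot(*a, 14);
	*c ^= *b; *c -= rot(*b, 24);
}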
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index 49211eef9a3..05e9808b8ad 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -89,7 +89,7 @@ _hash_doinsert(Relation rel, IndexTuple itup) /* * If the previous iteration of this loop locked what is still the - * correct target bucket, we are done. Otherwise, drop any old lock + * correct target bucket, we are done. Otherwise, drop any old lock * and lock what now appears to be the correct bucket. */ if (retry) diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 2389c3843f7..628c05698b9 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -80,7 +80,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * * Add an overflow page to the bucket whose last page is pointed to by 'buf'. * - * On entry, the caller must hold a pin but no lock on 'buf'. The pin is + * On entry, the caller must hold a pin but no lock on 'buf'. The pin is * dropped before exiting (we assume the caller is not interested in 'buf' * anymore). The returned overflow page will be pinned and write-locked; * it is guaranteed to be empty. @@ -89,12 +89,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * That buffer is returned in the same state. * * The caller must hold at least share lock on the bucket, to ensure that - * no one else tries to compact the bucket meanwhile. This guarantees that + * no one else tries to compact the bucket meanwhile. This guarantees that * 'buf' won't stop being part of the bucket while it's unlocked. * * NB: since this could be executed concurrently by multiple processes, * one should not assume that the returned overflow page will be the - * immediate successor of the originally passed 'buf'. Additional overflow + * immediate successor of the originally passed 'buf'. Additional overflow * pages might have been added to the bucket chain in between. */ Buffer @@ -157,7 +157,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf) /* * _hash_getovflpage() * - * Find an available overflow page and return it. The returned buffer + * Find an available overflow page and return it. The returned buffer * is pinned and write-locked, and has had _hash_pageinit() applied, * but it is caller's responsibility to fill the special space. * @@ -253,7 +253,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) * We create the new bitmap page with all pages marked "in use". * Actually two pages in the new bitmap's range will exist * immediately: the bitmap page itself, and the following page which - * is the one we return to the caller. Both of these are correctly + * is the one we return to the caller. Both of these are correctly * marked "in use". Subsequent pages do not exist yet, but it is * convenient to pre-mark them as "in use" too. */ @@ -284,7 +284,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) metap->hashm_spares[splitnum]++; /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) @@ -313,7 +313,7 @@ found: blkno = bitno_to_blkno(metap, bit); /* - * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * Adjust hashm_firstfree to avoid redundant searches. 
But don't risk * changing it if someone moved it while we were searching bitmap pages. */ if (metap->hashm_firstfree == orig_firstfree) @@ -494,7 +494,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, /* * _hash_initbitmap() * - * Initialize a new bitmap page. The metapage has a write-lock upon + * Initialize a new bitmap page. The metapage has a write-lock upon * entering the function, and must be written by caller after return. * * 'blkno' is the block number of the new bitmap page. diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 1552b73f28b..9e4a2e04340 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -49,7 +49,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, * of the locking rules). However, we can skip taking lmgr locks when the * index is local to the current backend (ie, either temp or new in the * current transaction). No one else can see it, so there's no reason to - * take locks. We still take buffer-level locks, but not lmgr locks. + * take locks. We still take buffer-level locks, but not lmgr locks. */ #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel)) @@ -136,7 +136,7 @@ _hash_getbuf(Relation rel, BlockNumber blkno, int access, int flags) * * This must be used only to fetch pages that are known to be before * the index's filesystem EOF, but are to be filled from scratch. - * _hash_pageinit() is applied automatically. Otherwise it has + * _hash_pageinit() is applied automatically. Otherwise it has * effects similar to _hash_getbuf() with access = HASH_WRITE. * * When this routine returns, a write lock is set on the @@ -344,7 +344,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) /* * Determine the target fill factor (in tuples per bucket) for this index. * The idea is to make the fill factor correspond to pages about as full - * as the user-settable fillfactor parameter says. We can compute it + * as the user-settable fillfactor parameter says. We can compute it * exactly since the index datatype (i.e. uint32 hash key) is fixed-width. */ data_width = sizeof(uint32); @@ -377,7 +377,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum) /* * We initialize the metapage, the first N bucket pages, and the first * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend() - * calls to occur. This ensures that the smgr level has the right idea of + * calls to occur. This ensures that the smgr level has the right idea of * the physical index length. */ metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum); @@ -545,7 +545,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Determine which bucket is to be split, and attempt to lock the old - * bucket. If we can't get the lock, give up. + * bucket. If we can't get the lock, give up. * * The lock protects us against other backends, but not against our own * backend. Must check for active scans separately. @@ -603,7 +603,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) } /* - * Okay to proceed with split. Update the metapage bucket mapping info. + * Okay to proceed with split. Update the metapage bucket mapping info. * * Since we are scribbling on the metapage data right in the shared * buffer, any failure in this next little bit leaves us with a big @@ -641,7 +641,7 @@ _hash_expandtable(Relation rel, Buffer metabuf) * Copy bucket mapping info now; this saves re-accessing the meta page * inside _hash_splitbucket's inner loop. 
Note that once we drop the * split lock, other splits could begin, so these values might be out of - * date before _hash_splitbucket finishes. That's okay, since all it + * date before _hash_splitbucket finishes. That's okay, since all it * needs is to tell which of these two buckets to map hashkeys into. */ maxbucket = metap->hashm_maxbucket; @@ -876,7 +876,7 @@ _hash_splitbucket(Relation rel, /* * We're at the end of the old bucket chain, so we're done partitioning - * the tuples. Before quitting, call _hash_squeezebucket to ensure the + * the tuples. Before quitting, call _hash_squeezebucket to ensure the * tuples remaining in the old bucket (including the overflow pages) are * packed as tightly as possible. The new bucket is already tight. */ diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index ad405646c53..5aabe066064 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -210,7 +210,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) /* * If the previous iteration of this loop locked what is still the - * correct target bucket, we are done. Otherwise, drop any old lock + * correct target bucket, we are done. Otherwise, drop any old lock * and lock what now appears to be the correct bucket. */ if (retry) @@ -269,7 +269,7 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) * _hash_step() -- step to the next valid item in a scan in the bucket. * * If no valid record exists in the requested direction, return - * false. Else, return true and set the hashso_curpos for the + * false. Else, return true and set the hashso_curpos for the * scan to the right thing. * * 'bufP' points to the current buffer, which is pinned and read-locked. diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c index e13670c4f40..c0d6fec2567 100644 --- a/src/backend/access/hash/hashsort.c +++ b/src/backend/access/hash/hashsort.c @@ -8,7 +8,7 @@ * thrashing. We use tuplesort.c to sort the given index tuples into order. * * Note: if the number of rows in the table has been underestimated, - * bucket splits may occur during the index build. In that case we'd + * bucket splits may occur during the index build. In that case we'd * be inserting into two or more buckets for each possible masked-off * hash code value. That's no big problem though, since we'll still have * plenty of locality of access. @@ -52,7 +52,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets) hspool->index = index; /* - * Determine the bitmask for hash code values. Since there are currently + * Determine the bitmask for hash code values. Since there are currently * num_buckets buckets in the index, the appropriate mask can be computed * as follows. * diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 20bd2792585..43652921ac1 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -160,7 +160,7 @@ _hash_checkpage(Relation rel, Buffer buf, int flags) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. 
*/ if (PageIsNew(page)) @@ -280,7 +280,7 @@ _hash_form_tuple(Relation index, Datum *values, bool *isnull) * * Returns the offset of the first index entry having hashkey >= hash_value, * or the page's max offset plus one if hash_value is greater than all - * existing hash keys in the page. This is the appropriate place to start + * existing hash keys in the page. This is the appropriate place to start * a search, or to insert a new item. */ OffsetNumber diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 336fbb06dac..405117a5261 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -88,11 +88,11 @@ static XLogRecPtr log_heap_update(Relation reln, Buffer oldbuf, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared); static void HeapSatisfiesHOTandKeyUpdate(Relation relation, - Bitmapset *hot_attrs, - Bitmapset *key_attrs, Bitmapset *id_attrs, - bool *satisfies_hot, bool *satisfies_key, - bool *satisfies_id, - HeapTuple oldtup, HeapTuple newtup); + Bitmapset *hot_attrs, + Bitmapset *key_attrs, Bitmapset *id_attrs, + bool *satisfies_hot, bool *satisfies_key, + bool *satisfies_id, + HeapTuple oldtup, HeapTuple newtup); static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, @@ -113,7 +113,7 @@ static bool ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status XLTW_Oper oper, int *remaining); static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup); static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, - bool *copy); + bool *copy); /* @@ -213,7 +213,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) * while the scan is in progress will be invisible to my snapshot anyway. * (That is not true when using a non-MVCC snapshot. However, we couldn't * guarantee to return tuples added after scan start anyway, since they - * might go into pages we already scanned. To guarantee consistent + * might go into pages we already scanned. To guarantee consistent * results for a non-MVCC snapshot, the caller must hold some higher-level * lock that ensures the interesting tuple(s) won't change.) */ @@ -221,7 +221,7 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) /* * If the table is large relative to NBuffers, use a bulk-read access - * strategy and enable synchronized scanning (see syncscan.c). Although + * strategy and enable synchronized scanning (see syncscan.c). Although * the thresholds for these features could be different, we make them the * same so that there are only two behaviors to tune rather than four. * (However, some callers need to be able to disable one or both of these @@ -325,7 +325,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) } /* - * Be sure to check for interrupts at least once per page. Checks at + * Be sure to check for interrupts at least once per page. Checks at * higher code levels won't be able to stop a seqscan that encounters many * pages' worth of consecutive dead tuples. */ @@ -349,7 +349,7 @@ heapgetpage(HeapScanDesc scan, BlockNumber page) /* * We must hold share lock on the buffer content while examining tuple - * visibility. Afterwards, however, the tuples we have found to be + * visibility. Afterwards, however, the tuples we have found to be * visible are guaranteed good as long as we hold the buffer pin. 
*/ LockBuffer(buffer, BUFFER_LOCK_SHARE); @@ -1126,7 +1126,7 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode) * * Same as relation_openrv, but with an additional missing_ok argument * allowing a NULL return rather than an error if the relation is not - * found. (Note that some other causes, such as permissions problems, + * found. (Note that some other causes, such as permissions problems, * will still result in an ereport.) * ---------------- */ @@ -1740,7 +1740,7 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, /* * When first_call is true (and thus, skip is initially false) we'll - * return the first tuple we find. But on later passes, heapTuple + * return the first tuple we find. But on later passes, heapTuple * will initially be pointing to the tuple we returned last time. * Returning it again would be incorrect (and would loop forever), so * we skip it and return the next match we find. @@ -1834,7 +1834,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot, * possibly uncommitted version. * * *tid is both an input and an output parameter: it is updated to - * show the latest version of the row. Note that it will not be changed + * show the latest version of the row. Note that it will not be changed * if no version of the row passes the snapshot test. */ void @@ -1955,7 +1955,7 @@ heap_get_latest_tid(Relation relation, * * This is called after we have waited for the XMAX transaction to terminate. * If the transaction aborted, we guarantee the XMAX_INVALID hint bit will - * be set on exit. If the transaction committed, we set the XMAX_COMMITTED + * be set on exit. If the transaction committed, we set the XMAX_COMMITTED * hint bit if possible --- but beware that that may not yet be possible, * if the transaction committed asynchronously. * @@ -2042,7 +2042,7 @@ FreeBulkInsertState(BulkInsertState bistate) * The return value is the OID assigned to the tuple (either here or by the * caller), or InvalidOid if no OID. The header fields of *tup are updated * to match the stored tuple; in particular tup->t_self receives the actual - * TID where the tuple was stored. But note that any toasting of fields + * TID where the tuple was stored. But note that any toasting of fields * within the tuple data is NOT reflected into *tup. */ Oid @@ -2071,7 +2071,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, * For a heap insert, we only need to check for table-level SSI locks. Our * new tuple can't possibly conflict with existing tuple locks, and heap * page locks are only consolidated versions of tuple locks; they do not - * lock "gaps" as index page locks do. So we don't need to identify a + * lock "gaps" as index page locks do. So we don't need to identify a * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); @@ -2123,8 +2123,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, bool need_tuple_data; /* - * For logical decoding, we need the tuple even if we're doing a - * full page write, so make sure to log it separately. (XXX We could + * For logical decoding, we need the tuple even if we're doing a full + * page write, so make sure to log it separately. (XXX We could * alternatively store a pointer into the FPW). 
* * Also, if this is a catalog, we need to transmit combocids to @@ -2165,9 +2165,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid, rdata[2].next = NULL; /* - * Make a separate rdata entry for the tuple's buffer if we're - * doing logical decoding, so that an eventual FPW doesn't - * remove the tuple's data. + * Make a separate rdata entry for the tuple's buffer if we're doing + * logical decoding, so that an eventual FPW doesn't remove the + * tuple's data. */ if (need_tuple_data) { @@ -2248,7 +2248,7 @@ heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, /* * If the object id of this tuple has already been assigned, trust the - * caller. There are a couple of ways this can happen. At initial db + * caller. There are a couple of ways this can happen. At initial db * creation, the backend program sets oids for tuples. When we define * an index, we set the oid. Finally, in the future, we may allow * users to set their own object ids in order to support a persistent @@ -2342,7 +2342,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, * For a heap insert, we only need to check for table-level SSI locks. Our * new tuple can't possibly conflict with existing tuple locks, and heap * page locks are only consolidated versions of tuple locks; they do not - * lock "gaps" as index page locks do. So we don't need to identify a + * lock "gaps" as index page locks do. So we don't need to identify a * buffer before making the call. */ CheckForSerializableConflictIn(relation, NULL, InvalidBuffer); @@ -2356,7 +2356,7 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, int nthispage; /* - * Find buffer where at least the next tuple will fit. If the page is + * Find buffer where at least the next tuple will fit. If the page is * all-visible, this will also pin the requisite visibility map page. */ buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len, @@ -2487,9 +2487,9 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples, rdata[1].next = NULL; /* - * Make a separate rdata entry for the tuple's buffer if - * we're doing logical decoding, so that an eventual FPW - * doesn't remove the tuple's data. + * Make a separate rdata entry for the tuple's buffer if we're + * doing logical decoding, so that an eventual FPW doesn't remove + * the tuple's data. */ if (need_tuple_data) { @@ -2597,8 +2597,8 @@ compute_infobits(uint16 infomask, uint16 infomask2) static inline bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask) { - const uint16 interesting = - HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; + const uint16 interesting = + HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK; if ((new_infomask & interesting) != (old_infomask & interesting)) return true; @@ -2650,7 +2650,7 @@ heap_delete(Relation relation, ItemPointer tid, bool have_tuple_lock = false; bool iscombo; bool all_visible_cleared = false; - HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */ + HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */ bool old_key_copied = false; Assert(ItemPointerIsValid(tid)); @@ -2751,10 +2751,10 @@ l1: /* * You might think the multixact is necessarily done here, but not * so: it could have surviving members, namely our own xact or - * other subxacts of this backend. It is legal for us to delete + * other subxacts of this backend. 
It is legal for us to delete * the tuple in either case, however (the latter case is * essentially a situation of upgrading our former shared lock to - * exclusive). We don't bother changing the on-disk hint bits + * exclusive). We don't bother changing the on-disk hint bits * since we are about to overwrite the xmax altogether. */ } @@ -2836,7 +2836,7 @@ l1: * If this is the first possibly-multixact-able operation in the current * transaction, set my per-backend OldestMemberMXactId setting. We can be * certain that the transaction will never become a member of any older - * MultiXactIds than that. (We have to do this even if we end up just + * MultiXactIds than that. (We have to do this even if we end up just * using our own TransactionId below, since some other backend could * incorporate our XID into a MultiXact immediately afterwards.) */ @@ -2852,7 +2852,7 @@ l1: /* * If this transaction commits, the tuple will become DEAD sooner or * later. Set flag that this page is a candidate for pruning once our xid - * falls below the OldestXmin horizon. If the transaction finally aborts, + * falls below the OldestXmin horizon. If the transaction finally aborts, * the subsequent page pruning will be a no-op and the hint will be * cleared. */ @@ -2919,7 +2919,7 @@ l1: xlhdr.t_hoff = old_key_tuple->t_data->t_hoff; rdata[1].next = &(rdata[2]); - rdata[2].data = (char*)&xlhdr; + rdata[2].data = (char *) &xlhdr; rdata[2].len = SizeOfHeapHeader; rdata[2].buffer = InvalidBuffer; rdata[2].next = NULL; @@ -2994,7 +2994,7 @@ l1: * * This routine may be used to delete a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock - * on the relation associated with the tuple). Any failure is reported + * on the relation associated with the tuple). Any failure is reported * via ereport(). */ void @@ -3110,7 +3110,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, /* * Fetch the list of attributes to be checked for HOT update. This is * wasted effort if we fail to update or have to put the new tuple on a - * different page. But we must compute the list before obtaining buffer + * different page. But we must compute the list before obtaining buffer * lock --- in the worst case, if we are doing an update on one of the * relevant system catalogs, we could deadlock if we try to fetch the list * later. In any case, the relcache caches the data so this is usually @@ -3122,7 +3122,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, hot_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_ALL); key_attrs = RelationGetIndexAttrBitmap(relation, INDEX_ATTR_BITMAP_KEY); id_attrs = RelationGetIndexAttrBitmap(relation, - INDEX_ATTR_BITMAP_IDENTITY_KEY); + INDEX_ATTR_BITMAP_IDENTITY_KEY); block = ItemPointerGetBlockNumber(otid); buffer = ReadBuffer(relation, block); @@ -3193,7 +3193,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, * If this is the first possibly-multixact-able operation in the * current transaction, set my per-backend OldestMemberMXactId * setting. We can be certain that the transaction will never become a - * member of any older MultiXactIds than that. (We have to do this + * member of any older MultiXactIds than that. (We have to do this * even if we end up just using our own TransactionId below, since * some other backend could incorporate our XID into a MultiXact * immediately afterwards.) @@ -3238,7 +3238,7 @@ l2: /* * XXX note that we don't consider the "no wait" case here. 
This * isn't a problem currently because no caller uses that case, but it - * should be fixed if such a caller is introduced. It wasn't a + * should be fixed if such a caller is introduced. It wasn't a * problem previously because this code would always wait, but now * that some tuple locks do not conflict with one of the lock modes we * use, it is possible that this case is interesting to handle @@ -3276,7 +3276,7 @@ l2: * it as locker, unless it is gone completely. * * If it's not a multi, we need to check for sleeping conditions - * before actually going to sleep. If the update doesn't conflict + * before actually going to sleep. If the update doesn't conflict * with the locks, we just continue without sleeping (but making sure * it is preserved). */ @@ -3302,10 +3302,10 @@ l2: goto l2; /* - * Note that the multixact may not be done by now. It could have + * Note that the multixact may not be done by now. It could have * surviving members; our own xact or other subxacts of this * backend, and also any other concurrent transaction that locked - * the tuple with KeyShare if we only got TupleLockUpdate. If + * the tuple with KeyShare if we only got TupleLockUpdate. If * this is the case, we have to be careful to mark the updated * tuple with the surviving members in Xmax. * @@ -3512,7 +3512,7 @@ l2: * If the toaster needs to be activated, OR if the new tuple will not fit * on the same page as the old, then we need to release the content lock * (but not the pin!) on the old tuple's buffer while we are off doing - * TOAST and/or table-file-extension work. We must mark the old tuple to + * TOAST and/or table-file-extension work. We must mark the old tuple to * show that it's already being updated, else other processes may try to * update it themselves. * @@ -3578,7 +3578,7 @@ l2: * there's more free now than before. * * What's more, if we need to get a new page, we will need to acquire - * buffer locks on both old and new pages. To avoid deadlock against + * buffer locks on both old and new pages. To avoid deadlock against * some other backend trying to get the same two locks in the other * order, we must be consistent about the order we get the locks in. * We use the rule "lock the lower-numbered page of the relation @@ -3638,7 +3638,7 @@ l2: /* * At this point newbuf and buffer are both pinned and locked, and newbuf - * has enough space for the new tuple. If they are the same buffer, only + * has enough space for the new tuple. If they are the same buffer, only * one pin is held. */ @@ -3646,7 +3646,7 @@ l2: { /* * Since the new tuple is going into the same page, we might be able - * to do a HOT update. Check if any of the index columns have been + * to do a HOT update. Check if any of the index columns have been * changed. If not, then HOT update is possible. */ if (satisfies_hot) @@ -3672,13 +3672,13 @@ l2: /* * If this transaction commits, the old tuple will become DEAD sooner or * later. Set flag that this page is a candidate for pruning once our xid - * falls below the OldestXmin horizon. If the transaction finally aborts, + * falls below the OldestXmin horizon. If the transaction finally aborts, * the subsequent page pruning will be a no-op and the hint will be * cleared. * * XXX Should we set hint on newbuf as well? If the transaction aborts, * there would be a prunable tuple in the newbuf; but for now we choose - * not to optimize for aborts. Note that heap_xlog_update must be kept in + * not to optimize for aborts. 
Note that heap_xlog_update must be kept in * sync if this decision changes. */ PageSetPrunable(page, xid); @@ -3775,7 +3775,7 @@ l2: * Mark old tuple for invalidation from system caches at next command * boundary, and mark the new tuple for invalidation in case we abort. We * have to do this before releasing the buffer because oldtup is in the - * buffer. (heaptup is all in local memory, but it's necessary to process + * buffer. (heaptup is all in local memory, but it's necessary to process * both tuple versions in one call to inval.c so we can avoid redundant * sinval messages.) */ @@ -3853,7 +3853,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, /* * Extract the corresponding values. XXX this is pretty inefficient if - * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do + * there are many indexed columns. Should HeapSatisfiesHOTandKeyUpdate do * a single heap_deform_tuple call on each tuple, instead? But that * doesn't work for system columns ... */ @@ -3876,7 +3876,7 @@ heap_tuple_attr_equals(TupleDesc tupdesc, int attrnum, /* * We do simple binary comparison of the two datums. This may be overly * strict because there can be multiple binary representations for the - * same logical value. But we should be OK as long as there are no false + * same logical value. But we should be OK as long as there are no false * positives. Using a type-specific equality operator is messy because * there could be multiple notions of equality in different operator * classes; furthermore, we cannot safely invoke user-defined functions @@ -3951,8 +3951,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs, /* * Since the HOT attributes are a superset of the key attributes and * the key attributes are a superset of the id attributes, this logic - * is guaranteed to identify the next column that needs to be - * checked. + * is guaranteed to identify the next column that needs to be checked. */ if (hot_result && next_hot_attnum > FirstLowInvalidHeapAttributeNumber) check_now = next_hot_attnum; @@ -3981,12 +3980,11 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs, } /* - * Advance the next attribute numbers for the sets that contain - * the attribute we just checked. As we work our way through the - * columns, the next_attnum values will rise; but when each set - * becomes empty, bms_first_member() will return -1 and the attribute - * number will end up with a value less than - * FirstLowInvalidHeapAttributeNumber. + * Advance the next attribute numbers for the sets that contain the + * attribute we just checked. As we work our way through the columns, + * the next_attnum values will rise; but when each set becomes empty, + * bms_first_member() will return -1 and the attribute number will end + * up with a value less than FirstLowInvalidHeapAttributeNumber. */ if (hot_result && check_now == next_hot_attnum) { @@ -4015,7 +4013,7 @@ HeapSatisfiesHOTandKeyUpdate(Relation relation, Bitmapset *hot_attrs, * * This routine may be used to update a tuple when concurrent updates of * the target tuple are not expected (for example, because we have a lock - * on the relation associated with the tuple). Any failure is reported + * on the relation associated with the tuple). Any failure is reported * via ereport(). 
*/ void @@ -4057,7 +4055,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update) { - int retval; + int retval; if (is_update) retval = tupleLockExtraInfo[mode].updstatus; @@ -4239,15 +4237,15 @@ l3: * However, if there are updates, we need to walk the update chain * to mark future versions of the row as locked, too. That way, * if somebody deletes that future version, we're protected - * against the key going away. This locking of future versions + * against the key going away. This locking of future versions * could block momentarily, if a concurrent transaction is * deleting a key; or it could return a value to the effect that - * the transaction deleting the key has already committed. So we + * the transaction deleting the key has already committed. So we * do this before re-locking the buffer; otherwise this would be * prone to deadlocks. * * Note that the TID we're locking was grabbed before we unlocked - * the buffer. For it to change while we're not looking, the + * the buffer. For it to change while we're not looking, the * other properties we're testing for below after re-locking the * buffer would also change, in which case we would restart this * loop above. @@ -4472,7 +4470,7 @@ l3: * Of course, the multixact might not be done here: if we're * requesting a light lock mode, other transactions with light * locks could still be alive, as well as locks owned by our - * own xact or other subxacts of this backend. We need to + * own xact or other subxacts of this backend. We need to * preserve the surviving MultiXact members. Note that it * isn't absolutely necessary in the latter case, but doing so * is simpler. @@ -4516,7 +4514,7 @@ l3: /* * xwait is done, but if xwait had just locked the tuple then * some other xact could update this tuple before we get to - * this point. Check for xmax change, and start over if so. + * this point. Check for xmax change, and start over if so. */ if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) || !TransactionIdEquals( @@ -4525,7 +4523,7 @@ l3: goto l3; /* - * Otherwise check if it committed or aborted. Note we cannot + * Otherwise check if it committed or aborted. Note we cannot * be here if the tuple was only locked by somebody who didn't * conflict with us; that should have been handled above. So * that transaction must necessarily be gone by now. @@ -4605,7 +4603,7 @@ failed: * If this is the first possibly-multixact-able operation in the current * transaction, set my per-backend OldestMemberMXactId setting. We can be * certain that the transaction will never become a member of any older - * MultiXactIds than that. (We have to do this even if we end up just + * MultiXactIds than that. (We have to do this even if we end up just * using our own TransactionId below, since some other backend could * incorporate our XID into a MultiXact immediately afterwards.) */ @@ -4641,7 +4639,7 @@ failed: HeapTupleHeaderSetXmax(tuple->t_data, xid); /* - * Make sure there is no forward chain link in t_ctid. Note that in the + * Make sure there is no forward chain link in t_ctid. Note that in the * cases where the tuple has been updated, we must not overwrite t_ctid, * because it was set by the updater. Moreover, if the tuple has been * updated, we need to follow the update chain to lock the new versions of @@ -4653,8 +4651,8 @@ failed: MarkBufferDirty(*buffer); /* - * XLOG stuff. 
You might think that we don't need an XLOG record because - * there is no state change worth restoring after a crash. You would be + * XLOG stuff. You might think that we don't need an XLOG record because + * there is no state change worth restoring after a crash. You would be * wrong however: we have just written either a TransactionId or a * MultiXactId that may never have been seen on disk before, and we need * to make sure that there are XLOG entries covering those ID numbers. @@ -4818,7 +4816,7 @@ l5: * If the XMAX is already a MultiXactId, then we need to expand it to * include add_to_xmax; but if all the members were lockers and are * all gone, we can do away with the IS_MULTI bit and just set - * add_to_xmax as the only locker/updater. If all lockers are gone + * add_to_xmax as the only locker/updater. If all lockers are gone * and we have an updater that aborted, we can also do without a * multi. * @@ -4881,7 +4879,7 @@ l5: */ MultiXactStatus new_status; MultiXactStatus old_status; - LockTupleMode old_mode; + LockTupleMode old_mode; if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)) { @@ -4900,8 +4898,8 @@ l5: { /* * LOCK_ONLY can be present alone only when a page has been - * upgraded by pg_upgrade. But in that case, - * TransactionIdIsInProgress() should have returned false. We + * upgraded by pg_upgrade. But in that case, + * TransactionIdIsInProgress() should have returned false. We * assume it's no longer locked in this case. */ elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax); @@ -4929,12 +4927,13 @@ l5: if (xmax == add_to_xmax) { /* - * Note that it's not possible for the original tuple to be updated: - * we wouldn't be here because the tuple would have been invisible and - * we wouldn't try to update it. As a subtlety, this code can also - * run when traversing an update chain to lock future versions of a - * tuple. But we wouldn't be here either, because the add_to_xmax - * would be different from the original updater. + * Note that it's not possible for the original tuple to be + * updated: we wouldn't be here because the tuple would have been + * invisible and we wouldn't try to update it. As a subtlety, + * this code can also run when traversing an update chain to lock + * future versions of a tuple. But we wouldn't be here either, + * because the add_to_xmax would be different from the original + * updater. */ Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)); @@ -5013,7 +5012,7 @@ static HTSU_Result test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait) { - MultiXactStatus wantedstatus; + MultiXactStatus wantedstatus; *needwait = false; wantedstatus = get_mxact_status_for_lock(mode, false); @@ -5026,18 +5025,18 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, if (TransactionIdIsCurrentTransactionId(xid)) { /* - * Updated by our own transaction? Just return failure. This shouldn't - * normally happen. + * Updated by our own transaction? Just return failure. This + * shouldn't normally happen. */ return HeapTupleSelfUpdated; } else if (TransactionIdIsInProgress(xid)) { /* - * If the locking transaction is running, what we do depends on whether - * the lock modes conflict: if they do, then we must wait for it to - * finish; otherwise we can fall through to lock this tuple version - * without waiting. 
+ * If the locking transaction is running, what we do depends on + * whether the lock modes conflict: if they do, then we must wait for + * it to finish; otherwise we can fall through to lock this tuple + * version without waiting. */ if (DoLockModesConflict(LOCKMODE_from_mxstatus(status), LOCKMODE_from_mxstatus(wantedstatus))) @@ -5046,8 +5045,8 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, } /* - * If we set needwait above, then this value doesn't matter; otherwise, - * this value signals to caller that it's okay to proceed. + * If we set needwait above, then this value doesn't matter; + * otherwise, this value signals to caller that it's okay to proceed. */ return HeapTupleMayBeUpdated; } @@ -5059,7 +5058,7 @@ test_lockmode_for_conflict(MultiXactStatus status, TransactionId xid, * The other transaction committed. If it was only a locker, then the * lock is completely gone now and we can return success; but if it * was an update, then what we do depends on whether the two lock - * modes conflict. If they conflict, then we must report error to + * modes conflict. If they conflict, then we must report error to * caller. But if they don't, we can fall through to allow the current * transaction to lock the tuple. * @@ -5133,8 +5132,8 @@ l4: LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); /* - * Check the tuple XMIN against prior XMAX, if any. If we reached - * the end of the chain, we're done, so return success. + * Check the tuple XMIN against prior XMAX, if any. If we reached the + * end of the chain, we're done, so return success. */ if (TransactionIdIsValid(priorXmax) && !TransactionIdEquals(HeapTupleHeaderGetXmin(mytup.t_data), @@ -5162,14 +5161,14 @@ l4: rawxmax = HeapTupleHeaderGetRawXmax(mytup.t_data); if (old_infomask & HEAP_XMAX_IS_MULTI) { - int nmembers; - int i; + int nmembers; + int i; MultiXactMember *members; nmembers = GetMultiXactIdMembers(rawxmax, &members, false); for (i = 0; i < nmembers; i++) { - HTSU_Result res; + HTSU_Result res; res = test_lockmode_for_conflict(members[i].status, members[i].xid, @@ -5196,7 +5195,7 @@ l4: } else { - HTSU_Result res; + HTSU_Result res; MultiXactStatus status; /* @@ -5219,9 +5218,9 @@ l4: else { /* - * LOCK_ONLY present alone (a pg_upgraded tuple - * marked as share-locked in the old cluster) shouldn't - * be seen in the middle of an update chain. + * LOCK_ONLY present alone (a pg_upgraded tuple marked + * as share-locked in the old cluster) shouldn't be + * seen in the middle of an update chain. */ elog(ERROR, "invalid lock status in tuple"); } @@ -5323,7 +5322,7 @@ l4: * The initial tuple is assumed to be already locked. * * This function doesn't check visibility, it just inconditionally marks the - * tuple(s) as locked. If any tuple in the updated chain is being deleted + * tuple(s) as locked. If any tuple in the updated chain is being deleted * concurrently (or updated with the key being modified), sleep until the * transaction doing it is finished. * @@ -5347,7 +5346,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, * If this is the first possibly-multixact-able operation in the * current transaction, set my per-backend OldestMemberMXactId * setting. We can be certain that the transaction will never become a - * member of any older MultiXactIds than that. (We have to do this + * member of any older MultiXactIds than that. 
(We have to do this * even if we end up just using our own TransactionId below, since * some other backend could incorporate our XID into a MultiXact * immediately afterwards.) @@ -5366,7 +5365,7 @@ heap_lock_updated_tuple(Relation rel, HeapTuple tuple, ItemPointer ctid, * heap_inplace_update - update a tuple "in place" (ie, overwrite it) * * Overwriting violates both MVCC and transactional safety, so the uses - * of this function in Postgres are extremely limited. Nonetheless we + * of this function in Postgres are extremely limited. Nonetheless we * find some places to use it. * * The tuple cannot change size, and therefore it's reasonable to assume @@ -5608,7 +5607,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, */ if (ISUPDATE_from_mxstatus(members[i].status)) { - TransactionId xid = members[i].xid; + TransactionId xid = members[i].xid; /* * It's an update; should we keep it? If the transaction is known @@ -5728,7 +5727,7 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * heap_prepare_freeze_tuple * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) - * are older than the specified cutoff XID and cutoff MultiXactId. If so, + * are older than the specified cutoff XID and cutoff MultiXactId. If so, * setup enough state (in the *frz output argument) to later execute and * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing * is to be changed. @@ -5801,11 +5800,11 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, else if (flags & FRM_RETURN_IS_XID) { /* - * NB -- some of these transformations are only valid because - * we know the return Xid is a tuple updater (i.e. not merely a + * NB -- some of these transformations are only valid because we + * know the return Xid is a tuple updater (i.e. not merely a * locker.) Also note that the only reason we don't explicitely - * worry about HEAP_KEYS_UPDATED is because it lives in t_infomask2 - * rather than t_infomask. + * worry about HEAP_KEYS_UPDATED is because it lives in + * t_infomask2 rather than t_infomask. */ frz->t_infomask &= ~HEAP_XMAX_BITS; frz->xmax = newxmax; @@ -5815,8 +5814,8 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, } else if (flags & FRM_RETURN_IS_MULTI) { - uint16 newbits; - uint16 newbits2; + uint16 newbits; + uint16 newbits2; /* * We can't use GetMultiXactIdHintBits directly on the new multi @@ -5851,7 +5850,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, /* * The tuple might be marked either XMAX_INVALID or XMAX_COMMITTED + - * LOCKED. Normalize to INVALID just to be sure no one gets confused. + * LOCKED. Normalize to INVALID just to be sure no one gets confused. * Also get rid of the HEAP_KEYS_UPDATED bit. */ frz->t_infomask &= ~HEAP_XMAX_BITS; @@ -6111,7 +6110,7 @@ HeapTupleGetUpdateXid(HeapTupleHeader tuple) * used to optimize multixact access in case it's a lock-only multi); 'nowait' * indicates whether to use conditional lock acquisition, to allow callers to * fail if lock is unavailable. 'rel', 'ctid' and 'oper' are used to set up - * context information for error messages. 'remaining', if not NULL, receives + * context information for error messages. 'remaining', if not NULL, receives * the number of members that are still running, including any (non-aborted) * subtransactions of our own transaction. * @@ -6173,7 +6172,7 @@ Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, * return failure, if asked to avoid waiting.) 
* * Note that we don't set up an error context callback ourselves, - * but instead we pass the info down to XactLockTableWait. This + * but instead we pass the info down to XactLockTableWait. This * might seem a bit wasteful because the context is set up and * tore down for each member of the multixact, but in reality it * should be barely noticeable, and it avoids duplicate code. @@ -6242,7 +6241,7 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status, * heap_tuple_needs_freeze * * Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac) - * are older than the specified cutoff XID or MultiXactId. If so, return TRUE. + * are older than the specified cutoff XID or MultiXactId. If so, return TRUE. * * It doesn't matter whether the tuple is alive or dead, we are checking * to see if a tuple needs to be removed or frozen to avoid wraparound. @@ -6366,7 +6365,7 @@ heap_restrpos(HeapScanDesc scan) else { /* - * If we reached end of scan, rs_inited will now be false. We must + * If we reached end of scan, rs_inited will now be false. We must * reset it to true to keep heapgettup from doing the wrong thing. */ scan->rs_inited = true; @@ -6548,7 +6547,7 @@ log_heap_clean(Relation reln, Buffer buffer, } /* - * Perform XLogInsert for a heap-freeze operation. Caller must have already + * Perform XLogInsert for a heap-freeze operation. Caller must have already * modified the buffer and marked it dirty. */ XLogRecPtr @@ -6593,7 +6592,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid, /* * Perform XLogInsert for a heap-visible operation. 'block' is the block * being marked all-visible, and vm_buffer is the buffer containing the - * corresponding visibility map block. Both should have already been modified + * corresponding visibility map block. Both should have already been modified * and dirtied. * * If checksums are enabled, we also add the heap_buffer to the chain to @@ -6642,7 +6641,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, } /* - * Perform XLogInsert for a heap-update operation. Caller must already + * Perform XLogInsert for a heap-update operation. Caller must already * have modified the buffer(s) and marked them dirty. */ static XLogRecPtr @@ -6674,10 +6673,10 @@ log_heap_update(Relation reln, Buffer oldbuf, info = XLOG_HEAP_UPDATE; /* - * If the old and new tuple are on the same page, we only need to log - * the parts of the new tuple that were changed. That saves on the amount - * of WAL we need to write. Currently, we just count any unchanged bytes - * in the beginning and end of the tuple. That's quick to check, and + * If the old and new tuple are on the same page, we only need to log the + * parts of the new tuple that were changed. That saves on the amount of + * WAL we need to write. Currently, we just count any unchanged bytes in + * the beginning and end of the tuple. That's quick to check, and * perfectly covers the common case that only one field is updated. * * We could do this even if the old and new tuple are on different pages, @@ -6688,10 +6687,10 @@ log_heap_update(Relation reln, Buffer oldbuf, * updates tend to create the new tuple version on the same page, there * isn't much to be gained by doing this across pages anyway. * - * Skip this if we're taking a full-page image of the new page, as we don't - * include the new tuple in the WAL record in that case. 
Also disable if - * wal_level='logical', as logical decoding needs to be able to read the - * new tuple in whole from the WAL record alone. + * Skip this if we're taking a full-page image of the new page, as we + * don't include the new tuple in the WAL record in that case. Also + * disable if wal_level='logical', as logical decoding needs to be able to + * read the new tuple in whole from the WAL record alone. */ if (oldbuf == newbuf && !need_tuple_data && !XLogCheckBufferNeedsBackup(newbuf)) @@ -6707,6 +6706,7 @@ log_heap_update(Relation reln, Buffer oldbuf, if (newp[prefixlen] != oldp[prefixlen]) break; } + /* * Storing the length of the prefix takes 2 bytes, so we need to save * at least 3 bytes or there's no point. @@ -6793,8 +6793,8 @@ log_heap_update(Relation reln, Buffer oldbuf, xlhdr.header.t_infomask2 = newtup->t_data->t_infomask2; xlhdr.header.t_infomask = newtup->t_data->t_infomask; xlhdr.header.t_hoff = newtup->t_data->t_hoff; - Assert(offsetof(HeapTupleHeaderData, t_bits) + prefixlen + suffixlen <= newtup->t_len); - xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - prefixlen - suffixlen; + Assert(offsetof(HeapTupleHeaderData, t_bits) +prefixlen + suffixlen <= newtup->t_len); + xlhdr.t_len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -prefixlen - suffixlen; /* * As with insert records, we need not store this rdata segment if we @@ -6816,7 +6816,7 @@ log_heap_update(Relation reln, Buffer oldbuf, if (prefixlen == 0) { rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits); - rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) - suffixlen; + rdata[nr].len = newtup->t_len - offsetof(HeapTupleHeaderData, t_bits) -suffixlen; rdata[nr].buffer = need_tuple_data ? InvalidBuffer : newbufref; rdata[nr].buffer_std = true; rdata[nr].next = NULL; @@ -6829,7 +6829,7 @@ log_heap_update(Relation reln, Buffer oldbuf, * two separate rdata entries. */ /* bitmap [+ padding] [+ oid] */ - if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) > 0) + if (newtup->t_data->t_hoff - offsetof(HeapTupleHeaderData, t_bits) >0) { rdata[nr - 1].next = &(rdata[nr]); rdata[nr].data = ((char *) newtup->t_data) + offsetof(HeapTupleHeaderData, t_bits); @@ -6853,13 +6853,13 @@ log_heap_update(Relation reln, Buffer oldbuf, /* * Separate storage for the FPW buffer reference of the new page in the * wal_level >= logical case. - */ + */ if (need_tuple_data) { rdata[nr - 1].next = &(rdata[nr]); rdata[nr].data = NULL, - rdata[nr].len = 0; + rdata[nr].len = 0; rdata[nr].buffer = newbufref; rdata[nr].buffer_std = true; rdata[nr].next = NULL; @@ -6992,8 +6992,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno, recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata); /* - * The page may be uninitialized. If so, we can't set the LSN because - * that would corrupt the page. + * The page may be uninitialized. If so, we can't set the LSN because that + * would corrupt the page. */ if (!PageIsNew(page)) { @@ -7173,14 +7173,14 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * */ for (natt = 0; natt < idx_desc->natts; natt++) { - int attno = idx_rel->rd_index->indkey.values[natt]; + int attno = idx_rel->rd_index->indkey.values[natt]; if (attno < 0) { /* * The OID column can appear in an index definition, but that's - * OK, becuse we always copy the OID if present (see below). - * Other system columns may not. + * OK, becuse we always copy the OID if present (see below). 
Other + * system columns may not. */ if (attno == ObjectIdAttributeNumber) continue; @@ -7210,7 +7210,8 @@ ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed, bool * */ if (HeapTupleHasExternal(key_tuple)) { - HeapTuple oldtup = key_tuple; + HeapTuple oldtup = key_tuple; + key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation)); heap_freetuple(oldtup); } @@ -7963,7 +7964,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update) /* * In normal operation, it is important to lock the two pages in * page-number order, to avoid possible deadlocks against other update - * operations going the other way. However, during WAL replay there can + * operations going the other way. However, during WAL replay there can * be no other update happening, so we don't need to worry about that. But * we *do* need to worry that we don't expose an inconsistent state to Hot * Standby queries --- so the original page can't be unlocked before we've @@ -8169,7 +8170,7 @@ newsame:; if (suffixlen > 0) memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen); - newlen = offsetof(HeapTupleHeaderData, t_bits) + xlhdr.t_len + prefixlen + suffixlen; + newlen = offsetof(HeapTupleHeaderData, t_bits) +xlhdr.t_len + prefixlen + suffixlen; htup->t_infomask2 = xlhdr.header.t_infomask2; htup->t_infomask = xlhdr.header.t_infomask; htup->t_hoff = xlhdr.header.t_hoff; @@ -8444,6 +8445,7 @@ heap2_redo(XLogRecPtr lsn, XLogRecord *record) heap_xlog_lock_updated(lsn, record); break; case XLOG_HEAP2_NEW_CID: + /* * Nothing to do on a real replay, only used during logical * decoding. diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c index b306398aec1..631af759d78 100644 --- a/src/backend/access/heap/hio.c +++ b/src/backend/access/heap/hio.c @@ -146,7 +146,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, /* * If there are two buffers involved and we pinned just one of them, * it's possible that the second one became all-visible while we were - * busy pinning the first one. If it looks like that's a possible + * busy pinning the first one. If it looks like that's a possible * scenario, we'll need to make a second pass through this loop. */ if (buffer2 == InvalidBuffer || buffer1 == buffer2 @@ -177,7 +177,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the * same buffer we select for insertion of the new tuple (this could only * happen if space is freed in that page after heap_update finds there's not - * enough there). In that case, the page will be pinned and locked only once. + * enough there). In that case, the page will be pinned and locked only once. * * For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by * locking them only after locking the corresponding heap page, and taking @@ -198,7 +198,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2, * for additional constraints needed for safe usage of this behavior.) * * The caller can also provide a BulkInsertState object to optimize many - * insertions into the same relation. This keeps a pin on the current + * insertions into the same relation. This keeps a pin on the current * insertion target page (to save pin/unpin cycles) and also passes a * BULKWRITE buffer selection strategy object to the buffer manager. * Passing NULL for bistate selects the default behavior. 
@@ -251,7 +251,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * We first try to put the tuple on the same page we last inserted a tuple - * on, as cached in the BulkInsertState or relcache entry. If that + * on, as cached in the BulkInsertState or relcache entry. If that * doesn't work, we ask the Free Space Map to locate a suitable page. * Since the FSM's info might be out of date, we have to be prepared to * loop around and retry multiple times. (To insure this isn't an infinite @@ -283,7 +283,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * If the FSM knows nothing of the rel, try the last page before we - * give up and extend. This avoids one-tuple-per-page syndrome during + * give up and extend. This avoids one-tuple-per-page syndrome during * bootstrapping or in a recently-started system. */ if (targetBlock == InvalidBlockNumber) @@ -305,7 +305,7 @@ RelationGetBufferForTuple(Relation relation, Size len, * If the page-level all-visible flag is set, caller will need to * clear both that and the corresponding visibility map bit. However, * by the time we return, we'll have x-locked the buffer, and we don't - * want to do any I/O while in that state. So we check the bit here + * want to do any I/O while in that state. So we check the bit here * before taking the lock, and pin the page if it appears necessary. * Checking without the lock creates a risk of getting the wrong * answer, so we'll have to recheck after acquiring the lock. @@ -347,7 +347,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * We now have the target page (and the other buffer, if any) pinned - * and locked. However, since our initial PageIsAllVisible checks + * and locked. However, since our initial PageIsAllVisible checks * were performed before acquiring the lock, the results might now be * out of date, either for the selected victim buffer, or for the * other buffer passed by the caller. In that case, we'll need to @@ -390,7 +390,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Not enough space, so we must give up our page locks and pin (if - * any) and prepare to look elsewhere. We don't care which order we + * any) and prepare to look elsewhere. We don't care which order we * unlock the two buffers in, so this can be slightly simpler than the * code above. */ @@ -432,7 +432,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * XXX This does an lseek - rather expensive - but at the moment it is the - * only way to accurately determine how many blocks are in a relation. Is + * only way to accurately determine how many blocks are in a relation. Is * it worth keeping an accurate file length in shared memory someplace, * rather than relying on the kernel to do it for us? */ @@ -452,7 +452,7 @@ RelationGetBufferForTuple(Relation relation, Size len, /* * Release the file-extension lock; it's now OK for someone else to extend - * the relation some more. Note that we cannot release this lock before + * the relation some more. Note that we cannot release this lock before * we have buffer lock on the new page, or we risk a race condition * against vacuumlazy.c --- see comments therein. 
*/ diff --git a/src/backend/access/heap/pruneheap.c b/src/backend/access/heap/pruneheap.c index 3c69e1badac..06b54889230 100644 --- a/src/backend/access/heap/pruneheap.c +++ b/src/backend/access/heap/pruneheap.c @@ -117,7 +117,7 @@ heap_page_prune_opt(Relation relation, Buffer buffer) * Checking free space here is questionable since we aren't holding any * lock on the buffer; in the worst case we could get a bogus answer. It's * unlikely to be *seriously* wrong, though, since reading either pd_lower - * or pd_upper is probably atomic. Avoiding taking a lock seems more + * or pd_upper is probably atomic. Avoiding taking a lock seems more * important than sometimes getting a wrong answer in what is after all * just a heuristic estimate. */ @@ -332,8 +332,8 @@ heap_page_prune(Relation relation, Buffer buffer, TransactionId OldestXmin, * OldestXmin is the cutoff XID used to identify dead tuples. * * We don't actually change the page here, except perhaps for hint-bit updates - * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in - * prstate showing the changes to be made. Items to be redirected are added + * caused by HeapTupleSatisfiesVacuum. We just add entries to the arrays in + * prstate showing the changes to be made. Items to be redirected are added * to the redirected[] array (two entries per redirection); items to be set to * LP_DEAD state are added to nowdead[]; and items to be set to LP_UNUSED * state are added to nowunused[]. @@ -384,7 +384,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, * We need this primarily to handle aborted HOT updates, that is, * XMIN_INVALID heap-only tuples. Those might not be linked to by * any chain, since the parent tuple might be re-updated before - * any pruning occurs. So we have to be able to reap them + * any pruning occurs. So we have to be able to reap them * separately from chain-pruning. (Note that * HeapTupleHeaderIsHotUpdated will never return true for an * XMIN_INVALID tuple, so this code will work even when there were @@ -496,9 +496,10 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, break; case HEAPTUPLE_DELETE_IN_PROGRESS: + /* - * This tuple may soon become DEAD. Update the hint field - * so that the page is reconsidered for pruning in future. + * This tuple may soon become DEAD. Update the hint field so + * that the page is reconsidered for pruning in future. */ heap_prune_record_prunable(prstate, HeapTupleHeaderGetUpdateXid(htup)); @@ -574,7 +575,7 @@ heap_prune_chain(Relation relation, Buffer buffer, OffsetNumber rootoffnum, /* * If the root entry had been a normal tuple, we are deleting it, so - * count it in the result. But changing a redirect (even to DEAD + * count it in the result. But changing a redirect (even to DEAD * state) doesn't count. */ if (ItemIdIsNormal(rootlp)) @@ -663,7 +664,7 @@ heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum) * buffer, and is inside a critical section. * * This is split out because it is also used by heap_xlog_clean() - * to replay the WAL record when needed after a crash. Note that the + * to replay the WAL record when needed after a crash. Note that the * arguments are identical to those of log_heap_clean(). 
*/ void diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index ef8c12194c7..7b579114774 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -10,7 +10,7 @@ * * The caller is responsible for creating the new heap, all catalog * changes, supplying the tuples to be written to the new heap, and - * rebuilding indexes. The caller must hold AccessExclusiveLock on the + * rebuilding indexes. The caller must hold AccessExclusiveLock on the * target table, because we assume no one else is writing into it. * * To use the facility: @@ -43,7 +43,7 @@ * to substitute the correct ctid instead. * * For each ctid reference from A -> B, we might encounter either A first - * or B first. (Note that a tuple in the middle of a chain is both A and B + * or B first. (Note that a tuple in the middle of a chain is both A and B * of different pairs.) * * If we encounter A first, we'll store the tuple in the unresolved_tups @@ -58,11 +58,11 @@ * and can write A immediately with the correct ctid. * * Entries in the hash tables can be removed as soon as the later tuple - * is encountered. That helps to keep the memory usage down. At the end, + * is encountered. That helps to keep the memory usage down. At the end, * both tables are usually empty; we should have encountered both A and B * of each pair. However, it's possible for A to be RECENTLY_DEAD and B * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test - * for deadness using OldestXmin is not exact. In such a case we might + * for deadness using OldestXmin is not exact. In such a case we might * encounter B first, and skip it, and find A later. Then A would be added * to unresolved_tups, and stay there until end of the rewrite. Since * this case is very unusual, we don't worry about the memory usage. @@ -78,7 +78,7 @@ * of CLUSTERing on an unchanging key column, we'll see all the versions * of a given tuple together anyway, and so the peak memory usage is only * proportional to the number of RECENTLY_DEAD versions of a single row, not - * in the whole table. Note that if we do fail halfway through a CLUSTER, + * in the whole table. Note that if we do fail halfway through a CLUSTER, * the old table is still valid, so failure is not catastrophic. * * We can't use the normal heap_insert function to insert into the new @@ -143,13 +143,13 @@ typedef struct RewriteStateData BlockNumber rs_blockno; /* block where page will go */ bool rs_buffer_valid; /* T if any tuples in buffer */ bool rs_use_wal; /* must we WAL-log inserts? 
*/ - bool rs_logical_rewrite; /* do we need to do logical rewriting */ + bool rs_logical_rewrite; /* do we need to do logical rewriting */ TransactionId rs_oldest_xmin; /* oldest xmin used by caller to * determine tuple visibility */ TransactionId rs_freeze_xid;/* Xid that will be used as freeze cutoff * point */ - TransactionId rs_logical_xmin; /* Xid that will be used as cutoff - * point for logical rewrites */ + TransactionId rs_logical_xmin; /* Xid that will be used as cutoff + * point for logical rewrites */ MultiXactId rs_cutoff_multi;/* MultiXactId that will be used as cutoff * point for multixacts */ MemoryContext rs_cxt; /* for hash tables and entries and tuples in @@ -158,7 +158,7 @@ typedef struct RewriteStateData HTAB *rs_unresolved_tups; /* unmatched A tuples */ HTAB *rs_old_new_tid_map; /* unmatched B tuples */ HTAB *rs_logical_mappings; /* logical remapping files */ - uint32 rs_num_rewrite_mappings; /* # in memory mappings */ + uint32 rs_num_rewrite_mappings; /* # in memory mappings */ } RewriteStateData; /* @@ -199,12 +199,12 @@ typedef OldToNewMappingData *OldToNewMapping; */ typedef struct RewriteMappingFile { - TransactionId xid; /* xid that might need to see the row */ - int vfd; /* fd of mappings file */ - off_t off; /* how far have we written yet */ - uint32 num_mappings; /* number of in-memory mappings */ - dlist_head mappings; /* list of in-memory mappings */ - char path[MAXPGPATH]; /* path, for error messages */ + TransactionId xid; /* xid that might need to see the row */ + int vfd; /* fd of mappings file */ + off_t off; /* how far have we written yet */ + uint32 num_mappings; /* number of in-memory mappings */ + dlist_head mappings; /* list of in-memory mappings */ + char path[MAXPGPATH]; /* path, for error messages */ } RewriteMappingFile; /* @@ -213,8 +213,8 @@ typedef struct RewriteMappingFile */ typedef struct RewriteMappingDataEntry { - LogicalRewriteMappingData map; /* map between old and new location of - * the tuple */ + LogicalRewriteMappingData map; /* map between old and new location of + * the tuple */ dlist_node node; } RewriteMappingDataEntry; @@ -346,7 +346,7 @@ end_heap_rewrite(RewriteState state) } /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. * * It's obvious that we must do this when not WAL-logging. It's less @@ -617,7 +617,7 @@ rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple) } /* - * Insert a tuple to the new relation. This has to track heap_insert + * Insert a tuple to the new relation. This has to track heap_insert * and its subsidiary functions! * * t_self of the tuple is set to the new TID of the tuple. 
If t_ctid of the @@ -866,13 +866,13 @@ logical_heap_rewrite_flush_mappings(RewriteState state) hash_seq_init(&seq_status, state->rs_logical_mappings); while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL) { - XLogRecData rdata[2]; - char *waldata; - char *waldata_start; + XLogRecData rdata[2]; + char *waldata; + char *waldata_start; xl_heap_rewrite_mapping xlrec; - Oid dboid; - uint32 len; - int written; + Oid dboid; + uint32 len; + int written; /* this file hasn't got any new mappings */ if (src->num_mappings == 0) @@ -962,14 +962,14 @@ logical_end_heap_rewrite(RewriteState state) return; /* writeout remaining in-memory entries */ - if (state->rs_num_rewrite_mappings > 0 ) + if (state->rs_num_rewrite_mappings > 0) logical_heap_rewrite_flush_mappings(state); /* Iterate over all mappings we have written and fsync the files. */ hash_seq_init(&seq_status, state->rs_logical_mappings); while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL) { - if(FileSync(src->vfd) != 0) + if (FileSync(src->vfd) != 0) ereport(ERROR, (errcode_for_file_access(), errmsg("could not fsync file \"%s\": %m", src->path))); @@ -985,10 +985,10 @@ static void logical_rewrite_log_mapping(RewriteState state, TransactionId xid, LogicalRewriteMappingData *map) { - RewriteMappingFile *src; - RewriteMappingDataEntry *pmap; - Oid relid; - bool found; + RewriteMappingFile *src; + RewriteMappingDataEntry *pmap; + Oid relid; + bool found; relid = RelationGetRelid(state->rs_old_rel); @@ -1027,7 +1027,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid, if (src->vfd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create file \"%s\": %m", path))); + errmsg("could not create file \"%s\": %m", path))); } pmap = MemoryContextAlloc(state->rs_cxt, @@ -1041,7 +1041,7 @@ logical_rewrite_log_mapping(RewriteState state, TransactionId xid, * Write out buffer every time we've too many in-memory entries across all * mapping files. */ - if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */) + if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ ) logical_heap_rewrite_flush_mappings(state); } @@ -1054,11 +1054,11 @@ logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple) { ItemPointerData new_tid = new_tuple->t_self; - TransactionId cutoff = state->rs_logical_xmin; - TransactionId xmin; - TransactionId xmax; - bool do_log_xmin = false; - bool do_log_xmax = false; + TransactionId cutoff = state->rs_logical_xmin; + TransactionId xmin; + TransactionId xmax; + bool do_log_xmin = false; + bool do_log_xmax = false; LogicalRewriteMappingData map; /* no logical rewrite in progress, we don't need to log anything */ @@ -1147,7 +1147,8 @@ heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r) if (fd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create file \"%s\": %m", path))); + errmsg("could not create file \"%s\": %m", path))); + /* * Truncate all data that's not guaranteed to have been safely fsynced (by * previous record or by the last checkpoint). @@ -1174,6 +1175,7 @@ heap_xlog_logical_rewrite(XLogRecPtr lsn, XLogRecord *r) ereport(ERROR, (errcode_for_file_access(), errmsg("could not write to file \"%s\": %m", path))); + /* * Now fsync all previously written data. 
We could improve things and only * do this for the last write to a file, but the required bookkeeping @@ -1222,13 +1224,14 @@ CheckPointLogicalRewriteHeap(void) mappings_dir = AllocateDir("pg_llog/mappings"); while ((mapping_de = ReadDir(mappings_dir, "pg_llog/mappings")) != NULL) { - struct stat statbuf; + struct stat statbuf; Oid dboid; Oid relid; XLogRecPtr lsn; TransactionId rewrite_xid; TransactionId create_xid; - uint32 hi, lo; + uint32 hi, + lo; if (strcmp(mapping_de->d_name, ".") == 0 || strcmp(mapping_de->d_name, "..") == 0) @@ -1244,7 +1247,7 @@ CheckPointLogicalRewriteHeap(void) if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT, &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6) - elog(ERROR,"could not parse filename \"%s\"", mapping_de->d_name); + elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name); lsn = ((uint64) hi) << 32 | lo; @@ -1258,7 +1261,7 @@ CheckPointLogicalRewriteHeap(void) } else { - int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); + int fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); /* * The file cannot vanish due to concurrency since this function @@ -1269,6 +1272,7 @@ CheckPointLogicalRewriteHeap(void) ereport(ERROR, (errcode_for_file_access(), errmsg("could not open file \"%s\": %m", path))); + /* * We could try to avoid fsyncing files that either haven't * changed or have only been created since the checkpoint's start, diff --git a/src/backend/access/heap/syncscan.c b/src/backend/access/heap/syncscan.c index edd0395d8e9..7ea1ead543f 100644 --- a/src/backend/access/heap/syncscan.c +++ b/src/backend/access/heap/syncscan.c @@ -4,7 +4,7 @@ * heap scan synchronization support * * When multiple backends run a sequential scan on the same table, we try - * to keep them synchronized to reduce the overall I/O needed. The goal is + * to keep them synchronized to reduce the overall I/O needed. The goal is * to read each page into shared buffer cache only once, and let all backends * that take part in the shared scan process the page before it falls out of * the cache. @@ -26,7 +26,7 @@ * don't want such queries to slow down others. * * There can realistically only be a few large sequential scans on different - * tables in progress at any time. Therefore we just keep the scan positions + * tables in progress at any time. Therefore we just keep the scan positions * in a small LRU list which we scan every time we need to look up or update a * scan position. The whole mechanism is only applied for tables exceeding * a threshold size (but that is not the concern of this module). @@ -243,7 +243,7 @@ ss_search(RelFileNode relfilenode, BlockNumber location, bool set) * relation, or 0 if no valid location is found. * * We expect the caller has just done RelationGetNumberOfBlocks(), and - * so that number is passed in rather than computing it again. The result + * so that number is passed in rather than computing it again. The result * is guaranteed less than relnblocks (assuming that's > 0). 
*/ BlockNumber diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index dde74d47978..4adfe8217bd 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -53,11 +53,11 @@ static struct varlena *toast_fetch_datum(struct varlena * attr); static struct varlena *toast_fetch_datum_slice(struct varlena * attr, int32 sliceoffset, int32 length); static int toast_open_indexes(Relation toastrel, - LOCKMODE lock, - Relation **toastidxs, - int *num_indexes); + LOCKMODE lock, + Relation **toastidxs, + int *num_indexes); static void toast_close_indexes(Relation *toastidxs, int num_indexes, - LOCKMODE lock); + LOCKMODE lock); /* ---------- @@ -91,8 +91,9 @@ heap_tuple_fetch_attr(struct varlena * attr) * to persist a Datum for unusually long time, like in a HOLD cursor. */ struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, attr); - attr = (struct varlena *)redirect.pointer; + attr = (struct varlena *) redirect.pointer; /* nested indirect Datums aren't allowed */ Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr)); @@ -147,8 +148,9 @@ heap_tuple_untoast_attr(struct varlena * attr) else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, attr); - attr = (struct varlena *)redirect.pointer; + attr = (struct varlena *) redirect.pointer; /* nested indirect Datums aren't allowed */ Assert(!VARATT_IS_EXTERNAL_INDIRECT(attr)); @@ -217,6 +219,7 @@ heap_tuple_untoast_attr_slice(struct varlena * attr, else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect redirect; + VARATT_EXTERNAL_GET_POINTER(redirect, attr); /* nested indirect Datums aren't allowed */ @@ -299,6 +302,7 @@ toast_raw_datum_size(Datum value) else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr); /* nested indirect Datums aren't allowed */ @@ -354,6 +358,7 @@ toast_datum_size(Datum value) else if (VARATT_IS_EXTERNAL_INDIRECT(attr)) { struct varatt_indirect toast_pointer; + VARATT_EXTERNAL_GET_POINTER(toast_pointer, attr); /* nested indirect Datums aren't allowed */ @@ -597,7 +602,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, * We took care of UPDATE above, so any external value we find * still in the tuple must be someone else's we cannot reuse. * Fetch it back (without decompression, unless we are forcing - * PLAIN storage). If necessary, we'll push it out as a new + * PLAIN storage). If necessary, we'll push it out as a new * external value below. */ if (VARATT_IS_EXTERNAL(new_value)) @@ -740,7 +745,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, /* * Second we look for attributes of attstorage 'x' or 'e' that are still - * inline. But skip this if there's no toast table to push them to. + * inline. But skip this if there's no toast table to push them to. */ while (heap_compute_data_size(tupleDesc, toast_values, toast_isnull) > maxDataLen && @@ -850,7 +855,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, } /* - * Finally we store attributes of type 'm' externally. At this point we + * Finally we store attributes of type 'm' externally. At this point we * increase the target tuple size, so that 'm' attributes aren't stored * externally unless really necessary. */ @@ -1438,7 +1443,7 @@ toast_save_datum(Relation rel, Datum value, * those versions could easily reference the same toast value. 
* When we copy the second or later version of such a row, * reusing the OID will mean we select an OID that's already - * in the new toast table. Check for that, and if so, just + * in the new toast table. Check for that, and if so, just * fall through without writing the data again. * * While annoying and ugly-looking, this is a good thing @@ -1467,7 +1472,7 @@ toast_save_datum(Relation rel, Datum value, { toast_pointer.va_valueid = GetNewOidWithIndex(toastrel, - RelationGetRelid(toastidxs[validIndex]), + RelationGetRelid(toastidxs[validIndex]), (AttrNumber) 1); } while (toastid_valueid_exists(rel->rd_toastoid, toast_pointer.va_valueid)); @@ -1488,7 +1493,7 @@ toast_save_datum(Relation rel, Datum value, */ while (data_todo > 0) { - int i; + int i; /* * Calculate the size of this chunk @@ -1506,7 +1511,7 @@ toast_save_datum(Relation rel, Datum value, heap_insert(toastrel, toasttup, mycid, options, NULL); /* - * Create the index entry. We cheat a little here by not using + * Create the index entry. We cheat a little here by not using * FormIndexDatum: this relies on the knowledge that the index columns * are the same as the initial columns of the table for all the * indexes. @@ -1656,8 +1661,8 @@ toastrel_valueid_exists(Relation toastrel, Oid valueid) * Is there any such chunk? */ toastscan = systable_beginscan(toastrel, - RelationGetRelid(toastidxs[validIndex]), - true, SnapshotToast, 1, &toastkey); + RelationGetRelid(toastidxs[validIndex]), + true, SnapshotToast, 1, &toastkey); if (systable_getnext(toastscan) != NULL) result = true; @@ -2126,7 +2131,8 @@ toast_open_indexes(Relation toastrel, /* Fetch the first valid index in list */ for (i = 0; i < *num_indexes; i++) { - Relation toastidx = (*toastidxs)[i]; + Relation toastidx = (*toastidxs)[i]; + if (toastidx->rd_index->indisvalid) { res = i; @@ -2136,14 +2142,14 @@ toast_open_indexes(Relation toastrel, } /* - * Free index list, not necessary anymore as relations are opened - * and a valid index has been found. + * Free index list, not necessary anymore as relations are opened and a + * valid index has been found. */ list_free(indexlist); /* - * The toast relation should have one valid index, so something is - * going wrong if there is nothing. + * The toast relation should have one valid index, so something is going + * wrong if there is nothing. */ if (!found) elog(ERROR, "no valid index found for toast relation with Oid %d", @@ -2161,7 +2167,7 @@ toast_open_indexes(Relation toastrel, static void toast_close_indexes(Relation *toastidxs, int num_indexes, LOCKMODE lock) { - int i; + int i; /* Close relations and clean up things */ for (i = 0; i < num_indexes; i++) diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c index 899ffacf1e9..a0c0c7f2a6b 100644 --- a/src/backend/access/heap/visibilitymap.c +++ b/src/backend/access/heap/visibilitymap.c @@ -27,7 +27,7 @@ * the sense that we make sure that whenever a bit is set, we know the * condition is true, but if a bit is not set, it might or might not be true. * - * Clearing a visibility map bit is not separately WAL-logged. The callers + * Clearing a visibility map bit is not separately WAL-logged. The callers * must make sure that whenever a bit is cleared, the bit is cleared on WAL * replay of the updating operation as well. * @@ -36,9 +36,9 @@ * it may still be the case that every tuple on the page is visible to all * transactions; we just don't know that for certain. 
The difficulty is that * there are two bits which are typically set together: the PD_ALL_VISIBLE bit - * on the page itself, and the visibility map bit. If a crash occurs after the + * on the page itself, and the visibility map bit. If a crash occurs after the * visibility map page makes it to disk and before the updated heap page makes - * it to disk, redo must set the bit on the heap page. Otherwise, the next + * it to disk, redo must set the bit on the heap page. Otherwise, the next * insert, update, or delete on the heap page will fail to realize that the * visibility map bit must be cleared, possibly causing index-only scans to * return wrong answers. @@ -59,10 +59,10 @@ * the buffer lock over any I/O that may be required to read in the visibility * map page. To avoid this, we examine the heap page before locking it; * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map - * bit. Then, we lock the buffer. But this creates a race condition: there + * bit. Then, we lock the buffer. But this creates a race condition: there * is a possibility that in the time it takes to lock the buffer, the * PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the - * buffer, pin the visibility map page, and relock the buffer. This shouldn't + * buffer, pin the visibility map page, and relock the buffer. This shouldn't * happen often, because only VACUUM currently sets visibility map bits, * and the race will only occur if VACUUM processes a given page at almost * exactly the same time that someone tries to further modify it. @@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf) * visibilitymap_set - set a bit on a previously pinned page * * recptr is the LSN of the XLOG record we're replaying, if we're in recovery, - * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the + * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the * one provided; in normal running, we generate a new XLOG record and set the - * page LSN to that value. cutoff_xid is the largest xmin on the page being + * page LSN to that value. cutoff_xid is the largest xmin on the page being * marked all-visible; it is needed for Hot Standby, and can be * InvalidTransactionId if the page contains no tuples. * @@ -320,10 +320,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, * releasing *buf after it's done testing and setting bits. * * NOTE: This function is typically called without a lock on the heap page, - * so somebody else could change the bit just after we look at it. In fact, + * so somebody else could change the bit just after we look at it. In fact, * since we don't lock the visibility map page either, it's even possible that * someone else could have changed the bit just before we look at it, but yet - * we might see the old value. It is the caller's responsibility to deal with + * we might see the old value. It is the caller's responsibility to deal with * all concurrency issues! */ bool @@ -526,7 +526,7 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend) /* * We might not have opened the relation at the smgr level yet, or we - * might have been forced to close it by a sinval message. The code below + * might have been forced to close it by a sinval message. The code below * won't necessarily notice relation extension immediately when extend = * false, so we rely on sinval messages to ensure that our ideas about the * size of the map aren't too far out of date. 
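(Aside on the pin-then-lock protocol described in the visibilitymap.c comments above: an illustrative sketch only, assuming rel, targetBlock, and an already-pinned heap buffer are in scope; the real dance lives in RelationGetBufferForTuple and GetVisibilityMapPins in hio.c.)

	Buffer		vmbuffer = InvalidBuffer;

	/* Pin the map page before locking the heap page, while I/O is still OK */
	if (PageIsAllVisible(BufferGetPage(buffer)))
		visibilitymap_pin(rel, targetBlock, &vmbuffer);

	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Recheck under the lock: PD_ALL_VISIBLE may have been set after our
	 * unlocked peek.  If our pin is for the wrong (or no) map page, drop
	 * the lock, pin the right page (which may do I/O), and retry.
	 */
	while (PageIsAllVisible(BufferGetPage(buffer)) &&
		   !visibilitymap_pin_ok(targetBlock, vmbuffer))
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		visibilitymap_pin(rel, targetBlock, &vmbuffer);
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
	}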
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 50cb92a47b4..850008b3407 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -45,7 +45,7 @@ * * At the end of a scan, the AM's endscan routine undoes the locking, * but does *not* call IndexScanEnd --- the higher-level index_endscan - * routine does that. (We can't do it in the AM because index_endscan + * routine does that. (We can't do it in the AM because index_endscan * still needs to touch the IndexScanDesc after calling the AM.) * * Because of this, the AM does not have a choice whether to call @@ -79,7 +79,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys) scan->heapRelation = NULL; /* may be set later */ scan->indexRelation = indexRelation; - scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */ + scan->xs_snapshot = InvalidSnapshot; /* caller must initialize this */ scan->numberOfKeys = nkeys; scan->numberOfOrderBys = norderbys; @@ -188,7 +188,7 @@ BuildIndexValueDescription(Relation indexRelation, * at rd_opcintype not the index tupdesc. * * Note: this is a bit shaky for opclasses that have pseudotype - * input types such as ANYARRAY or RECORD. Currently, the + * input types such as ANYARRAY or RECORD. Currently, the * typoutput functions associated with the pseudotypes will work * okay, but we might have to try harder in future. */ @@ -269,7 +269,7 @@ systable_beginscan(Relation heapRelation, if (snapshot == NULL) { - Oid relid = RelationGetRelid(heapRelation); + Oid relid = RelationGetRelid(heapRelation); snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); sysscan->snapshot = snapshot; @@ -442,7 +442,7 @@ systable_endscan(SysScanDesc sysscan) * index order. Also, for largely historical reasons, the index to use * is opened and locked by the caller, not here. * - * Currently we do not support non-index-based scans here. (In principle + * Currently we do not support non-index-based scans here. (In principle * we could do a heapscan and sort, but the uses are in places that * probably don't need to still work with corrupted catalog indexes.) * For the moment, therefore, these functions are merely the thinnest of @@ -475,7 +475,7 @@ systable_beginscan_ordered(Relation heapRelation, if (snapshot == NULL) { - Oid relid = RelationGetRelid(heapRelation); + Oid relid = RelationGetRelid(heapRelation); snapshot = RegisterSnapshot(GetCatalogSnapshot(relid)); sysscan->snapshot = snapshot; diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c index a4b5f3d698e..53cf96fc103 100644 --- a/src/backend/access/index/indexam.c +++ b/src/backend/access/index/indexam.c @@ -84,7 +84,7 @@ * * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there * to check that we don't try to scan or do retail insertions into an index - * that is currently being rebuilt or pending rebuild. This helps to catch + * that is currently being rebuilt or pending rebuild. This helps to catch * things that don't work when reindexing system catalogs. The assertion * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS * when calling the index AM's ambuild routine, and there is no reason for @@ -149,7 +149,7 @@ static IndexScanDesc index_beginscan_internal(Relation indexRelation, * index_open - open an index relation by relation OID * * If lockmode is not "NoLock", the specified kind of lock is - * obtained on the index. (Generally, NoLock should only be + * obtained on the index. 
(Generally, NoLock should only be * used if the caller knows it has some appropriate lock on the * index already.) * @@ -414,7 +414,7 @@ index_markpos(IndexScanDesc scan) * returnable tuple in each HOT chain, and so restoring the prior state at the * granularity of the index AM is sufficient. Since the only current user * of mark/restore functionality is nodeMergejoin.c, this effectively means - * that merge-join plans only work for MVCC snapshots. This could be fixed + * that merge-join plans only work for MVCC snapshots. This could be fixed * if necessary, but for now it seems unimportant. * ---------------- */ @@ -553,7 +553,7 @@ index_fetch_heap(IndexScanDesc scan) /* * If we scanned a whole HOT chain and found only dead tuples, tell index * AM to kill its entry for that TID (this will take effect in the next - * amgettuple call, in index_getnext_tid). We do not do this when in + * amgettuple call, in index_getnext_tid). We do not do this when in * recovery because it may violate MVCC to do so. See comments in * RelationGetIndexScan(). */ @@ -590,7 +590,7 @@ index_getnext(IndexScanDesc scan, ScanDirection direction) { /* * We are resuming scan of a HOT chain after having returned an - * earlier member. Must still hold pin on current heap page. + * earlier member. Must still hold pin on current heap page. */ Assert(BufferIsValid(scan->xs_cbuf)); Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) == @@ -760,7 +760,7 @@ index_can_return(Relation indexRelation) * particular indexed attribute are those with both types equal to * the index opclass' opcintype (note that this is subtly different * from the indexed attribute's own type: it may be a binary-compatible - * type instead). Only the default functions are stored in relcache + * type instead). Only the default functions are stored in relcache * entries --- access methods can use the syscache to look up non-default * functions. * @@ -794,7 +794,7 @@ index_getprocid(Relation irel, * index_getprocinfo * * This routine allows index AMs to keep fmgr lookup info for - * support procs in the relcache. As above, only the "default" + * support procs in the relcache. As above, only the "default" * functions for any particular indexed attribute are cached. * * Note: the return value points into cached data that will be lost during diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c index 86ac7d3ec21..b1f9ae36850 100644 --- a/src/backend/access/nbtree/nbtcompare.c +++ b/src/backend/access/nbtree/nbtcompare.c @@ -25,7 +25,7 @@ * Although any negative int32 (except INT_MIN) is acceptable for reporting * "<", and any positive int32 is acceptable for reporting ">", routines * that work on 32-bit or wider datatypes can't just return "a - b". - * That could overflow and give the wrong answer. Also, one must not + * That could overflow and give the wrong answer. Also, one must not * return INT_MIN to report "<", since some callers will negate the result. * * NOTE: it is critical that the comparison function impose a total order diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 0d806af5055..d64cbd98223 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -90,7 +90,7 @@ static void _bt_vacuum_one_page(Relation rel, Buffer buffer, Relation heapRel); * By here, itup is filled in, including the TID. * * If checkUnique is UNIQUE_CHECK_NO or UNIQUE_CHECK_PARTIAL, this - * will allow duplicates. 
Otherwise (UNIQUE_CHECK_YES or + * will allow duplicates. Otherwise (UNIQUE_CHECK_YES or * UNIQUE_CHECK_EXISTING) it will throw error for a duplicate. * For UNIQUE_CHECK_EXISTING we merely run the duplicate check, and * don't actually insert. @@ -129,7 +129,7 @@ top: * If the page was split between the time that we surrendered our read * lock and acquired our write lock, then this page may no longer be the * right place for the key we want to insert. In this case, we need to - * move right in the tree. See Lehman and Yao for an excruciatingly + * move right in the tree. See Lehman and Yao for an excruciatingly * precise description. */ buf = _bt_moveright(rel, buf, natts, itup_scankey, false, @@ -211,7 +211,7 @@ top: * is the first tuple on the next page. * * Returns InvalidTransactionId if there is no conflict, else an xact ID - * we must wait for to see if it commits a conflicting tuple. If an actual + * we must wait for to see if it commits a conflicting tuple. If an actual * conflict is detected, no return --- just ereport(). * * However, if checkUnique == UNIQUE_CHECK_PARTIAL, we always return @@ -293,7 +293,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, /* * If we are doing a recheck, we expect to find the tuple we - * are rechecking. It's not a duplicate, but we have to keep + * are rechecking. It's not a duplicate, but we have to keep * scanning. */ if (checkUnique == UNIQUE_CHECK_EXISTING && @@ -482,7 +482,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * If the new key is equal to one or more existing keys, we can * legitimately place it anywhere in the series of equal keys --- in fact, * if the new key is equal to the page's "high key" we can place it on - * the next page. If it is equal to the high key, and there's not room + * the next page. If it is equal to the high key, and there's not room * to insert the new tuple on the current page without splitting, then * we can move right hoping to find more free space and avoid a split. * (We should not move right indefinitely, however, since that leads to @@ -494,7 +494,7 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel, * removing any LP_DEAD tuples. * * On entry, *buf and *offsetptr point to the first legal position - * where the new tuple could be inserted. The caller should hold an + * where the new tuple could be inserted. The caller should hold an * exclusive lock on *buf. *offsetptr can also be set to * InvalidOffsetNumber, in which case the function will search for the * right location within the page if needed. On exit, they point to the @@ -564,7 +564,7 @@ _bt_findinsertloc(Relation rel, * on every insert. We implement "get tired" as a random choice, * since stopping after scanning a fixed number of pages wouldn't work * well (we'd never reach the right-hand side of previously split - * pages). Currently the probability of moving right is set at 0.99, + * pages). Currently the probability of moving right is set at 0.99, * which may seem too high to change the behavior much, but it does an * excellent job of preventing O(N^2) behavior with many equal keys. *---------- @@ -574,7 +574,7 @@ _bt_findinsertloc(Relation rel, while (PageGetFreeSpace(page) < itemsz) { Buffer rbuf; - BlockNumber rblkno; + BlockNumber rblkno; /* * before considering moving right, see if we can obtain enough space @@ -620,10 +620,10 @@ _bt_findinsertloc(Relation rel, lpageop = (BTPageOpaque) PageGetSpecialPointer(page); /* - * If this page was incompletely split, finish the split now. 
- * We do this while holding a lock on the left sibling, which - * is not good because finishing the split could be a fairly - * lengthy operation. But this should happen very seldom. + * If this page was incompletely split, finish the split now. We + * do this while holding a lock on the left sibling, which is not + * good because finishing the split could be a fairly lengthy + * operation. But this should happen very seldom. */ if (P_INCOMPLETE_SPLIT(lpageop)) { @@ -681,7 +681,7 @@ _bt_findinsertloc(Relation rel, * + updates the metapage if a true root or fast root is split. * * On entry, we must have the correct buffer in which to do the - * insertion, and the buffer must be pinned and write-locked. On return, + * insertion, and the buffer must be pinned and write-locked. On return, * we will have dropped both the pin and the lock on the buffer. * * When inserting to a non-leaf page, 'cbuf' is the left-sibling of the @@ -978,7 +978,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, * origpage is the original page to be split. leftpage is a temporary * buffer that receives the left-sibling data, which will be copied back * into origpage on success. rightpage is the new page that receives the - * right-sibling data. If we fail before reaching the critical section, + * right-sibling data. If we fail before reaching the critical section, * origpage hasn't been modified and leftpage is only workspace. In * principle we shouldn't need to worry about rightpage either, because it * hasn't been linked into the btree page structure; but to avoid leaving @@ -1196,7 +1196,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, * page. If you're confused, imagine that page A splits to A B and * then again, yielding A C B, while vacuum is in progress. Tuples * originally in A could now be in either B or C, hence vacuum must - * examine both pages. But if D, our right sibling, has a different + * examine both pages. But if D, our right sibling, has a different * cycleid then it could not contain any tuples that were in A when * the vacuum started. */ @@ -1330,11 +1330,10 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, lastrdata++; /* - * Although we don't need to WAL-log anything on the left page, - * we still need XLogInsert to consider storing a full-page image - * of the left page, so make an empty entry referencing that - * buffer. This also ensures that the left page is always backup - * block 1. + * Although we don't need to WAL-log anything on the left page, we + * still need XLogInsert to consider storing a full-page image of + * the left page, so make an empty entry referencing that buffer. + * This also ensures that the left page is always backup block 1. */ lastrdata->data = NULL; lastrdata->len = 0; @@ -1448,7 +1447,7 @@ _bt_split(Relation rel, Buffer buf, Buffer cbuf, OffsetNumber firstright, * * We return the index of the first existing tuple that should go on the * righthand page, plus a boolean indicating whether the new tuple goes on - * the left or right page. The bool is necessary to disambiguate the case + * the left or right page. The bool is necessary to disambiguate the case * where firstright == newitemoff. */ static OffsetNumber @@ -1684,7 +1683,7 @@ _bt_checksplitloc(FindSplitData *state, * * On entry, buf and rbuf are the left and right split pages, which we * still hold write locks on per the L&Y algorithm. We release the - * write locks once we have write lock on the parent page. 
(Any sooner, + * write locks once we have write lock on the parent page. (Any sooner, * and it'd be possible for some other process to try to split or delete * one of these pages, and get confused because it cannot find the downlink.) * @@ -1705,7 +1704,7 @@ _bt_insert_parent(Relation rel, * Here we have to do something Lehman and Yao don't talk about: deal with * a root split and construction of a new root. If our stack is empty * then we have just split a node on what had been the root level when we - * descended the tree. If it was still the root then we perform a + * descended the tree. If it was still the root then we perform a * new-root construction. If it *wasn't* the root anymore, search to find * the next higher level that someone constructed meanwhile, and find the * right place to insert as for the normal case. @@ -1917,7 +1916,7 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access) /* * These loops will check every item on the page --- but in an * order that's attuned to the probability of where it actually - * is. Scan to the right first, then to the left. + * is. Scan to the right first, then to the left. */ for (offnum = start; offnum <= maxoff; diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index c0ebb95ba8a..d357b33bc05 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -12,7 +12,7 @@ * src/backend/access/nbtree/nbtpage.c * * NOTES - * Postgres btree pages look like ordinary relation pages. The opaque + * Postgres btree pages look like ordinary relation pages. The opaque * data at high addresses includes pointers to left and right siblings * and flag data describing page state. The first page in a btree, page * zero, is special -- it stores meta-information describing the tree. @@ -36,7 +36,7 @@ static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, static bool _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, Buffer *topparent, OffsetNumber *topoff, BlockNumber *target, BlockNumber *rightsib); -static void _bt_log_reuse_page(Relation rel, BlockNumber blkno, +static void _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedXid); /* @@ -62,7 +62,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) metaopaque->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential + * Set pd_lower just past the end of the metadata. This is not essential * but it makes the page look compressible to xlog.c. */ ((PageHeader) page)->pd_lower = @@ -80,7 +80,7 @@ _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level) * * The access type parameter (BT_READ or BT_WRITE) controls whether * a new root page will be created or not. If access = BT_READ, - * and no root page exists, we just return InvalidBuffer. For + * and no root page exists, we just return InvalidBuffer. For * BT_WRITE, we try to create the root page if it doesn't exist. * NOTE that the returned root page will have only a read lock set * on it even if access = BT_WRITE! @@ -197,7 +197,7 @@ _bt_getroot(Relation rel, int access) /* * Metadata initialized by someone else. In order to guarantee no * deadlocks, we have to release the metadata page and start all - * over again. (Is that really true? But it's hardly worth trying + * over again. (Is that really true? But it's hardly worth trying * to optimize this case.) 
*/ _bt_relbuf(rel, metabuf); @@ -254,7 +254,7 @@ _bt_getroot(Relation rel, int access) END_CRIT_SECTION(); /* - * swap root write lock for read lock. There is no danger of anyone + * swap root write lock for read lock. There is no danger of anyone * else accessing the new root page while it's unlocked, since no one * else knows where it is yet. */ @@ -322,7 +322,7 @@ _bt_getroot(Relation rel, int access) * By the time we acquire lock on the root page, it might have been split and * not be the true root anymore. This is okay for the present uses of this * routine; we only really need to be able to move up at least one tree level - * from whatever non-root page we were at. If we ever do need to lock the + * from whatever non-root page we were at. If we ever do need to lock the * one true root page, we could loop here, re-reading the metapage on each * failure. (Note that it wouldn't do to hold the lock on the metapage while * moving to the root --- that'd deadlock against any concurrent root split.) @@ -497,7 +497,7 @@ _bt_checkpage(Relation rel, Buffer buf) /* * ReadBuffer verifies that every newly-read page passes * PageHeaderIsValid, which means it either contains a reasonably sane - * page header or is all-zero. We have to defend against the all-zero + * page header or is all-zero. We have to defend against the all-zero * case, however. */ if (PageIsNew(page)) @@ -564,7 +564,7 @@ _bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedX /* * _bt_getbuf() -- Get a buffer by block number for read or write. * - * blkno == P_NEW means to get an unallocated index page. The page + * blkno == P_NEW means to get an unallocated index page. The page * will be initialized before returning it. * * When this routine returns, the appropriate lock is set on the @@ -595,7 +595,7 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) * First see if the FSM knows of any free pages. * * We can't trust the FSM's report unreservedly; we have to check that - * the page is still free. (For example, an already-free page could + * the page is still free. (For example, an already-free page could * have been re-used between the time the last VACUUM scanned it and * the time the VACUUM made its FSM updates.) * @@ -774,7 +774,7 @@ _bt_page_recyclable(Page page) /* * Delete item(s) from a btree page during VACUUM. * - * This must only be used for deleting leaf items. Deleting an item on a + * This must only be used for deleting leaf items. Deleting an item on a * non-leaf page has to be done as part of an atomic action that includes * deleting the page it points to. * @@ -842,7 +842,7 @@ _bt_delitems_vacuum(Relation rel, Buffer buf, /* * The target-offsets array is not in the buffer, but pretend that it - * is. When XLogInsert stores the whole buffer, the offsets array + * is. When XLogInsert stores the whole buffer, the offsets array * need not be stored too. */ if (nitems > 0) @@ -1049,11 +1049,12 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, lbuf = _bt_getbuf(rel, leftsib, BT_READ); lpage = BufferGetPage(lbuf); lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage); + /* * If the left sibling was concurrently split, so that its - * next-pointer doesn't point to the current page anymore, - * the split that created the current page must be completed. - * (We don't allow splitting an incompletely split page again + * next-pointer doesn't point to the current page anymore, the + * split that created the current page must be completed. 
(We + * don't allow splitting an incompletely split page again * until the previous split has been completed) */ if (lopaque->btpo_next == parent && @@ -1066,7 +1067,7 @@ _bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack, } return _bt_lock_branch_parent(rel, parent, stack->bts_parent, - topparent, topoff, target, rightsib); + topparent, topoff, target, rightsib); } else { @@ -1112,6 +1113,7 @@ _bt_pagedel(Relation rel, Buffer buf) bool rightsib_empty; Page page; BTPageOpaque opaque; + /* * "stack" is a search stack leading (approximately) to the target page. * It is initially NULL, but when iterating, we keep it to avoid @@ -1140,24 +1142,24 @@ _bt_pagedel(Relation rel, Buffer buf) * was never supposed to leave half-dead pages in the tree, it was * just a transient state, but it was nevertheless possible in * error scenarios. We don't know how to deal with them here. They - * are harmless as far as searches are considered, but inserts into - * the deleted keyspace could add out-of-order downlinks in the - * upper levels. Log a notice, hopefully the admin will notice and - * reindex. + * are harmless as far as searches are considered, but inserts + * into the deleted keyspace could add out-of-order downlinks in + * the upper levels. Log a notice, hopefully the admin will notice + * and reindex. */ if (P_ISHALFDEAD(opaque)) ereport(LOG, (errcode(ERRCODE_INDEX_CORRUPTED), - errmsg("index \"%s\" contains a half-dead internal page", - RelationGetRelationName(rel)), + errmsg("index \"%s\" contains a half-dead internal page", + RelationGetRelationName(rel)), errhint("This can be caused by an interrupt VACUUM in version 9.3 or older, before upgrade. Please REINDEX it."))); _bt_relbuf(rel, buf); return ndeleted; } /* - * We can never delete rightmost pages nor root pages. While at - * it, check that page is not already deleted and is empty. + * We can never delete rightmost pages nor root pages. While at it, + * check that page is not already deleted and is empty. * * To keep the algorithm simple, we also never delete an incompletely * split page (they should be rare enough that this doesn't make any @@ -1167,10 +1169,10 @@ _bt_pagedel(Relation rel, Buffer buf) * left half of an incomplete split, but ensuring that it's not the * right half is more complicated. For that, we have to check that * the left sibling doesn't have its INCOMPLETE_SPLIT flag set. On - * the first iteration, we temporarily release the lock on the - * current page, and check the left sibling and also construct a - * search stack to. On subsequent iterations, we know we stepped right - * from a page that passed these tests, so it's OK. + * the first iteration, we temporarily release the lock on the current + * page, and check the left sibling and also construct a search stack + * to. On subsequent iterations, we know we stepped right from a page + * that passed these tests, so it's OK. */ if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) || P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) || @@ -1184,9 +1186,9 @@ _bt_pagedel(Relation rel, Buffer buf) } /* - * First, remove downlink pointing to the page (or a parent of the page, - * if we are going to delete a taller branch), and mark the page as - * half-dead. + * First, remove downlink pointing to the page (or a parent of the + * page, if we are going to delete a taller branch), and mark the page + * as half-dead. 
*/ if (!P_ISHALFDEAD(opaque)) { @@ -1205,7 +1207,7 @@ _bt_pagedel(Relation rel, Buffer buf) ItemId itemid; IndexTuple targetkey; Buffer lbuf; - BlockNumber leftsib; + BlockNumber leftsib; itemid = PageGetItemId(page, P_HIKEY); targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid)); @@ -1219,9 +1221,9 @@ _bt_pagedel(Relation rel, Buffer buf) LockBuffer(buf, BUFFER_LOCK_UNLOCK); /* - * Fetch the left sibling, to check that it's not marked - * with INCOMPLETE_SPLIT flag. That would mean that the - * page to-be-deleted doesn't have a downlink, and the page + * Fetch the left sibling, to check that it's not marked with + * INCOMPLETE_SPLIT flag. That would mean that the page + * to-be-deleted doesn't have a downlink, and the page * deletion algorithm isn't prepared to handle that. */ if (!P_LEFTMOST(opaque)) @@ -1267,7 +1269,7 @@ _bt_pagedel(Relation rel, Buffer buf) /* * Then unlink it from its siblings. Each call to - *_bt_unlink_halfdead_page unlinks the topmost page from the branch, + * _bt_unlink_halfdead_page unlinks the topmost page from the branch, * making it shallower. Iterate until the leaf page is gone. */ rightsib_empty = false; @@ -1291,8 +1293,8 @@ _bt_pagedel(Relation rel, Buffer buf) * is that it was the rightmost child of the parent. Now that we * removed the downlink for this page, the right sibling might now be * the only child of the parent, and could be removed. It would be - * picked up by the next vacuum anyway, but might as well try to remove - * it now, so loop back to process the right sibling. + * picked up by the next vacuum anyway, but might as well try to + * remove it now, so loop back to process the right sibling. */ if (!rightsib_empty) break; @@ -1310,9 +1312,9 @@ _bt_pagedel(Relation rel, Buffer buf) static bool _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) { - BlockNumber leafblkno; + BlockNumber leafblkno; BlockNumber leafrightsib; - BlockNumber target; + BlockNumber target; BlockNumber rightsib; ItemId itemid; Page page; @@ -1351,7 +1353,7 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack) /* * Check that the parent-page index items we're about to delete/overwrite - * contain what we expect. This can fail if the index has become corrupt + * contain what we expect. This can fail if the index has become corrupt * for some reason. We want to throw any error before entering the * critical section --- otherwise it'd be a PANIC. * @@ -1490,9 +1492,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) BlockNumber leafblkno = BufferGetBlockNumber(leafbuf); BlockNumber leafleftsib; BlockNumber leafrightsib; - BlockNumber target; - BlockNumber leftsib; - BlockNumber rightsib; + BlockNumber target; + BlockNumber leftsib; + BlockNumber rightsib; Buffer lbuf = InvalidBuffer; Buffer buf; Buffer rbuf; @@ -1506,7 +1508,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) int targetlevel; ItemPointer leafhikey; BlockNumber nextchild; - BlockNumber topblkno; + BlockNumber topblkno; page = BufferGetPage(leafbuf); opaque = (BTPageOpaque) PageGetSpecialPointer(page); @@ -1596,7 +1598,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) lbuf = InvalidBuffer; /* - * Next write-lock the target page itself. It should be okay to take just + * Next write-lock the target page itself. It should be okay to take just * a write lock not a superexclusive lock, since no scans would stop on an * empty page. 
*/ @@ -1605,9 +1607,9 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * Check page is still empty etc, else abandon deletion. This is just - * for paranoia's sake; a half-dead page cannot resurrect because there - * can be only one vacuum process running at a time. + * Check page is still empty etc, else abandon deletion. This is just for + * paranoia's sake; a half-dead page cannot resurrect because there can be + * only one vacuum process running at a time. */ if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque)) { @@ -1733,7 +1735,7 @@ _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty) * we're in VACUUM and would not otherwise have an XID. Having already * updated links to the target, ReadNewTransactionId() suffices as an * upper bound. Any scan having retained a now-stale link is advertising - * in its PGXACT an xmin less than or equal to the value we read here. It + * in its PGXACT an xmin less than or equal to the value we read here. It * will continue to do so, holding back RecentGlobalXmin, for the duration * of that scan. */ diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 542ed439843..36dc6c278ea 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -208,7 +208,7 @@ btbuildempty(PG_FUNCTION_ARGS) metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, P_NONE, 0); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ PageSetChecksumInplace(metapage, BTREE_METAPAGE); smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE, (char *) metapage, true); @@ -427,7 +427,7 @@ btbeginscan(PG_FUNCTION_ARGS) /* * We don't know yet whether the scan will be index-only, so we do not - * allocate the tuple workspace arrays until btrescan. However, we set up + * allocate the tuple workspace arrays until btrescan. However, we set up * scan->xs_itupdesc whether we'll need it or not, since that's so cheap. */ so->currTuples = so->markTuples = NULL; @@ -472,7 +472,7 @@ btrescan(PG_FUNCTION_ARGS) /* * Allocate tuple workspace arrays, if needed for an index-only scan and - * not already done in a previous rescan call. To save on palloc + * not already done in a previous rescan call. To save on palloc * overhead, both workspaces are allocated as one palloc block; only this * function and btendscan know that. * @@ -952,7 +952,7 @@ restart: vstate->lastBlockLocked = blkno; /* - * Check whether we need to recurse back to earlier pages. What we + * Check whether we need to recurse back to earlier pages. What we * are concerned about is a page split that happened since we started * the vacuum scan. If the split moved some tuples to a lower page * then we might have missed 'em. If so, set up for tail recursion. diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 0bf12f0e107..203b9691baa 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -50,7 +50,7 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir); * * NOTE that the returned buffer is read-locked regardless of the access * parameter. However, access = BT_WRITE will allow an empty root page - * to be created and returned. When access = BT_READ, an empty index + * to be created and returned. When access = BT_READ, an empty index * will result in *bufP being set to InvalidBuffer. 
Also, in BT_WRITE mode, * any incomplete splits encountered during the search will be finished. */ @@ -271,7 +271,7 @@ _bt_moveright(Relation rel, * (or leaf keys > given scankey when nextkey is true). * * This procedure is not responsible for walking right, it just examines - * the given page. _bt_binsrch() has no lock or refcount side effects + * the given page. _bt_binsrch() has no lock or refcount side effects * on the buffer. */ OffsetNumber @@ -403,7 +403,7 @@ _bt_compare(Relation rel, /* * The scan key is set up with the attribute number associated with each * term in the key. It is important that, if the index is multi-key, the - * scan contain the first k key attributes, and that they be in order. If + * scan contain the first k key attributes, and that they be in order. If * you think about how multi-key ordering works, you'll understand why * this is. * @@ -442,7 +442,7 @@ _bt_compare(Relation rel, /* * The sk_func needs to be passed the index value as left arg and * the sk_argument as right arg (they might be of different - * types). Since it is convenient for callers to think of + * types). Since it is convenient for callers to think of * _bt_compare as comparing the scankey to the index item, we have * to flip the sign of the comparison result. (Unless it's a DESC * column, in which case we *don't* flip the sign.) @@ -471,7 +471,7 @@ _bt_compare(Relation rel, * _bt_first() -- Find the first item in a scan. * * We need to be clever about the direction of scan, the search - * conditions, and the tree ordering. We find the first item (or, + * conditions, and the tree ordering. We find the first item (or, * if backwards scan, the last item) in the tree that satisfies the * qualifications in the scan key. On success exit, the page containing * the current index tuple is pinned but not locked, and data about @@ -527,7 +527,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * We want to identify the keys that can be used as starting boundaries; * these are =, >, or >= keys for a forward scan or =, <, <= keys for * a backwards scan. We can use keys for multiple attributes so long as - * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept + * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept * a > or < boundary or find an attribute with no boundary (which can be * thought of as the same as "> -infinity"), we can't use keys for any * attributes to its right, because it would break our simplistic notion @@ -742,7 +742,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * even if the row comparison is of ">" or "<" type, because the * condition applied to all but the last row member is effectively * ">=" or "<=", and so the extra keys don't break the positioning - * scheme. But, by the same token, if we aren't able to use all + * scheme. But, by the same token, if we aren't able to use all * the row members, then the part of the row comparison that we * did use has to be treated as just a ">=" or "<=" condition, and * so we'd better adjust strat_total accordingly. @@ -861,7 +861,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* * Find first item >= scankey, then back up one to arrive at last - * item < scankey. (Note: this positioning strategy is only used + * item < scankey. (Note: this positioning strategy is only used * for a backward scan, so that is always the correct starting * position.) */ @@ -910,7 +910,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) case BTGreaterEqualStrategyNumber: /* - * Find first item >= scankey. 
(This is only used for forward + * Find first item >= scankey. (This is only used for forward * scans.) */ nextkey = false; @@ -988,7 +988,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * * The actually desired starting point is either this item or the prior * one, or in the end-of-page case it's the first item on the next page or - * the last item on this page. Adjust the starting offset if needed. (If + * the last item on this page. Adjust the starting offset if needed. (If * this results in an offset before the first item or after the last one, * _bt_readpage will report no items found, and then we'll step to the * next page as needed.) @@ -1304,7 +1304,7 @@ _bt_steppage(IndexScanDesc scan, ScanDirection dir) * than the walk-right case because of the possibility that the page * to our left splits while we are in flight to it, plus the * possibility that the page we were on gets deleted after we leave - * it. See nbtree/README for details. + * it. See nbtree/README for details. */ for (;;) { @@ -1399,7 +1399,7 @@ _bt_walk_left(Relation rel, Buffer buf) * anymore, not that its left sibling got split more than four times. * * Note that it is correct to test P_ISDELETED not P_IGNORE here, - * because half-dead pages are still in the sibling chain. Caller + * because half-dead pages are still in the sibling chain. Caller * must reject half-dead pages if wanted. */ tries = 0; @@ -1425,7 +1425,7 @@ _bt_walk_left(Relation rel, Buffer buf) if (P_ISDELETED(opaque)) { /* - * It was deleted. Move right to first nondeleted page (there + * It was deleted. Move right to first nondeleted page (there * must be one); that is the page that has acquired the deleted * one's keyspace, so stepping left from it will take us where we * want to be. @@ -1469,7 +1469,7 @@ _bt_walk_left(Relation rel, Buffer buf) * _bt_get_endpoint() -- Find the first or last page on a given tree level * * If the index is empty, we will return InvalidBuffer; any other failure - * condition causes ereport(). We will not return a dead page. + * condition causes ereport(). We will not return a dead page. * * The returned buffer is pinned and read-locked. */ diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index 9ddc2754997..1281a120c56 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -7,7 +7,7 @@ * * We use tuplesort.c to sort the given index tuples into order. * Then we scan the index tuples in order and build the btree pages - * for each level. We load source tuples into leaf-level pages. + * for each level. We load source tuples into leaf-level pages. * Whenever we fill a page at one level, we add a link to it to its * parent level (starting a new parent level if necessary). When * done, we write out each final page on each level, adding it to @@ -42,11 +42,11 @@ * * Since the index will never be used unless it is completely built, * from a crash-recovery point of view there is no need to WAL-log the - * steps of the build. After completing the index build, we can just sync + * steps of the build. After completing the index build, we can just sync * the whole file to disk using smgrimmedsync() before exiting this module. * This can be seen to be sufficient for crash recovery by considering that * it's effectively equivalent to what would happen if a CHECKPOINT occurred - * just after the index build. However, it is clearly not sufficient if the + * just after the index build. 
However, it is clearly not sufficient if the * DBA is using the WAL log for PITR or replication purposes, since another * machine would not be able to reconstruct the index from WAL. Therefore, * we log the completed index pages to WAL if and only if WAL archiving is @@ -89,7 +89,7 @@ struct BTSpool }; /* - * Status record for a btree page being built. We have one of these + * Status record for a btree page being built. We have one of these * for each active tree level. * * The reason we need to store a copy of the minimum key is that we'll @@ -160,7 +160,7 @@ _bt_spoolinit(Relation heap, Relation index, bool isunique, bool isdead) * We size the sort area as maintenance_work_mem rather than work_mem to * speed index creation. This should be OK since a single backend can't * run multiple index creations in parallel. Note that creation of a - * unique index actually requires two BTSpool objects. We expect that the + * unique index actually requires two BTSpool objects. We expect that the * second one (for dead tuples) won't get very full, so we give it only * work_mem. */ @@ -298,7 +298,7 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) PageSetChecksumInplace(page, blkno); /* - * Now write the page. There's no need for smgr to schedule an fsync for + * Now write the page. There's no need for smgr to schedule an fsync for * this write; we'll do it ourselves before ending the build. */ if (blkno == wstate->btws_pages_written) @@ -423,14 +423,14 @@ _bt_sortaddtup(Page page, * A leaf page being built looks like: * * +----------------+---------------------------------+ - * | PageHeaderData | linp0 linp1 linp2 ... | + * | PageHeaderData | linp0 linp1 linp2 ... | * +-----------+----+---------------------------------+ * | ... linpN | | * +-----------+--------------------------------------+ * | ^ last | * | | * +-------------+------------------------------------+ - * | | itemN ... | + * | | itemN ... | * +-------------+------------------+-----------------+ * | ... item3 item2 item1 | "special space" | * +--------------------------------+-----------------+ @@ -492,9 +492,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) RelationGetRelationName(wstate->index)))); /* - * Check to see if page is "full". It's definitely full if the item won't + * Check to see if page is "full". It's definitely full if the item won't * fit. Otherwise, compare to the target freespace derived from the - * fillfactor. However, we must put at least two items on each page, so + * fillfactor. However, we must put at least two items on each page, so * disregard fillfactor if we don't have that many. */ if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY)) @@ -567,7 +567,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup) } /* - * Write out the old page. We never need to touch it again, so we can + * Write out the old page. We never need to touch it again, so we can * free the opage workspace too. */ _bt_blwritepage(wstate, opage, oblkno); @@ -804,7 +804,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index is WAL-logged, we must fsync it down to disk before it's - * safe to commit the transaction. (For a non-WAL-logged index we don't + * safe to commit the transaction. (For a non-WAL-logged index we don't * care since the index will be uninteresting after a crash anyway.) * * It's obvious that we must do this when not WAL-logging the build. 
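The fsync-before-commit rule just stated reduces to a few lines at the end of _bt_load. A sketch of that tail, assuming the 9.4-era helpers RelationNeedsWAL, RelationOpenSmgr and smgrimmedsync behave as their names suggest:

#include "postgres.h"
#include "storage/smgr.h"
#include "utils/rel.h"

/*
 * End-of-build durability: a WAL-logged build wrote its pages through
 * smgr, bypassing shared_buffers, so the main fork must be fsync'd before
 * the transaction commits.  A non-WAL-logged index is uninteresting after
 * a crash, so nothing extra is needed in that case.
 */
static void
finish_index_build_sketch(Relation index)
{
    if (RelationNeedsWAL(index))
    {
        RelationOpenSmgr(index);
        smgrimmedsync(index->rd_smgr, MAIN_FORKNUM);
    }
}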
It's diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 922e6a9cd4e..f8f8e69be7f 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -107,7 +107,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup) * comparison data ultimately used must match the key datatypes. * * The result cannot be used with _bt_compare(), unless comparison - * data is first stored into the key entries. Currently this + * data is first stored into the key entries. Currently this * routine is only called by nbtsort.c and tuplesort.c, which have * their own comparison routines. */ @@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) continue; /* - * First, deconstruct the array into elements. Anything allocated + * First, deconstruct the array into elements. Anything allocated * here (including a possibly detoasted array value) is in the * workspace context. */ @@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan) &elem_values, &elem_nulls, &num_elems); /* - * Compress out any null elements. We can ignore them since we assume + * Compress out any null elements. We can ignore them since we assume * all btree operators are strict. */ num_nonnulls = 0; @@ -517,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg) * _bt_start_array_keys() -- Initialize array keys at start of a scan * * Set up the cur_elem counters and fill in the first sk_argument value for - * each array scankey. We can't do this until we know the scan direction. + * each array scankey. We can't do this until we know the scan direction. */ void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir) @@ -670,8 +670,8 @@ _bt_restore_array_keys(IndexScanDesc scan) * so that the index sorts in the desired direction. * * One key purpose of this routine is to discover which scan keys must be - * satisfied to continue the scan. It also attempts to eliminate redundant - * keys and detect contradictory keys. (If the index opfamily provides + * satisfied to continue the scan. It also attempts to eliminate redundant + * keys and detect contradictory keys. (If the index opfamily provides * incomplete sets of cross-type operators, we may fail to detect redundant * or contradictory keys, but we can survive that.) * @@ -702,7 +702,7 @@ _bt_restore_array_keys(IndexScanDesc scan) * that's the only one returned. (So, we return either a single = key, * or one or two boundary-condition keys for each attr.) However, if we * cannot compare two keys for lack of a suitable cross-type operator, - * we cannot eliminate either. If there are two such keys of the same + * we cannot eliminate either. If there are two such keys of the same * operator strategy, the second one is just pushed into the output array * without further processing here. We may also emit both >/>= or both * </<= keys if we can't compare them. The logic about required keys still @@ -737,7 +737,7 @@ _bt_restore_array_keys(IndexScanDesc scan) * Note: the reason we have to copy the preprocessed scan keys into private * storage is that we are modifying the array based on comparisons of the * key argument values, which could change on a rescan or after moving to - * new elements of array keys. Therefore we can't overwrite the source data. + * new elements of array keys. Therefore we can't overwrite the source data. 
*/ void _bt_preprocess_keys(IndexScanDesc scan) @@ -919,7 +919,7 @@ _bt_preprocess_keys(IndexScanDesc scan) /* * Emit the cleaned-up keys into the outkeys[] array, and then - * mark them if they are required. They are required (possibly + * mark them if they are required. They are required (possibly * only in one direction) if all attrs before this one had "=". */ for (j = BTMaxStrategyNumber; --j >= 0;) @@ -1017,7 +1017,7 @@ _bt_preprocess_keys(IndexScanDesc scan) * and amoplefttype/amoprighttype equal to the two argument datatypes. * * If the opfamily doesn't supply a complete set of cross-type operators we - * may not be able to make the comparison. If we can make the comparison + * may not be able to make the comparison. If we can make the comparison * we store the operator result in *result and return TRUE. We return FALSE * if the comparison could not be made. * @@ -1043,7 +1043,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, StrategyNumber strat; /* - * First, deal with cases where one or both args are NULL. This should + * First, deal with cases where one or both args are NULL. This should * only happen when the scankeys represent IS NULL/NOT NULL conditions. */ if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ISNULL) @@ -1183,7 +1183,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op, * * Lastly, for ordinary scankeys (not IS NULL/NOT NULL), we check for a * NULL comparison value. Since all btree operators are assumed strict, - * a NULL means that the qual cannot be satisfied. We return TRUE if the + * a NULL means that the qual cannot be satisfied. We return TRUE if the * comparison value isn't NULL, or FALSE if the scan should be abandoned. * * This function is applied to the *input* scankey structure; therefore @@ -1212,7 +1212,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * --- we can treat IS NULL as an equality operator for purposes of search * strategy. * - * Likewise, "x IS NOT NULL" is supported. We treat that as either "less + * Likewise, "x IS NOT NULL" is supported. We treat that as either "less * than NULL" in a NULLS LAST index, or "greater than NULL" in a NULLS * FIRST index. * @@ -1284,7 +1284,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * Mark a scankey as "required to continue the scan". * * Depending on the operator type, the key may be required for both scan - * directions or just one. Also, if the key is a row comparison header, + * directions or just one. Also, if the key is a row comparison header, * we have to mark the appropriate subsidiary ScanKeys as required. In * such cases, the first subsidiary key is required, but subsequent ones * are required only as long as they correspond to successive index columns @@ -1296,7 +1296,7 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption) * scribbling on a data structure belonging to the index AM's caller, not on * our private copy. This should be OK because the marking will not change * from scan to scan within a query, and so we'd just re-mark the same way - * anyway on a rescan. Something to keep an eye on though. + * anyway on a rescan. Something to keep an eye on though. */ static void _bt_mark_scankey_required(ScanKey skey) @@ -1482,7 +1482,7 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. 
On a backward scan, we can stop if this qual * is one of the "must match" subset. We can stop regardless * of whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On @@ -1498,8 +1498,8 @@ _bt_checkkeys(IndexScanDesc scan, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. We can stop regardless of + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. We can stop regardless of * whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On * a backward scan, however, we must keep going, because we @@ -1593,7 +1593,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Since NULLs are sorted before non-NULLs, we know we have * reached the lower limit of the range of values for this - * index attr. On a backward scan, we can stop if this qual + * index attr. On a backward scan, we can stop if this qual * is one of the "must match" subset. We can stop regardless * of whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On @@ -1609,8 +1609,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Since NULLs are sorted after non-NULLs, we know we have * reached the upper limit of the range of values for this - * index attr. On a forward scan, we can stop if this qual is - * one of the "must match" subset. We can stop regardless of + * index attr. On a forward scan, we can stop if this qual is + * one of the "must match" subset. We can stop regardless of * whether the qual is > or <, so long as it's required, * because it's not possible for any future tuples to pass. On * a backward scan, however, we must keep going, because we @@ -1631,7 +1631,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, { /* * Unlike the simple-scankey case, this isn't a disallowed case. - * But it can never match. If all the earlier row comparison + * But it can never match. If all the earlier row comparison * columns are required for the scan direction, we can stop the * scan, because there can't be another tuple that will succeed. */ @@ -1696,7 +1696,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, /* * Tuple fails this qual. If it's a required qual for the current * scan direction, then we can conclude no further tuples will pass, - * either. Note we have to look at the deciding column, not + * either. Note we have to look at the deciding column, not * necessarily the first or last column of the row condition. */ if ((subkey->sk_flags & SK_BT_REQFWD) && @@ -1722,7 +1722,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc, * is sufficient for setting LP_DEAD status (which is only a hint). * * We match items by heap TID before assuming they are the right ones to - * delete. We cope with cases where items have moved right due to insertions. + * delete. We cope with cases where items have moved right due to insertions. * If an item has moved off the current page due to a split, we'll fail to * find it and do nothing (this is not an error case --- we assume the item * will eventually get marked in a future indexscan). 
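The mirror-image NULL-boundary paragraphs repeated above (in _bt_checkkeys and again in _bt_check_rowcompare) describe one symmetric decision. A standalone sketch of that decision, using simplified stand-ins rather than the real ScanKey flag bits:

#include <stdbool.h>

typedef enum { BACKWARD = -1, FORWARD = 1 } ScanDir;  /* stand-in for ScanDirection */

/*
 * Hitting a NULL in a NULLS-first index column means the scan has reached
 * the lower limit of that column's values; in a NULLS-last column, the
 * upper limit.  The scan may stop early only when it is moving further
 * into that limit *and* the qual is required in the current direction.
 */
static bool
scan_can_stop(bool nulls_first, ScanDir dir,
              bool required_fwd, bool required_bkwd)
{
    if (nulls_first)
        return dir == BACKWARD && required_bkwd;   /* lower limit reached */
    else
        return dir == FORWARD && required_fwd;     /* upper limit reached */
}

Note that, as the comments say, the > or < strategy of the qual does not enter into it; only the direction of travel and the "must match" marking matter.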
Note that because we @@ -1806,8 +1806,8 @@ _bt_killitems(IndexScanDesc scan, bool haveLock) /* * The following routines manage a shared-memory area in which we track * assignment of "vacuum cycle IDs" to currently-active btree vacuuming - * operations. There is a single counter which increments each time we - * start a vacuum to assign it a cycle ID. Since multiple vacuums could + * operations. There is a single counter which increments each time we + * start a vacuum to assign it a cycle ID. Since multiple vacuums could * be active concurrently, we have to track the cycle ID for each active * vacuum; this requires at most MaxBackends entries (usually far fewer). * We assume at most one vacuum can be active for a given index. diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index 86824f3495e..640639c175e 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -40,9 +40,9 @@ _bt_restore_page(Page page, char *from, int len) int nitems; /* - * To get the items back in the original order, we add them to the page - * in reverse. To figure out where one tuple ends and another begins, - * we have to scan them in forward order first. + * To get the items back in the original order, we add them to the page in + * reverse. To figure out where one tuple ends and another begins, we + * have to scan them in forward order first. */ i = 0; while (from < end) @@ -97,7 +97,7 @@ _bt_restore_meta(RelFileNode rnode, XLogRecPtr lsn, pageop->btpo_flags = BTP_META; /* - * Set pd_lower just past the end of the metadata. This is not essential + * Set pd_lower just past the end of the metadata. This is not essential * but it makes the page look compressible to xlog.c. */ ((PageHeader) metapg)->pd_lower = @@ -118,7 +118,7 @@ static void _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record, RelFileNode rnode, BlockNumber cblock) { - Buffer buf; + Buffer buf; buf = XLogReadBuffer(rnode, cblock, false); if (BufferIsValid(buf)) @@ -128,6 +128,7 @@ _bt_clear_incomplete_split(XLogRecPtr lsn, XLogRecord *record, if (lsn > PageGetLSN(page)) { BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page); + Assert((pageop->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0); pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT; @@ -153,6 +154,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, datapos = (char *) xlrec + SizeOfBtreeInsert; datalen = record->xl_len - SizeOfBtreeInsert; + /* * if this insert finishes a split at lower level, extract the block * number of the (left) child. @@ -172,10 +174,10 @@ btree_xlog_insert(bool isleaf, bool ismeta, } /* - * Insertion to an internal page finishes an incomplete split at the - * child level. Clear the incomplete-split flag in the child. Note: - * during normal operation, the child and parent pages are locked at the - * same time, so that clearing the flag and inserting the downlink appear + * Insertion to an internal page finishes an incomplete split at the child + * level. Clear the incomplete-split flag in the child. Note: during + * normal operation, the child and parent pages are locked at the same + * time, so that clearing the flag and inserting the downlink appear * atomic to other backends. We don't bother with that during replay, * because readers don't care about the incomplete-split flag and there * cannot be updates happening. 
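The _bt_restore_page comment reflowed above ("add them to the page in reverse... scan them in forward order first") is easier to see as code. A self-contained sketch of the two-pass idea, with function pointers standing in for IndexTupleSize and PageAddItem:

#include <stddef.h>

#define MAX_ITEMS 512   /* illustrative bound, not MaxIndexTuplesPerPage */

/*
 * Pass 1 walks the serialized buffer forward to learn where each item
 * starts and how long it is; pass 2 re-adds the items last-to-first at
 * the front of the page, which leaves them in their original order.
 */
static void
restore_items_in_reverse(const char *from, const char *end,
                         size_t (*item_size) (const char *),
                         void (*page_add) (const char *, size_t))
{
    const char *items[MAX_ITEMS];
    size_t      sizes[MAX_ITEMS];
    int         n = 0;

    while (from < end)              /* forward pass: find boundaries */
    {
        sizes[n] = item_size(from);
        items[n] = from;
        from += sizes[n];
        n++;
    }
    while (--n >= 0)                /* reverse pass: re-insert */
        page_add(items[n], sizes[n]);
}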
@@ -279,9 +281,10 @@ btree_xlog_split(bool onleft, bool isroot, datapos += left_hikeysz; datalen -= left_hikeysz; } + /* - * If this insertion finishes an incomplete split, get the block number - * of the child. + * If this insertion finishes an incomplete split, get the block number of + * the child. */ if (!isleaf && !(record->xl_info & XLR_BKP_BLOCK(1))) { @@ -439,7 +442,7 @@ btree_xlog_split(bool onleft, bool isroot, * the backup block containing right sibling is 2 or 3, depending * whether this was a leaf or internal page. */ - int rnext_index = isleaf ? 2 : 3; + int rnext_index = isleaf ? 2 : 3; if (record->xl_info & XLR_BKP_BLOCK(rnext_index)) (void) RestoreBackupBlock(lsn, record, rnext_index, false, false); @@ -620,7 +623,7 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec) /* * In what follows, we have to examine the previous state of the index - * page, as well as the heap page(s) it points to. This is only valid if + * page, as well as the heap page(s) it points to. This is only valid if * WAL replay has reached a consistent database state; which means that * the preceding check is not just an optimization, but is *necessary*. We * won't have let in any user sessions before we reach consistency. @@ -629,9 +632,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec) elog(PANIC, "btree_xlog_delete_get_latestRemovedXid: cannot operate with inconsistent data"); /* - * Get index page. If the DB is consistent, this should not fail, nor + * Get index page. If the DB is consistent, this should not fail, nor * should any of the heap page fetches below. If one does, we return - * InvalidTransactionId to cancel all HS transactions. That's probably + * InvalidTransactionId to cancel all HS transactions. That's probably * overkill, but it's safe, and certainly better than panicking here. */ ibuffer = XLogReadBuffer(xlrec->node, xlrec->block, false); @@ -716,9 +719,9 @@ btree_xlog_delete_get_latestRemovedXid(xl_btree_delete *xlrec) /* * If all heap tuples were LP_DEAD then we will be returning * InvalidTransactionId here, which avoids conflicts. This matches - * existing logic which assumes that LP_DEAD tuples must already be - * older than the latestRemovedXid on the cleanup record that - * set them as LP_DEAD, hence must already have generated a conflict. + * existing logic which assumes that LP_DEAD tuples must already be older + * than the latestRemovedXid on the cleanup record that set them as + * LP_DEAD, hence must already have generated a conflict. */ return latestRemovedXid; } @@ -735,7 +738,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record) * If we have any conflict processing to do, it must happen before we * update the page. * - * Btree delete records can conflict with standby queries. You might + * Btree delete records can conflict with standby queries. You might * think that vacuum records would conflict as well, but we've handled * that already. 
XLOG_HEAP2_CLEANUP_INFO records provide the highest xid * cleaned by the vacuum of the heap and so we can resolve any conflicts @@ -828,7 +831,7 @@ btree_xlog_mark_page_halfdead(uint8 info, XLogRecPtr lsn, XLogRecord *record) ItemId itemid; IndexTuple itup; OffsetNumber nextoffset; - BlockNumber rightsib; + BlockNumber rightsib; poffset = ItemPointerGetOffsetNumber(&(xlrec->target.tid)); diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index aa60c8db65c..cd1edfffa25 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -54,7 +54,7 @@ desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *insertData) walbuf += nitems * sizeof(ItemPointerData); } - switch(a_action) + switch (a_action) { case GIN_SEGMENT_ADDITEMS: appendStringInfo(buf, " %d (add %d items)", a_segno, nitems); @@ -94,13 +94,13 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) case XLOG_GIN_INSERT: { ginxlogInsert *xlrec = (ginxlogInsert *) rec; - char *payload = rec + sizeof(ginxlogInsert); + char *payload = rec + sizeof(ginxlogInsert); appendStringInfoString(buf, "Insert item, "); desc_node(buf, xlrec->node, xlrec->blkno); appendStringInfo(buf, " isdata: %c isleaf: %c", - (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', - (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F'); + (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', + (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F'); if (!(xlrec->flags & GIN_INSERT_ISLEAF)) { BlockNumber leftChildBlkno; @@ -115,11 +115,11 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) } if (!(xlrec->flags & GIN_INSERT_ISDATA)) appendStringInfo(buf, " isdelete: %c", - (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F'); + (((ginxlogInsertEntry *) payload)->isDelete) ? 'T' : 'F'); else if (xlrec->flags & GIN_INSERT_ISLEAF) { ginxlogRecompressDataLeaf *insertData = - (ginxlogRecompressDataLeaf *) payload; + (ginxlogRecompressDataLeaf *) payload; if (xl_info & XLR_BKP_BLOCK(0)) appendStringInfo(buf, " (full page image)"); @@ -129,10 +129,11 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) else { ginxlogInsertDataInternal *insertData = (ginxlogInsertDataInternal *) payload; + appendStringInfo(buf, " pitem: %u-%u/%u", - PostingItemGetBlockNumber(&insertData->newitem), - ItemPointerGetBlockNumber(&insertData->newitem.key), - ItemPointerGetOffsetNumber(&insertData->newitem.key)); + PostingItemGetBlockNumber(&insertData->newitem), + ItemPointerGetBlockNumber(&insertData->newitem.key), + ItemPointerGetOffsetNumber(&insertData->newitem.key)); } } break; @@ -144,8 +145,8 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) desc_node(buf, ((ginxlogSplit *) rec)->node, ((ginxlogSplit *) rec)->lblkno); appendStringInfo(buf, " isrootsplit: %c", (((ginxlogSplit *) rec)->flags & GIN_SPLIT_ROOT) ? 'T' : 'F'); appendStringInfo(buf, " isdata: %c isleaf: %c", - (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', - (xlrec->flags & GIN_INSERT_ISLEAF) ? 'T' : 'F'); + (xlrec->flags & GIN_INSERT_ISDATA) ? 'T' : 'F', + (xlrec->flags & GIN_INSERT_ISLEAF) ? 
'T' : 'F'); } break; case XLOG_GIN_VACUUM_PAGE: @@ -155,6 +156,7 @@ gin_desc(StringInfo buf, uint8 xl_info, char *rec) case XLOG_GIN_VACUUM_DATA_LEAF_PAGE: { ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) rec; + appendStringInfoString(buf, "Vacuum data leaf page, "); desc_node(buf, xlrec->node, xlrec->blkno); if (xl_info & XLR_BKP_BLOCK(0)) diff --git a/src/backend/access/rmgrdesc/nbtdesc.c b/src/backend/access/rmgrdesc/nbtdesc.c index af7663b8cac..a3c746f1a84 100644 --- a/src/backend/access/rmgrdesc/nbtdesc.c +++ b/src/backend/access/rmgrdesc/nbtdesc.c @@ -140,7 +140,7 @@ btree_desc(StringInfo buf, uint8 xl_info, char *rec) xl_btree_unlink_page *xlrec = (xl_btree_unlink_page *) rec; appendStringInfo(buf, "unlink_page: rel %u/%u/%u; ", - xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); + xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); appendStringInfo(buf, "dead %u; left %u; right %u; btpo_xact %u; ", xlrec->deadblk, xlrec->leftsib, xlrec->rightsib, xlrec->btpo_xact); appendStringInfo(buf, "leaf %u; leafleft %u; leafright %u; topparent %u", diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index 48f32cda241..c08d211104d 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -25,7 +25,7 @@ /* * SPPageDesc tracks all info about a page we are inserting into. In some * situations it actually identifies a tuple, or even a specific node within - * an inner tuple. But any of the fields can be invalid. If the buffer + * an inner tuple. But any of the fields can be invalid. If the buffer * field is valid, it implies we hold pin and exclusive lock on that buffer. * page pointer should be valid exactly when buffer is. */ @@ -249,7 +249,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, else { /* - * Tuple must be inserted into existing chain. We mustn't change the + * Tuple must be inserted into existing chain. We mustn't change the * chain's head address, but we don't need to chase the entire chain * to put the tuple at the end; we can insert it second. * @@ -814,7 +814,7 @@ doPickSplit(Relation index, SpGistState *state, * We may not actually insert new tuple because another picksplit may be * necessary due to too large value, but we will try to allocate enough * space to include it; and in any case it has to be included in the input - * for the picksplit function. So don't increment nToInsert yet. + * for the picksplit function. So don't increment nToInsert yet. */ in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state); heapPtrs[in.nTuples] = newLeafTuple->heapPtr; @@ -872,7 +872,7 @@ doPickSplit(Relation index, SpGistState *state, /* * Check to see if the picksplit function failed to separate the values, * ie, it put them all into the same child node. If so, select allTheSame - * mode and create a random split instead. See comments for + * mode and create a random split instead. See comments for * checkAllTheSame as to why we need to know if the new leaf tuples could * fit on one page. */ @@ -1037,7 +1037,7 @@ doPickSplit(Relation index, SpGistState *state, &xlrec.initDest); /* - * Attempt to assign node groups to the two pages. We might fail to + * Attempt to assign node groups to the two pages. We might fail to * do so, even if totalLeafSizes is less than the available space, * because we can't split a group across pages. 
*/ @@ -1917,7 +1917,7 @@ spgdoinsert(Relation index, SpGistState *state, if (current.blkno == InvalidBlockNumber) { /* - * Create a leaf page. If leafSize is too large to fit on a page, + * Create a leaf page. If leafSize is too large to fit on a page, * we won't actually use the page yet, but it simplifies the API * for doPickSplit to always have a leaf page at hand; so just * quietly limit our request to a page size. @@ -2120,7 +2120,7 @@ spgdoinsert(Relation index, SpGistState *state, out.result.addNode.nodeLabel); /* - * Retry insertion into the enlarged node. We assume that + * Retry insertion into the enlarged node. We assume that * we'll get a MatchNode result this time. */ goto process_inner_tuple; diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c index 2b1b49348cf..a4408f03bd3 100644 --- a/src/backend/access/spgist/spginsert.c +++ b/src/backend/access/spgist/spginsert.c @@ -163,7 +163,7 @@ spgbuildempty(PG_FUNCTION_ARGS) page = (Page) palloc(BLCKSZ); SpGistInitMetapage(page); - /* Write the page. If archiving/streaming, XLOG it. */ + /* Write the page. If archiving/streaming, XLOG it. */ PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO); smgrwrite(index->rd_smgr, INIT_FORKNUM, SPGIST_METAPAGE_BLKNO, (char *) page, true); @@ -232,7 +232,7 @@ spginsert(PG_FUNCTION_ARGS) /* * We might have to repeat spgdoinsert() multiple times, if conflicts * occur with concurrent insertions. If so, reset the insertCtx each time - * to avoid cumulative memory consumption. That means we also have to + * to avoid cumulative memory consumption. That means we also have to * redo initSpGistState(), but it's cheap enough not to matter. */ while (!spgdoinsert(index, &spgstate, ht_ctid, *values, *isnull)) diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c index 0a1e09c51e8..35cc41b3aab 100644 --- a/src/backend/access/spgist/spgscan.c +++ b/src/backend/access/spgist/spgscan.c @@ -103,7 +103,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so) * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so. * * The point here is to eliminate null-related considerations from what the - * opclass consistent functions need to deal with. We assume all SPGiST- + * opclass consistent functions need to deal with. We assume all SPGiST- * indexable operators are strict, so any null RHS value makes the scan * condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL * conditions; their effect is reflected into searchNulls/searchNonNulls. @@ -600,7 +600,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr, if (so->want_itup) { /* - * Reconstruct desired IndexTuple. We have to copy the datum out of + * Reconstruct desired IndexTuple. We have to copy the datum out of * the temp context anyway, so we may as well create the tuple here. */ so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc, diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c index bcdd29362d7..5b7a5a06a0f 100644 --- a/src/backend/access/spgist/spgtextproc.c +++ b/src/backend/access/spgist/spgtextproc.c @@ -26,11 +26,11 @@ * In the worst case, an inner tuple in a text radix tree could have as many * as 256 nodes (one for each possible byte value). Each node can take 16 * bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page - * of size BLCKSZ. Rather than assuming we know the exact amount of overhead + * of size BLCKSZ. 
Rather than assuming we know the exact amount of overhead * imposed by page headers, tuple headers, etc, we leave 100 bytes for that * (the actual overhead should be no more than 56 bytes at this writing, so * there is slop in this number). So we can safely create prefixes up to - * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is + * BLCKSZ - 256 * 16 - 100 bytes long. Unfortunately, because 256 * 16 is * already 4K, there is no safe prefix length when BLCKSZ is less than 8K; * it is always possible to get "SPGiST inner tuple size exceeds maximum" * if there are too many distinct next-byte values at a given place in the @@ -327,7 +327,7 @@ spg_text_picksplit(PG_FUNCTION_ARGS) } /* - * Sort by label bytes so that we can group the values into nodes. This + * Sort by label bytes so that we can group the values into nodes. This * also ensures that the nodes are ordered by label value, allowing the * use of binary search in searchChar. */ @@ -377,7 +377,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS) /* * Reconstruct values represented at this tuple, including parent data, - * prefix of this tuple if any, and the node label if any. in->level + * prefix of this tuple if any, and the node label if any. in->level * should be the length of the previously reconstructed value, and the * number of bytes added here is prefixSize or prefixSize + 1. * diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c index 3cbad99e46a..1a224ef7cc1 100644 --- a/src/backend/access/spgist/spgutils.c +++ b/src/backend/access/spgist/spgutils.c @@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index) * * When requesting an inner page, if we get one with the wrong parity, * we just release the buffer and try again. We will get a different page - * because GetFreeIndexPage will have marked the page used in FSM. The page + * because GetFreeIndexPage will have marked the page used in FSM. The page * is entered in our local lastUsedPages cache, so there's some hope of * making use of it later in this session, but otherwise we rely on VACUUM * to eventually re-enter the page in FSM, making it available for recycling. @@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index) * * When we return a buffer to the caller, the page is *not* entered into * the lastUsedPages cache; we expect the caller will do so after it's taken - * whatever space it will use. This is because after the caller has used up + * whatever space it will use. This is because after the caller has used up * some space, the page might have less space than whatever was cached already * so we'd rather not trash the old cache entry. */ @@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew) /* * If possible, increase the space request to include relation's - * fillfactor. This ensures that when we add unrelated tuples to a page, + * fillfactor. This ensures that when we add unrelated tuples to a page, * we try to keep 100-fillfactor% available for adding tuples that are * related to the ones already on it. But fillfactor mustn't cause an * error for requests that would otherwise be legal. diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index 633cf7aeae7..19a461be41d 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -211,7 +211,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, * Figure out exactly what we have to do. 
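The prefix bound derived in the spgtextproc.c comment above is simple arithmetic, and making it concrete shows why an 8 kB block is the floor. Macro names here are illustrative, not the header's:

/*
 * Worst-case inner tuple: 256 nodes (one per byte value) x 16 bytes each
 * on a MAXALIGN=8 machine, plus 100 bytes allowed for page and tuple
 * overhead.  Whatever is left of the block can be prefix.
 */
#define EXAMPLE_BLCKSZ          8192
#define EXAMPLE_MAX_PREFIX_LEN  (EXAMPLE_BLCKSZ - 256 * 16 - 100)  /* 3996 */
/* With a 4 kB block: 4096 - 4096 - 100 < 0, hence no safe prefix length. */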
We do this separately from * actually modifying the page, mainly so that we have a representation * that can be dumped into WAL and then the replay code can do exactly - * the same thing. The output of this step consists of six arrays + * the same thing. The output of this step consists of six arrays * describing four kinds of operations, to be performed in this order: * * toDead[]: tuple numbers to be replaced with DEAD tuples @@ -287,7 +287,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer, else { /* - * Second or later live tuple. Arrange to re-chain it to the + * Second or later live tuple. Arrange to re-chain it to the * previous live one, if there was a gap. */ if (interveningDeletable) diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c index 1689324f234..cc0184d174d 100644 --- a/src/backend/access/spgist/spgxlog.c +++ b/src/backend/access/spgist/spgxlog.c @@ -41,7 +41,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc) } /* - * Add a leaf tuple, or replace an existing placeholder tuple. This is used + * Add a leaf tuple, or replace an existing placeholder tuple. This is used * to replay SpGistPageAddNewItem() operations. If the offset points at an * existing tuple, it had better be a placeholder tuple. */ @@ -462,7 +462,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record) } /* - * Update parent downlink. Since parent could be in either of the + * Update parent downlink. Since parent could be in either of the * previous two buffers, it's a bit tricky to determine which BKP bit * applies. */ @@ -799,7 +799,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record) bbi++; /* - * Now we can release the leaf-page locks. It's okay to do this before + * Now we can release the leaf-page locks. It's okay to do this before * updating the parent downlink. */ if (BufferIsValid(srcBuffer)) diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index 0eadd776af6..27ca4c65673 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -11,15 +11,15 @@ * log can be broken into relatively small, independent segments. * * XLOG interactions: this module generates an XLOG record whenever a new - * CLOG page is initialized to zeroes. Other writes of CLOG come from + * CLOG page is initialized to zeroes. Other writes of CLOG come from * recording of transaction commit or abort in xact.c, which generates its * own XLOG records for these events and will re-perform the status update - * on redo; so we need make no additional XLOG entry here. For synchronous + * on redo; so we need make no additional XLOG entry here. For synchronous * transaction commits, the XLOG is guaranteed flushed through the XLOG commit * record before we are called to log a commit, so the WAL rule "write xlog * before data" is satisfied automatically. However, for async commits we * must track the latest LSN affecting each CLOG page, so that we can flush - * XLOG that far and satisfy the WAL rule. We don't have to worry about this + * XLOG that far and satisfy the WAL rule. We don't have to worry about this * for aborts (whether sync or async), since the post-crash assumption would * be that such transactions failed anyway. * @@ -105,7 +105,7 @@ static void set_status_by_pages(int nsubxids, TransactionId *subxids, * in the tree of xid. In various cases nsubxids may be zero. * * lsn must be the WAL location of the commit record when recording an async - * commit. 
For a synchronous commit it can be InvalidXLogRecPtr, since the + * commit. For a synchronous commit it can be InvalidXLogRecPtr, since the * caller guarantees the commit record is already flushed in that case. It * should be InvalidXLogRecPtr for abort cases, too. * @@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn) * Testing during the PostgreSQL 9.2 development cycle revealed that on a * large multi-processor system, it was possible to have more CLOG page * requests in flight at one time than the numebr of CLOG buffers which existed - * at that time, which was hardcoded to 8. Further testing revealed that + * at that time, which was hardcoded to 8. Further testing revealed that * performance dropped off with more than 32 CLOG buffers, possibly because * the linear buffer search algorithm doesn't scale well. * diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 459f59cb4e0..9da22c8bdfc 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -5,7 +5,7 @@ * * The pg_multixact manager is a pg_clog-like manager that stores an array of * MultiXactMember for each MultiXactId. It is a fundamental part of the - * shared-row-lock implementation. Each MultiXactMember is comprised of a + * shared-row-lock implementation. Each MultiXactMember is comprised of a * TransactionId and a set of flag bits. The name is a bit historical: * originally, a MultiXactId consisted of more than one TransactionId (except * in rare corner cases), hence "multi". Nowadays, however, it's perfectly @@ -18,7 +18,7 @@ * * We use two SLRU areas, one for storing the offsets at which the data * starts for each MultiXactId in the other one. This trick allows us to - * store variable length arrays of TransactionIds. (We could alternatively + * store variable length arrays of TransactionIds. (We could alternatively * use one area containing counts and TransactionIds, with valid MultiXactId * values pointing at slots containing counts; but that way seems less robust * since it would get completely confused if someone inquired about a bogus @@ -38,7 +38,7 @@ * * Like clog.c, and unlike subtrans.c, we have to preserve state across * crashes and ensure that MXID and offset numbering increases monotonically - * across a crash. We do this in the same way as it's done for transaction + * across a crash. We do this in the same way as it's done for transaction * IDs: the WAL record is guaranteed to contain evidence of every MXID we * could need to worry about, and we just make sure that at the end of * replay, the next-MXID and next-offset counters are at least as large as @@ -50,7 +50,7 @@ * The minimum value in each database is stored in pg_database, and the * global minimum is part of pg_control. Any vacuum that is able to * advance its database's minimum value also computes a new global minimum, - * and uses this value to truncate older segments. When new multixactid + * and uses this value to truncate older segments. When new multixactid * values are to be created, care is taken that the counter does not * fall within the wraparound horizon considering the global minimum value. * @@ -85,13 +85,13 @@ /* - * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is + * Defines for MultiXactOffset page sizes. A page is the same BLCKSZ as is * used everywhere else in Postgres. 
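The "Defines for MultiXactOffset page sizes" paragraph just above boils down to modular arithmetic over BLCKSZ-sized pages. A sketch consistent with that description, using simplified typedefs (the real file defines the equivalents immediately after this comment):

#include <stdint.h>

typedef uint32_t MultiXactId;
typedef uint32_t MultiXactOffset;

#define EXAMPLE_BLCKSZ 8192

/* 8192 / 4 = 2048 starting offsets fit on each offsets-area page */
#define MULTIXACT_OFFSETS_PER_PAGE \
    (EXAMPLE_BLCKSZ / sizeof(MultiXactOffset))

/* Which page, and which slot within it, holds a multi's starting offset */
#define MultiXactIdToOffsetPage(multi) \
    ((multi) / (MultiXactId) MULTIXACT_OFFSETS_PER_PAGE)
#define MultiXactIdToOffsetEntry(multi) \
    ((multi) % (MultiXactId) MULTIXACT_OFFSETS_PER_PAGE)

Because both counters are 32 bits, page and segment numbering wrap at 0xFFFFFFFF divided by these constants, which is exactly the wraparound the following "Note:" paragraph goes on to discuss.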
* * Note: because MultiXactOffsets are 32 bits and wrap around at 0xFFFFFFFF, * MultiXact page numbering also wraps around at * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE, and segment numbering at - * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need + * 0xFFFFFFFF/MULTIXACT_OFFSETS_PER_PAGE/SLRU_SEGMENTS_PER_PAGE. We need * take no explicit notice of that fact in this module, except when comparing * segment and page numbers in TruncateMultiXact (see * MultiXactOffsetPagePrecedes). @@ -110,7 +110,7 @@ * additional flag bits for each TransactionId. To do this without getting * into alignment issues, we store four bytes of flags, and then the * corresponding 4 Xids. Each such 5-word (20-byte) set we call a "group", and - * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups + * are stored as a whole in pages. Thus, with 8kB BLCKSZ, we keep 409 groups * per page. This wastes 12 bytes per page, but that's OK -- simplicity (and * performance) trumps space efficiency here. * @@ -161,7 +161,7 @@ static SlruCtlData MultiXactMemberCtlData; #define MultiXactMemberCtl (&MultiXactMemberCtlData) /* - * MultiXact state shared across all backends. All this state is protected + * MultiXact state shared across all backends. All this state is protected * by MultiXactGenLock. (We also use MultiXactOffsetControlLock and * MultiXactMemberControlLock to guard accesses to the two sets of SLRU * buffers. For concurrency's sake, we avoid holding more than one of these @@ -179,7 +179,7 @@ typedef struct MultiXactStateData MultiXactId lastTruncationPoint; /* - * oldest multixact that is still on disk. Anything older than this + * oldest multixact that is still on disk. Anything older than this * should not be consulted. */ MultiXactId oldestMultiXactId; @@ -269,8 +269,8 @@ typedef struct mXactCacheEnt } mXactCacheEnt; #define MAX_CACHE_ENTRIES 256 -static dlist_head MXactCache = DLIST_STATIC_INIT(MXactCache); -static int MXactCacheMembers = 0; +static dlist_head MXactCache = DLIST_STATIC_INIT(MXactCache); +static int MXactCacheMembers = 0; static MemoryContext MXactContext = NULL; #ifdef MULTIXACT_DEBUG @@ -528,7 +528,7 @@ MultiXactIdIsRunning(MultiXactId multi) /* * This could be made faster by having another entry point in procarray.c, - * walking the PGPROC array only once for all the members. But in most + * walking the PGPROC array only once for all the members. But in most * cases nmembers should be small enough that it doesn't much matter. */ for (i = 0; i < nmembers; i++) @@ -579,9 +579,9 @@ MultiXactIdSetOldestMember(void) * back. Which would be wrong. * * Note that a shared lock is sufficient, because it's enough to stop - * someone from advancing nextMXact; and nobody else could be trying to - * write to our OldestMember entry, only reading (and we assume storing - * it is atomic.) + * someone from advancing nextMXact; and nobody else could be trying + * to write to our OldestMember entry, only reading (and we assume + * storing it is atomic.) */ LWLockAcquire(MultiXactGenLock, LW_SHARED); @@ -615,7 +615,7 @@ MultiXactIdSetOldestMember(void) * The value to set is the oldest of nextMXact and all the valid per-backend * OldestMemberMXactId[] entries. Because of the locking we do, we can be * certain that no subsequent call to MultiXactIdSetOldestMember can set - * an OldestMemberMXactId[] entry older than what we compute here. Therefore + * an OldestMemberMXactId[] entry older than what we compute here. 
Therefore * there is no live transaction, now or later, that can be a member of any * MultiXactId older than the OldestVisibleMXactId we compute here. */ @@ -751,7 +751,7 @@ MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members) * heap_lock_tuple() to have put it there, and heap_lock_tuple() generates * an XLOG record that must follow ours. The normal LSN interlock between * the data page and that XLOG record will ensure that our XLOG record - * reaches disk first. If the SLRU members/offsets data reaches disk + * reaches disk first. If the SLRU members/offsets data reaches disk * sooner than the XLOG record, we do not care because we'll overwrite it * with zeroes unless the XLOG record is there too; see notes at top of * this file. @@ -882,7 +882,7 @@ RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, * GetNewMultiXactId * Get the next MultiXactId. * - * Also, reserve the needed amount of space in the "members" area. The + * Also, reserve the needed amount of space in the "members" area. The * starting offset of the reserved space is returned in *offset. * * This may generate XLOG records for expansion of the offsets and/or members @@ -916,7 +916,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) /*---------- * Check to see if it's safe to assign another MultiXactId. This protects - * against catastrophic data loss due to multixact wraparound. The basic + * against catastrophic data loss due to multixact wraparound. The basic * rules are: * * If we're past multiVacLimit, start trying to force autovacuum cycles. @@ -930,7 +930,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) { /* * For safety's sake, we release MultiXactGenLock while sending - * signals, warnings, etc. This is not so much because we care about + * signals, warnings, etc. This is not so much because we care about * preserving concurrency in this situation, as to avoid any * possibility of deadlock while doing get_database_name(). First, * copy all the shared values we'll need in this path. @@ -981,8 +981,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) (errmsg_plural("database \"%s\" must be vacuumed before %u more MultiXactId is used", "database \"%s\" must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - result, - oldest_datname, - multiWrapLimit - result), + oldest_datname, + multiWrapLimit - result), errhint("Execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); else @@ -990,8 +990,8 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) (errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used", "database with OID %u must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - result, - oldest_datoid, - multiWrapLimit - result), + oldest_datoid, + multiWrapLimit - result), errhint("Execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); } @@ -1036,7 +1036,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset) * until after file extension has succeeded! * * We don't care about MultiXactId wraparound here; it will be handled by - * the next iteration. But note that nextMXact may be InvalidMultiXactId + * the next iteration. But note that nextMXact may be InvalidMultiXactId * or the first value on a segment-beginning page after this routine * exits, so anyone else looking at the variable must be prepared to deal * with either case. 
Similarly, nextOffset may be zero, but we won't use @@ -1114,16 +1114,16 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, * need to allow an empty set to be returned regardless, if the caller is * willing to accept it; the caller is expected to check that it's an * allowed condition (such as ensuring that the infomask bits set on the - * tuple are consistent with the pg_upgrade scenario). If the caller is + * tuple are consistent with the pg_upgrade scenario). If the caller is * expecting this to be called only on recently created multis, then we * raise an error. * * Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is - * seen, it implies undetected ID wraparound has occurred. This raises a + * seen, it implies undetected ID wraparound has occurred. This raises a * hard error. * * Shared lock is enough here since we aren't modifying any global state. - * Acquire it just long enough to grab the current counter values. We may + * Acquire it just long enough to grab the current counter values. We may * need both nextMXact and nextOffset; see below. */ LWLockAcquire(MultiXactGenLock, LW_SHARED); @@ -1151,12 +1151,12 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, /* * Find out the offset at which we need to start reading MultiXactMembers - * and the number of members in the multixact. We determine the latter as + * and the number of members in the multixact. We determine the latter as * the difference between this multixact's starting offset and the next * one's. However, there are some corner cases to worry about: * * 1. This multixact may be the latest one created, in which case there is - * no next one to look at. In this case the nextOffset value we just + * no next one to look at. In this case the nextOffset value we just * saved is the correct endpoint. * * 2. The next multixact may still be in process of being filled in: that @@ -1167,11 +1167,11 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, * (because we are careful to pre-zero offset pages). Because * GetNewMultiXactId will never return zero as the starting offset for a * multixact, when we read zero as the next multixact's offset, we know we - * have this case. We sleep for a bit and try again. + * have this case. We sleep for a bit and try again. * * 3. Because GetNewMultiXactId increments offset zero to offset one to * handle case #2, there is an ambiguity near the point of offset - * wraparound. If we see next multixact's offset is one, is that our + * wraparound. If we see next multixact's offset is one, is that our * multixact's actual endpoint, or did it end at zero with a subsequent * increment? We handle this using the knowledge that if the zero'th * member slot wasn't filled, it'll contain zero, and zero isn't a valid @@ -1297,8 +1297,8 @@ retry: /* * MultiXactHasRunningRemoteMembers - * Does the given multixact have still-live members from - * transactions other than our own? + * Does the given multixact have still-live members from + * transactions other than our own? */ bool MultiXactHasRunningRemoteMembers(MultiXactId multi) @@ -1694,7 +1694,7 @@ multixact_twophase_postabort(TransactionId xid, uint16 info, /* * Initialization of shared memory for MultiXact. We use two SLRU areas, - * thus double memory. Also, reserve space for the shared MultiXactState + * thus double memory. Also, reserve space for the shared MultiXactState * struct and the per-backend MultiXactId arrays (two of those, too). 
*/ Size @@ -1754,7 +1754,7 @@ MultiXactShmemInit(void) /* * This func must be called ONCE on system install. It creates the initial - * MultiXact segments. (The MultiXacts directories are assumed to have been + * MultiXact segments. (The MultiXacts directories are assumed to have been * created by initdb, and MultiXactShmemInit must have been called already.) */ void @@ -1849,7 +1849,7 @@ MaybeExtendOffsetSlru(void) if (!SimpleLruDoesPhysicalPageExist(MultiXactOffsetCtl, pageno)) { - int slotno; + int slotno; /* * Fortunately for us, SimpleLruWritePage is already prepared to deal @@ -1925,7 +1925,7 @@ TrimMultiXact(void) MultiXactOffsetCtl->shared->latest_page_number = pageno; /* - * Zero out the remainder of the current offsets page. See notes in + * Zero out the remainder of the current offsets page. See notes in * StartupCLOG() for motivation. */ entryno = MultiXactIdToOffsetEntry(multi); @@ -1955,7 +1955,7 @@ TrimMultiXact(void) MultiXactMemberCtl->shared->latest_page_number = pageno; /* - * Zero out the remainder of the current members page. See notes in + * Zero out the remainder of the current members page. See notes in * TrimCLOG() for motivation. */ flagsoff = MXOffsetToFlagsOffset(offset); @@ -2097,7 +2097,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) /* * We'll start complaining loudly when we get within 10M multis of the - * stop point. This is kind of arbitrary, but if you let your gas gauge + * stop point. This is kind of arbitrary, but if you let your gas gauge * get down to 1% of full, would you be looking for the next gas station? * We need to be fairly liberal about this number because there are lots * of scenarios where most transactions are done by automatic clients that @@ -2172,8 +2172,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) (errmsg_plural("database \"%s\" must be vacuumed before %u more MultiXactId is used", "database \"%s\" must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - curMulti, - oldest_datname, - multiWrapLimit - curMulti), + oldest_datname, + multiWrapLimit - curMulti), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); else @@ -2181,8 +2181,8 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid) (errmsg_plural("database with OID %u must be vacuumed before %u more MultiXactId is used", "database with OID %u must be vacuumed before %u more MultiXactIds are used", multiWrapLimit - curMulti, - oldest_datoid, - multiWrapLimit - curMulti), + oldest_datoid, + multiWrapLimit - curMulti), errhint("To avoid a database shutdown, execute a database-wide VACUUM in that database.\n" "You might also need to commit or roll back old prepared transactions."))); } @@ -2375,16 +2375,16 @@ GetOldestMultiXactId(void) /* * SlruScanDirectory callback. - * This callback deletes segments that are outside the range determined by - * the given page numbers. + * This callback deletes segments that are outside the range determined by + * the given page numbers. * * Both range endpoints are exclusive (that is, segments containing any of * those pages are kept.) 
*/ typedef struct MembersLiveRange { - int rangeStart; - int rangeEnd; + int rangeStart; + int rangeEnd; } MembersLiveRange; static bool @@ -2392,15 +2392,15 @@ SlruScanDirCbRemoveMembers(SlruCtl ctl, char *filename, int segpage, void *data) { MembersLiveRange *range = (MembersLiveRange *) data; - MultiXactOffset nextOffset; + MultiXactOffset nextOffset; if ((segpage == range->rangeStart) || (segpage == range->rangeEnd)) - return false; /* easy case out */ + return false; /* easy case out */ /* - * To ensure that no segment is spuriously removed, we must keep track - * of new segments added since the start of the directory scan; to do this, + * To ensure that no segment is spuriously removed, we must keep track of + * new segments added since the start of the directory scan; to do this, * we update our end-of-range point as we run. * * As an optimization, we can skip looking at shared memory if we know for @@ -2473,10 +2473,10 @@ void TruncateMultiXact(MultiXactId oldestMXact) { MultiXactOffset oldestOffset; - MultiXactOffset nextOffset; + MultiXactOffset nextOffset; mxtruncinfo trunc; MultiXactId earliest; - MembersLiveRange range; + MembersLiveRange range; /* * Note we can't just plow ahead with the truncation; it's possible that diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index b90db9a417d..1f9a100da85 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -15,7 +15,7 @@ * * We use a control LWLock to protect the shared data structures, plus * per-buffer LWLocks that synchronize I/O for each buffer. The control lock - * must be held to examine or modify any shared state. A process that is + * must be held to examine or modify any shared state. A process that is * reading in or writing out a page buffer does not hold the control lock, * only the per-buffer lock for the buffer it is working on. * @@ -34,7 +34,7 @@ * could have happened while we didn't have the lock). * * As with the regular buffer manager, it is possible for another process - * to re-dirty a page that is currently being written out. This is handled + * to re-dirty a page that is currently being written out. This is handled * by re-setting the page's page_dirty flag. * * @@ -96,7 +96,7 @@ typedef struct SlruFlushData *SlruFlush; * page_lru_count entries to be "reset" to lower values than they should have, * in case a process is delayed while it executes this macro. With care in * SlruSelectLRUPage(), this does little harm, and in any case the absolute - * worst possible consequence is a nonoptimal choice of page to evict. The + * worst possible consequence is a nonoptimal choice of page to evict. The * gain from allowing concurrent reads of SLRU pages seems worth it. */ #define SlruRecentlyUsed(shared, slotno) \ @@ -481,7 +481,7 @@ SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno, TransactionId xid) * * NOTE: only one write attempt is made here. Hence, it is possible that * the page is still dirty at exit (if someone else re-dirtied it during - * the write). However, we *do* attempt a fresh write even if the page + * the write). However, we *do* attempt a fresh write even if the page * is already being written; this is for checkpoints. * * Control lock must be held at entry, and will be held at exit. 
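The exclusive-endpoint convention used by SlruScanDirCbRemoveMembers above is easy to misread, so here is a minimal sketch of just the keep-or-remove decision. This is an illustration, not backend code: the type and function names are invented, and it assumes segment page numbers that have not wrapped around, whereas the real callback also refreshes the end of the range from shared memory while the directory scan runs.

	#include <stdbool.h>

	/* Invented stand-in for MembersLiveRange; both endpoints are exclusive. */
	typedef struct
	{
		int			rangeStart;		/* first page of the kept range */
		int			rangeEnd;		/* first page past the kept range */
	} LiveRangeSketch;

	static bool
	segment_is_removable(const LiveRangeSketch *range, int segpage)
	{
		/* Segments containing either boundary page are always kept. */
		if (segpage == range->rangeStart || segpage == range->rangeEnd)
			return false;

		/* Segments strictly inside the live window are kept too. */
		if (segpage > range->rangeStart && segpage < range->rangeEnd)
			return false;

		/* Everything wholly outside the window may be deleted. */
		return true;
	}

Keeping the boundary segments is the conservative choice: a segment that contains even one live page must survive, so only segments wholly outside the window are candidates for removal.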
@@ -634,7 +634,7 @@ SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno) * In a crash-and-restart situation, it's possible for us to receive * commands to set the commit status of transactions whose bits are in * already-truncated segments of the commit log (see notes in - * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case + * SlruPhysicalWritePage). Hence, if we are InRecovery, allow the case * where the file doesn't exist, and return zeroes instead. */ fd = OpenTransientFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); @@ -964,9 +964,9 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) /* * If we find any EMPTY slot, just select that one. Else choose a - * victim page to replace. We normally take the least recently used + * victim page to replace. We normally take the least recently used * valid page, but we will never take the slot containing - * latest_page_number, even if it appears least recently used. We + * latest_page_number, even if it appears least recently used. We * will select a slot that is already I/O busy only if there is no * other choice: a read-busy slot will not be least recently used once * the read finishes, and waiting for an I/O on a write-busy slot is @@ -1041,7 +1041,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno) /* * If all pages (except possibly the latest one) are I/O busy, we'll - * have to wait for an I/O to complete and then retry. In that + * have to wait for an I/O to complete and then retry. In that * unhappy case, we choose to wait for the I/O on the least recently * used slot, on the assumption that it was likely initiated first of * all the I/Os in progress and may therefore finish first. @@ -1193,7 +1193,7 @@ restart:; /* * Hmm, we have (or may have) I/O operations acting on the page, so * we've got to wait for them to finish and then start again. This is - * the same logic as in SlruSelectLRUPage. (XXX if page is dirty, + * the same logic as in SlruSelectLRUPage. (XXX if page is dirty, * wouldn't it be OK to just discard it without writing it? For now, * keep the logic the same as it was.) */ @@ -1293,7 +1293,7 @@ SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data) cldir = AllocateDir(ctl->Dir); while ((clde = ReadDir(cldir, ctl->Dir)) != NULL) { - size_t len; + size_t len; len = strlen(clde->d_name); diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 2f5cfa0d223..bebaee92160 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -5,7 +5,7 @@ * * The pg_subtrans manager is a pg_clog-like manager that stores the parent * transaction Id for each transaction. It is a fundamental part of the - * nested transactions implementation. A main transaction has a parent + * nested transactions implementation. A main transaction has a parent * of InvalidTransactionId, and each subtransaction has its immediate parent. * The tree can easily be walked from child to parent, but not in the * opposite direction. @@ -191,7 +191,7 @@ SUBTRANSShmemInit(void) * must have been called already.) * * Note: it's not really necessary to create the initial segment now, - * since slru.c would create it on first write anyway. But we may as well + * since slru.c would create it on first write anyway. But we may as well * do it to be sure the directory is set up correctly. 
*/ void diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c index 319a2185410..2d27b3ae318 100644 --- a/src/backend/access/transam/timeline.c +++ b/src/backend/access/transam/timeline.c @@ -66,7 +66,7 @@ restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end) * Try to read a timeline's history file. * * If successful, return the list of component TLIs (the given TLI followed by - * its ancestor TLIs). If we can't find the history file, assume that the + * its ancestor TLIs). If we can't find the history file, assume that the * timeline has no parents, and return a list of just the specified timeline * ID. */ @@ -150,7 +150,7 @@ readTimeLineHistory(TimeLineID targetTLI) if (nfields != 3) ereport(FATAL, (errmsg("syntax error in history file: %s", fline), - errhint("Expected a transaction log switchpoint location."))); + errhint("Expected a transaction log switchpoint location."))); if (result && tli <= lasttli) ereport(FATAL, @@ -281,7 +281,7 @@ findNewestTimeLine(TimeLineID startTLI) * reason: human-readable explanation of why the timeline was switched * * Currently this is only used at the end of recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ void @@ -418,7 +418,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, /* * Prefer link() to rename() here just to be really sure that we don't - * overwrite an existing file. However, there shouldn't be one, so + * overwrite an existing file. However, there shouldn't be one, so * rename() is an acceptable substitute except for the truly paranoid. */ #if HAVE_WORKING_LINK diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index 8965319551e..12982d9b556 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -145,7 +145,7 @@ TransactionIdDidCommit(TransactionId transactionId) * be a window just after database startup where we do not have complete * knowledge in pg_subtrans of the transactions after TransactionXmin. * StartupSUBTRANS() has ensured that any missing information will be - * zeroed. Since this case should not happen under normal conditions, it + * zeroed. Since this case should not happen under normal conditions, it * seems reasonable to emit a WARNING for it. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) @@ -301,7 +301,7 @@ TransactionIdPrecedes(TransactionId id1, TransactionId id2) { /* * If either ID is a permanent XID then we can just do unsigned - * comparison. If both are normal, do a modulo-2^32 comparison. + * comparison. If both are normal, do a modulo-2^32 comparison. */ int32 diff; diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 66dbf584568..70ca6ab67d1 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -443,7 +443,7 @@ LockGXact(const char *gid, Oid user) /* * Note: it probably would be possible to allow committing from * another database; but at the moment NOTIFY is known not to work and - * there may be some other issues as well. Hence disallow until + * there may be some other issues as well. Hence disallow until * someone gets motivated to make it work.
*/ if (MyDatabaseId != proc->databaseId) @@ -1031,7 +1031,7 @@ EndPrepare(GlobalTransaction gxact) * out the correct state file CRC, we have an inconsistency: the xact is * prepared according to WAL but not according to our on-disk state. We * use a critical section to force a PANIC if we are unable to complete - * the write --- then, WAL replay should repair the inconsistency. The + * the write --- then, WAL replay should repair the inconsistency. The * odds of a PANIC actually occurring should be very tiny given that we * were able to write the bogus CRC above. * @@ -1069,7 +1069,7 @@ EndPrepare(GlobalTransaction gxact) errmsg("could not close two-phase state file: %m"))); /* - * Mark the prepared transaction as valid. As soon as xact.c marks + * Mark the prepared transaction as valid. As soon as xact.c marks * MyPgXact as not running our XID (which it will do immediately after * this function returns), others can commit/rollback the xact. * @@ -1336,7 +1336,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit) /* * In case we fail while running the callbacks, mark the gxact invalid so * no one else will try to commit/rollback, and so it can be recycled - * properly later. It is still locked by our XID so it won't go away yet. + * properly later. It is still locked by our XID so it won't go away yet. * * (We assume it's safe to do this without taking TwoPhaseStateLock.) */ @@ -1540,7 +1540,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) * * This approach creates a race condition: someone else could delete a * GXACT between the time we release TwoPhaseStateLock and the time we try - * to open its state file. We handle this by special-casing ENOENT + * to open its state file. We handle this by special-casing ENOENT * failures: if we see that, we verify that the GXACT is no longer valid, * and if so ignore the failure. */ @@ -1621,7 +1621,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon) * * We throw away any prepared xacts with main XID beyond nextXid --- if any * are present, it suggests that the DBA has done a PITR recovery to an - * earlier point in time without cleaning out pg_twophase. We dare not + * earlier point in time without cleaning out pg_twophase. We dare not * try to recover such prepared xacts since they likely depend on database * state that doesn't exist now. * @@ -1713,7 +1713,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p) * XID, and they may force us to advance nextXid. * * We don't expect anyone else to modify nextXid, hence we don't - * need to hold a lock while examining it. We still acquire the + * need to hold a lock while examining it. We still acquire the * lock to modify it, though. */ subxids = (TransactionId *) diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index 51b6b1a3021..7013fb894b4 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -39,7 +39,7 @@ VariableCache ShmemVariableCache = NULL; * * Note: when this is called, we are actually already inside a valid * transaction, since XIDs are now not allocated until the transaction - * does something. So it is safe to do a database lookup if we want to + * does something. So it is safe to do a database lookup if we want to * issue a warning about XID wrap. */ TransactionId @@ -165,20 +165,20 @@ GetNewTransactionId(bool isSubXact) /* * Now advance the nextXid counter. 
This must not happen until after we * have successfully completed ExtendCLOG() --- if that routine fails, we - * want the next incoming transaction to try it again. We cannot assign + * want the next incoming transaction to try it again. We cannot assign * more XIDs until there is CLOG space for them. */ TransactionIdAdvance(ShmemVariableCache->nextXid); /* * We must store the new XID into the shared ProcArray before releasing - * XidGenLock. This ensures that every active XID older than + * XidGenLock. This ensures that every active XID older than * latestCompletedXid is present in the ProcArray, which is essential for * correct OldestXmin tracking; see src/backend/access/transam/README. * * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we * are relying on fetch/store of an xid to be atomic, else other backends - * might see a partially-set xid here. But holding both locks at once + * might see a partially-set xid here. But holding both locks at once * would be a nasty concurrency hit. So for now, assume atomicity. * * Note that readers of PGXACT xid fields should be careful to fetch the @@ -289,7 +289,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) /* * We'll start complaining loudly when we get within 10M transactions of - * the stop point. This is kind of arbitrary, but if you let your gas + * the stop point. This is kind of arbitrary, but if you let your gas * gauge get down to 1% of full, would you be looking for the next gas * station? We need to be fairly liberal about this number because there * are lots of scenarios where most transactions are done by automatic @@ -390,7 +390,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid) * We primarily check whether oldestXidDB is valid. The cases we have in * mind are that that database was dropped, or the field was reset to zero * by pg_resetxlog. In either case we should force recalculation of the - * wrap limit. Also do it if oldestXid is old enough to be forcing + * wrap limit. Also do it if oldestXid is old enough to be forcing * autovacuums or other actions; this ensures we update our state as soon * as possible once extra overhead is being incurred. */ diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index 9ee11f34f2c..3e744097c79 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -270,7 +270,7 @@ static void CallSubXactCallbacks(SubXactEvent event, SubTransactionId parentSubid); static void CleanupTransaction(void); static void CheckTransactionChain(bool isTopLevel, bool throwError, - const char *stmtType); + const char *stmtType); static void CommitTransaction(void); static TransactionId RecordTransactionAbort(bool isSubXact); static void StartTransaction(void); @@ -450,7 +450,7 @@ AssignTransactionId(TransactionState s) { bool isSubXact = (s->parent != NULL); ResourceOwner currentOwner; - bool log_unknown_top = false; + bool log_unknown_top = false; /* Assert that caller didn't screw up */ Assert(!TransactionIdIsValid(s->transactionId)); @@ -487,8 +487,8 @@ AssignTransactionId(TransactionState s) /* * When wal_level=logical, guarantee that a subtransaction's xid can only - * be seen in the WAL stream if its toplevel xid has been logged - * before. If necessary we log a xact_assignment record with fewer than + * be seen in the WAL stream if its toplevel xid has been logged before. + * If necessary we log a xact_assignment record with fewer than * PGPROC_MAX_CACHED_SUBXIDS. 
Note that it is fine if didLogXid isn't set * for a transaction even though it appears in a WAL record, we just might * superfluously log something. That can happen when an xid is included @@ -637,7 +637,7 @@ SubTransactionIsActive(SubTransactionId subxid) * * "used" must be TRUE if the caller intends to use the command ID to mark * inserted/updated/deleted tuples. FALSE means the ID is being fetched - * for read-only purposes (ie, as a snapshot validity cutoff). See + * for read-only purposes (ie, as a snapshot validity cutoff). See * CommandCounterIncrement() for discussion. */ CommandId @@ -724,7 +724,7 @@ TransactionIdIsCurrentTransactionId(TransactionId xid) /* * We always say that BootstrapTransactionId is "not my transaction ID" - * even when it is (ie, during bootstrap). Along with the fact that + * even when it is (ie, during bootstrap). Along with the fact that * transam.c always treats BootstrapTransactionId as already committed, * this causes the tqual.c routines to see all tuples as committed, which * is what we need during bootstrap. (Bootstrap mode only inserts tuples, @@ -866,7 +866,7 @@ AtStart_Memory(void) /* * If this is the first time through, create a private context for * AbortTransaction to work in. By reserving some space now, we can - * insulate AbortTransaction from out-of-memory scenarios. Like + * insulate AbortTransaction from out-of-memory scenarios. Like * ErrorContext, we set it up with slow growth rate and a nonzero minimum * size, so that space will be reserved immediately. */ @@ -969,7 +969,7 @@ AtSubStart_ResourceOwner(void) Assert(s->parent != NULL); /* - * Create a resource owner for the subtransaction. We make it a child of + * Create a resource owner for the subtransaction. We make it a child of * the immediate parent's resource owner. */ s->curTransactionOwner = @@ -989,7 +989,7 @@ AtSubStart_ResourceOwner(void) * RecordTransactionCommit * * Returns latest XID among xact and its children, or InvalidTransactionId - * if the xact has no XID. (We compute that here just because it's easier.) + * if the xact has no XID. (We compute that here just because it's easier.) */ static TransactionId RecordTransactionCommit(void) @@ -1034,7 +1034,7 @@ RecordTransactionCommit(void) /* * If we didn't create XLOG entries, we're done here; otherwise we - * should flush those entries the same as a commit record. (An + * should flush those entries the same as a commit record. (An * example of a possible record that wouldn't cause an XID to be * assigned is a sequence advance record due to nextval() --- we want * to flush that to disk before reporting commit.) @@ -1051,7 +1051,7 @@ RecordTransactionCommit(void) BufmgrCommit(); /* - * Mark ourselves as within our "commit critical section". This + * Mark ourselves as within our "commit critical section". This * forces any concurrent checkpoint to wait until we've updated * pg_clog. Without this, it is possible for the checkpoint to set * REDO after the XLOG record but fail to flush the pg_clog update to @@ -1059,7 +1059,7 @@ RecordTransactionCommit(void) * crashes a little later. * * Note: we could, but don't bother to, set this flag in - * RecordTransactionAbort. That's because loss of a transaction abort + * RecordTransactionAbort. That's because loss of a transaction abort * is noncritical; the presumption would be that it aborted, anyway. * * It's safe to change the delayChkpt flag of our own backend without @@ -1168,15 +1168,15 @@ RecordTransactionCommit(void) /* * Check if we want to commit asynchronously. 
We can allow the XLOG flush * to happen asynchronously if synchronous_commit=off, or if the current - * transaction has not performed any WAL-logged operation. The latter + * transaction has not performed any WAL-logged operation. The latter * case can arise if the current transaction wrote only to temporary - * and/or unlogged tables. In case of a crash, the loss of such a + * and/or unlogged tables. In case of a crash, the loss of such a * transaction will be irrelevant since temp tables will be lost anyway, * and unlogged tables will be truncated. (Given the foregoing, you might * think that it would be unnecessary to emit the XLOG record at all in * this case, but we don't currently try to do that. It would certainly * cause problems at least in Hot Standby mode, where the - * KnownAssignedXids machinery requires tracking every XID assignment. It + * KnownAssignedXids machinery requires tracking every XID assignment. It * might be OK to skip it only when wal_level < hot_standby, but for now * we don't.) * @@ -1423,7 +1423,7 @@ AtSubCommit_childXids(void) * RecordTransactionAbort * * Returns latest XID among xact and its children, or InvalidTransactionId - * if the xact has no XID. (We compute that here just because it's easier.) + * if the xact has no XID. (We compute that here just because it's easier.) */ static TransactionId RecordTransactionAbort(bool isSubXact) @@ -1440,7 +1440,7 @@ RecordTransactionAbort(bool isSubXact) /* * If we haven't been assigned an XID, nobody will care whether we aborted - * or not. Hence, we're done in that case. It does not matter if we have + * or not. Hence, we're done in that case. It does not matter if we have * rels to delete (note that this routine is not responsible for actually * deleting 'em). We cannot have any child XIDs, either. */ @@ -1456,7 +1456,7 @@ RecordTransactionAbort(bool isSubXact) * We have a valid XID, so we should write an ABORT record for it. * * We do not flush XLOG to disk here, since the default assumption after a - * crash would be that we aborted, anyway. For the same reason, we don't + * crash would be that we aborted, anyway. For the same reason, we don't * need to worry about interlocking against checkpoint start. */ @@ -1624,7 +1624,7 @@ AtSubAbort_childXids(void) /* * We keep the child-XID arrays in TopTransactionContext (see - * AtSubCommit_childXids). This means we'd better free the array + * AtSubCommit_childXids). This means we'd better free the array * explicitly at abort to avoid leakage. */ if (s->childXids != NULL) @@ -1802,7 +1802,7 @@ StartTransaction(void) VirtualXactLockTableInsert(vxid); /* - * Advertise it in the proc array. We assume assignment of + * Advertise it in the proc array. We assume assignment of * LocalTransactionID is atomic, and the backendId should be set already. */ Assert(MyProc->backendId == vxid.backendId); @@ -1899,7 +1899,7 @@ CommitTransaction(void) /* * The remaining actions cannot call any user-defined code, so it's safe - * to start shutting down within-transaction services. But note that most + * to start shutting down within-transaction services. But note that most * of this stuff could still throw an error, which would switch us into * the transaction-abort path. */ @@ -2104,7 +2104,7 @@ PrepareTransaction(void) /* * The remaining actions cannot call any user-defined code, so it's safe - * to start shutting down within-transaction services. But note that most + * to start shutting down within-transaction services. 
But note that most * of this stuff could still throw an error, which would switch us into * the transaction-abort path. */ @@ -2224,7 +2224,7 @@ PrepareTransaction(void) XactLastRecEnd = 0; /* - * Let others know about no transaction in progress by me. This has to be + * Let others know about no transaction in progress by me. This has to be * done *after* the prepared transaction has been marked valid, else * someone may think it is unlocked and recyclable. */ @@ -2233,7 +2233,7 @@ PrepareTransaction(void) /* * This is all post-transaction cleanup. Note that if an error is raised * here, it's too late to abort the transaction. This should be just - * noncritical resource releasing. See notes in CommitTransaction. + * noncritical resource releasing. See notes in CommitTransaction. */ CallXactCallbacks(XACT_EVENT_PREPARE); @@ -2411,7 +2411,7 @@ AbortTransaction(void) ProcArrayEndTransaction(MyProc, latestXid); /* - * Post-abort cleanup. See notes in CommitTransaction() concerning + * Post-abort cleanup. See notes in CommitTransaction() concerning * ordering. We can skip all of it if the transaction failed before * creating a resource owner. */ @@ -2646,7 +2646,7 @@ CommitTransactionCommand(void) /* * Here we were in a perfectly good transaction block but the user - * told us to ROLLBACK anyway. We have to abort the transaction + * told us to ROLLBACK anyway. We have to abort the transaction * and then clean up. */ case TBLOCK_ABORT_PENDING: @@ -2666,7 +2666,7 @@ CommitTransactionCommand(void) /* * We were just issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already did + * Start a subtransaction. (DefineSavepoint already did * PushTransaction, so as to have someplace to put the SUBBEGIN * state.) */ @@ -2870,7 +2870,7 @@ AbortCurrentTransaction(void) break; /* - * Here, we failed while trying to COMMIT. Clean up the + * Here, we failed while trying to COMMIT. Clean up the * transaction and return to idle state (we do not want to stay in * the transaction). */ @@ -2932,7 +2932,7 @@ AbortCurrentTransaction(void) /* * If we failed while trying to create a subtransaction, clean up - * the broken subtransaction and abort the parent. The same + * the broken subtransaction and abort the parent. The same * applies if we get a failure while ending a subtransaction. */ case TBLOCK_SUBBEGIN: @@ -3485,7 +3485,7 @@ UserAbortTransactionBlock(void) break; /* - * We are inside a subtransaction. Mark everything up to top + * We are inside a subtransaction. Mark everything up to top * level as exitable. */ case TBLOCK_SUBINPROGRESS: @@ -3619,7 +3619,7 @@ ReleaseSavepoint(List *options) break; /* - * We are in a non-aborted subtransaction. This is the only valid + * We are in a non-aborted subtransaction. This is the only valid * case. */ case TBLOCK_SUBINPROGRESS: @@ -3676,7 +3676,7 @@ ReleaseSavepoint(List *options) /* * Mark "commit pending" all subtransactions up to the target - * subtransaction. The actual commits will happen when control gets to + * subtransaction. The actual commits will happen when control gets to * CommitTransactionCommand. */ xact = CurrentTransactionState; @@ -3775,7 +3775,7 @@ RollbackToSavepoint(List *options) /* * Mark "abort pending" all subtransactions up to the target - * subtransaction. The actual aborts will happen when control gets to + * subtransaction. The actual aborts will happen when control gets to * CommitTransactionCommand. 
*/ xact = CurrentTransactionState; @@ -4182,7 +4182,7 @@ CommitSubTransaction(void) CommandCounterIncrement(); /* - * Prior to 8.4 we marked subcommit in clog at this point. We now only + * Prior to 8.4 we marked subcommit in clog at this point. We now only * perform that step, if required, as part of the atomic update of the * whole transaction tree at top level commit or abort. */ @@ -4641,7 +4641,7 @@ TransStateAsString(TransState state) /* * xactGetCommittedChildren * - * Gets the list of committed children of the current transaction. The return + * Gets the list of committed children of the current transaction. The return * value is the number of child transactions. *ptr is set to point to an * array of TransactionIds. The array is allocated in TopTransactionContext; * the caller should *not* pfree() it (this is a change from pre-8.4 code!). diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index a636bb6d2b0..3406fa5a29d 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -101,7 +101,7 @@ bool XLOG_DEBUG = false; * future XLOG segment as long as there aren't already XLOGfileslop future * segments; else we'll delete it. This could be made a separate GUC * variable, but at present I think it's sufficient to hardwire it as - * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free + * 2*CheckPointSegments+1. Under normal conditions, a checkpoint will free * no more than 2*CheckPointSegments log segments, and we want to recycle all * of them; the +1 allows boundary cases to happen without wasting a * delete/create-segment cycle. @@ -190,7 +190,7 @@ static bool LocalHotStandbyActive = false; * 0: unconditionally not allowed to insert XLOG * -1: must check RecoveryInProgress(); disallow until it is false * Most processes start with -1 and transition to 1 after seeing that recovery - * is not in progress. But we can also force the value for special cases. + * is not in progress. But we can also force the value for special cases. * The coding in XLogInsertAllowed() depends on the first two of these states * being numerically the same as bool true and false. */ @@ -223,7 +223,7 @@ static bool recoveryPauseAtTarget = true; static TransactionId recoveryTargetXid; static TimestampTz recoveryTargetTime; static char *recoveryTargetName; -static int min_recovery_apply_delay = 0; +static int min_recovery_apply_delay = 0; static TimestampTz recoveryDelayUntilTime; /* options taken from recovery.conf for XLOG streaming */ @@ -261,7 +261,7 @@ static bool recoveryStopAfter; * * expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of * its known parents, newest first (so recoveryTargetTLI is always the - * first list member). Only these TLIs are expected to be seen in the WAL + * first list member). Only these TLIs are expected to be seen in the WAL * segments we read, and indeed only these TLIs will be considered as * candidate WAL files to open at all. * @@ -290,7 +290,7 @@ XLogRecPtr XactLastRecEnd = InvalidXLogRecPtr; /* * RedoRecPtr is this backend's local copy of the REDO record pointer * (which is almost but not quite the same as a pointer to the most recent - * CHECKPOINT record). We update this from the shared-memory copy, + * CHECKPOINT record). We update this from the shared-memory copy, * XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we * hold an insertion lock). See XLogInsert for details. 
We are also allowed * to update from XLogCtl->RedoRecPtr if we hold the info_lck; @@ -418,11 +418,11 @@ typedef struct XLogCtlInsert slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */ /* - * CurrBytePos is the end of reserved WAL. The next record will be inserted - * at that position. PrevBytePos is the start position of the previously - * inserted (or rather, reserved) record - it is copied to the prev-link - * of the next record. These are stored as "usable byte positions" rather - * than XLogRecPtrs (see XLogBytePosToRecPtr()). + * CurrBytePos is the end of reserved WAL. The next record will be + * inserted at that position. PrevBytePos is the start position of the + * previously inserted (or rather, reserved) record - it is copied to the + * prev-link of the next record. These are stored as "usable byte + * positions" rather than XLogRecPtrs (see XLogBytePosToRecPtr()). */ uint64 CurrBytePos; uint64 PrevBytePos; @@ -464,7 +464,7 @@ typedef struct XLogCtlInsert /* * WAL insertion locks. */ - WALInsertLockPadded *WALInsertLocks; + WALInsertLockPadded *WALInsertLocks; LWLockTranche WALInsertLockTranche; int WALInsertLockTrancheId; } XLogCtlInsert; @@ -504,10 +504,11 @@ typedef struct XLogCtlData * Latest initialized page in the cache (last byte position + 1). * * To change the identity of a buffer (and InitializedUpTo), you need to - * hold WALBufMappingLock. To change the identity of a buffer that's still - * dirty, the old page needs to be written out first, and for that you - * need WALWriteLock, and you need to ensure that there are no in-progress - * insertions to the page by calling WaitXLogInsertionsToFinish(). + * hold WALBufMappingLock. To change the identity of a buffer that's + * still dirty, the old page needs to be written out first, and for that + * you need WALWriteLock, and you need to ensure that there are no + * in-progress insertions to the page by calling + * WaitXLogInsertionsToFinish(). */ XLogRecPtr InitializedUpTo; @@ -799,8 +800,8 @@ static void rm_redo_error_callback(void *arg); static int get_sync_bit(int method); static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, - XLogRecData *rdata, - XLogRecPtr StartPos, XLogRecPtr EndPos); + XLogRecData *rdata, + XLogRecPtr StartPos, XLogRecPtr EndPos); static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr); static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, @@ -860,6 +861,7 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata) if (rechdr == NULL) { static char rechdrbuf[SizeOfXLogRecord + MAXIMUM_ALIGNOF]; + rechdr = (XLogRecord *) MAXALIGN(&rechdrbuf); MemSet(rechdr, 0, SizeOfXLogRecord); } @@ -1075,12 +1077,12 @@ begin:; * record to the shared WAL buffer cache is a two-step process: * * 1. Reserve the right amount of space from the WAL. The current head of - * reserved space is kept in Insert->CurrBytePos, and is protected by - * insertpos_lck. + * reserved space is kept in Insert->CurrBytePos, and is protected by + * insertpos_lck. * * 2. Copy the record to the reserved WAL space. This involves finding the - * correct WAL buffer containing the reserved space, and copying the - * record in place. This can be done concurrently in multiple processes. + * correct WAL buffer containing the reserved space, and copying the + * record in place. This can be done concurrently in multiple processes. * * To keep track of which insertions are still in-progress, each concurrent * inserter acquires an insertion lock. 
In addition to just indicating that @@ -1232,6 +1234,7 @@ begin:; { TRACE_POSTGRESQL_XLOG_SWITCH(); XLogFlush(EndPos); + /* * Even though we reserved the rest of the segment for us, which is * reflected in EndPos, we return a pointer to just the end of the @@ -1272,7 +1275,7 @@ begin:; rdt_lastnormal->next = NULL; initStringInfo(&recordbuf); - for (;rdata != NULL; rdata = rdata->next) + for (; rdata != NULL; rdata = rdata->next) appendBinaryStringInfo(&recordbuf, rdata->data, rdata->len); appendStringInfoString(&buf, " - "); @@ -1514,8 +1517,8 @@ CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, /* * If this was an xlog-switch, it's not enough to write the switch record, - * we also have to consume all the remaining space in the WAL segment. - * We have already reserved it for us, but we still need to make sure it's + * we also have to consume all the remaining space in the WAL segment. We + * have already reserved it for us, but we still need to make sure it's * allocated and zeroed in the WAL buffers so that when the caller (or * someone else) does XLogWrite(), it can really write out all the zeros. */ @@ -1556,14 +1559,14 @@ WALInsertLockAcquire(void) /* * It doesn't matter which of the WAL insertion locks we acquire, so try - * the one we used last time. If the system isn't particularly busy, - * it's a good bet that it's still available, and it's good to have some + * the one we used last time. If the system isn't particularly busy, it's + * a good bet that it's still available, and it's good to have some * affinity to a particular lock so that you don't unnecessarily bounce * cache lines between processes when there's no contention. * * If this is the first time through in this backend, pick a lock - * (semi-)randomly. This allows the locks to be used evenly if you have - * a lot of very short connections. + * (semi-)randomly. This allows the locks to be used evenly if you have a + * lot of very short connections. */ static int lockToTry = -1; @@ -1583,10 +1586,10 @@ WALInsertLockAcquire(void) /* * If we couldn't get the lock immediately, try another lock next * time. On a system with more insertion locks than concurrent - * inserters, this causes all the inserters to eventually migrate - * to a lock that no-one else is using. On a system with more - * inserters than locks, it still helps to distribute the inserters - * evenly across the locks. + * inserters, this causes all the inserters to eventually migrate to a + * lock that no-one else is using. On a system with more inserters + * than locks, it still helps to distribute the inserters evenly + * across the locks. */ lockToTry = (lockToTry + 1) % num_xloginsert_locks; } @@ -1604,8 +1607,8 @@ WALInsertLockAcquireExclusive(void) /* * When holding all the locks, we only update the last lock's insertingAt * indicator. The others are set to 0xFFFFFFFFFFFFFFFF, which is higher - * than any real XLogRecPtr value, to make sure that no-one blocks - * waiting on those. + * than any real XLogRecPtr value, to make sure that no-one blocks waiting + * on those. */ for (i = 0; i < num_xloginsert_locks - 1; i++) { @@ -1655,7 +1658,7 @@ WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt) * WALInsertLockAcquireExclusive. 
*/ LWLockUpdateVar(&WALInsertLocks[num_xloginsert_locks - 1].l.lock, - &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt, + &WALInsertLocks[num_xloginsert_locks - 1].l.insertingAt, insertingAt); } else @@ -1716,15 +1719,16 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto) * Loop through all the locks, sleeping on any in-progress insert older * than 'upto'. * - * finishedUpto is our return value, indicating the point upto which - * all the WAL insertions have been finished. Initialize it to the head - * of reserved WAL, and as we iterate through the insertion locks, back it + * finishedUpto is our return value, indicating the point upto which all + * the WAL insertions have been finished. Initialize it to the head of + * reserved WAL, and as we iterate through the insertion locks, back it * out for any insertion that's still in progress. */ finishedUpto = reservedUpto; for (i = 0; i < num_xloginsert_locks; i++) { - XLogRecPtr insertingat = InvalidXLogRecPtr; + XLogRecPtr insertingat = InvalidXLogRecPtr; + do { /* @@ -1797,9 +1801,9 @@ GetXLogBuffer(XLogRecPtr ptr) } /* - * The XLog buffer cache is organized so that a page is always loaded - * to a particular buffer. That way we can easily calculate the buffer - * a given page must be loaded into, from the XLogRecPtr alone. + * The XLog buffer cache is organized so that a page is always loaded to a + * particular buffer. That way we can easily calculate the buffer a given + * page must be loaded into, from the XLogRecPtr alone. */ idx = XLogRecPtrToBufIdx(ptr); @@ -1827,8 +1831,8 @@ GetXLogBuffer(XLogRecPtr ptr) if (expectedEndPtr != endptr) { /* - * Let others know that we're finished inserting the record up - * to the page boundary. + * Let others know that we're finished inserting the record up to the + * page boundary. */ WALInsertLockUpdateInsertingAt(expectedEndPtr - XLOG_BLCKSZ); @@ -1837,7 +1841,7 @@ GetXLogBuffer(XLogRecPtr ptr) if (expectedEndPtr != endptr) elog(PANIC, "could not find WAL buffer for %X/%X", - (uint32) (ptr >> 32) , (uint32) ptr); + (uint32) (ptr >> 32), (uint32) ptr); } else { @@ -1974,8 +1978,8 @@ XLogRecPtrToBytePos(XLogRecPtr ptr) else { result = fullsegs * UsableBytesInSegment + - (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */ - (fullpages - 1) * UsableBytesInPage; /* full pages */ + (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */ + (fullpages - 1) * UsableBytesInPage; /* full pages */ if (offset > 0) { Assert(offset >= SizeOfXLogShortPHD); @@ -2170,8 +2174,8 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) } /* - * Now the next buffer slot is free and we can set it up to be the next - * output page. + * Now the next buffer slot is free and we can set it up to be the + * next output page. */ NewPageBeginPtr = XLogCtl->InitializedUpTo; NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ; @@ -2194,7 +2198,8 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) /* NewPage->xlp_info = 0; */ /* done by memset */ NewPage ->xlp_tli = ThisTimeLineID; NewPage ->xlp_pageaddr = NewPageBeginPtr; - /* NewPage->xlp_rem_len = 0; */ /* done by memset */ + + /* NewPage->xlp_rem_len = 0; */ /* done by memset */ /* * If online backup is not in progress, mark the header to indicate @@ -2202,12 +2207,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic) * blocks. This allows the WAL archiver to know whether it is safe to * compress archived WAL data by transforming full-block records into * the non-full-block format. 
It is sufficient to record this at the - * page level because we force a page switch (in fact a segment switch) - * when starting a backup, so the flag will be off before any records - * can be written during the backup. At the end of a backup, the last - * page will be marked as all unsafe when perhaps only part is unsafe, - * but at worst the archiver would miss the opportunity to compress a - * few records. + * page level because we force a page switch (in fact a segment + * switch) when starting a backup, so the flag will be off before any + * records can be written during the backup. At the end of a backup, + * the last page will be marked as all unsafe when perhaps only part + * is unsafe, but at worst the archiver would miss the opportunity to + * compress a few records. */ if (!Insert->forcePageWrites) NewPage ->xlp_info |= XLP_BKP_REMOVABLE; @@ -2329,7 +2334,8 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) * if we're passed a bogus WriteRqst.Write that is past the end of the * last page that's been initialized by AdvanceXLInsertBuffer. */ - XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx]; + XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx]; + if (LogwrtResult.Write >= EndPtr) elog(PANIC, "xlog write request %X/%X is past end of log %X/%X", (uint32) (LogwrtResult.Write >> 32), @@ -2413,7 +2419,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) do { errno = 0; - written = write(openLogFile, from, nleft); + written = write(openLogFile, from, nleft); if (written <= 0) { if (errno == EINTR) @@ -2422,7 +2428,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) (errcode_for_file_access(), errmsg("could not write to log file %s " "at offset %u, length %zu: %m", - XLogFileNameP(ThisTimeLineID, openLogSegNo), + XLogFileNameP(ThisTimeLineID, openLogSegNo), openLogOff, nbytes))); } nleft -= written; @@ -2500,7 +2506,7 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) { /* * Could get here without iterating above loop, in which case we might - * have no open file or the wrong one. However, we do not need to + * have no open file or the wrong one. However, we do not need to * fsync more than one file. */ if (sync_method != SYNC_METHOD_OPEN && @@ -2569,7 +2575,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN) /* * If the WALWriter is sleeping, we should kick it to make it come out of - * low-power mode. Otherwise, determine whether there's a full page of + * low-power mode. Otherwise, determine whether there's a full page of * WAL available to write. */ if (!sleeping) @@ -2616,7 +2622,8 @@ XLogGetReplicationSlotMinimumLSN(void) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - XLogRecPtr retval; + XLogRecPtr retval; + SpinLockAcquire(&xlogctl->info_lck); retval = xlogctl->replicationSlotMinLSN; SpinLockRelease(&xlogctl->info_lck); @@ -2883,9 +2890,9 @@ XLogFlush(XLogRecPtr record) * We normally flush only completed blocks; but if there is nothing to do on * that basis, we check for unflushed async commits in the current incomplete * block, and flush through the latest one of those. Thus, if async commits - * are not being used, we will flush complete blocks only. We can guarantee + * are not being used, we will flush complete blocks only. We can guarantee * that async commits reach disk after at most three cycles; normally only - * one or two. (When flushing complete blocks, we allow XLogWrite to write + * one or two. 
(When flushing complete blocks, we allow XLogWrite to write * "flexibly", meaning it can stop at the end of the buffer ring; this makes a * difference only with very high load or long wal_writer_delay, but imposes * one extra cycle for the worst case for async commits.) @@ -3060,7 +3067,7 @@ XLogNeedsFlush(XLogRecPtr record) * log, seg: identify segment to be created/opened. * * *use_existent: if TRUE, OK to use a pre-existing file (else, any - * pre-existing file will be deleted). On return, TRUE if a pre-existing + * pre-existing file will be deleted). On return, TRUE if a pre-existing * file was used. * * use_lock: if TRUE, acquire ControlFileLock while moving file into @@ -3127,11 +3134,11 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) errmsg("could not create file \"%s\": %m", tmppath))); /* - * Zero-fill the file. We have to do this the hard way to ensure that all + * Zero-fill the file. We have to do this the hard way to ensure that all * the file space has really been allocated --- on platforms that allow * "holes" in files, just seeking to the end doesn't allocate intermediate * space. This way, we know that we have all the space and (after the - * fsync below) that all the indirect blocks are down on disk. Therefore, + * fsync below) that all the indirect blocks are down on disk. Therefore, * fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the * log file. * @@ -3223,7 +3230,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) * a different timeline) * * Currently this is only used during recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ static void @@ -3434,7 +3441,7 @@ XLogFileOpen(XLogSegNo segno) if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open transaction log file \"%s\": %m", path))); + errmsg("could not open transaction log file \"%s\": %m", path))); return fd; } @@ -3541,13 +3548,13 @@ XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source) * the timelines listed in expectedTLEs. * * We expect curFileTLI on entry to be the TLI of the preceding file in - * sequence, or 0 if there was no predecessor. We do not allow curFileTLI + * sequence, or 0 if there was no predecessor. We do not allow curFileTLI * to go backwards; this prevents us from picking up the wrong file when a * parent timeline extends to higher segment numbers than the child we * want to read. * * If we haven't read the timeline history file yet, read it now, so that - * we know which TLIs to scan. We don't save the list in expectedTLEs, + * we know which TLIs to scan. We don't save the list in expectedTLEs, * however, unless we actually find a valid segment. That way if there is * neither a timeline history file nor a WAL segment in the archive, and * streaming replication is set up, we'll read the timeline history file @@ -3611,7 +3618,7 @@ XLogFileClose(void) /* * WAL segment files will not be re-read in normal operation, so we advise - * the OS to release any cached pages. But do not do so if WAL archiving + * the OS to release any cached pages. But do not do so if WAL archiving * or streaming is active, because archiver and walsender process could * use the cache to read the WAL segment. 
*/ @@ -3777,7 +3784,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) { /* * We ignore the timeline part of the XLOG segment identifiers in - * deciding whether a segment is still needed. This ensures that we + * deciding whether a segment is still needed. This ensures that we * won't prematurely remove a segment from a parent timeline. We could * probably be a little more proactive about removing segments of * non-parent timelines, but that would be a whole lot more @@ -3828,6 +3835,7 @@ RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr endptr) xlde->d_name))); #ifdef WIN32 + /* * On Windows, if another process (e.g another backend) * holds the file open in FILE_SHARE_DELETE mode, unlink @@ -4310,7 +4318,7 @@ rescanLatestTimeLine(void) * I/O routines for pg_control * * *ControlFile is a buffer in shared memory that holds an image of the - * contents of pg_control. WriteControlFile() initializes pg_control + * contents of pg_control. WriteControlFile() initializes pg_control * given a preloaded buffer, ReadControlFile() loads the buffer from * the pg_control file (during postmaster or standalone-backend startup), * and UpdateControlFile() rewrites pg_control after we modify xlog state. @@ -4715,7 +4723,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source) { /* * If we haven't yet changed the boot_val default of -1, just let it - * be. We'll fix it when XLOGShmemSize is called. + * be. We'll fix it when XLOGShmemSize is called. */ if (XLOGbuffers == -1) return true; @@ -4815,7 +4823,7 @@ XLOGShmemInit(void) /* WAL insertion locks. Ensure they're aligned to the full padded size */ allocptr += sizeof(WALInsertLockPadded) - - ((uintptr_t) allocptr) % sizeof(WALInsertLockPadded); + ((uintptr_t) allocptr) %sizeof(WALInsertLockPadded); WALInsertLocks = XLogCtl->Insert.WALInsertLocks = (WALInsertLockPadded *) allocptr; allocptr += sizeof(WALInsertLockPadded) * num_xloginsert_locks; @@ -4836,8 +4844,8 @@ XLOGShmemInit(void) /* * Align the start of the page buffers to a full xlog block size boundary. - * This simplifies some calculations in XLOG insertion. It is also required - * for O_DIRECT. + * This simplifies some calculations in XLOG insertion. It is also + * required for O_DIRECT. */ allocptr = (char *) TYPEALIGN(XLOG_BLCKSZ, allocptr); XLogCtl->pages = allocptr; @@ -5233,7 +5241,7 @@ readRecoveryCommandFile(void) const char *hintmsg; if (!parse_int(item->value, &min_recovery_apply_delay, GUC_UNIT_MS, - &hintmsg)) + &hintmsg)) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("parameter \"%s\" requires a temporal value", "min_recovery_apply_delay"), @@ -5271,7 +5279,7 @@ readRecoveryCommandFile(void) /* * If user specified recovery_target_timeline, validate it or compute the - * "latest" value. We can't do this until after we've gotten the restore + * "latest" value. We can't do this until after we've gotten the restore * command and set InArchiveRecovery, because we need to fetch timeline * history files from the archive. */ @@ -5464,8 +5472,8 @@ recoveryStopsBefore(XLogRecord *record) * * when testing for an xid, we MUST test for equality only, since * transactions are numbered in the order they start, not the order - * they complete. A higher numbered xid will complete before you - * about 50% of the time... + * they complete. A higher numbered xid will complete before you about + * 50% of the time... 
*/ stopsHere = (record->xl_xid == recoveryTargetXid); } @@ -5525,8 +5533,8 @@ recoveryStopsAfter(XLogRecord *record) record_info = record->xl_info & ~XLR_INFO_MASK; /* - * There can be many restore points that share the same name; we stop - * at the first one. + * There can be many restore points that share the same name; we stop at + * the first one. */ if (recoveryTarget == RECOVERY_TARGET_NAME && record->xl_rmid == RM_XLOG_ID && record_info == XLOG_RESTORE_POINT) @@ -5543,9 +5551,9 @@ recoveryStopsAfter(XLogRecord *record) strlcpy(recoveryStopName, recordRestorePointData->rp_name, MAXFNAMELEN); ereport(LOG, - (errmsg("recovery stopping at restore point \"%s\", time %s", - recoveryStopName, - timestamptz_to_str(recoveryStopTime)))); + (errmsg("recovery stopping at restore point \"%s\", time %s", + recoveryStopName, + timestamptz_to_str(recoveryStopTime)))); return true; } } @@ -5688,10 +5696,10 @@ recoveryApplyDelay(XLogRecord *record) /* * Is it a COMMIT record? * - * We deliberately choose not to delay aborts since they have no effect - * on MVCC. We already allow replay of records that don't have a - * timestamp, so there is already opportunity for issues caused by early - * conflicts on standbys. + * We deliberately choose not to delay aborts since they have no effect on + * MVCC. We already allow replay of records that don't have a timestamp, + * so there is already opportunity for issues caused by early conflicts on + * standbys. */ record_info = record->xl_info & ~XLR_INFO_MASK; if (!(record->xl_rmid == RM_XACT_ID && @@ -5711,7 +5719,7 @@ recoveryApplyDelay(XLogRecord *record) */ TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime, &secs, &microsecs); - if (secs <= 0 && microsecs <=0) + if (secs <= 0 && microsecs <= 0) return false; while (true) @@ -5731,15 +5739,15 @@ recoveryApplyDelay(XLogRecord *record) TimestampDifference(GetCurrentTimestamp(), recoveryDelayUntilTime, &secs, &microsecs); - if (secs <= 0 && microsecs <=0) + if (secs <= 0 && microsecs <= 0) break; elog(DEBUG2, "recovery apply delay %ld seconds, %d milliseconds", - secs, microsecs / 1000); + secs, microsecs / 1000); WaitLatch(&XLogCtl->recoveryWakeupLatch, - WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, - secs * 1000L + microsecs / 1000); + WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, + secs * 1000L + microsecs / 1000); } return true; } @@ -5978,7 +5986,7 @@ StartupXLOG(void) ValidateXLOGDirectoryStructure(); /* - * Clear out any old relcache cache files. This is *necessary* if we do + * Clear out any old relcache cache files. This is *necessary* if we do * any WAL replay, since that would probably result in the cache files * being out of sync with database reality. In theory we could leave them * in place if the database had been cleanly shut down, but it seems @@ -6050,7 +6058,7 @@ StartupXLOG(void) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), errmsg("out of memory"), - errdetail("Failed while allocating an XLog reading processor."))); + errdetail("Failed while allocating an XLog reading processor."))); xlogreader->system_identifier = ControlFile->system_identifier; if (read_backup_label(&checkPointLoc, &backupEndRequired, @@ -6261,9 +6269,9 @@ StartupXLOG(void) StartupReorderBuffer(); /* - * Startup MultiXact.
We need to do this early for two reasons: one is + * that we might try to access multixacts when we do tuple freezing, and + * the other is we need its state initialized because we attempt * truncation during restartpoints. */ StartupMultiXact(); @@ -6517,9 +6525,9 @@ StartupXLOG(void) } /* - * Initialize shared variables for tracking progress of WAL replay, - * as if we had just replayed the record before the REDO location - * (or the checkpoint record itself, if it's a shutdown checkpoint). + * Initialize shared variables for tracking progress of WAL replay, as + * if we had just replayed the record before the REDO location (or the + * checkpoint record itself, if it's a shutdown checkpoint). */ SpinLockAcquire(&xlogctl->info_lck); if (checkPoint.redo < RecPtr) @@ -6646,17 +6654,17 @@ StartupXLOG(void) } /* - * If we've been asked to lag the master, wait on - * latch until enough time has passed. + * If we've been asked to lag the master, wait on latch until + * enough time has passed. */ if (recoveryApplyDelay(record)) { /* - * We test for paused recovery again here. If - * user sets delayed apply, it may be because - * they expect to pause recovery in case of - * problems, so we must test again here otherwise - * pausing during the delay-wait wouldn't work. + * We test for paused recovery again here. If user sets + * delayed apply, it may be because they expect to pause + * recovery in case of problems, so we must test again + * here otherwise pausing during the delay-wait wouldn't + * work. */ if (xlogctl->recoveryPause) recoveryPausesHere(); @@ -6893,8 +6901,8 @@ StartupXLOG(void) /* * Consider whether we need to assign a new timeline ID. * - * If we are doing an archive recovery, we always assign a new ID. This - * handles a couple of issues. If we stopped short of the end of WAL + * If we are doing an archive recovery, we always assign a new ID. This + * handles a couple of issues. If we stopped short of the end of WAL * during recovery, then we are clearly generating a new timeline and must * assign it a unique new ID. Even if we ran to the end, modifying the * current last segment is problematic because it may result in trying to @@ -6969,7 +6977,7 @@ StartupXLOG(void) /* * Tricky point here: readBuf contains the *last* block that the LastRec - * record spans, not the one it starts in. The last block is indeed the + * record spans, not the one it starts in. The last block is indeed the * one we want to use. */ if (EndOfLog % XLOG_BLCKSZ != 0) @@ -6996,9 +7004,9 @@ StartupXLOG(void) else { /* - * There is no partial block to copy. Just set InitializedUpTo, - * and let the first attempt to insert a log record to initialize - * the next buffer. + * There is no partial block to copy. Just set InitializedUpTo, and + * let the first attempt to insert a log record to initialize the next + * buffer. */ XLogCtl->InitializedUpTo = EndOfLog; } @@ -7162,7 +7170,7 @@ StartupXLOG(void) XLogReportParameters(); /* - * All done. Allow backends to write WAL. (Although the bool flag is + * All done. Allow backends to write WAL. (Although the bool flag is * probably atomic in itself, we use the info_lck here to ensure that * there are no race conditions concerning visibility of other recent * updates to shared memory.) 
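The final StartupXLOG() hunk above explains why the flag allowing backends to write WAL is set under info_lck even though a bool store is probably atomic by itself: readers that take the same lock are then guaranteed to see every earlier shared-memory update as well. A minimal sketch of that publication pattern, with a pthread mutex standing in for the spinlock and all names illustrative:

#include <pthread.h>
#include <stdbool.h>

/* Stand-ins for XLogCtl fields; not the real PostgreSQL structures. */
static pthread_mutex_t info_lck = PTHREAD_MUTEX_INITIALIZER;
static bool SharedRecoveryInProgress = true;
static long RedoRecPtrShared = 0;

/* Writer: update shared state first, then flip the flag under the same
 * lock, so any reader that sees the new flag also sees the updates. */
void
finish_recovery(long redo)
{
	pthread_mutex_lock(&info_lck);
	RedoRecPtrShared = redo;
	SharedRecoveryInProgress = false;
	pthread_mutex_unlock(&info_lck);
}

/* Reader: taking the same lock yields a consistent view of both
 * fields, with no reliance on the bool being atomic by itself. */
bool
recovery_in_progress(long *redo)
{
	pthread_mutex_lock(&info_lck);
	bool		result = SharedRecoveryInProgress;

	*redo = RedoRecPtrShared;
	pthread_mutex_unlock(&info_lck);
	return result;
}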
@@ -7200,7 +7208,7 @@ StartupXLOG(void) static void CheckRecoveryConsistency(void) { - XLogRecPtr lastReplayedEndRecPtr; + XLogRecPtr lastReplayedEndRecPtr; /* * During crash recovery, we don't reach a consistent state until we've @@ -7322,7 +7330,7 @@ RecoveryInProgress(void) /* * Initialize TimeLineID and RedoRecPtr when we discover that recovery * is finished. InitPostgres() relies upon this behaviour to ensure - * that InitXLOGAccess() is called at backend startup. (If you change + * that InitXLOGAccess() is called at backend startup. (If you change * this, see also LocalSetXLogInsertAllowed.) */ if (!LocalRecoveryInProgress) @@ -7335,6 +7343,7 @@ RecoveryInProgress(void) pg_memory_barrier(); InitXLOGAccess(); } + /* * Note: We don't need a memory barrier when we're still in recovery. * We might exit recovery immediately after return, so the caller @@ -7594,7 +7603,7 @@ GetRedoRecPtr(void) { /* use volatile pointer to prevent code rearrangement */ volatile XLogCtlData *xlogctl = XLogCtl; - XLogRecPtr ptr; + XLogRecPtr ptr; /* * The possibly not up-to-date copy in XlogCtl is enough. Even if we @@ -7983,7 +7992,7 @@ CreateCheckPoint(int flags) /* * If this isn't a shutdown or forced checkpoint, and we have not inserted * any XLOG records since the start of the last checkpoint, skip the - * checkpoint. The idea here is to avoid inserting duplicate checkpoints + * checkpoint. The idea here is to avoid inserting duplicate checkpoints * when the system is idle. That wastes log space, and more importantly it * exposes us to possible loss of both current and previous checkpoint * records if the machine crashes just as we're writing the update. @@ -8120,7 +8129,7 @@ CreateCheckPoint(int flags) * performing those groups of actions. * * One example is end of transaction, so we must wait for any transactions - * that are currently in commit critical sections. If an xact inserted + * that are currently in commit critical sections. If an xact inserted * its commit record into XLOG just before the REDO point, then a crash * restart from the REDO point would not replay that record, which means * that our flushing had better include the xact's update of pg_clog. So @@ -8131,9 +8140,8 @@ CreateCheckPoint(int flags) * fuzzy: it is possible that we will wait for xacts we didn't really need * to wait for. But the delay should be short and it seems better to make * checkpoint take a bit longer than to hold off insertions longer than - * necessary. - * (In fact, the whole reason we have this issue is that xact.c does - * commit record XLOG insertion and clog update as two separate steps + * necessary. (In fact, the whole reason we have this issue is that xact.c + * does commit record XLOG insertion and clog update as two separate steps * protected by different locks, but again that seems best on grounds of * minimizing lock contention.) * @@ -8280,9 +8288,9 @@ CreateCheckPoint(int flags) /* * Truncate pg_subtrans if possible. We can throw away all data before - * the oldest XMIN of any running transaction. No future transaction will + * the oldest XMIN of any running transaction. No future transaction will * attempt to reference any pg_subtrans entry older than that (see Asserts - * in subtrans.c). During recovery, though, we mustn't do this because + * in subtrans.c). During recovery, though, we mustn't do this because * StartupSUBTRANS hasn't been called yet. */ if (!RecoveryInProgress()) @@ -8600,11 +8608,11 @@ CreateRestartPoint(int flags) _logSegNo--; /* - * Try to recycle segments on a useful timeline. 
If we've been promoted - * since the beginning of this restartpoint, use the new timeline - * chosen at end of recovery (RecoveryInProgress() sets ThisTimeLineID - * in that case). If we're still in recovery, use the timeline we're - * currently replaying. + * Try to recycle segments on a useful timeline. If we've been + * promoted since the beginning of this restartpoint, use the new + * timeline chosen at end of recovery (RecoveryInProgress() sets + * ThisTimeLineID in that case). If we're still in recovery, use the + * timeline we're currently replaying. * * There is no guarantee that the WAL segments will be useful on the * current timeline; if recovery proceeds to a new timeline right @@ -8636,9 +8644,9 @@ CreateRestartPoint(int flags) /* * Truncate pg_subtrans if possible. We can throw away all data before - * the oldest XMIN of any running transaction. No future transaction will + * the oldest XMIN of any running transaction. No future transaction will * attempt to reference any pg_subtrans entry older than that (see Asserts - * in subtrans.c). When hot standby is disabled, though, we mustn't do + * in subtrans.c). When hot standby is disabled, though, we mustn't do * this because StartupSUBTRANS hasn't been called yet. */ if (EnableHotStandby) @@ -8697,7 +8705,7 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo) /* then check whether slots limit removal further */ if (max_replication_slots > 0 && keep != InvalidXLogRecPtr) { - XLogRecPtr slotSegNo; + XLogRecPtr slotSegNo; XLByteToSeg(keep, slotSegNo); @@ -8730,7 +8738,7 @@ XLogPutNextOid(Oid nextOid) * We need not flush the NEXTOID record immediately, because any of the * just-allocated OIDs could only reach disk as part of a tuple insert or * update that would have its own XLOG record that must follow the NEXTOID - * record. Therefore, the standard buffer LSN interlock applied to those + * record. Therefore, the standard buffer LSN interlock applied to those * records will ensure no such OID reaches disk before the NEXTOID record * does. * @@ -8859,8 +8867,9 @@ XLogSaveBufferForHint(Buffer buffer, bool buffer_std) * lsn updates. We assume pd_lower/upper cannot be changed without an * exclusive lock, so the contents bkp are not racy. * - * With buffer_std set to false, XLogCheckBuffer() sets hole_length and - * hole_offset to 0; so the following code is safe for either case. + * With buffer_std set to false, XLogCheckBuffer() sets hole_length + * and hole_offset to 0; so the following code is safe for either + * case. */ memcpy(copied_buffer, origdata, bkpb.hole_offset); memcpy(copied_buffer + bkpb.hole_offset, @@ -9072,7 +9081,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) /* * We used to try to take the maximum of ShmemVariableCache->nextOid * and the recorded nextOid, but that fails if the OID counter wraps - * around. Since no OID allocation should be happening during replay + * around. Since no OID allocation should be happening during replay * anyway, better to just believe the record exactly. We still take * OidGenLock while setting the variable, just in case. */ @@ -9262,10 +9271,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) BkpBlock bkpb; /* - * Full-page image (FPI) records contain a backup block stored "inline" - * in the normal data since the locking when writing hint records isn't - * sufficient to use the normal backup block mechanism, which assumes - * exclusive lock on the buffer supplied. 
+ * Full-page image (FPI) records contain a backup block stored + * "inline" in the normal data since the locking when writing hint + * records isn't sufficient to use the normal backup block mechanism, + * which assumes exclusive lock on the buffer supplied. * * Since the only change in these backup block are hint bits, there * are no recovery conflicts generated. @@ -9415,7 +9424,7 @@ get_sync_bit(int method) /* * Optimize writes by bypassing kernel cache with O_DIRECT when using - * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are + * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are * disabled, otherwise the archive command or walsender process will read * the WAL soon after writing it, which is guaranteed to cause a physical * read if we bypassed the kernel cache. We also skip the @@ -9619,7 +9628,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, * during an on-line backup even if not doing so at other times, because * it's quite possible for the backup dump to obtain a "torn" (partially * written) copy of a database page if it reads the page concurrently with - * our write to the same page. This can be fixed as long as the first + * our write to the same page. This can be fixed as long as the first * write to the page in the WAL sequence is a full-page write. Hence, we * turn on forcePageWrites and then force a CHECKPOINT, to ensure there * are no dirty pages in shared memory that might get dumped while the @@ -9663,7 +9672,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, * old timeline IDs. That would otherwise happen if you called * pg_start_backup() right after restoring from a PITR archive: the * first WAL segment containing the startup checkpoint has pages in - * the beginning with the old timeline ID. That can cause trouble at + * the beginning with the old timeline ID. That can cause trouble at * recovery: we won't have a history file covering the old timeline if * pg_xlog directory was not included in the base backup and the WAL * archive was cleared too before starting the backup. @@ -9686,7 +9695,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p, bool checkpointfpw; /* - * Force a CHECKPOINT. Aside from being necessary to prevent torn + * Force a CHECKPOINT. Aside from being necessary to prevent torn * page problems, this guarantees that two successive backup runs * will have different checkpoint positions and hence different * history file names, even if nothing happened in between. @@ -10339,7 +10348,7 @@ GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli) * * If we see a backup_label during recovery, we assume that we are recovering * from a backup dump file, and we therefore roll forward from the checkpoint - * identified by the label file, NOT what pg_control says. This avoids the + * identified by the label file, NOT what pg_control says. This avoids the * problem that pg_control might have been archived one or more checkpoints * later than the start of the dump, and so if we rely on it as the start * point, we will fail to restore a consistent database state. @@ -10686,7 +10695,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * Standby mode is implemented by a state machine: * * 1. Read from either archive or pg_xlog (XLOG_FROM_ARCHIVE), or just - * pg_xlog (XLOG_FROM_XLOG) + * pg_xlog (XLOG_FROM_XLOG) * 2. Check trigger file * 3. Read from primary server via walreceiver (XLOG_FROM_STREAM) * 4. 
Rescan timelines @@ -10887,8 +10896,8 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * file from pg_xlog. */ readFile = XLogFileReadAnyTLI(readSegNo, DEBUG2, - currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY : - currentSource); + currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY : + currentSource); if (readFile >= 0) return true; /* success! */ @@ -10945,11 +10954,11 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, if (havedata) { /* - * Great, streamed far enough. Open the file if it's + * Great, streamed far enough. Open the file if it's * not open already. Also read the timeline history * file if we haven't initialized timeline history * yet; it should be streamed over and present in - * pg_xlog by now. Use XLOG_FROM_STREAM so that + * pg_xlog by now. Use XLOG_FROM_STREAM so that * source info is set correctly and XLogReceiptTime * isn't changed. */ @@ -11014,7 +11023,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, HandleStartupProcInterrupts(); } - return false; /* not reached */ + return false; /* not reached */ } /* @@ -11022,9 +11031,9 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess, * in the current WAL page, previously read by XLogPageRead(). * * 'emode' is the error mode that would be used to report a file-not-found - * or legitimate end-of-WAL situation. Generally, we use it as-is, but if + * or legitimate end-of-WAL situation. Generally, we use it as-is, but if * we're retrying the exact same record that we've tried previously, only - * complain the first time to keep the noise down. However, we only do when + * complain the first time to keep the noise down. However, we only do when * reading from pg_xlog, because we don't expect any invalid records in archive * or in records streamed from master. Files in the archive should be complete, * and we should never hit the end of WAL because we stop and wait for more WAL diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c index a43793382e4..37745dce890 100644 --- a/src/backend/access/transam/xlogarchive.c +++ b/src/backend/access/transam/xlogarchive.c @@ -300,8 +300,8 @@ RestoreArchivedFile(char *path, const char *xlogfname, signaled = WIFSIGNALED(rc) || WEXITSTATUS(rc) > 125; ereport(signaled ? FATAL : DEBUG2, - (errmsg("could not restore file \"%s\" from archive: %s", - xlogfname, wait_result_to_str(rc)))); + (errmsg("could not restore file \"%s\" from archive: %s", + xlogfname, wait_result_to_str(rc)))); not_available: diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c index 5f8d65514c1..8a87581e79c 100644 --- a/src/backend/access/transam/xlogfuncs.c +++ b/src/backend/access/transam/xlogfuncs.c @@ -429,7 +429,7 @@ pg_is_in_recovery(PG_FUNCTION_ARGS) Datum pg_xlog_location_diff(PG_FUNCTION_ARGS) { - Datum result; + Datum result; result = DirectFunctionCall2(pg_lsn_mi, PG_GETARG_DATUM(0), diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index eff2081afe8..f06daa2638f 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -199,7 +199,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) randAccess = true; /* - * RecPtr is pointing to end+1 of the previous WAL record. If we're + * RecPtr is pointing to end+1 of the previous WAL record. If we're * at a page boundary, no more records can fit on the current page. 
We * must skip over the page header, but we can't do that until we've * read in the page, since the header size is variable. @@ -277,7 +277,7 @@ XLogReadRecord(XLogReaderState *state, XLogRecPtr RecPtr, char **errormsg) /* * If the whole record header is on this page, validate it immediately. * Otherwise do just a basic sanity check on xl_tot_len, and validate the - * rest of the header after reading it from the next page. The xl_tot_len + * rest of the header after reading it from the next page. The xl_tot_len * check is necessary here to ensure that we enter the "Need to reassemble * record" code path below; otherwise we might fail to apply * ValidXLogRecordHeader at all. @@ -572,7 +572,7 @@ err: * Validate an XLOG record header. * * This is just a convenience subroutine to avoid duplicated code in - * XLogReadRecord. It's not intended for use from anywhere else. + * XLogReadRecord. It's not intended for use from anywhere else. */ static bool ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, @@ -661,7 +661,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr, * data to read in) until we've checked the CRCs. * * We assume all of the record (that is, xl_tot_len bytes) has been read - * into memory at *record. Also, ValidXLogRecordHeader() has accepted the + * into memory at *record. Also, ValidXLogRecordHeader() has accepted the * record's header, which means in particular that xl_tot_len is at least * SizeOfXlogRecord, so it is safe to fetch xl_len. */ diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index c36e71d8066..4a542e65ca2 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -76,7 +76,7 @@ int numattr; /* number of attributes for cur. rel */ * in the core "bootstrapped" catalogs. * * XXX several of these input/output functions do catalog scans - * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some + * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some * order dependencies in the catalog creation process. */ struct typinfo @@ -374,9 +374,9 @@ AuxiliaryProcessMain(int argc, char *argv[]) #endif /* - * Assign the ProcSignalSlot for an auxiliary process. Since it + * Assign the ProcSignalSlot for an auxiliary process. Since it * doesn't have a BackendId, the slot is statically allocated based on - * the auxiliary process type (MyAuxProcType). Backends use slots + * the auxiliary process type (MyAuxProcType). Backends use slots * indexed in the range from 1 to MaxBackends (inclusive), so we use * MaxBackends + AuxProcType + 1 as the index of the slot for an * auxiliary process. @@ -561,7 +561,7 @@ bootstrap_signals(void) } /* - * Begin shutdown of an auxiliary process. This is approximately the equivalent + * Begin shutdown of an auxiliary process. This is approximately the equivalent * of ShutdownPostgres() in postinit.c. We can't run transactions in an * auxiliary process, so most of the work of AbortTransaction() is not needed, * but we do need to make sure we've released any LWLocks we are holding. @@ -876,7 +876,7 @@ cleanup(void) * and not an OID at all, until the first reference to a type not known in * TypInfo[]. At that point it will read and cache pg_type in the Typ array, * and subsequently return a real OID (and set the global pointer Ap to - * point at the found row in Typ). So caller must check whether Typ is + * point at the found row in Typ). So caller must check whether Typ is * still NULL to determine what the return value is! 
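The xlogreader.c hunks above lean on an ordering of checks: xl_tot_len is validated first, so that fetching further header fields such as xl_len is known to be safe. A toy sketch of that discipline, with ToyRecordHeader as a hypothetical stand-in for the real XLogRecord layout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Toy record header; not the real XLogRecord. */
typedef struct
{
	uint32_t	tot_len;		/* total record length, header included */
	uint32_t	len;			/* payload length */
} ToyRecordHeader;

/* Validate before trusting: tot_len must at least cover the header
 * itself before len may be used, echoing the xl_tot_len check that
 * ValidXLogRecordHeader() performs ahead of any use of xl_len. */
bool
toy_header_ok(const char *buf, size_t buflen)
{
	ToyRecordHeader hdr;

	if (buflen < sizeof(ToyRecordHeader))
		return false;			/* cannot even read the header safely */
	memcpy(&hdr, buf, sizeof(hdr));
	if (hdr.tot_len < sizeof(ToyRecordHeader))
		return false;			/* record claims to be smaller than its header */
	if (hdr.len > hdr.tot_len - sizeof(ToyRecordHeader))
		return false;			/* payload longer than the record allows */
	return true;
}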
* ---------------- */ @@ -1073,9 +1073,9 @@ MapArrayTypeName(char *s) * * At bootstrap time, we define a bunch of indexes on system catalogs. * We postpone actually building the indexes until just before we're - * finished with initialization, however. This is because the indexes + * finished with initialization, however. This is because the indexes * themselves have catalog entries, and those have to be included in the - * indexes on those catalogs. Doing it in two phases is the simplest + * indexes on those catalogs. Doing it in two phases is the simplest * way of making sure the indexes have the right contents at the end. */ void @@ -1088,7 +1088,7 @@ index_register(Oid heap, /* * XXX mao 10/31/92 -- don't gc index reldescs, associated info at - * bootstrap time. we'll declare the indexes now, but want to create them + * bootstrap time. we'll declare the indexes now, but want to create them * later. */ diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index f4fc12d83ac..d9745cabd24 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -313,7 +313,7 @@ restrict_and_check_grant(bool is_grant, AclMode avail_goptions, bool all_privs, /* * Restrict the operation to what we can actually grant or revoke, and - * issue a warning if appropriate. (For REVOKE this isn't quite what the + * issue a warning if appropriate. (For REVOKE this isn't quite what the * spec says to do: the spec seems to want a warning only if no privilege * bits actually change in the ACL. In practice that behavior seems much * too noisy, as well as inconsistent with the GRANT case.) @@ -1092,7 +1092,7 @@ SetDefaultACL(InternalDefaultACL *iacls) /* * The default for a global entry is the hard-wired default ACL for the - * particular object type. The default for non-global entries is an empty + * particular object type. The default for non-global entries is an empty * ACL. This must be so because global entries replace the hard-wired * defaults, while others are added on. */ @@ -1662,7 +1662,7 @@ ExecGrant_Attribute(InternalGrant *istmt, Oid relOid, const char *relname, * If the updated ACL is empty, we can set attacl to null, and maybe even * avoid an update of the pg_attribute row. This is worth testing because * we'll come through here multiple times for any relation-level REVOKE, - * even if there were never any column GRANTs. Note we are assuming that + * even if there were never any column GRANTs. Note we are assuming that * the "default" ACL state for columns is empty. */ if (ACL_NUM(new_acl) > 0) @@ -1787,7 +1787,7 @@ ExecGrant_Relation(InternalGrant *istmt) { /* * Mention the object name because the user needs to know - * which operations succeeded. This is required because + * which operations succeeded. This is required because * WARNING allows the command to continue. */ ereport(WARNING, @@ -1816,7 +1816,7 @@ ExecGrant_Relation(InternalGrant *istmt) /* * Set up array in which we'll accumulate any column privilege bits - * that need modification. The array is indexed such that entry [0] + * that need modification. The array is indexed such that entry [0] * corresponds to FirstLowInvalidHeapAttributeNumber. */ num_col_privileges = pg_class_tuple->relnatts - FirstLowInvalidHeapAttributeNumber + 1; @@ -3507,7 +3507,7 @@ pg_aclmask(AclObjectKind objkind, Oid table_oid, AttrNumber attnum, Oid roleid, * * Note: this considers only privileges granted specifically on the column. 
* It is caller's responsibility to take relation-level privileges into account - * as appropriate. (For the same reason, we have no special case for + * as appropriate. (For the same reason, we have no special case for * superuser-ness here.) */ AclMode @@ -3620,12 +3620,12 @@ pg_class_aclmask(Oid table_oid, Oid roleid, /* * Deny anyone permission to update a system catalog unless - * pg_authid.rolcatupdate is set. (This is to let superusers protect + * pg_authid.rolcatupdate is set. (This is to let superusers protect * themselves from themselves.) Also allow it if allowSystemTableMods. * * As of 7.4 we have some updatable system views; those shouldn't be * protected in this way. Assume the view rules can take care of - * themselves. ACL_USAGE is if we ever have system sequences. + * themselves. ACL_USAGE is if we ever have system sequences. */ if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_TRUNCATE | ACL_USAGE)) && IsSystemClass(table_oid, classForm) && @@ -4331,7 +4331,7 @@ pg_attribute_aclcheck_all(Oid table_oid, Oid roleid, AclMode mode, ReleaseSysCache(classTuple); /* - * Initialize result in case there are no non-dropped columns. We want to + * Initialize result in case there are no non-dropped columns. We want to * report failure in such cases for either value of 'how'. */ result = ACLCHECK_NO_PRIV; diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c index 3ec360c2be5..2eb2c2fddf6 100644 --- a/src/backend/catalog/catalog.c +++ b/src/backend/catalog/catalog.c @@ -48,7 +48,7 @@ * IsSystemRelation * True iff the relation is either a system catalog or toast table. * By a system catalog, we mean one that created in the pg_catalog schema - * during initdb. User-created relations in pg_catalog don't count as + * during initdb. User-created relations in pg_catalog don't count as * system catalogs. * * NB: TOAST relations are considered system relations by this test @@ -100,7 +100,7 @@ IsCatalogRelation(Relation relation) bool IsCatalogClass(Oid relid, Form_pg_class reltuple) { - Oid relnamespace = reltuple->relnamespace; + Oid relnamespace = reltuple->relnamespace; /* * Never consider relations outside pg_catalog/pg_toast to be catalog @@ -268,7 +268,7 @@ IsSharedRelation(Oid relationId) * Since the OID is not immediately inserted into the table, there is a * race condition here; but a problem could occur only if someone else * managed to cycle through 2^32 OIDs and generate the same OID before we - * finish inserting our row. This seems unlikely to be a problem. Note + * finish inserting our row. This seems unlikely to be a problem. Note * that if we had to *commit* the row to end the race condition, the risk * would be rather higher; therefore we use SnapshotDirty in the test, * so that we will see uncommitted rows. @@ -314,7 +314,7 @@ GetNewOid(Relation relation) * This is exported separately because there are cases where we want to use * an index that will not be recognized by RelationGetOidIndex: TOAST tables * have indexes that are usable, but have multiple columns and are on - * ordinary columns rather than a true OID column. This code will work + * ordinary columns rather than a true OID column. This code will work * anyway, so long as the OID is the index's first column. The caller must * pass in the actual heap attnum of the OID column, however. 
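The GetNewOid() comments above describe the generation scheme: draw a value from the counter, probe the index with a dirty snapshot so uncommitted rows are visible, and retry on a hit; the value is never reserved, so only a full 2^32 wraparound could produce a duplicate. A hedged sketch of that loop, where next_counter_value() and value_in_index() are hypothetical helpers rather than PostgreSQL functions:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t next_counter_value(void);	/* hypothetical counter */
extern bool value_in_index(uint32_t value); /* hypothetical dirty-snapshot probe */

/* Keep drawing until the probe finds no matching entry.  The candidate
 * is not inserted or locked here, which is exactly why the original
 * comment calls the scheme a (tolerated) race. */
uint32_t
get_new_value(void)
{
	uint32_t	candidate;

	do
	{
		candidate = next_counter_value();
	} while (value_in_index(candidate));

	return candidate;
}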
* diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index e5116693cf7..d41ba49f877 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -261,7 +261,7 @@ performDeletion(const ObjectAddress *object, depRel = heap_open(DependRelationId, RowExclusiveLock); /* - * Acquire deletion lock on the target object. (Ideally the caller has + * Acquire deletion lock on the target object. (Ideally the caller has * done this already, but many places are sloppy about it.) */ AcquireDeletionLock(object, 0); @@ -373,7 +373,7 @@ performMultipleDeletions(const ObjectAddresses *objects, /* * deleteWhatDependsOn: attempt to drop everything that depends on the - * specified object, though not the object itself. Behavior is always + * specified object, though not the object itself. Behavior is always * CASCADE. * * This is currently used only to clean out the contents of a schema @@ -399,7 +399,7 @@ deleteWhatDependsOn(const ObjectAddress *object, depRel = heap_open(DependRelationId, RowExclusiveLock); /* - * Acquire deletion lock on the target object. (Ideally the caller has + * Acquire deletion lock on the target object. (Ideally the caller has * done this already, but many places are sloppy about it.) */ AcquireDeletionLock(object, 0); @@ -441,7 +441,7 @@ deleteWhatDependsOn(const ObjectAddress *object, * Since this function is currently only used to clean out temporary * schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that * the operation is an automatic system operation rather than a user - * action. If, in the future, this function is used for other + * action. If, in the future, this function is used for other * purposes, we might need to revisit this. */ deleteOneObject(thisobj, &depRel, PERFORM_DELETION_INTERNAL); @@ -458,7 +458,7 @@ deleteWhatDependsOn(const ObjectAddress *object, * * For every object that depends on the starting object, acquire a deletion * lock on the object, add it to targetObjects (if not already there), - * and recursively find objects that depend on it. An object's dependencies + * and recursively find objects that depend on it. An object's dependencies * will be placed into targetObjects before the object itself; this means * that the finished list's order represents a safe deletion order. * @@ -510,7 +510,7 @@ findDependentObjects(const ObjectAddress *object, * will not break a loop at an internal dependency: if we enter the loop * at an "owned" object we will switch and start at the "owning" object * instead. We could probably hack something up to avoid breaking at an - * auto dependency, too, if we had to. However there are no known cases + * auto dependency, too, if we had to. However there are no known cases * where that would be necessary. */ if (stack_address_present_add_flags(object, flags, stack)) @@ -531,7 +531,7 @@ findDependentObjects(const ObjectAddress *object, /* * The target object might be internally dependent on some other object * (its "owner"), and/or be a member of an extension (also considered its - * owner). If so, and if we aren't recursing from the owning object, we + * owner). If so, and if we aren't recursing from the owning object, we * have to transform this deletion request into a deletion request of the * owning object. (We'll eventually recurse back to this object, but the * owning object has to be visited first so it will be deleted after.) 
The @@ -594,7 +594,7 @@ findDependentObjects(const ObjectAddress *object, /* * Exception 1a: if the owning object is listed in * pendingObjects, just release the caller's lock and - * return. We'll eventually complete the DROP when we + * return. We'll eventually complete the DROP when we * reach that entry in the pending list. */ if (pendingObjects && @@ -647,7 +647,7 @@ findDependentObjects(const ObjectAddress *object, * owning object. * * First, release caller's lock on this object and get - * deletion lock on the owning object. (We must release + * deletion lock on the owning object. (We must release * caller's lock to avoid deadlock against a concurrent * deletion of the owning object.) */ @@ -809,7 +809,7 @@ findDependentObjects(const ObjectAddress *object, systable_endscan(scan); /* - * Finally, we can add the target object to targetObjects. Be careful to + * Finally, we can add the target object to targetObjects. Be careful to * include any flags that were passed back down to us from inner recursion * levels. */ @@ -864,7 +864,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects, /* * We limit the number of dependencies reported to the client to * MAX_REPORTED_DEPS, since client software may not deal well with - * enormous error strings. The server log always gets a full report. + * enormous error strings. The server log always gets a full report. */ #define MAX_REPORTED_DEPS 100 @@ -897,7 +897,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects, DEPFLAG_EXTENSION)) { /* - * auto-cascades are reported at DEBUG2, not msglevel. We don't + * auto-cascades are reported at DEBUG2, not msglevel. We don't * try to combine them with the regular message because the * results are too confusing when client_min_messages and * log_min_messages are different. @@ -1079,7 +1079,7 @@ deleteOneObject(const ObjectAddress *object, Relation *depRel, int flags) systable_endscan(scan); /* - * Delete shared dependency references related to this object. Again, if + * Delete shared dependency references related to this object. Again, if * subId = 0, remove records for sub-objects too. */ deleteSharedDependencyRecordsFor(object->classId, object->objectId, @@ -1344,13 +1344,13 @@ recordDependencyOnExpr(const ObjectAddress *depender, * recordDependencyOnSingleRelExpr - find expression dependencies * * As above, but only one relation is expected to be referenced (with - * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a + * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a * range table. An additional frammish is that dependencies on that * relation (or its component columns) will be marked with 'self_behavior', * whereas 'behavior' is used for everything else. * * NOTE: the caller should ensure that a whole-table dependency on the - * specified relation is created separately, if one is needed. In particular, + * specified relation is created separately, if one is needed. In particular, * a whole-row Var "relation.*" will not cause this routine to emit any * dependency item. This is appropriate behavior for subexpressions of an * ordinary query, so other cases need to cope as necessary. @@ -1470,7 +1470,7 @@ find_expr_references_walker(Node *node, /* * A whole-row Var references no specific columns, so adds no new - * dependency. (We assume that there is a whole-table dependency + * dependency. (We assume that there is a whole-table dependency * arising from each underlying rangetable entry. 
While we could * record such a dependency when finding a whole-row Var that * references a relation directly, it's quite unclear how to extend @@ -1529,7 +1529,7 @@ find_expr_references_walker(Node *node, /* * If it's a regclass or similar literal referring to an existing - * object, add a reference to that object. (Currently, only the + * object, add a reference to that object. (Currently, only the * regclass and regconfig cases have any likely use, but we may as * well handle all the OID-alias datatypes consistently.) */ @@ -2130,7 +2130,7 @@ object_address_present_add_flags(const ObjectAddress *object, { /* * We get here if we find a need to delete a column after - * having already decided to drop its whole table. Obviously + * having already decided to drop its whole table. Obviously * we no longer need to drop the column. But don't plaster * its flags on the table. */ diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 2cf4bc033c8..33eef9f1caf 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -21,7 +21,7 @@ * the old heap_create_with_catalog, amcreate, and amdestroy. * those routines will soon call these routines using the function * manager, - * just like the poorly named "NewXXX" routines do. The + * just like the poorly named "NewXXX" routines do. The * "New" routines are all going to die soon, once and for all! * -cim 1/13/91 * @@ -199,7 +199,7 @@ SystemAttributeDefinition(AttrNumber attno, bool relhasoids) /* * If the given name is a system attribute name, return a Form_pg_attribute - * pointer for a prototype definition. If not, return NULL. + * pointer for a prototype definition. If not, return NULL. */ Form_pg_attribute SystemAttributeByName(const char *attname, bool relhasoids) @@ -527,7 +527,7 @@ CheckAttributeType(const char *attname, int i; /* - * Check for self-containment. Eventually we might be able to allow + * Check for self-containment. Eventually we might be able to allow * this (just return without complaint, if so) but it's not clear how * many other places would require anti-recursion defenses before it * would be safe to allow tables to contain their own rowtype. @@ -590,7 +590,7 @@ CheckAttributeType(const char *attname, * attribute to insert (but we ignore attacl and attoptions, which are always * initialized to NULL). * - * indstate is the index state for CatalogIndexInsert. It can be passed as + * indstate is the index state for CatalogIndexInsert. It can be passed as * NULL, in which case we'll fetch the necessary info. (Don't do this when * inserting multiple attributes, because it's a tad more expensive.) */ @@ -757,7 +757,7 @@ AddNewAttributeTuples(Oid new_rel_oid, * Tuple data is taken from new_rel_desc->rd_rel, except for the * variable-width fields which are not present in a cached reldesc. * relacl and reloptions are passed in Datum form (to avoid having - * to reference the data types in heap.h). Pass (Datum) 0 to set them + * to reference the data types in heap.h). Pass (Datum) 0 to set them * to NULL. * -------------------------------- */ @@ -816,7 +816,7 @@ InsertPgClassTuple(Relation pg_class_desc, tup = heap_form_tuple(RelationGetDescr(pg_class_desc), values, nulls); /* - * The new tuple must have the oid already chosen for the rel. Sure would + * The new tuple must have the oid already chosen for the rel. Sure would * be embarrassing to do this sort of thing in polite company. 
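CheckAttributeType()'s self-containment check above amounts to cycle detection over composite types: a type must not appear on its own containment chain. A toy sketch under that reading; TypeDef and lookup_type() are hypothetical, and the caller must size stack[] for the maximum nesting depth:

#include <stdbool.h>

/* Hypothetical composite-type description; not PostgreSQL's catalogs. */
typedef struct TypeDef
{
	int			nfields;
	const int  *field_typids;	/* 0 means a scalar field */
} TypeDef;

extern const TypeDef *lookup_type(int typid);

/* Return true if typid (directly or indirectly) contains itself,
 * keeping the chain of composites being descended in stack[], much as
 * CheckAttributeType() threads its containing_rowtypes list. */
bool
composite_has_cycle(int typid, int *stack, int depth)
{
	const TypeDef *t = lookup_type(typid);

	for (int i = 0; i < depth; i++)
	{
		if (stack[i] == typid)
			return true;		/* typid is on its own containment chain */
	}
	stack[depth] = typid;
	for (int f = 0; f < t->nfields; f++)
	{
		int			ftype = t->field_typids[f];

		if (ftype != 0 && composite_has_cycle(ftype, stack, depth + 1))
			return true;
	}
	return false;
}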
*/ HeapTupleSetOid(tup, new_rel_oid); @@ -1372,8 +1372,8 @@ heap_create_init_fork(Relation rel) * RelationRemoveInheritance * * Formerly, this routine checked for child relations and aborted the - * deletion if any were found. Now we rely on the dependency mechanism - * to check for or delete child relations. By the time we get here, + * deletion if any were found. Now we rely on the dependency mechanism + * to check for or delete child relations. By the time we get here, * there are no children and we need only remove any pg_inherits rows * linking this relation to its parent(s). */ @@ -1658,7 +1658,7 @@ RemoveAttrDefault(Oid relid, AttrNumber attnum, /* * RemoveAttrDefaultById * - * Remove a pg_attrdef entry specified by OID. This is the guts of + * Remove a pg_attrdef entry specified by OID. This is the guts of * attribute-default removal. Note it should be called via performDeletion, * not directly. */ @@ -2065,7 +2065,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) /* * Deparsing of constraint expressions will fail unless the just-created - * pg_attribute tuples for this relation are made visible. So, bump the + * pg_attribute tuples for this relation are made visible. So, bump the * command counter. CAUTION: this will cause a relcache entry rebuild. */ CommandCounterIncrement(); @@ -2117,7 +2117,7 @@ StoreConstraints(Relation rel, List *cooked_constraints, bool is_internal) * the default and constraint expressions added to the relation. * * NB: caller should have opened rel with AccessExclusiveLock, and should - * hold that lock till end of transaction. Also, we assume the caller has + * hold that lock till end of transaction. Also, we assume the caller has * done a CommandCounterIncrement if necessary to make the relation's catalog * tuples visible. */ @@ -2262,7 +2262,7 @@ AddRelationNewConstraints(Relation rel, checknames = lappend(checknames, ccname); /* - * Check against pre-existing constraints. If we are allowed to + * Check against pre-existing constraints. If we are allowed to * merge with an existing constraint, there's no more to do here. * (We omit the duplicate constraint from the result, which is * what ATAddCheckConstraint wants.) @@ -2279,7 +2279,7 @@ AddRelationNewConstraints(Relation rel, * column constraint and "tab_check" for a table constraint. We * no longer have any info about the syntactic positioning of the * constraint phrase, so we approximate this by seeing whether the - * expression references more than one column. (If the user + * expression references more than one column. (If the user * played by the rules, the result is the same...) * * Note: pull_var_clause() doesn't descend into sublinks, but we @@ -2664,7 +2664,7 @@ RemoveStatistics(Oid relid, AttrNumber attnum) * with the heap relation to zero tuples. * * The routine will truncate and then reconstruct the indexes on - * the specified relation. Caller must hold exclusive lock on rel. + * the specified relation. Caller must hold exclusive lock on rel. */ static void RelationTruncateIndexes(Relation heapRelation) @@ -2704,7 +2704,7 @@ RelationTruncateIndexes(Relation heapRelation) * This routine deletes all data within all the specified relations. * * This is not transaction-safe! There is another, transaction-safe - * implementation in commands/tablecmds.c. We now use this only for + * implementation in commands/tablecmds.c. We now use this only for * ON COMMIT truncation of temporary tables, where it doesn't matter. 
*/ void @@ -2813,7 +2813,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables) return; /* - * Otherwise, must scan pg_constraint. We make one pass with all the + * Otherwise, must scan pg_constraint. We make one pass with all the * relations considered; if this finds nothing, then all is well. */ dependents = heap_truncate_find_FKs(oids); @@ -2874,7 +2874,7 @@ heap_truncate_check_FKs(List *relations, bool tempTables) * behavior to change depending on chance locations of rows in pg_constraint.) * * Note: caller should already have appropriate lock on all rels mentioned - * in relationIds. Since adding or dropping an FK requires exclusive lock + * in relationIds. Since adding or dropping an FK requires exclusive lock * on both rels, this ensures that the answer will be stable. */ List * diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index c932c833421..80acc0ec27f 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -411,7 +411,7 @@ ConstructTupleDescriptor(Relation heapRelation, /* * We do not yet have the correct relation OID for the index, so just - * set it invalid for now. InitializeAttributeOids() will fix it + * set it invalid for now. InitializeAttributeOids() will fix it * later. */ to->attrelid = InvalidOid; @@ -651,7 +651,7 @@ UpdateIndexRelation(Oid indexoid, * heapRelation: table to build index on (suitably locked by caller) * indexRelationName: what it say * indexRelationId: normally, pass InvalidOid to let this routine - * generate an OID for the index. During bootstrap this may be + * generate an OID for the index. During bootstrap this may be * nonzero to specify a preselected OID. * relFileNode: normally, pass InvalidOid to get new storage. May be * nonzero to attach an existing valid build. @@ -670,7 +670,7 @@ UpdateIndexRelation(Oid indexoid, * allow_system_table_mods: allow table to be a system catalog * skip_build: true to skip the index_build() step for the moment; caller * must do it later (typically via reindex_index()) - * concurrent: if true, do not lock the table against writers. The index + * concurrent: if true, do not lock the table against writers. The index * will be marked "invalid" and the caller must take additional steps * to fix it up. * is_internal: if true, post creation hook for new index @@ -960,7 +960,7 @@ index_create(Relation heapRelation, /* * If there are no simply-referenced columns, give the index an - * auto dependency on the whole table. In most cases, this will + * auto dependency on the whole table. In most cases, this will * be redundant, but it might not be if the index expressions and * predicate contain no Vars or only whole-row Vars. */ @@ -1085,7 +1085,7 @@ index_create(Relation heapRelation, /* * Close the index; but we keep the lock that we acquired above until end - * of transaction. Closing the heap is caller's responsibility. + * of transaction. Closing the heap is caller's responsibility. */ index_close(indexRelation, NoLock); @@ -1243,7 +1243,7 @@ index_constraint_create(Relation heapRelation, * have been so marked already, so no need to clear the flag in the other * case. * - * Note: this might better be done by callers. We do it here to avoid + * Note: this might better be done by callers. We do it here to avoid * exposing index_update_stats() globally, but that wouldn't be necessary * if relhaspkey went away. */ @@ -1256,10 +1256,10 @@ index_constraint_create(Relation heapRelation, /* * If needed, mark the index as primary and/or deferred in pg_index. 
* - * Note: When making an existing index into a constraint, caller must - * have a table lock that prevents concurrent table updates; otherwise, - * there is a risk that concurrent readers of the table will miss seeing - * this index at all. + * Note: When making an existing index into a constraint, caller must have + * a table lock that prevents concurrent table updates; otherwise, there + * is a risk that concurrent readers of the table will miss seeing this + * index at all. */ if (update_pgindex && (mark_as_primary || deferrable)) { @@ -1336,7 +1336,7 @@ index_drop(Oid indexId, bool concurrent) * in multiple steps and waiting out any transactions that might be using * the index, so we don't need exclusive lock on the parent table. Instead * we take ShareUpdateExclusiveLock, to ensure that two sessions aren't - * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get + * doing CREATE/DROP INDEX CONCURRENTLY on the same index. (We will get * AccessExclusiveLock on the index below, once we're sure nobody else is * using it.) */ @@ -1376,7 +1376,7 @@ index_drop(Oid indexId, bool concurrent) * non-concurrent case we can just do that now. In the concurrent case * it's a bit trickier. The predicate locks must be moved when there are * no index scans in progress on the index and no more can subsequently - * start, so that no new predicate locks can be made on the index. Also, + * start, so that no new predicate locks can be made on the index. Also, * they must be moved before heap inserts stop maintaining the index, else * the conflict with the predicate lock on the index gap could be missed * before the lock on the heap relation is in place to detect a conflict @@ -1386,11 +1386,11 @@ index_drop(Oid indexId, bool concurrent) { /* * We must commit our transaction in order to make the first pg_index - * state update visible to other sessions. If the DROP machinery has + * state update visible to other sessions. If the DROP machinery has * already performed any other actions (removal of other objects, * pg_depend entries, etc), the commit would make those actions * permanent, which would leave us with inconsistent catalog state if - * we fail partway through the following sequence. Since DROP INDEX + * we fail partway through the following sequence. Since DROP INDEX * CONCURRENTLY is restricted to dropping just one index that has no * dependencies, we should get here before anything's been done --- * but let's check that to be sure. We can verify that the current @@ -1426,7 +1426,7 @@ index_drop(Oid indexId, bool concurrent) * We must commit our current transaction so that the indisvalid * update becomes visible to other transactions; then start another. * Note that any previously-built data structures are lost in the - * commit. The only data we keep past here are the relation IDs. + * commit. The only data we keep past here are the relation IDs. * * Before committing, get a session-level lock on the table, to ensure * that neither it nor the index can be dropped before we finish. This @@ -1443,10 +1443,10 @@ index_drop(Oid indexId, bool concurrent) /* * Now we must wait until no running transaction could be using the * index for a query. Use AccessExclusiveLock here to check for - * running transactions that hold locks of any kind on the table. - * Note we do not need to worry about xacts that open the table for - * reading after this point; they will see the index as invalid when - * they open the relation. + * running transactions that hold locks of any kind on the table. 
Note + * we do not need to worry about xacts that open the table for reading + * after this point; they will see the index as invalid when they open + * the relation. * * Note: the reason we use actual lock acquisition here, rather than * just checking the ProcArray and sleeping, is that deadlock is @@ -1468,7 +1468,7 @@ index_drop(Oid indexId, bool concurrent) /* * Now we are sure that nobody uses the index for queries; they just - * might have it open for updating it. So now we can unset indisready + * might have it open for updating it. So now we can unset indisready * and indislive, then wait till nobody could be using it at all * anymore. */ @@ -1599,7 +1599,7 @@ index_drop(Oid indexId, bool concurrent) * * IndexInfo stores the information about the index that's needed by * FormIndexDatum, which is used for both index_build() and later insertion - * of individual index tuples. Normally we build an IndexInfo for an index + * of individual index tuples. Normally we build an IndexInfo for an index * just once per command, and then use it for (potentially) many tuples. * ---------------- */ @@ -1669,7 +1669,7 @@ BuildIndexInfo(Relation index) * context must point to the heap tuple passed in. * * Notice we don't actually call index_form_tuple() here; we just prepare - * its input arrays values[] and isnull[]. This is because the index AM + * its input arrays values[] and isnull[]. This is because the index AM * may wish to alter the data before storage. * ---------------- */ @@ -1735,7 +1735,7 @@ FormIndexDatum(IndexInfo *indexInfo, * index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX * * This routine updates the pg_class row of either an index or its parent - * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed + * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed * to ensure we can do all the necessary work in just one update. * * hasindex: set relhasindex to this value @@ -1747,7 +1747,7 @@ FormIndexDatum(IndexInfo *indexInfo, * * NOTE: an important side-effect of this operation is that an SI invalidation * message is sent out to all backends --- including me --- causing relcache - * entries to be flushed or updated with the new data. This must happen even + * entries to be flushed or updated with the new data. This must happen even * if we find that no change is needed in the pg_class row. When updating * a heap entry, this ensures that other backends find out about the new * index. When updating an index, it's important because some index AMs @@ -1786,13 +1786,13 @@ index_update_stats(Relation rel, * 4. Even with just a single CREATE INDEX, there's a risk factor because * someone else might be trying to open the rel while we commit, and this * creates a race condition as to whether he will see both or neither of - * the pg_class row versions as valid. Again, a non-transactional update + * the pg_class row versions as valid. Again, a non-transactional update * avoids the risk. It is indeterminate which state of the row the other * process will see, but it doesn't matter (if he's only taking * AccessShareLock, then it's not critical that he see relhasindex true). * * It is safe to use a non-transactional update even though our - * transaction could still fail before committing. Setting relhasindex + * transaction could still fail before committing. Setting relhasindex * true is safe even if there are no indexes (VACUUM will eventually fix * it), likewise for relhaspkey. 
And of course the new relpages and * reltuples counts are correct regardless. However, we don't want to @@ -1804,7 +1804,7 @@ index_update_stats(Relation rel, pg_class = heap_open(RelationRelationId, RowExclusiveLock); /* - * Make a copy of the tuple to update. Normally we use the syscache, but + * Make a copy of the tuple to update. Normally we use the syscache, but * we can't rely on that during bootstrap or while reindexing pg_class * itself. */ @@ -1903,7 +1903,7 @@ index_update_stats(Relation rel, * index_build - invoke access-method-specific index build procedure * * On entry, the index's catalog entries are valid, and its physical disk - * file has been created but is empty. We call the AM-specific build + * file has been created but is empty. We call the AM-specific build * procedure to fill in the index contents. We then update the pg_class * entries of the index and heap relation as needed, using statistics * returned by ambuild as well as data passed by the caller. @@ -2001,7 +2001,7 @@ index_build(Relation heapRelation, * Therefore, this code path can only be taken during non-concurrent * CREATE INDEX. Thus the fact that heap_update will set the pg_index * tuple's xmin doesn't matter, because that tuple was created in the - * current transaction anyway. That also means we don't need to worry + * current transaction anyway. That also means we don't need to worry * about any concurrent readers of the tuple; no other transaction can see * it yet. */ @@ -2050,7 +2050,7 @@ index_build(Relation heapRelation, /* * If it's for an exclusion constraint, make a second pass over the heap - * to verify that the constraint is satisfied. We must not do this until + * to verify that the constraint is satisfied. We must not do this until * the index is fully valid. (Broken HOT chains shouldn't matter, though; * see comments for IndexCheckExclusion.) */ @@ -2075,8 +2075,8 @@ index_build(Relation heapRelation, * things to add it to the new index. After we return, the AM's index * build procedure does whatever cleanup it needs. * - * The total count of heap tuples is returned. This is for updating pg_class - * statistics. (It's annoying not to be able to do that here, but we want + * The total count of heap tuples is returned. This is for updating pg_class + * statistics. (It's annoying not to be able to do that here, but we want * to merge that update with others; see index_update_stats.) Note that the * index AM itself must keep track of the number of index tuples; we don't do * so here because the AM might reject some of the tuples for its own reasons, @@ -2126,7 +2126,7 @@ IndexBuildHeapScan(Relation heapRelation, /* * Need an EState for evaluation of index expressions and partial-index - * predicates. Also a slot to hold the current tuple. + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -2251,7 +2251,7 @@ IndexBuildHeapScan(Relation heapRelation, * building it, and may need to see such tuples.) * * However, if it was HOT-updated then we must only index - * the live tuple at the end of the HOT-chain. Since this + * the live tuple at the end of the HOT-chain. Since this * breaks semantics for pre-existing snapshots, mark the * index as unusable for them. */ @@ -2271,7 +2271,7 @@ IndexBuildHeapScan(Relation heapRelation, /* * Since caller should hold ShareLock or better, normally * the only way to see this is if it was inserted earlier - * in our own transaction. 
However, it can happen in + * in our own transaction. However, it can happen in * system catalogs, since we tend to release write lock * before commit there. Give a warning if neither case * applies. @@ -2426,7 +2426,7 @@ IndexBuildHeapScan(Relation heapRelation, /* * You'd think we should go ahead and build the index tuple here, but - * some index AMs want to do further processing on the data first. So + * some index AMs want to do further processing on the data first. So * pass the values[] and isnull[] arrays, instead. */ @@ -2517,7 +2517,7 @@ IndexCheckExclusion(Relation heapRelation, /* * Need an EState for evaluation of index expressions and partial-index - * predicates. Also a slot to hold the current tuple. + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -2597,11 +2597,11 @@ IndexCheckExclusion(Relation heapRelation, * We do a concurrent index build by first inserting the catalog entry for the * index via index_create(), marking it not indisready and not indisvalid. * Then we commit our transaction and start a new one, then we wait for all - * transactions that could have been modifying the table to terminate. Now + * transactions that could have been modifying the table to terminate. Now * we know that any subsequently-started transactions will see the index and * honor its constraints on HOT updates; so while existing HOT-chains might * be broken with respect to the index, no currently live tuple will have an - * incompatible HOT update done to it. We now build the index normally via + * incompatible HOT update done to it. We now build the index normally via * index_build(), while holding a weak lock that allows concurrent * insert/update/delete. Also, we index only tuples that are valid * as of the start of the scan (see IndexBuildHeapScan), whereas a normal @@ -2615,13 +2615,13 @@ IndexCheckExclusion(Relation heapRelation, * * Next, we mark the index "indisready" (but still not "indisvalid") and * commit the second transaction and start a third. Again we wait for all - * transactions that could have been modifying the table to terminate. Now + * transactions that could have been modifying the table to terminate. Now * we know that any subsequently-started transactions will see the index and * insert their new tuples into it. We then take a new reference snapshot * which is passed to validate_index(). Any tuples that are valid according * to this snap, but are not in the index, must be added to the index. * (Any tuples committed live after the snap will be inserted into the - * index by their originating transaction. Any tuples committed dead before + * index by their originating transaction. Any tuples committed dead before * the snap need not be indexed, because we will wait out all transactions * that might care about them before we mark the index valid.) * @@ -2630,7 +2630,7 @@ IndexCheckExclusion(Relation heapRelation, * ever say "delete it". (This should be faster than a plain indexscan; * also, not all index AMs support full-index indexscan.) Then we sort the * TIDs, and finally scan the table doing a "merge join" against the TID list - * to see which tuples are missing from the index. Thus we will ensure that + * to see which tuples are missing from the index. Thus we will ensure that * all tuples valid according to the reference snapshot are in the index. 
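The validate_index() description above reduces to a merge pass over two sorted TID streams: any heap TID absent from the index stream must still be inserted. A toy sketch over plain int arrays (the real code streams ItemPointers through a tuplesort, and missing[] must hold at least nheap entries):

#include <stddef.h>

/* Both inputs sorted ascending; collect heap values not present in
 * the index, i.e. the entries a concurrent build must still insert. */
size_t
count_missing(const int *heap, size_t nheap,
			  const int *index, size_t nindex,
			  int *missing)
{
	size_t		i = 0;
	size_t		j = 0;
	size_t		nmissing = 0;

	while (i < nheap)
	{
		while (j < nindex && index[j] < heap[i])
			j++;
		if (j >= nindex || index[j] != heap[i])
			missing[nmissing++] = heap[i];
		i++;
	}
	return nmissing;
}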
* * Building a unique index this way is tricky: we might try to insert a @@ -2646,7 +2646,7 @@ IndexCheckExclusion(Relation heapRelation, * were alive at the time of the reference snapshot are gone; this is * necessary to be sure there are none left with a transaction snapshot * older than the reference (and hence possibly able to see tuples we did - * not index). Then we mark the index "indisvalid" and commit. Subsequent + * not index). Then we mark the index "indisvalid" and commit. Subsequent * transactions will be able to use it for queries. * * Doing two full table scans is a brute-force strategy. We could try to be @@ -2672,7 +2672,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot) indexRelation = index_open(indexId, RowExclusiveLock); /* - * Fetch info needed for index_insert. (You might think this should be + * Fetch info needed for index_insert. (You might think this should be * passed in from DefineIndex, but its copy is long gone due to having * been built in a previous transaction.) */ @@ -2789,7 +2789,7 @@ validate_index_heapscan(Relation heapRelation, /* * Need an EState for evaluation of index expressions and partial-index - * predicates. Also a slot to hold the current tuple. + * predicates. Also a slot to hold the current tuple. */ estate = CreateExecutorState(); econtext = GetPerTupleExprContext(estate); @@ -2838,7 +2838,7 @@ validate_index_heapscan(Relation heapRelation, * visit the live tuples in order by their offsets, but the root * offsets that we need to compare against the index contents might be * ordered differently. So we might have to "look back" within the - * tuplesort output, but only within the current page. We handle that + * tuplesort output, but only within the current page. We handle that * by keeping a bool array in_index[] showing all the * already-passed-over tuplesort output TIDs of the current page. We * clear that array here, when advancing onto a new heap page. @@ -2923,7 +2923,7 @@ validate_index_heapscan(Relation heapRelation, /* * For the current heap tuple, extract all the attributes we use - * in this index, and note which are null. This also performs + * in this index, and note which are null. This also performs * evaluation of any expressions needed. */ FormIndexDatum(indexInfo, @@ -2945,7 +2945,7 @@ validate_index_heapscan(Relation heapRelation, * for a uniqueness check on the whole HOT-chain. That is, the * tuple we have here could be dead because it was already * HOT-updated, and if so the updating transaction will not have - * thought it should insert index entries. The index AM will + * thought it should insert index entries. The index AM will * check the whole HOT-chain and correctly detect a conflict if * there is one. */ @@ -3068,7 +3068,7 @@ index_set_state_flags(Oid indexId, IndexStateFlagsAction action) /* * IndexGetRelation: given an index's relation OID, get the OID of the - * relation it is an index on. Uses the system cache. + * relation it is an index on. Uses the system cache. */ Oid IndexGetRelation(Oid indexId, bool missing_ok) @@ -3105,7 +3105,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) volatile bool skipped_constraint = false; /* - * Open and lock the parent heap relation. ShareLock is sufficient since + * Open and lock the parent heap relation. ShareLock is sufficient since * we only need to be sure no schema or data changes are going on. 
*/ heapId = IndexGetRelation(indexId, false); @@ -3193,7 +3193,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) * chains, we had better force indcheckxmin true, because the normal * argument that the HOT chains couldn't conflict with the index is * suspect for an invalid index. (A conflict is definitely possible if - * the index was dead. It probably shouldn't happen otherwise, but let's + * the index was dead. It probably shouldn't happen otherwise, but let's * be conservative.) In this case advancing the usability horizon is * appropriate. * @@ -3277,7 +3277,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks) * the data in a manner that risks a change in constraint validity. * * Returns true if any indexes were rebuilt (including toast table's index - * when relevant). Note that a CommandCounterIncrement will occur after each + * when relevant). Note that a CommandCounterIncrement will occur after each * index rebuild. */ bool @@ -3290,7 +3290,7 @@ reindex_relation(Oid relid, int flags) bool result; /* - * Open and lock the relation. ShareLock is sufficient since we only need + * Open and lock the relation. ShareLock is sufficient since we only need * to prevent schema and data changes in it. The lock level used here * should match ReindexTable(). */ @@ -3309,7 +3309,7 @@ reindex_relation(Oid relid, int flags) * reindex_index will attempt to update the pg_class rows for the relation * and index. If we are processing pg_class itself, we want to make sure * that the updates do not try to insert index entries into indexes we - * have not processed yet. (When we are trying to recover from corrupted + * have not processed yet. (When we are trying to recover from corrupted * indexes, that could easily cause a crash.) We can accomplish this * because CatalogUpdateIndexes will use the relcache's index list to know * which indexes to update. We just force the index list to be only the @@ -3318,7 +3318,7 @@ reindex_relation(Oid relid, int flags) * It is okay to not insert entries into the indexes we have not processed * yet because all of this is transaction-safe. If we fail partway * through, the updated rows are dead and it doesn't matter whether they - * have index entries. Also, a new pg_class index will be created with a + * have index entries. Also, a new pg_class index will be created with a * correct entry for its own pg_class row because we do * RelationSetNewRelfilenode() before we do index_build(). * diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c index 4bf412fb0b6..05aa56e8593 100644 --- a/src/backend/catalog/indexing.c +++ b/src/backend/catalog/indexing.c @@ -149,7 +149,7 @@ CatalogIndexInsert(CatalogIndexState indstate, HeapTuple heapTuple) * CatalogUpdateIndexes - do all the indexing work for a new catalog tuple * * This is a convenience routine for the common case where we only need - * to insert or update a single tuple in a system catalog. Avoid using it for + * to insert or update a single tuple in a system catalog. Avoid using it for * multiple tuples, since opening the indexes and building the index info * structures is moderately expensive. */ diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 5bf6d289d84..89df585b870 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -66,10 +66,10 @@ * when we are obeying an override search path spec that says not to use the * temp namespace, or the temp namespace is included in the explicit list.) * - * 2. 
The system catalog namespace is always searched. If the system + * 2. The system catalog namespace is always searched. If the system * namespace is present in the explicit path then it will be searched in * the specified order; otherwise it will be searched after TEMP tables and - * *before* the explicit list. (It might seem that the system namespace + * *before* the explicit list. (It might seem that the system namespace * should be implicitly last, but this behavior appears to be required by * SQL99. Also, this provides a way to search the system namespace first * without thereby making it the default creation target namespace.) @@ -87,7 +87,7 @@ * to refer to the current backend's temp namespace. This is usually also * ignorable if the temp namespace hasn't been set up, but there's a special * case: if "pg_temp" appears first then it should be the default creation - * target. We kluge this case a little bit so that the temp namespace isn't + * target. We kluge this case a little bit so that the temp namespace isn't * set up until the first attempt to create something in it. (The reason for * klugery is that we can't create the temp namespace outside a transaction, * but initial GUC processing of search_path happens outside a transaction.) @@ -98,7 +98,7 @@ * In bootstrap mode, the search path is set equal to "pg_catalog", so that * the system namespace is the only one searched or inserted into. * initdb is also careful to set search_path to "pg_catalog" for its - * post-bootstrap standalone backend runs. Otherwise the default search + * post-bootstrap standalone backend runs. Otherwise the default search * path is determined by GUC. The factory default path contains the PUBLIC * namespace (if it exists), preceded by the user's personal namespace * (if one exists). @@ -162,13 +162,13 @@ static List *overrideStack = NIL; /* * myTempNamespace is InvalidOid until and unless a TEMP namespace is set up * in a particular backend session (this happens when a CREATE TEMP TABLE - * command is first executed). Thereafter it's the OID of the temp namespace. + * command is first executed). Thereafter it's the OID of the temp namespace. * * myTempToastNamespace is the OID of the namespace for my temp tables' toast - * tables. It is set when myTempNamespace is, and is InvalidOid before that. + * tables. It is set when myTempNamespace is, and is InvalidOid before that. * * myTempNamespaceSubID shows whether we've created the TEMP namespace in the - * current subtransaction. The flag propagates up the subtransaction tree, + * current subtransaction. The flag propagates up the subtransaction tree, * so the main transaction will correctly recognize the flag if all * intermediate subtransactions commit. When it is InvalidSubTransactionId, * we either haven't made the TEMP namespace yet, or have successfully @@ -250,7 +250,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, } /* - * DDL operations can change the results of a name lookup. Since all such + * DDL operations can change the results of a name lookup. 
Since all such * operations will generate invalidation messages, we keep track of * whether any such messages show up while we're performing the operation, * and retry until either (1) no more invalidation messages show up or (2) @@ -259,7 +259,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, * But if lockmode = NoLock, then we assume that either the caller is OK * with the answer changing under them, or that they already hold some * appropriate lock, and therefore return the first answer we get without - * checking for invalidation messages. Also, if the requested lock is + * already held, LockRelationOid will not AcceptInvalidationMessages, * so we may fail to notice a change. We could protect against that case * by calling AcceptInvalidationMessages() before beginning this loop, but @@ -396,7 +396,7 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode, break; /* - * Something may have changed. Let's repeat the name lookup, to make + * sure this name still references the same relation it did * previously. */ @@ -869,7 +869,7 @@ TypeIsVisible(Oid typid) * and the returned nvargs will always be zero. * * If expand_defaults is true, functions that could match after insertion of - * default argument values will also be retrieved. In this case the returned + * structs could have nargs > passed-in nargs, and ndargs is set to the number * of additional args (which can be retrieved from the function's * proargdefaults entry). @@ -1032,7 +1032,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames, * Call uses positional notation * * Check if function is variadic, and get variadic element type if - * so. If expand_variadic is false, we should just ignore + * variadic-ness. */ if (pronargs <= nargs && expand_variadic) @@ -1162,7 +1162,7 @@ FuncnameGetCandidates(List *names, int nargs, List *argnames, if (prevResult) { /* - * We have a match with a previous result. Decide which one + * to keep, or mark it ambiguous if we can't decide. The * logic here is preference > 0 means prefer the old result, * preference < 0 means prefer the new, preference = 0 means @@ -1553,7 +1553,7 @@ OpernameGetOprid(List *names, Oid oprleft, Oid oprright) * identical entries in later namespaces. * * The returned items always have two args[] entries --- one or the other - * will be InvalidOid for a prefix or postfix oprkind. nargs is 2, too. + */ FuncCandidateList OpernameGetCandidates(List *names, char oprkind, bool missing_schema_ok) @@ -2536,7 +2536,7 @@ get_ts_config_oid(List *names, bool missing_ok) /* * TSConfigIsVisible * Determine whether a text search configuration (identified by OID) - * is visible in the current search path. Visible means "would be found + * by searching for the unqualified text search configuration name". */ bool @@ -2855,7 +2855,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p) /* * get_namespace_oid - given a namespace name, look up the OID * - * If missing_ok is false, throw an error if namespace name not found.
If + * If missing_ok is false, throw an error if namespace name not found. If * true, just return InvalidOid. */ Oid @@ -3070,7 +3070,7 @@ GetTempNamespaceBackendId(Oid namespaceId) /* * GetTempToastNamespace - get the OID of my temporary-toast-table namespace, - * which must already be assigned. (This is only used when creating a toast + * which must already be assigned. (This is only used when creating a toast * table for a temp table, so we must have already done InitTempTableNamespace) */ Oid @@ -3168,8 +3168,8 @@ OverrideSearchPathMatchesCurrent(OverrideSearchPath *path) * * It's possible that newpath->useTemp is set but there is no longer any * active temp namespace, if the path was saved during a transaction that - * created a temp namespace and was later rolled back. In that case we just - * ignore useTemp. A plausible alternative would be to create a new temp + * created a temp namespace and was later rolled back. In that case we just + * ignore useTemp. A plausible alternative would be to create a new temp * namespace, but for existing callers that's not necessary because an empty * temp namespace wouldn't affect their results anyway. * @@ -3202,7 +3202,7 @@ PushOverrideSearchPath(OverrideSearchPath *newpath) firstNS = linitial_oid(oidlist); /* - * Add any implicitly-searched namespaces to the list. Note these go on + * Add any implicitly-searched namespaces to the list. Note these go on * the front, not the back; also notice that we do not check USAGE * permissions for these. */ @@ -3525,7 +3525,7 @@ recomputeNamespacePath(void) } /* - * Remember the first member of the explicit list. (Note: this is + * Remember the first member of the explicit list. (Note: this is * nominally wrong if temp_missing, but we need it anyway to distinguish * explicit from implicit mention of pg_catalog.) */ @@ -3535,7 +3535,7 @@ recomputeNamespacePath(void) firstNS = linitial_oid(oidlist); /* - * Add any implicitly-searched namespaces to the list. Note these go on + * Add any implicitly-searched namespaces to the list. Note these go on * the front, not the back; also notice that we do not check USAGE * permissions for these. */ @@ -3590,7 +3590,7 @@ InitTempTableNamespace(void) /* * First, do permission check to see if we are authorized to make temp - * tables. We use a nonstandard error message here since "databasename: + * tables. We use a nonstandard error message here since "databasename: * permission denied" might be a tad cryptic. * * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask; @@ -3609,9 +3609,9 @@ InitTempTableNamespace(void) * Do not allow a Hot Standby slave session to make temp tables. Aside * from problems with modifying the system catalogs, there is a naming * conflict: pg_temp_N belongs to the session with BackendId N on the - * master, not to a slave session with the same BackendId. We should not + * master, not to a slave session with the same BackendId. We should not * be able to get here anyway due to XactReadOnly checks, but let's just - * make real sure. Note that this also backstops various operations that + * make real sure. Note that this also backstops various operations that * allow XactReadOnly transactions to modify temp tables; they'd need * RecoveryInProgress checks if not for this. */ @@ -3967,7 +3967,7 @@ fetch_search_path(bool includeImplicit) /* * If the temp namespace should be first, force it to exist. This is so * that callers can trust the result to reflect the actual default - * creation namespace. 
It's a bit bogus to do this here, since + * current_schema() is supposedly a stable function without side-effects, * but the alternatives seem worse. */ @@ -3989,7 +3989,7 @@ fetch_search_path(bool includeImplicit) /* * Fetch the active search path into a caller-allocated array of OIDs. - * Returns the number of path entries. (If this is more than sarray_len, + * then the data didn't fit and is not all stored.) * * The returned list always includes the implicitly-prepended namespaces, diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 2b837a99c10..c7c8f4b1a36 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -467,7 +467,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid); * drop operation. * * Note: If the object is not found, we don't give any indication of the - * reason. (It might have been a missing schema if the name was qualified, or + * a nonexistent type name in case of a cast, function or operator; etc). * Currently there is only one caller that might be interested in such info, so * we don't spend much effort here. If more callers start to care, it might be @@ -665,7 +665,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs, /* * If we're dealing with a relation or attribute, then the relation is - * already locked. Otherwise, we lock it now. + */ if (address.classId != RelationRelationId) { diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index d99c2e5edae..1ad923ca6c5 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -152,10 +152,10 @@ AggregateCreate(const char *aggName, errdetail("An aggregate using a polymorphic transition type must have at least one polymorphic argument."))); /* - * An ordered-set aggregate that is VARIADIC must be VARIADIC ANY. In + * principle we could support regular variadic types, but it would make * things much more complicated because we'd have to assemble the correct - * subsets of arguments into array values. Since no standard aggregates + * have use for such a case, we aren't bothering for now. */ if (AGGKIND_IS_ORDERED_SET(aggKind) && OidIsValid(variadicArgType) && @@ -167,7 +167,7 @@ AggregateCreate(const char *aggName, /* * If it's a hypothetical-set aggregate, there must be at least as many * direct arguments as aggregated ones, and the last N direct arguments - * must match the aggregated ones in type. (We have to check this again + * when the aggregate is called, in case ANY is involved, but it makes * sense to reject the aggregate definition now if the declared arg types * don't match up.)
It's unconditionally OK if numDirectArgs == numArgs, diff --git a/src/backend/catalog/pg_collation.c b/src/backend/catalog/pg_collation.c index fb947051214..434dbce97f9 100644 --- a/src/backend/catalog/pg_collation.c +++ b/src/backend/catalog/pg_collation.c @@ -78,7 +78,7 @@ CollationCreate(const char *collname, Oid collnamespace, collname, pg_encoding_to_char(collencoding)))); /* - * Also forbid matching an any-encoding entry. This test of course is not + * Also forbid matching an any-encoding entry. This test of course is not * backed up by the unique index, but it's not a problem since we don't * support adding any-encoding entries after initdb. */ diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c index 5fd9822c6ed..041f5ad6865 100644 --- a/src/backend/catalog/pg_constraint.c +++ b/src/backend/catalog/pg_constraint.c @@ -38,7 +38,7 @@ * Create a constraint table entry. * * Subsidiary records (such as triggers or indexes to implement the - * constraint) are *not* created here. But we do make dependency links + * constraint) are *not* created here. But we do make dependency links * from the constraint to the things it depends on. */ Oid @@ -305,7 +305,7 @@ CreateConstraintEntry(const char *constraintName, { /* * Register normal dependency on the unique index that supports a - * foreign-key constraint. (Note: for indexes associated with unique + * foreign-key constraint. (Note: for indexes associated with unique * or primary-key constraints, the dependency runs the other way, and * is not made here.) */ @@ -759,7 +759,7 @@ void get_constraint_relation_oids(Oid constraint_oid, Oid *conrelid, Oid *confrelid) { HeapTuple tup; - Form_pg_constraint con; + Form_pg_constraint con; tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(constraint_oid)); if (!HeapTupleIsValid(tup)) /* should not happen */ @@ -895,10 +895,10 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok) * the rel of interest are Vars with the indicated varno/varlevelsup. * * Currently we only check to see if the rel has a primary key that is a - * subset of the grouping_columns. We could also use plain unique constraints + * subset of the grouping_columns. We could also use plain unique constraints * if all their columns are known not null, but there's a problem: we need * to be able to represent the not-null-ness as part of the constraints added - * to *constraintDeps. FIXME whenever not-null constraints get represented + * to *constraintDeps. FIXME whenever not-null constraints get represented * in pg_constraint. */ bool diff --git a/src/backend/catalog/pg_db_role_setting.c b/src/backend/catalog/pg_db_role_setting.c index 9f9bbe20742..3e73e0f45b8 100644 --- a/src/backend/catalog/pg_db_role_setting.c +++ b/src/backend/catalog/pg_db_role_setting.c @@ -172,7 +172,7 @@ AlterSetting(Oid databaseid, Oid roleid, VariableSetStmt *setstmt) /* * Drop some settings from the catalog. These can be for a particular - * database, or for a particular role. (It is of course possible to do both + * database, or for a particular role. (It is of course possible to do both * too, but it doesn't make sense for current uses.) */ void diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c index fabc51c35c8..7b2d0a7649f 100644 --- a/src/backend/catalog/pg_depend.c +++ b/src/backend/catalog/pg_depend.c @@ -50,7 +50,7 @@ recordDependencyOn(const ObjectAddress *depender, /* * Record multiple dependencies (of the same kind) for a single dependent - * object. 
This has a little less overhead than recording each separately. + * object. This has a little less overhead than recording each separately. */ void recordMultipleDependencies(const ObjectAddress *depender, @@ -127,7 +127,7 @@ recordMultipleDependencies(const ObjectAddress *depender, /* * If we are executing a CREATE EXTENSION operation, mark the given object - * as being a member of the extension. Otherwise, do nothing. + * as being a member of the extension. Otherwise, do nothing. * * This must be called during creation of any user-definable object type * that could be a member of an extension. @@ -186,7 +186,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object, * (possibly with some differences from before). * * If skipExtensionDeps is true, we do not delete any dependencies that - * show that the given object is a member of an extension. This avoids + * show that the given object is a member of an extension. This avoids * needing a lot of extra logic to fetch and recreate that dependency. */ long @@ -492,7 +492,7 @@ getExtensionOfObject(Oid classId, Oid objectId) * Detect whether a sequence is marked as "owned" by a column * * An ownership marker is an AUTO dependency from the sequence to the - * column. If we find one, store the identity of the owning column + * column. If we find one, store the identity of the owning column * into *tableId and *colId and return TRUE; else return FALSE. * * Note: if there's more than one such pg_depend entry then you get diff --git a/src/backend/catalog/pg_enum.c b/src/backend/catalog/pg_enum.c index 4168c0e84af..b4f2051749d 100644 --- a/src/backend/catalog/pg_enum.c +++ b/src/backend/catalog/pg_enum.c @@ -465,7 +465,7 @@ restart: * We avoid doing this unless absolutely necessary; in most installations * it will never happen. The reason is that updating existing pg_enum * entries creates hazards for other backends that are concurrently reading - * pg_enum. Although system catalog scans now use MVCC semantics, the + * pg_enum. Although system catalog scans now use MVCC semantics, the * syscache machinery might read different pg_enum entries under different * snapshots, so some other backend might get confused about the proper * ordering if a concurrent renumbering occurs. diff --git a/src/backend/catalog/pg_largeobject.c b/src/backend/catalog/pg_largeobject.c index ed2a41bfd8c..a54bc1b1faa 100644 --- a/src/backend/catalog/pg_largeobject.c +++ b/src/backend/catalog/pg_largeobject.c @@ -76,7 +76,7 @@ LargeObjectCreate(Oid loid) } /* - * Drop a large object having the given LO identifier. Both the data pages + * Drop a large object having the given LO identifier. Both the data pages * and metadata must be dropped. */ void diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c index 8faa0152768..9a3e20a7aed 100644 --- a/src/backend/catalog/pg_operator.c +++ b/src/backend/catalog/pg_operator.c @@ -315,7 +315,7 @@ OperatorShellMake(const char *operatorName, * specify operators that do not exist. For example, if operator * "op" is being defined, the negator operator "negop" and the * commutator "commop" can also be defined without specifying - * any information other than their names. Since in order to + * any information other than their names. Since in order to * add "op" to the PG_OPERATOR catalog, all the Oid's for these * operators must be placed in the fields of "op", a forward * declaration is done on the commutator and negator operators. 
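[Editor's note: the pg_depend.c hunks above mention that recordMultipleDependencies() batches several referenced objects into one pg_depend insertion pass, which is cheaper than one recordDependencyOn() call per reference. A minimal sketch of a hypothetical caller follows; the helper name and the particular referenced objects are invented for illustration, and the declaration assumed is the one in catalog/dependency.h of this era.]

#include "postgres.h"

#include "catalog/dependency.h"
#include "catalog/pg_class.h"
#include "catalog/pg_type.h"

/*
 * Hypothetical helper: make one object depend on both a relation and a
 * type.  Batching the two references into a single
 * recordMultipleDependencies() call opens pg_depend and builds its index
 * info once instead of twice.
 */
static void
record_deps_on_rel_and_type(const ObjectAddress *depender,
							Oid relid, Oid typid)
{
	ObjectAddress referenced[2];

	referenced[0].classId = RelationRelationId;
	referenced[0].objectId = relid;
	referenced[0].objectSubId = 0;

	referenced[1].classId = TypeRelationId;
	referenced[1].objectId = typid;
	referenced[1].objectSubId = 0;

	recordMultipleDependencies(depender, referenced, 2, DEPENDENCY_NORMAL);
}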
@@ -433,7 +433,7 @@ OperatorCreate(const char *operatorName, operatorName); /* - * Set up the other operators. If they do not currently exist, create + * Set up the other operators. If they do not currently exist, create * shells in order to get ObjectId's. */ diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index abf2f497e41..0fa331ad18f 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -229,7 +229,7 @@ ProcedureCreate(const char *procedureName, /* * Do not allow polymorphic return type unless at least one input argument - * is polymorphic. ANYRANGE return type is even stricter: must have an + * is polymorphic. ANYRANGE return type is even stricter: must have an * ANYRANGE input (since we can't deduce the specific range type from * ANYELEMENT). Also, do not allow return type INTERNAL unless at least * one input argument is INTERNAL. @@ -676,7 +676,7 @@ ProcedureCreate(const char *procedureName, /* * Set per-function configuration parameters so that the validation is - * done with the environment the function expects. However, if + * done with the environment the function expects. However, if * check_function_bodies is off, we don't do this, because that would * create dump ordering hazards that pg_dump doesn't know how to deal * with. (For example, a SET clause might refer to a not-yet-created @@ -948,7 +948,7 @@ sql_function_parse_error_callback(void *arg) /* * Adjust a syntax error occurring inside the function body of a CREATE - * FUNCTION or DO command. This can be used by any function validator or + * FUNCTION or DO command. This can be used by any function validator or * anonymous-block handler, not only for SQL-language functions. * It is assumed that the syntax error position is initially relative to the * function body string (as passed in). If possible, we adjust the position @@ -1081,7 +1081,7 @@ match_prosrc_to_literal(const char *prosrc, const char *literal, /* * This implementation handles backslashes and doubled quotes in the - * string literal. It does not handle the SQL syntax for literals + * string literal. It does not handle the SQL syntax for literals * continued across line boundaries. * * We do the comparison a character at a time, not a byte at a time, so diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 8942441dc50..7aa70fa3b2f 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -167,7 +167,7 @@ recordDependencyOnOwner(Oid classId, Oid objectId, Oid owner) * shdepChangeDep * * Update shared dependency records to account for an updated referenced - * object. This is an internal workhorse for operations such as changing + * object. This is an internal workhorse for operations such as changing * an object's owner. * * There must be no more than one existing entry for the given dependent @@ -316,7 +316,7 @@ changeDependencyOnOwner(Oid classId, Oid objectId, Oid newOwnerId) * was previously granted some rights to the object. * * This step is analogous to aclnewowner's removal of duplicate entries - * in the ACL. We have to do it to handle this scenario: + * in the ACL. We have to do it to handle this scenario: * A grants some rights on an object to B * ALTER OWNER changes the object's owner to B * ALTER OWNER changes the object's owner to C @@ -402,9 +402,9 @@ getOidListDiff(Oid *list1, int *nlist1, Oid *list2, int *nlist2) * and then insert or delete from pg_shdepend as appropriate. 
* * Note that we can't just insert all referenced roles blindly during GRANT, - * because we would end up with duplicate registered dependencies. We could + * because we would end up with duplicate registered dependencies. We could * check for existence of the tuples before inserting, but that seems to be - * more expensive than what we are doing here. Likewise we can't just delete + * more expensive than what we are doing here. Likewise we can't just delete * blindly during REVOKE, because the user may still have other privileges. * It is also possible that REVOKE actually adds dependencies, due to * instantiation of a formerly implicit default ACL (although at present, @@ -535,7 +535,7 @@ checkSharedDependencies(Oid classId, Oid objectId, /* * We limit the number of dependencies reported to the client to * MAX_REPORTED_DEPS, since client software may not deal well with - * enormous error strings. The server log always gets a full report. + * enormous error strings. The server log always gets a full report. */ #define MAX_REPORTED_DEPS 100 @@ -616,7 +616,7 @@ checkSharedDependencies(Oid classId, Oid objectId, bool stored = false; /* - * XXX this info is kept on a simple List. Maybe it's not good + * XXX this info is kept on a simple List. Maybe it's not good * for performance, but using a hash table seems needlessly * complex. The expected number of databases is not high anyway, * I suppose. @@ -853,7 +853,7 @@ shdepAddDependency(Relation sdepRel, /* * Make sure the object doesn't go away while we record the dependency on - * it. DROP routines should lock the object exclusively before they check + * it. DROP routines should lock the object exclusively before they check * shared dependencies. */ shdepLockAndCheckObject(refclassId, refobjId); @@ -1004,7 +1004,7 @@ shdepLockAndCheckObject(Oid classId, Oid objectId) /* * Currently, this routine need not support any other shared - * object types besides roles. If we wanted to record explicit + * object types besides roles. If we wanted to record explicit * dependencies on databases or tablespaces, we'd need code along * these lines: */ @@ -1150,7 +1150,7 @@ isSharedObjectPinned(Oid classId, Oid objectId, Relation sdepRel) /* * shdepDropOwned * - * Drop the objects owned by any one of the given RoleIds. If a role has + * Drop the objects owned by any one of the given RoleIds. If a role has * access to an object, the grant will be removed as well (but the object * will not, of course). * diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 8e0e65b7219..f614915abfb 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -394,7 +394,7 @@ TypeCreate(Oid newTypeOid, if (HeapTupleIsValid(tup)) { /* - * check that the type is not already defined. It may exist as a + * check that the type is not already defined. It may exist as a * shell type, however. */ if (((Form_pg_type) GETSTRUCT(tup))->typisdefined) diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c index 85df9a10929..c3b2f072e44 100644 --- a/src/backend/catalog/storage.c +++ b/src/backend/catalog/storage.c @@ -35,7 +35,7 @@ * that have been created or deleted in the current transaction. When * a relation is created, we create the physical file immediately, but * remember it so that we can delete the file again if the current - * transaction is aborted. Conversely, a deletion request is NOT + * transaction is aborted. Conversely, a deletion request is NOT * executed immediately, but is just entered in the list. 
When and if * the transaction commits, we can delete the physical file. * @@ -344,7 +344,7 @@ smgrDoPendingDeletes(bool isCommit) if (maxrels == 0) { maxrels = 8; - srels = palloc(sizeof(SMgrRelation) * maxrels ); + srels = palloc(sizeof(SMgrRelation) * maxrels); } else if (maxrels <= nrels) { @@ -378,7 +378,7 @@ smgrDoPendingDeletes(bool isCommit) * *ptr is set to point to a freshly-palloc'd array of RelFileNodes. * If there are no relations to be deleted, *ptr is set to NULL. * - * Only non-temporary relations are included in the returned list. This is OK + * Only non-temporary relations are included in the returned list. This is OK * because the list is used only in contexts where temporary relations don't * matter: we're either writing to the two-phase state file (and transactions * that have touched temp tables can't be prepared) or we're writing to xlog diff --git a/src/backend/catalog/toasting.c b/src/backend/catalog/toasting.c index 5275e4bfdb3..bdfeb90dd10 100644 --- a/src/backend/catalog/toasting.c +++ b/src/backend/catalog/toasting.c @@ -36,9 +36,9 @@ Oid binary_upgrade_next_toast_pg_type_oid = InvalidOid; static void CheckAndCreateToastTable(Oid relOid, Datum reloptions, - LOCKMODE lockmode, bool check); + LOCKMODE lockmode, bool check); static bool create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, - Datum reloptions, LOCKMODE lockmode, bool check); + Datum reloptions, LOCKMODE lockmode, bool check); static bool needs_toast_table(Relation rel); @@ -106,7 +106,7 @@ BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid) /* create_toast_table does all the work */ if (!create_toast_table(rel, toastOid, toastIndexOid, (Datum) 0, - AccessExclusiveLock, false)) + AccessExclusiveLock, false)) elog(ERROR, "\"%s\" does not require a toast table", relName); @@ -177,8 +177,8 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, return false; /* - * If requested check lockmode is sufficient. This is a cross check - * in case of errors or conflicting decisions in earlier code. + * If requested check lockmode is sufficient. This is a cross check in + * case of errors or conflicting decisions in earlier code. */ if (check && lockmode != AccessExclusiveLock) elog(ERROR, "AccessExclusiveLock required to add toast table."); @@ -362,7 +362,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, } /* - * Check to see whether the table needs a TOAST table. It does only if + * Check to see whether the table needs a TOAST table. It does only if * (1) there are any toastable attributes, and (2) the maximum length * of a tuple could exceed TOAST_TUPLE_THRESHOLD. (We don't want to * create a toast table for something like "f1 varchar(20)".) diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index a73d7094376..fcf86dd0d93 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -296,7 +296,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, * * transtype can't be a pseudo-type, since we need to be able to store * values of the transtype. However, we can allow polymorphic transtype - * in some cases (AggregateCreate will check). Also, we allow "internal" + * in some cases (AggregateCreate will check). 
Also, we allow "internal" * for functions that want to pass pointers to private data structures; * but allow that only to superusers, since you could crash the system (or * worse) by connecting up incompatible internal-using functions in an @@ -317,7 +317,7 @@ DefineAggregate(List *name, List *args, bool oldstyle, List *parameters, } /* - * If a moving-aggregate transtype is specified, look that up. Same + * If a moving-aggregate transtype is specified, look that up. Same * restrictions as for transtype. */ if (mtransType) diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index a43457bb575..80c9743a0d5 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -296,7 +296,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name) } /* - * Executes an ALTER OBJECT / RENAME TO statement. Based on the object + * Executes an ALTER OBJECT / RENAME TO statement. Based on the object * type, the function appropriate to that type is executed. */ Oid diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index a04adeaac75..c09ca7e6db1 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -409,7 +409,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Open all indexes of the relation, and see if there are any analyzable - * columns in the indexes. We do not analyze index columns if there was + * columns in the indexes. We do not analyze index columns if there was * an explicit column list in the ANALYZE command, however. If we are * doing a recursive scan, we don't want to touch the parent's indexes at * all. @@ -466,7 +466,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Determine how many rows we need to sample, using the worst case from - * all analyzable columns. We use a lower bound of 100 rows to avoid + * all analyzable columns. We use a lower bound of 100 rows to avoid * possible overflow in Vitter's algorithm. (Note: that will also be the * target in the corner case where there are no analyzable columns.) */ @@ -501,7 +501,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, &totalrows, &totaldeadrows); /* - * Compute the statistics. Temporary results during the calculations for + * Compute the statistics. Temporary results during the calculations for * each column are stored in a child context. The calc routines are * responsible to make sure that whatever they store into the VacAttrStats * structure is allocated in anl_context. @@ -558,7 +558,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, /* * Emit the completed stats rows into pg_statistic, replacing any - * previous statistics for the target columns. (If there are stats in + * previous statistics for the target columns. (If there are stats in * pg_statistic for columns we didn't process, we leave them alone.) */ update_attstats(RelationGetRelid(onerel), inh, @@ -610,7 +610,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, } /* - * Report ANALYZE to the stats collector, too. However, if doing + * Report ANALYZE to the stats collector, too. However, if doing * inherited stats we shouldn't report, because the stats collector only * tracks per-table stats. */ @@ -872,7 +872,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) return NULL; /* - * Create the VacAttrStats struct. Note that we only have a copy of the + * Create the VacAttrStats struct. Note that we only have a copy of the * fixed fields of the pg_attribute tuple. 
*/ stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats)); @@ -882,7 +882,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) /* * When analyzing an expression index, believe the expression tree's type * not the column datatype --- the latter might be the opckeytype storage - * type of the opclass, which is not interesting for our purposes. (Note: + * type of the opclass, which is not interesting for our purposes. (Note: * if we did anything with non-expression index columns, we'd need to * figure out where to get the correct type info from, but for now that's * not a problem.) It's not clear whether anyone will care about the @@ -921,7 +921,7 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr) } /* - * Call the type-specific typanalyze function. If none is specified, use + * Call the type-specific typanalyze function. If none is specified, use * std_typanalyze(). */ if (OidIsValid(stats->attrtype->typanalyze)) @@ -997,7 +997,7 @@ BlockSampler_Next(BlockSampler bs) * If we are to skip, we should advance t (hence decrease K), and * repeat the same probabilistic test for the next block. The naive * implementation thus requires an anl_random_fract() call for each block - * number. But we can reduce this to one anl_random_fract() call per + * number. But we can reduce this to one anl_random_fract() call per * selected block, by noting that each time the while-test succeeds, * we can reinterpret V as a uniform random number in the range 0 to p. * Therefore, instead of choosing a new V, we just adjust p to be @@ -1127,7 +1127,7 @@ acquire_sample_rows(Relation onerel, int elevel, /* * We ignore unused and redirect line pointers. DEAD line * pointers should be counted as dead, because we need vacuum to - * run to get rid of them. Note that this rule agrees with the + * run to get rid of them. Note that this rule agrees with the * way that heap_page_prune() counts things. */ if (!ItemIdIsNormal(itemid)) @@ -1173,7 +1173,7 @@ acquire_sample_rows(Relation onerel, int elevel, * is the safer option. * * A special case is that the inserting transaction might - * be our own. In this case we should count and sample + * be our own. In this case we should count and sample * the row, to accommodate users who load a table and * analyze it in one transaction. (pgstat_report_analyze * has to adjust the numbers we send to the stats @@ -1215,7 +1215,7 @@ acquire_sample_rows(Relation onerel, int elevel, /* * The first targrows sample rows are simply copied into the * reservoir. Then we start replacing tuples in the sample - * until we reach the end of the relation. This algorithm is + * until we reach the end of the relation. This algorithm is * from Jeff Vitter's paper (see full citation below). It * works by repeatedly computing the number of tuples to skip * before selecting a tuple, which replaces a randomly chosen @@ -1274,7 +1274,7 @@ acquire_sample_rows(Relation onerel, int elevel, qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); /* - * Estimate total numbers of rows in relation. For live rows, use + * Estimate total numbers of rows in relation. For live rows, use * vac_estimate_reltuples; for dead rows, we have no source of old * information, so we have to assume the density is the same in unseen * pages as in the pages we scanned. 
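[Editor's note: the acquire_sample_rows() comments above describe a two-stage sample: BlockSampler picks which pages to read, and a fixed-size reservoir of targrows tuples is maintained over the rows on those pages. As a standalone illustration of the reservoir idea only, here is the naive Algorithm R over a made-up integer stream; analyze.c itself uses Vitter's Algorithm Z, which computes how many rows to skip between replacements rather than drawing a random number for every row. TARGROWS and STREAM_LEN are invented for the example.]

#include <stdio.h>
#include <stdlib.h>

#define TARGROWS	4		/* reservoir size; analyze.c derives its
							 * value from the statistics targets */
#define STREAM_LEN	1000	/* made-up number of incoming rows */

int
main(void)
{
	int			reservoir[TARGROWS];
	int			row;

	srandom(42);
	for (row = 0; row < STREAM_LEN; row++)
	{
		if (row < TARGROWS)
			reservoir[row] = row;	/* fill phase: copy the first rows */
		else
		{
			/* keep this row with probability TARGROWS/(row+1) */
			long		k = random() % (row + 1);

			if (k < TARGROWS)
				reservoir[k] = row;	/* evict a uniformly chosen slot */
		}
	}
	for (row = 0; row < TARGROWS; row++)
		printf("sampled row %d\n", reservoir[row]);
	return 0;
}

[Every input row survives into the final sample with the same probability, TARGROWS/STREAM_LEN; that uniformity is the property the surrounding comments rely on when extrapolating per-column statistics from the sample.]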
@@ -1597,7 +1597,7 @@ acquire_inherited_sample_rows(Relation onerel, int elevel, * Statistics are stored in several places: the pg_class row for the * relation has stats about the whole relation, and there is a * pg_statistic row for each (non-system) attribute that has ever - * been analyzed. The pg_class values are updated by VACUUM, not here. + * been analyzed. The pg_class values are updated by VACUUM, not here. * * pg_statistic rows are just added or updated normally. This means * that pg_statistic will probably contain some deleted rows at the @@ -2001,7 +2001,7 @@ compute_minimal_stats(VacAttrStatsP stats, /* * If the value is toasted, we want to detoast it just once to * avoid repeated detoastings and resultant excess memory usage - * during the comparisons. Also, check to see if the value is + * during the comparisons. Also, check to see if the value is * excessively wide, and if so don't detoast at all --- just * ignore the value. */ @@ -2121,7 +2121,7 @@ compute_minimal_stats(VacAttrStatsP stats, * We assume (not very reliably!) that all the multiply-occurring * values are reflected in the final track[] list, and the other * nonnull values all appeared but once. (XXX this usually - * results in a drastic overestimate of ndistinct. Can we do + * results in a drastic overestimate of ndistinct. Can we do * any better?) *---------- */ @@ -2158,7 +2158,7 @@ compute_minimal_stats(VacAttrStatsP stats, * Decide how many values are worth storing as most-common values. If * we are able to generate a complete MCV list (all the values in the * sample will fit, and we think these are all the ones in the table), - * then do so. Otherwise, store only those values that are + * then do so. Otherwise, store only those values that are * significantly more common than the (estimated) average. We set the * threshold rather arbitrarily at 25% more than average, with at * least 2 instances in the sample. @@ -2326,7 +2326,7 @@ compute_scalar_stats(VacAttrStatsP stats, /* * If the value is toasted, we want to detoast it just once to * avoid repeated detoastings and resultant excess memory usage - * during the comparisons. Also, check to see if the value is + * during the comparisons. Also, check to see if the value is * excessively wide, and if so don't detoast at all --- just * ignore the value. */ @@ -2371,7 +2371,7 @@ compute_scalar_stats(VacAttrStatsP stats, * accumulate ordering-correlation statistics. * * To determine which are most common, we first have to count the - * number of duplicates of each value. The duplicates are adjacent in + * number of duplicates of each value. The duplicates are adjacent in * the sorted list, so a brute-force approach is to compare successive * datum values until we find two that are not equal. However, that * requires N-1 invocations of the datum comparison routine, which are @@ -2380,7 +2380,7 @@ compute_scalar_stats(VacAttrStatsP stats, * that are adjacent in the sorted order; otherwise it could not know * that it's ordered the pair correctly.) We exploit this by having * compare_scalars remember the highest tupno index that each - * ScalarItem has been found equal to. At the end of the sort, a + * ScalarItem has been found equal to. At the end of the sort, a * ScalarItem's tupnoLink will still point to itself if and only if it * is the last item of its group of duplicates (since the group will * be ordered by tupno). @@ -2500,7 +2500,7 @@ compute_scalar_stats(VacAttrStatsP stats, * Decide how many values are worth storing as most-common values. 
If * we are able to generate a complete MCV list (all the values in the * sample will fit, and we think these are all the ones in the table), - * then do so. Otherwise, store only those values that are + * then do so. Otherwise, store only those values that are * significantly more common than the (estimated) average. We set the * threshold rather arbitrarily at 25% more than average, with at * least 2 instances in the sample. Also, we won't suppress values @@ -2655,7 +2655,7 @@ compute_scalar_stats(VacAttrStatsP stats, /* * The object of this loop is to copy the first and last values[] - * entries along with evenly-spaced values in between. So the + * entries along with evenly-spaced values in between. So the * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)]. But * computing that subscript directly risks integer overflow when * the stats target is more than a couple thousand. Instead we @@ -2766,7 +2766,7 @@ compute_scalar_stats(VacAttrStatsP stats, * qsort_arg comparator for sorting ScalarItems * * Aside from sorting the items, we update the tupnoLink[] array - * whenever two ScalarItems are found to contain equal datums. The array + * whenever two ScalarItems are found to contain equal datums. The array * is indexed by tupno; for each ScalarItem, it contains the highest * tupno that that item's datum has been found to be equal to. This allows * us to avoid additional comparisons in compute_scalar_stats(). diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index 09fb99bb73e..92f2077d487 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -151,7 +151,7 @@ * * This struct declaration has the maximal length, but in a real queue entry * the data area is only big enough for the actual channel and payload strings - * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible + * (each null-terminated). AsyncQueueEntryEmptySize is the minimum possible * entry size, if both channel and payload strings are empty (but note it * doesn't include alignment padding). * @@ -265,7 +265,7 @@ static SlruCtlData AsyncCtlData; * * The most data we can have in the queue at a time is QUEUE_MAX_PAGE/2 * pages, because more than that would confuse slru.c into thinking there - * was a wraparound condition. With the default BLCKSZ this means there + * was a wraparound condition. With the default BLCKSZ this means there * can be up to 8GB of queued-and-not-read data. * * Note: it's possible to redefine QUEUE_MAX_PAGE with a smaller multiple of @@ -395,7 +395,7 @@ asyncQueuePagePrecedes(int p, int q) int diff; /* - * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be + * We have to compare modulo (QUEUE_MAX_PAGE+1)/2. Both inputs should be * in the range 0..QUEUE_MAX_PAGE. */ Assert(p >= 0 && p <= QUEUE_MAX_PAGE); @@ -826,7 +826,7 @@ PreCommit_Notify(void) while (nextNotify != NULL) { /* - * Add the pending notifications to the queue. We acquire and + * Add the pending notifications to the queue. We acquire and * release AsyncQueueLock once per page, which might be overkill * but it does allow readers to get in while we're doing this. * @@ -1042,12 +1042,12 @@ Exec_UnlistenAllCommit(void) * The reason that this is not done in AtCommit_Notify is that there is * a nonzero chance of errors here (for example, encoding conversion errors * while trying to format messages to our frontend). An error during - * AtCommit_Notify would be a PANIC condition. The timing is also arranged + * AtCommit_Notify would be a PANIC condition. 
The timing is also arranged * to ensure that a transaction's self-notifies are delivered to the frontend * before it gets the terminating ReadyForQuery message. * * Note that we send signals and process the queue even if the transaction - * eventually aborted. This is because we need to clean out whatever got + * eventually aborted. This is because we need to clean out whatever got * added to the queue. * * NOTE: we are outside of any transaction here. @@ -1137,7 +1137,7 @@ IsListeningOn(const char *channel) /* * Remove our entry from the listeners array when we are no longer listening - * on any channel. NB: must not fail if we're already not listening. + * on any channel. NB: must not fail if we're already not listening. */ static void asyncQueueUnregister(void) @@ -1179,7 +1179,7 @@ asyncQueueIsFull(void) /* * The queue is full if creating a new head page would create a page that * logically precedes the current global tail pointer, ie, the head - * pointer would wrap around compared to the tail. We cannot create such + * pointer would wrap around compared to the tail. We cannot create such * a head page for fear of confusing slru.c. For safety we round the tail * pointer back to a segment boundary (compare the truncation logic in * asyncQueueAdvanceTail). @@ -1198,7 +1198,7 @@ asyncQueueIsFull(void) /* * Advance the QueuePosition to the next entry, assuming that the current - * entry is of length entryLength. If we jump to a new page the function + * entry is of length entryLength. If we jump to a new page the function * returns true, else false. */ static bool @@ -1267,7 +1267,7 @@ asyncQueueNotificationToEntry(Notification *n, AsyncQueueEntry *qe) * the last byte which simplifies reading the page later. * * We are passed the list cell containing the next notification to write - * and return the first still-unwritten cell back. Eventually we will return + * and return the first still-unwritten cell back. Eventually we will return * NULL indicating all is done. * * We are holding AsyncQueueLock already from the caller and grab AsyncCtlLock @@ -1344,7 +1344,7 @@ asyncQueueAddEntries(ListCell *nextNotify) * Page is full, so we're done here, but first fill the next page * with zeroes. The reason to do this is to ensure that slru.c's * idea of the head page is always the same as ours, which avoids - * boundary problems in SimpleLruTruncate. The test in + * boundary problems in SimpleLruTruncate. The test in * asyncQueueIsFull() ensured that there is room to create this * page without overrunning the queue. */ @@ -1518,7 +1518,7 @@ AtAbort_Notify(void) /* * If we LISTEN but then roll back the transaction after PreCommit_Notify, * we have registered as a listener but have not made any entry in - * listenChannels. In that case, deregister again. + * listenChannels. In that case, deregister again. */ if (amRegisteredListener && listenChannels == NIL) asyncQueueUnregister(); @@ -1771,7 +1771,7 @@ EnableNotifyInterrupt(void) * is disabled until the next EnableNotifyInterrupt call. * * The PROCSIG_CATCHUP_INTERRUPT signal handler also needs to call this, - * so as to prevent conflicts if one signal interrupts the other. So we + * so as to prevent conflicts if one signal interrupts the other. So we * must return the previous state of the flag. */ bool @@ -1866,7 +1866,7 @@ asyncQueueReadAllNotifications(void) /* * We copy the data from SLRU into a local buffer, so as to avoid * holding the AsyncCtlLock while we are examining the entries and - * possibly transmitting them to our frontend. 
Copy only the part + * possibly transmitting them to our frontend. Copy only the part * of the page we will actually inspect. */ slotno = SimpleLruReadPage_ReadOnly(AsyncCtl, curpage, @@ -1940,7 +1940,7 @@ asyncQueueReadAllNotifications(void) * and deliver relevant ones to my frontend. * * The current page must have been fetched into page_buffer from shared - * memory. (We could access the page right in shared memory, but that + * memory. (We could access the page right in shared memory, but that * would imply holding the AsyncCtlLock throughout this routine.) * * We stop if we reach the "stop" position, or reach a notification from an @@ -2146,7 +2146,7 @@ NotifyMyFrontEnd(const char *channel, const char *payload, int32 srcPid) pq_endmessage(&buf); /* - * NOTE: we do not do pq_flush() here. For a self-notify, it will + * NOTE: we do not do pq_flush() here. For a self-notify, it will * happen at the end of the transaction, and for incoming notifies * ProcessIncomingNotify will do it after finding all the notifies. */ diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 4ac1e0b864f..54a27531825 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * cluster.c - * CLUSTER a table on an index. This is now also used for VACUUM FULL. + * CLUSTER a table on an index. This is now also used for VACUUM FULL. * * There is hardly anything left of Paul Brown's original implementation... * @@ -94,7 +94,7 @@ static void reform_and_rewrite_tuple(HeapTuple tuple, * * The single-relation case does not have any such overhead. * - * We also allow a relation to be specified without index. In that case, + * We also allow a relation to be specified without index. In that case, * the indisclustered bit will be looked up, and an ERROR will be thrown * if there is no index with the bit set. *--------------------------------------------------------------------------- @@ -206,7 +206,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) ALLOCSET_DEFAULT_MAXSIZE); /* - * Build the list of relations to cluster. Note that this lives in + * Build the list of relations to cluster. Note that this lives in * cluster_context. */ rvs = get_tables_to_cluster(cluster_context); @@ -243,7 +243,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) * * This clusters the table by creating a new, clustered table and * swapping the relfilenodes of the new table and the old table, so - * the OID of the original table is preserved. Thus we do not lose + * the OID of the original table is preserved. Thus we do not lose * GRANT, inheritance nor references to this table (this was a bug * in releases thru 7.3). * @@ -252,7 +252,7 @@ cluster(ClusterStmt *stmt, bool isTopLevel) * them incrementally while we load the table. * * If indexOid is InvalidOid, the table will be rewritten in physical order - * instead of index order. This is the new implementation of VACUUM FULL, + * instead of index order. This is the new implementation of VACUUM FULL, * and error messages should refer to the operation as VACUUM not CLUSTER. */ void @@ -265,7 +265,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) /* * We grab exclusive access to the target rel and index for the duration - * of the transaction. (This is redundant for the single-transaction + * of the transaction. (This is redundant for the single-transaction * case, since cluster() already did it.) 
The index lock is taken inside * check_index_is_clusterable. */ @@ -300,7 +300,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) * check in the "recheck" case is appropriate (which currently means * somebody is executing a database-wide CLUSTER), because there is * another check in cluster() which will stop any attempt to cluster - * remote temp tables by name. There is another check in cluster_rel + * remote temp tables by name. There is another check in cluster_rel * which is redundant, but we leave it for extra safety. */ if (RELATION_IS_OTHER_TEMP(OldHeap)) @@ -393,7 +393,7 @@ cluster_rel(Oid tableOid, Oid indexOid, bool recheck, bool verbose) /* * All predicate locks on the tuples or pages are about to be made - * invalid, because we move tuples around. Promote them to relation + * invalid, because we move tuples around. Promote them to relation * locks. Predicate locks on indexes will be promoted when they are * reindexed. */ @@ -440,7 +440,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck, LOCKMOD /* * Disallow clustering on incomplete indexes (those that might not index - * every row of the relation). We could relax this by making a separate + * every row of the relation). We could relax this by making a separate * seqscan pass over the table to copy the missing rows, but that seems * expensive and tedious. */ @@ -649,14 +649,14 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp, /* * Create the new heap, using a temporary name in the same namespace as - * the existing table. NOTE: there is some risk of collision with user + * the existing table. NOTE: there is some risk of collision with user * relnames. Working around this seems more trouble than it's worth; in * particular, we can't create the new heap in a different namespace from * the old, or we will have problems with the TEMP status of temp tables. * * Note: the new heap is not a shared relation, even if we are rebuilding * a shared rel. However, we do make the new heap mapped if the source is - * mapped. This simplifies swap_relation_files, and is absolutely + * mapped. This simplifies swap_relation_files, and is absolutely * necessary for rebuilding pg_class, for reasons explained there. */ snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", OIDOldHeap); @@ -696,11 +696,11 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace, bool forcetemp, * * If the relation doesn't have a TOAST table already, we can't need one * for the new relation. The other way around is possible though: if some - * wide columns have been dropped, NewHeapCreateToastTable can decide - * that no TOAST table is needed for the new table. + * wide columns have been dropped, NewHeapCreateToastTable can decide that + * no TOAST table is needed for the new table. * - * Note that NewHeapCreateToastTable ends with CommandCounterIncrement, - * so that the TOAST table will be visible for insertion. + * Note that NewHeapCreateToastTable ends with CommandCounterIncrement, so + * that the TOAST table will be visible for insertion. */ toastid = OldHeap->rd_rel->reltoastrelid; if (OidIsValid(toastid)) @@ -788,12 +788,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* * If the OldHeap has a toast table, get lock on the toast table to keep - * it from being vacuumed. This is needed because autovacuum processes + * it from being vacuumed. This is needed because autovacuum processes * toast tables independently of their main tables, with no lock on the - * latter. 
If an autovacuum were to start on the toast table after we + * latter. If an autovacuum were to start on the toast table after we * compute our OldestXmin below, it would use a later OldestXmin, and then * possibly remove as DEAD toast tuples belonging to main tuples we think - * are only RECENTLY_DEAD. Then we'd fail while trying to copy those + * are only RECENTLY_DEAD. Then we'd fail while trying to copy those * tuples. * * We don't need to open the toast relation here, just lock it. The lock @@ -814,7 +814,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* * If both tables have TOAST tables, perform toast swap by content. It is * possible that the old table has a toast table but the new one doesn't, - * if toastable columns have been dropped. In that case we have to do + * if toastable columns have been dropped. In that case we have to do * swap by links. This is okay because swap by content is only essential * for system catalogs, and we don't support schema changes for them. */ @@ -833,7 +833,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, * * Note that we must hold NewHeap open until we are done writing data, * since the relcache will not guarantee to remember this setting once - * the relation is closed. Also, this technique depends on the fact + * the relation is closed. Also, this technique depends on the fact * that no one will try to read from the NewHeap until after we've * finished writing it and swapping the rels --- otherwise they could * follow the toast pointers to the wrong place. (It would actually @@ -929,7 +929,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose, /* * Scan through the OldHeap, either in OldIndex order or sequentially; * copy each tuple into the NewHeap, or transiently to the tuplesort - * module. Note that we don't bother sorting dead tuples (they won't get + * module. Note that we don't bother sorting dead tuples (they won't get * to the new table anyway). */ for (;;) @@ -1217,7 +1217,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class, NameStr(relform2->relname), r2); /* - * Send replacement mappings to relmapper. Note these won't actually + * Send replacement mappings to relmapper. Note these won't actually * take effect until CommandCounterIncrement. */ RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false); @@ -1404,7 +1404,8 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class, relform1->relkind == RELKIND_TOASTVALUE && relform2->relkind == RELKIND_TOASTVALUE) { - Oid toastIndex1, toastIndex2; + Oid toastIndex1, + toastIndex2; /* Get valid index for each relation */ toastIndex1 = toast_get_valid_index(r1, @@ -1440,7 +1441,7 @@ swap_relation_files(Oid r1, Oid r2, bool target_is_pg_class, * non-transient relation.) * * Caution: the placement of this step interacts with the decision to - * handle toast rels by recursion. When we are trying to rebuild pg_class + * handle toast rels by recursion. When we are trying to rebuild pg_class * itself, the smgr close on pg_class must happen after all accesses in * this function. */ @@ -1487,9 +1488,9 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, /* * Rebuild each index on the relation (but not the toast table, which is - * all-new at this point). It is important to do this before the DROP + * all-new at this point). 
It is important to do this before the DROP * step because if we are processing a system catalog that will be used - * during DROP, we want to have its indexes available. There is no + * during DROP, we want to have its indexes available. There is no * advantage to the other order anyway because this is all transactional, * so no chance to reclaim disk space before commit. We do not need a * final CommandCounterIncrement() because reindex_relation does it. @@ -1511,11 +1512,11 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, * swap_relation_files()), thus relfrozenxid was not updated. That's * annoying because a potential reason for doing a VACUUM FULL is a * imminent or actual anti-wraparound shutdown. So, now that we can - * access the new relation using it's indices, update - * relfrozenxid. pg_class doesn't have a toast relation, so we don't need - * to update the corresponding toast relation. Not that there's little - * point moving all relfrozenxid updates here since swap_relation_files() - * needs to write to pg_class for non-mapped relations anyway. + * access the new relation using it's indices, update relfrozenxid. + * pg_class doesn't have a toast relation, so we don't need to update the + * corresponding toast relation. Not that there's little point moving all + * relfrozenxid updates here since swap_relation_files() needs to write to + * pg_class for non-mapped relations anyway. */ if (OIDOldHeap == RelationRelationId) { diff --git a/src/backend/commands/constraint.c b/src/backend/commands/constraint.c index 751f89e514d..b0cad4634b2 100644 --- a/src/backend/commands/constraint.c +++ b/src/backend/commands/constraint.c @@ -50,7 +50,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) bool isnull[INDEX_MAX_KEYS]; /* - * Make sure this is being called as an AFTER ROW trigger. Note: + * Make sure this is being called as an AFTER ROW trigger. Note: * translatable error strings are shared with ri_triggers.c, so resist the * temptation to fold the function name into them. */ @@ -87,7 +87,7 @@ unique_key_recheck(PG_FUNCTION_ARGS) * If the new_row is now dead (ie, inserted and then deleted within our * transaction), we can skip the check. However, we have to be careful, * because this trigger gets queued only in response to index insertions; - * which means it does not get queued for HOT updates. The row we are + * which means it does not get queued for HOT updates. The row we are * called for might now be dead, but have a live HOT child, in which case * we still need to make the check. Therefore we have to use * heap_hot_search, not just HeapTupleSatisfiesVisibility as is done in diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 70ee7e50486..fbd7492a73f 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -125,8 +125,8 @@ typedef struct CopyStateData bool *force_quote_flags; /* per-column CSV FQ flags */ List *force_notnull; /* list of column names */ bool *force_notnull_flags; /* per-column CSV FNN flags */ - List *force_null; /* list of column names */ - bool *force_null_flags; /* per-column CSV FN flags */ + List *force_null; /* list of column names */ + bool *force_null_flags; /* per-column CSV FN flags */ bool convert_selectively; /* do selective binary conversion? */ List *convert_select; /* list of column names (can be NIL) */ bool *convert_select_flags; /* per-column CSV/TEXT CS flags */ @@ -189,7 +189,7 @@ typedef struct CopyStateData /* * Finally, raw_buf holds raw data read from the data source (file or - * client connection). 
CopyReadLine parses this data sufficiently to + * client connection). CopyReadLine parses this data sufficiently to * locate line boundaries, then transfers the data to line_buf and * converts it. Note: we guarantee that there is a \0 at * raw_buf[raw_buf_len]. @@ -215,7 +215,7 @@ typedef struct * function call overhead in tight COPY loops. * * We must use "if (1)" because the usual "do {...} while(0)" wrapper would - * prevent the continue/break processing from working. We end the "if (1)" + * prevent the continue/break processing from working. We end the "if (1)" * with "else ((void) 0)" to ensure the "if" does not unintentionally match * any "else" in the calling code, and to avoid any compiler warnings about * empty statements. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros. @@ -549,7 +549,7 @@ CopySendEndOfRow(CopyState cstate) * CopyGetData reads data from the source (file or frontend) * * We attempt to read at least minread, and at most maxread, bytes from - * the source. The actual number of bytes read is returned; if this is + * the source. The actual number of bytes read is returned; if this is * less than minread, EOF was detected. * * Note: when copying from the frontend, we expect a proper EOF mark per @@ -766,7 +766,7 @@ CopyLoadRawBuf(CopyState cstate) * we also support copying the output of an arbitrary SELECT query. * * If <pipe> is false, transfer is between the table and the file named - * <filename>. Otherwise, transfer is between the table and our regular + * <filename>. Otherwise, transfer is between the table and our regular * input/output stream. The latter could be either stdin/stdout or a * socket, depending on whether we're running under Postmaster control. * @@ -1203,7 +1203,7 @@ ProcessCopyOptions(CopyState cstate, if (cstate->force_null != NIL && !is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force null only available using COPY FROM"))); + errmsg("COPY force null only available using COPY FROM"))); /* Don't allow the delimiter to appear in the null string. */ if (strchr(cstate->null_print, cstate->delim[0]) != NULL) @@ -1298,7 +1298,7 @@ BeginCopy(bool is_from, errmsg("COPY (SELECT) WITH OIDS is not supported"))); /* - * Run parse analysis and rewrite. Note this also acquires sufficient + * Run parse analysis and rewrite. Note this also acquires sufficient * locks on the source table(s). * * Because the parser and planner tend to scribble on their input, we @@ -1428,8 +1428,8 @@ BeginCopy(bool is_from, if (!list_member_int(cstate->attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("FORCE NULL column \"%s\" not referenced by COPY", - NameStr(tupDesc->attrs[attnum - 1]->attname)))); + errmsg("FORCE NULL column \"%s\" not referenced by COPY", + NameStr(tupDesc->attrs[attnum - 1]->attname)))); cstate->force_null_flags[attnum - 1] = true; } } @@ -1730,7 +1730,7 @@ CopyTo(CopyState cstate) * Create a temporary memory context that we can reset once per row to * recover palloc'd memory. This avoids any problems with leaks inside * datatype output routines, and should be faster than retail pfree's - * anyway. (We don't need a whole econtext as CopyFrom does.) + * anyway. (We don't need a whole econtext as CopyFrom does.) */ cstate->rowcontext = AllocSetContextCreate(CurrentMemoryContext, "COPY TO", @@ -2248,8 +2248,8 @@ CopyFrom(CopyState cstate) { /* * Reset the per-tuple exprcontext. We can only do this if the - * tuple buffer is empty. 
(Calling the context the per-tuple memory - * context is a bit of a misnomer now.) + * tuple buffer is empty. (Calling the context the per-tuple + * memory context is a bit of a misnomer now.) */ ResetPerTupleExprContext(estate); } @@ -2569,19 +2569,20 @@ BeginCopyFrom(Relation rel, num_defaults++; /* - * If a default expression looks at the table being loaded, then - * it could give the wrong answer when using multi-insert. Since - * database access can be dynamic this is hard to test for - * exactly, so we use the much wider test of whether the - * default expression is volatile. We allow for the special case - * of when the default expression is the nextval() of a sequence - * which in this specific case is known to be safe for use with - * the multi-insert optimisation. Hence we use this special case - * function checker rather than the standard check for + * If a default expression looks at the table being loaded, + * then it could give the wrong answer when using + * multi-insert. Since database access can be dynamic this is + * hard to test for exactly, so we use the much wider test of + * whether the default expression is volatile. We allow for + * the special case of when the default expression is the + * nextval() of a sequence which in this specific case is + * known to be safe for use with the multi-insert + * optimisation. Hence we use this special case function + * checker rather than the standard check for * contain_volatile_functions(). */ if (!volatile_defexprs) - volatile_defexprs = contain_volatile_functions_not_nextval((Node *)defexpr); + volatile_defexprs = contain_volatile_functions_not_nextval((Node *) defexpr); } } } @@ -2861,8 +2862,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, if (cstate->csv_mode) { - if(string == NULL && - cstate->force_notnull_flags[m]) + if (string == NULL && + cstate->force_notnull_flags[m]) { /* * FORCE_NOT_NULL option is set and column is NULL - @@ -2870,14 +2871,14 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, */ string = cstate->null_print; } - else if(string != NULL && cstate->force_null_flags[m] - && strcmp(string,cstate->null_print) == 0 ) + else if (string != NULL && cstate->force_null_flags[m] + && strcmp(string, cstate->null_print) == 0) { /* - * FORCE_NULL option is set and column matches the NULL string. - * It must have been quoted, or otherwise the string would already - * have been set to NULL. - * Convert it to NULL as specified. + * FORCE_NULL option is set and column matches the NULL + * string. It must have been quoted, or otherwise the + * string would already have been set to NULL. Convert it + * to NULL as specified. */ string = NULL; } @@ -2920,7 +2921,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, * if client chooses to send that now. * * Note that we MUST NOT try to read more data in an old-protocol - * copy, since there is no protocol-level EOF marker then. We + * copy, since there is no protocol-level EOF marker then. We * could go either way for copy from file, but choose to throw * error if there's data after the EOF marker, for consistency * with the new-protocol case. @@ -2982,7 +2983,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext, /* * Now compute and insert any defaults available for the columns not - * provided by the input data. Anything not processed here or above will + * provided by the input data. Anything not processed here or above will * remain NULL. 
*/ for (i = 0; i < num_defaults; i++) @@ -3017,7 +3018,7 @@ EndCopyFrom(CopyState cstate) * server encoding. * * Result is true if read was terminated by EOF, false if terminated - * by newline. The terminating newline or EOF marker is not included + * by newline. The terminating newline or EOF marker is not included * in the final value of line_buf. */ static bool @@ -3173,7 +3174,7 @@ CopyReadLineText(CopyState cstate) * of read-ahead and avoid the many calls to * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol * does not allow us to read too far ahead or we might read into the - * next data, so we read-ahead only as far we know we can. One + * next data, so we read-ahead only as far we know we can. One * optimization would be to read-ahead four byte here if * cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it, * considering the size of the buffer. @@ -3183,7 +3184,7 @@ CopyReadLineText(CopyState cstate) REFILL_LINEBUF; /* - * Try to read some more data. This will certainly reset + * Try to read some more data. This will certainly reset * raw_buf_index to zero, and raw_buf_ptr must go with it. */ if (!CopyLoadRawBuf(cstate)) @@ -3241,7 +3242,7 @@ CopyReadLineText(CopyState cstate) /* * Updating the line count for embedded CR and/or LF chars is * necessarily a little fragile - this test is probably about the - * best we can do. (XXX it's arguable whether we should do this + * best we can do. (XXX it's arguable whether we should do this * at all --- is cur_lineno a physical or logical count?) */ if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r')) @@ -3420,7 +3421,7 @@ CopyReadLineText(CopyState cstate) * after a backslash is special, so we skip over that second * character too. If we didn't do that \\. would be * considered an eof-of copy, while in non-CSV mode it is a - * literal backslash followed by a period. In CSV mode, + * literal backslash followed by a period. In CSV mode, * backslashes are not special, so we want to process the * character after the backslash just like a normal character, * so we don't increment in those cases. @@ -3523,7 +3524,7 @@ CopyReadAttributesText(CopyState cstate) /* * The de-escaped attributes will certainly not be longer than the input * data line, so we can just force attribute_buf to be large enough and - * then transfer data without any checks for enough space. We need to do + * then transfer data without any checks for enough space. We need to do * it this way because enlarging attribute_buf mid-stream would invalidate * pointers already stored into cstate->raw_fields[]. */ @@ -3753,7 +3754,7 @@ CopyReadAttributesCSV(CopyState cstate) /* * The de-escaped attributes will certainly not be longer than the input * data line, so we can just force attribute_buf to be large enough and - * then transfer data without any checks for enough space. We need to do + * then transfer data without any checks for enough space. We need to do * it this way because enlarging attribute_buf mid-stream would invalidate * pointers already stored into cstate->raw_fields[]. */ @@ -3968,7 +3969,7 @@ CopyAttributeOutText(CopyState cstate, char *string) /* * We have to grovel through the string searching for control characters * and instances of the delimiter character. In most cases, though, these - * are infrequent. To avoid overhead from calling CopySendData once per + * are infrequent. To avoid overhead from calling CopySendData once per * character, we dump out all characters between escaped characters in a * single call. 
The loop invariant is that the data from "start" to "ptr" * can be sent literally, but hasn't yet been. diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c index e434d38702e..96806eed98b 100644 --- a/src/backend/commands/createas.c +++ b/src/backend/commands/createas.c @@ -104,7 +104,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, /* * For materialized views, lock down security-restricted operations and - * arrange to make GUC variable changes local to this command. This is + * arrange to make GUC variable changes local to this command. This is * not necessary for security, but this keeps the behavior similar to * REFRESH MATERIALIZED VIEW. Otherwise, one could create a materialized * view not possible to refresh. @@ -124,9 +124,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, * plancache.c. * * Because the rewriter and planner tend to scribble on the input, we make - * a preliminary copy of the source querytree. This prevents problems in + * a preliminary copy of the source querytree. This prevents problems in * the case that CTAS is in a portal or plpgsql function and is executed - * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) + * repeatedly. (See also the same hack in EXPLAIN and PREPARE.) */ rewritten = QueryRewrite((Query *) copyObject(query)); @@ -141,7 +141,7 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString, /* * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. (This could only matter if + * results of any previously executed queries. (This could only matter if * the planner executed an allegedly-stable function that changed the * database contents, but let's do it anyway to be parallel to the EXPLAIN * code path.) @@ -359,8 +359,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo) /* * If necessary, create a TOAST table for the target table. Note that - * NewRelationCreateToastTable ends with CommandCounterIncrement(), so that - * the TOAST table will be visible for insertion. + * NewRelationCreateToastTable ends with CommandCounterIncrement(), so + * that the TOAST table will be visible for insertion. */ CommandCounterIncrement(); diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 4996a2e7cd2..5705889f31d 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -265,7 +265,7 @@ createdb(const CreatedbStmt *stmt) * To create a database, must have createdb privilege and must be able to * become the target role (this does not imply that the target role itself * must have createdb privilege). The latter provision guards against - * "giveaway" attacks. Note that a superuser will always have both of + * "giveaway" attacks. Note that a superuser will always have both of * these privileges a fortiori. */ if (!have_createdb_privilege()) @@ -397,7 +397,7 @@ createdb(const CreatedbStmt *stmt) /* * If we are trying to change the default tablespace of the template, * we require that the template not have any files in the new default - * tablespace. This is necessary because otherwise the copied + * tablespace. This is necessary because otherwise the copied * database would contain pg_class rows that refer to its default * tablespace both explicitly (by OID) and implicitly (as zero), which * would cause problems. 
For example another CREATE DATABASE using @@ -433,7 +433,7 @@ createdb(const CreatedbStmt *stmt) } /* - * Check for db name conflict. This is just to give a more friendly error + * Check for db name conflict. This is just to give a more friendly error * message than "unique index violation". There's a race condition but * we're willing to accept the less friendly message in that case. */ @@ -498,7 +498,7 @@ createdb(const CreatedbStmt *stmt) /* * We deliberately set datacl to default (NULL), rather than copying it - * from the template database. Copying it would be a bad idea when the + * from the template database. Copying it would be a bad idea when the * owner is not the same as the template's owner. */ new_record_nulls[Anum_pg_database_datacl - 1] = true; @@ -751,7 +751,8 @@ dropdb(const char *dbname, bool missing_ok) HeapTuple tup; int notherbackends; int npreparedxacts; - int nslots, nslots_active; + int nslots, + nslots_active; /* * Look up the target database's OID, and get exclusive lock on it. We @@ -1160,7 +1161,7 @@ movedb(const char *dbname, const char *tblspcname) /* * Use an ENSURE block to make sure we remove the debris if the copy fails - * (eg, due to out-of-disk-space). This is not a 100% solution, because + * (eg, due to out-of-disk-space). This is not a 100% solution, because * of the possibility of failure during transaction commit, but it should * handle most scenarios. */ @@ -1647,7 +1648,7 @@ get_db_info(const char *name, LOCKMODE lockmode, LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode); /* - * And now, re-fetch the tuple by OID. If it's still there and still + * And now, re-fetch the tuple by OID. If it's still there and still * the same name, we win; else, drop the lock and loop back to try * again. */ diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index f0cb4f544e0..dca6e952a52 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -202,7 +202,7 @@ defGetInt64(DefElem *def) /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid int8 + * constants by the lexer. Accept these if they are valid int8 * strings. */ return DatumGetInt64(DirectFunctionCall1(int8in, diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index 024a4778a94..96f926cbb2a 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -606,7 +606,7 @@ filter_event_trigger(const char **tag, EventTriggerCacheItem *item) } /* - * Setup for running triggers for the given event. Return value is an OID list + * Setup for running triggers for the given event. Return value is an OID list * of functions to run; if there are any, trigdata is filled with an * appropriate EventTriggerData for them to receive. */ @@ -625,7 +625,7 @@ EventTriggerCommonSetup(Node *parsetree, * invoked to match up exactly with the list that CREATE EVENT TRIGGER * accepts. This debugging cross-check will throw an error if this * function is invoked for a command tag that CREATE EVENT TRIGGER won't - * accept. (Unfortunately, there doesn't seem to be any simple, automated + * accept. (Unfortunately, there doesn't seem to be any simple, automated * way to verify that CREATE EVENT TRIGGER doesn't accept extra stuff that * never reaches this control point.) * @@ -655,7 +655,7 @@ EventTriggerCommonSetup(Node *parsetree, /* * Filter list of event triggers by command tag, and copy them into our - * memory context. 
Once we start running the command trigers, or indeed + * memory context. Once we start running the command trigers, or indeed * once we do anything at all that touches the catalogs, an invalidation * might leave cachelist pointing at garbage, so we must do this before we * can do much else. @@ -783,7 +783,7 @@ EventTriggerSQLDrop(Node *parsetree) return; /* - * Use current state to determine whether this event fires at all. If + * Use current state to determine whether this event fires at all. If * there are no triggers for the sql_drop event, then we don't have * anything to do here. Note that dropped object collection is disabled * if this is the case, so even if we were to try to run, the list would @@ -798,7 +798,7 @@ EventTriggerSQLDrop(Node *parsetree) &trigdata); /* - * Nothing to do if run list is empty. Note this shouldn't happen, + * Nothing to do if run list is empty. Note this shouldn't happen, * because if there are no sql_drop events, then objects-to-drop wouldn't * have been collected in the first place and we would have quitted above. */ @@ -813,7 +813,7 @@ EventTriggerSQLDrop(Node *parsetree) /* * Make sure pg_event_trigger_dropped_objects only works when running - * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when + * these triggers. Use PG_TRY to ensure in_sql_drop is reset even when * one trigger fails. (This is perhaps not necessary, as the currentState * variable will be removed shortly by our caller, but it seems better to * play safe.) @@ -1053,7 +1053,7 @@ EventTriggerBeginCompleteQuery(void) * returned false previously. * * Note: this might be called in the PG_CATCH block of a failing transaction, - * so be wary of running anything unnecessary. (In particular, it's probably + * so be wary of running anything unnecessary. (In particular, it's probably * unwise to try to allocate memory.) */ void diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 1104cc36312..794042b5501 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -86,7 +86,7 @@ static void show_sort_group_keys(PlanState *planstate, const char *qlabel, static void show_sort_info(SortState *sortstate, ExplainState *es); static void show_hash_info(HashState *hashstate, ExplainState *es); static void show_tidbitmap_info(BitmapHeapScanState *planstate, - ExplainState *es); + ExplainState *es); static void show_instrumentation_count(const char *qlabel, int which, PlanState *planstate, ExplainState *es); static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es); @@ -197,7 +197,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString, * plancache.c. * * Because the rewriter and planner tend to scribble on the input, we make - * a preliminary copy of the source querytree. This prevents problems in + * a preliminary copy of the source querytree. This prevents problems in * the case that the EXPLAIN is in a portal or plpgsql function and is * executed repeatedly. (See also the same hack in DECLARE CURSOR and * PREPARE.) XXX FIXME someday. 
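The ExplainOneQuery hunk just below touches the instrumentation behind EXPLAIN's "Planning time" line: take a clock reading before planning, another after, subtract, and print milliseconds. A minimal standalone sketch of that pattern, using plain POSIX clock_gettime() rather than the backend's INSTR_TIME macros (the stub workload and all names here are invented for illustration):

#include <stdio.h>
#include <time.h>

/* Stand-in for the planner call being timed; not a backend function. */
static void
plan_query_stub(void)
{
	volatile long sink = 0;
	long		i;

	for (i = 0; i < 1000000; i++)
		sink += i;
}

int
main(void)
{
	struct timespec planstart;
	struct timespec planend;
	double		plantime_ms;

	/* INSTR_TIME_SET_CURRENT amounts to a monotonic clock read like this. */
	clock_gettime(CLOCK_MONOTONIC, &planstart);
	plan_query_stub();
	clock_gettime(CLOCK_MONOTONIC, &planend);

	/*
	 * The same subtract-and-scale arithmetic that INSTR_TIME_SUBTRACT and
	 * INSTR_TIME_GET_DOUBLE hide behind a portable interface.
	 */
	plantime_ms = (planend.tv_sec - planstart.tv_sec) * 1000.0 +
		(planend.tv_nsec - planstart.tv_nsec) / 1000000.0;

	printf("Planning time: %.3f ms\n", plantime_ms);
	return 0;
}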
@@ -320,8 +320,9 @@ ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es, (*ExplainOneQuery_hook) (query, into, es, queryString, params); else { - PlannedStmt *plan; - instr_time planstart, planduration; + PlannedStmt *plan; + instr_time planstart, + planduration; INSTR_TIME_SET_CURRENT(planstart); @@ -493,7 +494,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, if (es->costs && planduration) { - double plantime = INSTR_TIME_GET_DOUBLE(*planduration); + double plantime = INSTR_TIME_GET_DOUBLE(*planduration); if (es->format == EXPLAIN_FORMAT_TEXT) appendStringInfo(es->str, "Planning time: %.3f ms\n", @@ -542,7 +543,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es, * convert a QueryDesc's plan tree to text and append it to es->str * * The caller should have set up the options fields of *es, as well as - * initializing the output buffer es->str. Other fields in *es are + * initializing the output buffer es->str. Other fields in *es are * initialized here. * * NB: will not work on utility statements @@ -567,7 +568,7 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc) * es->str * * The caller should have set up the options fields of *es, as well as - * initializing the output buffer es->str. Other fields in *es are + * initializing the output buffer es->str. Other fields in *es are * initialized here. */ void @@ -2193,7 +2194,7 @@ show_modifytable_info(ModifyTableState *mtstate, ExplainState *es) /* * If the first target relation is a foreign table, call its FDW to - * display whatever additional fields it wants to. For now, we ignore the + * display whatever additional fields it wants to. For now, we ignore the * possibility of other targets being foreign tables, although the API for * ExplainForeignModify is designed to allow them to be processed. */ @@ -2692,7 +2693,7 @@ ExplainXMLTag(const char *tagname, int flags, ExplainState *es) /* * Emit a JSON line ending. * - * JSON requires a comma after each property but the last. To facilitate this, + * JSON requires a comma after each property but the last. To facilitate this, * in JSON format, the text emitted for each property begins just prior to the * preceding line-break (and comma, if applicable). */ @@ -2713,7 +2714,7 @@ ExplainJSONLineEnding(ExplainState *es) * YAML lines are ordinarily indented by two spaces per indentation level. * The text emitted for each property begins just prior to the preceding * line-break, except for the first property in an unlabelled group, for which - * it begins immediately after the "- " that introduces the group. The first + * it begins immediately after the "- " that introduces the group. The first * property of the group appears on the same line as the opening "- ". */ static void diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c index 06bd90b9aa9..9a0afa4b5dc 100644 --- a/src/backend/commands/extension.c +++ b/src/backend/commands/extension.c @@ -108,7 +108,7 @@ static void ApplyExtensionUpdates(Oid extensionOid, /* * get_extension_oid - given an extension name, look up the OID * - * If missing_ok is false, throw an error if extension name not found. If + * If missing_ok is false, throw an error if extension name not found. If * true, just return InvalidOid. */ Oid @@ -257,9 +257,9 @@ check_valid_extension_name(const char *extensionname) errdetail("Extension names must not contain \"--\"."))); /* - * No leading or trailing dash either. 
(We could probably allow this, but + * No leading or trailing dash either. (We could probably allow this, but * it would require much care in filename parsing and would make filenames - * visually if not formally ambiguous. Since there's no real-world use + * visually if not formally ambiguous. Since there's no real-world use * case, let's just forbid it.) */ if (extensionname[0] == '-' || extensionname[namelen - 1] == '-') @@ -435,7 +435,7 @@ get_extension_script_filename(ExtensionControlFile *control, /* * Parse contents of primary or auxiliary control file, and fill in - * fields of *control. We parse primary file if version == NULL, + * fields of *control. We parse primary file if version == NULL, * else the optional auxiliary file for that version. * * Control files are supposed to be very short, half a dozen lines, @@ -673,7 +673,7 @@ read_extension_script_file(const ExtensionControlFile *control, * filename is used only to report errors. * * Note: it's tempting to just use SPI to execute the string, but that does - * not work very well. The really serious problem is that SPI will parse, + * not work very well. The really serious problem is that SPI will parse, * analyze, and plan the whole string before executing any of it; of course * this fails if there are any plannable statements referring to objects * created earlier in the script. A lesser annoyance is that SPI insists @@ -848,7 +848,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control, /* * Set creating_extension and related variables so that * recordDependencyOnCurrentExtension and other functions do the right - * things. On failure, ensure we reset these variables. + * things. On failure, ensure we reset these variables. */ creating_extension = true; CurrentExtensionObject = extensionOid; @@ -1092,7 +1092,7 @@ identify_update_path(ExtensionControlFile *control, * is still good. * * Result is a List of names of versions to transition through (the initial - * version is *not* included). Returns NIL if no such path. + * version is *not* included). Returns NIL if no such path. */ static List * find_update_path(List *evi_list, @@ -1193,7 +1193,7 @@ CreateExtension(CreateExtensionStmt *stmt) check_valid_extension_name(stmt->extname); /* - * Check for duplicate extension name. The unique index on + * Check for duplicate extension name. The unique index on * pg_extension.extname would catch this anyway, and serves as a backstop * in case of race conditions; but this is a friendlier error message, and * besides we need a check to support IF NOT EXISTS. @@ -1360,7 +1360,7 @@ CreateExtension(CreateExtensionStmt *stmt) { /* * The extension is not relocatable and the author gave us a schema - * for it. We create the schema here if it does not already exist. + * for it. We create the schema here if it does not already exist. */ schemaName = control->schema; schemaOid = get_namespace_oid(schemaName, true); @@ -1390,7 +1390,7 @@ CreateExtension(CreateExtensionStmt *stmt) */ List *search_path = fetch_search_path(false); - if (search_path == NIL) /* nothing valid in search_path? */ + if (search_path == NIL) /* nothing valid in search_path? 
*/ ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("no schema has been selected to create in"))); @@ -1589,7 +1589,7 @@ RemoveExtensionById(Oid extId) * might write "DROP EXTENSION foo" in foo's own script files, as because * errors in dependency management in extension script files could give * rise to cases where an extension is dropped as a result of recursing - * from some contained object. Because of that, we must test for the case + * from some contained object. Because of that, we must test for the case * here, not at some higher level of the DROP EXTENSION command. */ if (extId == CurrentExtensionObject) @@ -1620,7 +1620,7 @@ RemoveExtensionById(Oid extId) /* * This function lists the available extensions (one row per primary control - * file in the control directory). We parse each control file and report the + * file in the control directory). We parse each control file and report the * interesting fields. * * The system view pg_available_extensions provides a user interface to this @@ -1729,7 +1729,7 @@ pg_available_extensions(PG_FUNCTION_ARGS) /* * This function lists the available extension versions (one row per - * extension installation script). For each version, we parse the related + * extension installation script). For each version, we parse the related * control file(s) and report the interesting fields. * * The system view pg_available_extension_versions provides a user interface @@ -2517,7 +2517,7 @@ AlterExtensionNamespace(List *names, const char *newschema) Oid dep_oldNspOid; /* - * Ignore non-membership dependencies. (Currently, the only other + * Ignore non-membership dependencies. (Currently, the only other * case we could see here is a normal dependency from another * extension.) */ @@ -2929,7 +2929,7 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt) /* * Prevent a schema from being added to an extension if the schema - * contains the extension. That would create a dependency loop. + * contains the extension. That would create a dependency loop. */ if (object.classId == NamespaceRelationId && object.objectId == get_extension_schema(extension.objectId)) diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c index 7f007d7854a..8ab9c439db2 100644 --- a/src/backend/commands/foreigncmds.c +++ b/src/backend/commands/foreigncmds.c @@ -81,7 +81,7 @@ optionListToArray(List *options) /* - * Transform a list of DefElem into text array format. This is substantially + * Transform a list of DefElem into text array format. This is substantially * the same thing as optionListToArray(), except we recognize SET/ADD/DROP * actions for modifying an existing list of options, which is passed in * Datum form as oldOptions. Also, if fdwvalidator isn't InvalidOid @@ -125,7 +125,7 @@ transformGenericOptions(Oid catalogId, /* * It is possible to perform multiple SET/DROP actions on the same - * option. The standard permits this, as long as the options to be + * option. The standard permits this, as long as the options to be * added are unique. Note that an unspecified action is taken to be * ADD. */ diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 4c8119a474d..470db5705cc 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -74,7 +74,7 @@ * allow a shell type to be used, or even created if the specified return type * doesn't exist yet. (Without this, there's no way to define the I/O procs * for a new type.) 
But SQL function creation won't cope, so error out if - * the target language is SQL. (We do this here, not in the SQL-function + * the target language is SQL. (We do this here, not in the SQL-function * validator, so as not to produce a NOTICE and then an ERROR for the same * condition.) */ @@ -451,7 +451,7 @@ interpret_function_parameter_list(List *parameters, * FUNCTION and ALTER FUNCTION and return it via one of the out * parameters. Returns true if the passed option was recognized. If * the out parameter we were going to assign to points to non-NULL, - * raise a duplicate-clause error. (We don't try to detect duplicate + * raise a duplicate-clause error. (We don't try to detect duplicate * SET parameters though --- if you're redundant, the last one wins.) */ static bool @@ -760,7 +760,7 @@ interpret_AS_clause(Oid languageOid, const char *languageName, { /* * For "C" language, store the file name in probin and, when given, - * the link symbol name in prosrc. If link symbol is omitted, + * the link symbol name in prosrc. If link symbol is omitted, * substitute procedure name. We also allow link symbol to be * specified as "-", since that was the habit in PG versions before * 8.4, and there might be dump files out there that don't translate @@ -1394,7 +1394,7 @@ CreateCast(CreateCastStmt *stmt) /* * Restricting the volatility of a cast function may or may not be a * good idea in the abstract, but it definitely breaks many old - * user-defined types. Disable this check --- tgl 2/1/03 + * user-defined types. Disable this check --- tgl 2/1/03 */ #ifdef NOT_USED if (procstruct->provolatile == PROVOLATILE_VOLATILE) @@ -1458,7 +1458,7 @@ CreateCast(CreateCastStmt *stmt) /* * We know that composite, enum and array types are never binary- - * compatible with each other. They all have OIDs embedded in them. + * compatible with each other. They all have OIDs embedded in them. * * Theoretically you could build a user-defined base type that is * binary-compatible with a composite, enum, or array type. But we @@ -1487,7 +1487,7 @@ CreateCast(CreateCastStmt *stmt) * We also disallow creating binary-compatibility casts involving * domains. Casting from a domain to its base type is already * allowed, and casting the other way ought to go through domain - * coercion to permit constraint checking. Again, if you're intent on + * coercion to permit constraint checking. Again, if you're intent on * having your own semantics for that, create a no-op cast function. * * NOTE: if we were to relax this, the above checks for composites diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 38ce023a8a2..fdfa6ca4f5c 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -102,7 +102,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation, * concrete benefit for core types. * When a comparison or exclusion operator has a polymorphic input type, the - * actual input types must also match. This defends against the possibility + * actual input types must also match. This defends against the possibility * that operators could vary behavior in response to get_fn_expr_argtype(). * At present, this hazard is theoretical: check_exclusion_constraint() and * all core index access methods decline to set fn_expr for such calls. @@ -349,11 +349,11 @@ DefineIndex(Oid relationId, * index build; but for concurrent builds we allow INSERT/UPDATE/DELETE * (but not VACUUM). 
* - * NB: Caller is responsible for making sure that relationId refers - * to the relation on which the index should be built; except in bootstrap - * mode, this will typically require the caller to have already locked - * the relation. To avoid lock upgrade hazards, that lock should be at - * least as strong as the one we take here. + * NB: Caller is responsible for making sure that relationId refers to the + * relation on which the index should be built; except in bootstrap mode, + * this will typically require the caller to have already locked the + * relation. To avoid lock upgrade hazards, that lock should be at least + * as strong as the one we take here. */ lockmode = stmt->concurrent ? ShareUpdateExclusiveLock : ShareLock; rel = heap_open(relationId, lockmode); @@ -433,7 +433,7 @@ DefineIndex(Oid relationId, } /* - * Force shared indexes into the pg_global tablespace. This is a bit of a + * Force shared indexes into the pg_global tablespace. This is a bit of a * hack but seems simpler than marking them in the BKI commands. On the * other hand, if it's not shared, don't allow it to be placed there. */ @@ -628,7 +628,7 @@ DefineIndex(Oid relationId, /* * For a concurrent build, it's important to make the catalog entries * visible to other transactions before we start to build the index. That - * will prevent them from making incompatible HOT updates. The new index + * will prevent them from making incompatible HOT updates. The new index * will be marked not indisready and not indisvalid, so that no one else * tries to either insert into it or use it for queries. * @@ -676,7 +676,7 @@ DefineIndex(Oid relationId, * indexes. We have waited out all the existing transactions and any new * transaction will have the new index in its list, but the index is still * marked as "not-ready-for-inserts". The index is consulted while - * deciding HOT-safety though. This arrangement ensures that no new HOT + * deciding HOT-safety though. This arrangement ensures that no new HOT * chains can be created where the new tuple and the old tuple in the * chain have different index keys. * @@ -736,7 +736,7 @@ DefineIndex(Oid relationId, /* * Now take the "reference snapshot" that will be used by validate_index() - * to filter candidate tuples. Beware! There might still be snapshots in + * to filter candidate tuples. Beware! There might still be snapshots in * use that treat some transaction as in-progress that our reference * snapshot treats as committed. If such a recently-committed transaction * deleted tuples in the table, we will not include them in the index; yet @@ -761,7 +761,7 @@ DefineIndex(Oid relationId, * Drop the reference snapshot. We must do this before waiting out other * snapshot holders, else we will deadlock against other processes also * doing CREATE INDEX CONCURRENTLY, which would see our snapshot as one - * they must wait for. But first, save the snapshot's xmin to use as + * they must wait for. But first, save the snapshot's xmin to use as * limitXmin for GetCurrentVirtualXIDs(). */ limitXmin = snapshot->xmin; @@ -771,7 +771,7 @@ DefineIndex(Oid relationId, /* * The index is now valid in the sense that it contains all currently - * interesting tuples. But since it might not contain tuples deleted just + * interesting tuples. But since it might not contain tuples deleted just * before the reference snap was taken, we have to wait out any * transactions that might have older snapshots. Obtain a list of VXIDs * of such transactions, and wait for them individually. 
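The DefineIndex comments around this point describe the second wait phase of CREATE INDEX CONCURRENTLY: the reference snapshot's xmin is saved as limitXmin, and only transactions whose snapshots have an xmin logically older than that still need to be waited out (in the backend, GetCurrentVirtualXIDs collects those VXIDs and VirtualXactLock waits on each). A toy model of just the filtering decision, with bare uint32 XIDs and invented names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

/*
 * Wraparound-aware "a logically precedes b": normal XIDs live on a
 * modulo-2^32 circle, so the test is the sign of the 32-bit difference,
 * as in the backend's TransactionIdPrecedes.
 */
static bool
xid_precedes(TransactionId a, TransactionId b)
{
	return (int32_t) (a - b) < 0;
}

int
main(void)
{
	/* Hypothetical xmins of snapshots still open in other backends. */
	TransactionId open_xmins[] = {90, 100, 105, 4294967290u};
	TransactionId limitXmin = 100;	/* the reference snapshot's xmin */
	size_t		i;

	for (i = 0; i < sizeof(open_xmins) / sizeof(open_xmins[0]); i++)
	{
		/*
		 * A snapshot with an older xmin might still treat some deleting
		 * transaction as in-progress, so the index build must wait it
		 * out.  Note the wrapped-around XID near 2^32 also counts as
		 * older under the modulo arithmetic.
		 */
		if (xid_precedes(open_xmins[i], limitXmin))
			printf("must wait out snapshot with xmin %u\n", open_xmins[i]);
		else
			printf("snapshot with xmin %u cannot hurt us\n", open_xmins[i]);
	}
	return 0;
}

The hunk that follows is about shrinking exactly this wait list: autovacuum workers and lazy VACUUMs won't be fazed by missing index entries, so their snapshots can be left off it.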
@@ -786,7 +786,7 @@ DefineIndex(Oid relationId, * * We can also exclude autovacuum processes and processes running manual * lazy VACUUMs, because they won't be fazed by missing index entries - * either. (Manual ANALYZEs, however, can't be excluded because they + * either. (Manual ANALYZEs, however, can't be excluded because they * might be within transactions that are going to do arbitrary operations * later.) * @@ -875,7 +875,7 @@ CheckMutability(Expr *expr) { /* * First run the expression through the planner. This has a couple of - * important consequences. First, function default arguments will get + * important consequences. First, function default arguments will get * inserted, which may affect volatility (consider "default now()"). * Second, inline-able functions will get inlined, which may allow us to * conclude that the function is really less volatile than it's marked. As @@ -898,7 +898,7 @@ CheckMutability(Expr *expr) * Checks that the given partial-index predicate is valid. * * This used to also constrain the form of the predicate to forms that - * indxpath.c could do something with. However, that seems overly + * indxpath.c could do something with. However, that seems overly * restrictive. One useful application of partial indexes is to apply * a UNIQUE constraint across a subset of a table, and in that scenario * any evaluatable predicate will work. So accept any predicate here @@ -1009,7 +1009,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo, attcollation = exprCollation(expr); /* - * Strip any top-level COLLATE clause. This ensures that we treat + * Strip any top-level COLLATE clause. This ensures that we treat * "x COLLATE y" and "(x COLLATE y)" alike. */ while (IsA(expr, CollateExpr)) @@ -1215,7 +1215,7 @@ GetIndexOpClass(List *opclass, Oid attrType, * 2000/07/30 * * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that - * too for awhile. I'm starting to think we need a better approach. tgl + * too for awhile. I'm starting to think we need a better approach. tgl * 2000/10/01 * * Release 8.0 removes bigbox_ops (which was dead code for a long while @@ -1284,7 +1284,7 @@ GetIndexOpClass(List *opclass, Oid attrType, NameListToString(opclass), accessMethodName))); /* - * Verify that the index operator class accepts this datatype. Note we + * Verify that the index operator class accepts this datatype. Note we * will accept binary compatibility. */ opClassId = HeapTupleGetOid(tuple); @@ -1305,7 +1305,7 @@ GetIndexOpClass(List *opclass, Oid attrType, * GetDefaultOpClass * * Given the OIDs of a datatype and an access method, find the default - * operator class, if any. Returns InvalidOid if there is none. + * operator class, if any. Returns InvalidOid if there is none. */ Oid GetDefaultOpClass(Oid type_id, Oid am_id) @@ -1400,7 +1400,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id) * Create a name for an implicitly created index, sequence, constraint, etc. * * The parameters are typically: the original table name, the original field - * name, and a "type" string (such as "seq" or "pkey"). The field name + * name, and a "type" string (such as "seq" or "pkey"). The field name * and/or type can be NULL if not relevant. * * The result is a palloc'd string. @@ -1408,7 +1408,7 @@ GetDefaultOpClass(Oid type_id, Oid am_id) * The basic result we want is "name1_name2_label", omitting "_name2" or * "_label" when those parameters are NULL. However, we must generate * a name with less than NAMEDATALEN characters! 
So, we truncate one or - * both names if necessary to make a short-enough string. The label part + * both names if necessary to make a short-enough string. The label part * is never truncated (so it had better be reasonably short). * * The caller is responsible for checking uniqueness of the generated @@ -1603,7 +1603,7 @@ ChooseIndexNameAddition(List *colnames) /* * Select the actual names to be used for the columns of an index, given the - * list of IndexElems for the columns. This is mostly about ensuring the + * list of IndexElems for the columns. This is mostly about ensuring the * names are unique so we don't get a conflicting-attribute-names error. * * Returns a List of plain strings (char *, not String nodes). @@ -1714,7 +1714,7 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation, /* * If the relation does exist, check whether it's an index. But note that * the relation might have been dropped between the time we did the name - * lookup and now. In that case, there's nothing to do. + * lookup and now. In that case, there's nothing to do. */ relkind = get_rel_relkind(relId); if (!relkind) diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c index a301d65b60e..5130d512a6a 100644 --- a/src/backend/commands/matview.c +++ b/src/backend/commands/matview.c @@ -240,9 +240,9 @@ ExecRefreshMatView(RefreshMatViewStmt *stmt, const char *queryString, owner = matviewRel->rd_rel->relowner; /* - * Create the transient table that will receive the regenerated data. - * Lock it against access by any other process until commit (by which time - * it will be gone). + * Create the transient table that will receive the regenerated data. Lock + * it against access by any other process until commit (by which time it + * will be gone). */ OIDNewHeap = make_new_heap(matviewOid, tableSpace, concurrent, ExclusiveLock); @@ -319,7 +319,7 @@ refresh_matview_datafill(DestReceiver *dest, Query *query, /* * Use a snapshot with an updated command ID to ensure this query sees - * results of any previously executed queries. (This could only matter if + * results of any previously executed queries. (This could only matter if * the planner executed an allegedly-stable function that changed the * database contents, but let's do it anyway to be safe.) */ @@ -495,9 +495,9 @@ mv_GenerateOper(StringInfo buf, Oid opoid) * * This is called after a new version of the data has been created in a * temporary table. It performs a full outer join against the old version of - * the data, producing "diff" results. This join cannot work if there are any + * the data, producing "diff" results. This join cannot work if there are any * duplicated rows in either the old or new versions, in the sense that every - * column would compare as equal between the two rows. It does work correctly + * column would compare as equal between the two rows. It does work correctly * in the face of rows which have at least one NULL value, with all non-NULL * columns equal. The behavior of NULLs on equality tests and on UNIQUE * indexes turns out to be quite convenient here; the tests we need to make @@ -561,7 +561,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid) /* * We need to ensure that there are not duplicate rows without NULLs in - * the new data set before we can count on the "diff" results. Check for + * the new data set before we can count on the "diff" results. Check for * that in a way that allows showing the first duplicated row found. 
Even * after we pass this test, a unique index on the materialized view may * find a duplicate key problem. @@ -707,7 +707,7 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid) /* Deletes must come before inserts; do them first. */ resetStringInfo(&querybuf); appendStringInfo(&querybuf, - "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY " + "DELETE FROM %s mv WHERE ctid OPERATOR(pg_catalog.=) ANY " "(SELECT diff.tid FROM %s diff " "WHERE diff.tid IS NOT NULL " "AND diff.newdata IS NULL)", diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 5d7b37c674a..4b2baaceff0 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -391,7 +391,7 @@ DefineOpClass(CreateOpClassStmt *stmt) * A minimum expectation therefore is that the caller have execute * privilege with grant option. Since we don't have a way to make the * opclass go away if the grant option is revoked, we choose instead to - * require ownership of the functions. It's also not entirely clear what + * require ownership of the functions. It's also not entirely clear what * permissions should be required on the datatype, but ownership seems * like a safe choice. * @@ -673,7 +673,7 @@ DefineOpClass(CreateOpClassStmt *stmt) opclassoid, procedures, false); /* - * Create dependencies for the opclass proper. Note: we do not create a + * Create dependencies for the opclass proper. Note: we do not create a * dependency link to the AM, because we don't currently support DROP * ACCESS METHOD. */ @@ -1090,7 +1090,7 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) if (OidIsValid(member->sortfamily)) { /* - * Ordering op, check index supports that. (We could perhaps also + * Ordering op, check index supports that. (We could perhaps also * check that the operator returns a type supported by the sortfamily, * but that seems more trouble than it's worth here. If it does not, * the operator will never be matchable to any ORDER BY clause, but no @@ -1219,7 +1219,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid) /* * The default in CREATE OPERATOR CLASS is to use the class' opcintype as - * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype + * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype * isn't available, so make the user specify the types. */ if (!OidIsValid(member->lefttype)) diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index c2560cbce38..85b81b7928f 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -211,7 +211,7 @@ DefineOperator(List *names, List *parameters) functionOid = LookupFuncName(functionName, nargs, typeId, false); /* - * We require EXECUTE rights for the function. This isn't strictly + * We require EXECUTE rights for the function. This isn't strictly * necessary, since EXECUTE will be checked at any attempted use of the * operator, but it seems like a good idea anyway. */ diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index e7c681ab7f4..28e785afb84 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -4,7 +4,7 @@ * Utility commands affecting portals (that is, SQL cursor commands) * * Note: see also tcop/pquery.c, which implements portal operations for - * the FE/BE protocol. This module uses pquery.c for some operations. + * the FE/BE protocol. This module uses pquery.c for some operations. 
* And both modules depend on utils/mmgr/portalmem.c, which controls * storage management for portals (but doesn't run any queries in them). * @@ -89,7 +89,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params, /*---------- * Also copy the outer portal's parameter list into the inner portal's - * memory context. We want to pass down the parameter values in case we + * memory context. We want to pass down the parameter values in case we * had a command like * DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 * This will have been parsed using the outer parameter set and the @@ -106,7 +106,7 @@ PerformCursorOpen(PlannedStmt *stmt, ParamListInfo params, * * If the user didn't specify a SCROLL type, allow or disallow scrolling * based on whether it would require any additional runtime overhead to do - * so. Also, we disallow scrolling for FOR UPDATE cursors. + * so. Also, we disallow scrolling for FOR UPDATE cursors. */ portal->cursorOptions = cstmt->options; if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL))) @@ -365,7 +365,7 @@ PersistHoldablePortal(Portal portal) ExecutorRewind(queryDesc); /* - * Change the destination to output to the tuplestore. Note we tell + * Change the destination to output to the tuplestore. Note we tell * the tuplestore receiver to detoast all data passed through it. */ queryDesc->dest = CreateDestReceiver(DestTuplestore); diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 65431b713d0..10168e3e801 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString) * ExecuteQuery --- implement the 'EXECUTE' utility statement. * * This code also supports CREATE TABLE ... AS EXECUTE. That case is - * indicated by passing a non-null intoClause. The DestReceiver is already + * indicated by passing a non-null intoClause. The DestReceiver is already * set up correctly for CREATE TABLE AS, but we still have to make a few * other adjustments here. * @@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, { /* * Need an EState to evaluate parameters; must not delete it till end - * of query, in case parameters are pass-by-reference. Note that the + * of query, in case parameters are pass-by-reference. Note that the * passed-in "params" could possibly be referenced in the parameter * expressions. */ @@ -237,7 +237,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause, /* * For CREATE TABLE ... AS EXECUTE, we must verify that the prepared * statement is one that produces tuples. Currently we insist that it be - * a plain old SELECT. In future we might consider supporting other + * a plain old SELECT. In future we might consider supporting other * things such as INSERT ... RETURNING, but there are a couple of issues * to be settled first, notably how WITH NO DATA should be handled in such * a case (do we really want to suppress execution?) and how to pass down @@ -529,7 +529,7 @@ FetchPreparedStatementResultDesc(PreparedStatement *stmt) /* * Given a prepared statement that returns tuples, extract the query - * targetlist. Returns NIL if the statement doesn't have a determinable + * targetlist. Returns NIL if the statement doesn't have a determinable * targetlist. 
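
The PerformCursorOpen hunk above reflows the note about copying the outer portal's parameter list into the inner portal's memory context, so that a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 still sees its parameter values after the outer context is gone. A hedged, standalone sketch of that ownership rule, with a toy arena standing in for a memory context (Arena, Param and all names here are invented):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* A toy "memory context": everything in it is freed together. */
    typedef struct Arena { char *base; size_t used, size; } Arena;

    static void *arena_alloc(Arena *a, size_t n)
    {
        void *p = a->base + a->used;    /* no overflow handling in this toy */
        a->used += n;
        return p;
    }

    /* A pass-by-reference parameter value, like a text Datum. */
    typedef struct Param { const char *value; } Param;

    /*
     * Copying only the Param struct would leave value dangling once the
     * caller's storage is freed; the *contents* must move into the context
     * that lives as long as the cursor.
     */
    static Param copy_param_into(Arena *a, const Param *src)
    {
        size_t  len = strlen(src->value) + 1;
        char   *copy = arena_alloc(a, len);
        Param   dst;

        memcpy(copy, src->value, len);
        dst.value = copy;
        return dst;
    }

    int main(void)
    {
        Arena   portal = { malloc(1024), 0, 1024 };
        char   *transient = malloc(16);     /* outer, short-lived storage */
        Param   outer, inner;

        strcpy(transient, "bruce");
        outer.value = transient;
        inner = copy_param_into(&portal, &outer);

        free(transient);                    /* outer context goes away... */
        printf("param = %s\n", inner.value); /* ...but the copy survives */
        free(portal.base);
        return 0;
    }
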
* * Note: this is pretty ugly, but since it's only used in corner cases like @@ -644,7 +644,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es, { /* * Need an EState to evaluate parameters; must not delete it till end - * of query, in case parameters are pass-by-reference. Note that the + * of query, in case parameters are pass-by-reference. Note that the * passed-in "params" could possibly be referenced in the parameter * expressions. */ diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 75b4ce56ae8..6fb34637f88 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -260,7 +260,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) if (funcrettype != LANGUAGE_HANDLEROID) { /* - * We allow OPAQUE just so we can load old dump files. When we + * We allow OPAQUE just so we can load old dump files. When we * see a handler function declared OPAQUE, change it to * LANGUAGE_HANDLER. (This is probably obsolete and removable?) */ diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index 2599e28cc45..03f5514d39b 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -67,7 +67,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString) * To create a schema, must have schema-create privilege on the current * database and must be able to become the target role (this does not * imply that the target role itself must have create-schema privilege). - * The latter provision guards against "giveaway" attacks. Note that a + * The latter provision guards against "giveaway" attacks. Note that a * superuser will always have both of these privileges a fortiori. */ aclresult = pg_database_aclcheck(MyDatabaseId, saved_uid, ACL_CREATE); @@ -132,7 +132,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString) /* * Examine the list of commands embedded in the CREATE SCHEMA command, and * reorganize them into a sequentially executable order with no forward - * references. Note that the result is still a list of raw parsetrees --- + * references. Note that the result is still a list of raw parsetrees --- * we cannot, in general, run parse analysis on one statement until we * have actually executed the prior ones. */ diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index 2829b1e3044..e6084203a88 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -279,7 +279,7 @@ ResetSequence(Oid seq_relid) seq->log_cnt = 0; /* - * Create a new storage file for the sequence. We want to keep the + * Create a new storage file for the sequence. We want to keep the * sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs. * Same with relminmxid, since a sequence will never contain multixacts. */ @@ -325,9 +325,9 @@ fill_seq_with_data(Relation rel, HeapTuple tuple) LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); /* - * Since VACUUM does not process sequences, we have to force the tuple - * to have xmin = FrozenTransactionId now. Otherwise it would become - * invisible to SELECTs after 2G transactions. It is okay to do this + * Since VACUUM does not process sequences, we have to force the tuple to + * have xmin = FrozenTransactionId now. Otherwise it would become + * invisible to SELECTs after 2G transactions. It is okay to do this * because if the current transaction aborts, no other xact will ever * examine the sequence tuple anyway. 
*/ @@ -487,7 +487,7 @@ nextval(PG_FUNCTION_ARGS) * XXX: This is not safe in the presence of concurrent DDL, but acquiring * a lock here is more expensive than letting nextval_internal do it, * since the latter maintains a cache that keeps us from hitting the lock - * manager more than once per transaction. It's not clear whether the + * manager more than once per transaction. It's not clear whether the * performance penalty is material in practice, but for now, we do it this * way. */ @@ -567,7 +567,7 @@ nextval_internal(Oid relid) } /* - * Decide whether we should emit a WAL log record. If so, force up the + * Decide whether we should emit a WAL log record. If so, force up the * fetch count to grab SEQ_LOG_VALS more values than we actually need to * cache. (These will then be usable without logging.) * @@ -674,7 +674,7 @@ nextval_internal(Oid relid) * We must mark the buffer dirty before doing XLogInsert(); see notes in * SyncOneBuffer(). However, we don't apply the desired changes just yet. * This looks like a violation of the buffer update protocol, but it is in - * fact safe because we hold exclusive lock on the buffer. Any other + * fact safe because we hold exclusive lock on the buffer. Any other * process, including a checkpoint, that tries to examine the buffer * contents will block until we release the lock, and then will see the * final state that we install below. @@ -936,7 +936,7 @@ setval3_oid(PG_FUNCTION_ARGS) * Open the sequence and acquire AccessShareLock if needed * * If we haven't touched the sequence already in this transaction, - * we need to acquire AccessShareLock. We arrange for the lock to + * we need to acquire AccessShareLock. We arrange for the lock to * be owned by the top transaction, so that we don't need to do it * more than once per xact. */ @@ -1037,7 +1037,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel) /* * If the sequence has been transactionally replaced since we last saw it, - * discard any cached-but-unissued values. We do not touch the currval() + * discard any cached-but-unissued values. We do not touch the currval() * state, however. */ if (seqrel->rd_rel->relfilenode != elm->filenode) @@ -1554,13 +1554,13 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) page = (Page) BufferGetPage(buffer); /* - * We always reinit the page. However, since this WAL record type is - * also used for updating sequences, it's possible that a hot-standby - * backend is examining the page concurrently; so we mustn't transiently - * trash the buffer. The solution is to build the correct new page - * contents in local workspace and then memcpy into the buffer. Then only - * bytes that are supposed to change will change, even transiently. We - * must palloc the local page for alignment reasons. + * We always reinit the page. However, since this WAL record type is also + * used for updating sequences, it's possible that a hot-standby backend + * is examining the page concurrently; so we mustn't transiently trash the + * buffer. The solution is to build the correct new page contents in + * local workspace and then memcpy into the buffer. Then only bytes that + * are supposed to change will change, even transiently. We must palloc + * the local page for alignment reasons. 
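
The nextval_internal hunks above mention forcing up the fetch count to grab SEQ_LOG_VALS more values than needed, so later calls are satisfied without logging. A hedged sketch of that log-ahead idea (Seq and wal_horizon are invented; SEQ_LOG_VALS mirrors the constant in sequence.c):

    #include <stdio.h>

    #define SEQ_LOG_VALS 32         /* log-ahead distance, as in sequence.c */

    typedef struct Seq
    {
        long    last_value;         /* last value handed out */
        long    wal_horizon;        /* values up to here are already logged */
    } Seq;

    /* Pretend WAL write; in the backend this is an XLogInsert of the page. */
    static void log_sequence(Seq *seq, long horizon)
    {
        seq->wal_horizon = horizon;
        printf("WAL: sequence advanced to %ld\n", horizon);
    }

    static long sequence_nextval(Seq *seq)
    {
        long    next = seq->last_value + 1;

        /*
         * Emit a log record only when we run past the previously logged
         * horizon, and then log SEQ_LOG_VALS further than we need.  After
         * a crash, replay restores the logged horizon, so unlogged cached
         * values are skipped rather than reissued.
         */
        if (next > seq->wal_horizon)
            log_sequence(seq, next + SEQ_LOG_VALS);

        seq->last_value = next;
        return next;
    }

    int main(void)
    {
        Seq     seq = { 0, 0 };

        for (int i = 0; i < 100; i++)
            sequence_nextval(&seq);     /* only 4 "WAL" writes for 100 values */
        printf("current value: %ld\n", seq.last_value);
        return 0;
    }
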
*/ localpage = (Page) palloc(BufferGetPageSize(buffer)); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 619aa78d809..341262b6fc8 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -276,7 +276,7 @@ static void AlterSeqNamespaces(Relation classRel, Relation rel, Oid oldNspOid, Oid newNspOid, ObjectAddresses *objsMoved, LOCKMODE lockmode); static void ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, - bool recurse, bool recursing, LOCKMODE lockmode); + bool recurse, bool recursing, LOCKMODE lockmode); static void ATExecValidateConstraint(Relation rel, char *constrName, bool recurse, bool recursing, LOCKMODE lockmode); static int transformColumnNameList(Oid relId, List *colList, @@ -557,7 +557,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId) &inheritOids, &old_constraints, &parentOidCount); /* - * Create a tuple descriptor from the relation schema. Note that this + * Create a tuple descriptor from the relation schema. Note that this * deals with column names, types, and NOT NULL constraints, but not * default values or CHECK constraints; we handle those below. */ @@ -657,7 +657,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId) CommandCounterIncrement(); /* - * Open the new relation and acquire exclusive lock on it. This isn't + * Open the new relation and acquire exclusive lock on it. This isn't * really necessary for locking out other backends (since they can't see * the new rel anyway until we commit), but it keeps the lock manager from * complaining about deadlock risks. @@ -702,7 +702,7 @@ DropErrorMsgNonExistent(RangeVar *rel, char rightkind, bool missing_ok) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), - errmsg("schema \"%s\" does not exist", rel->schemaname))); + errmsg("schema \"%s\" does not exist", rel->schemaname))); } else { @@ -1022,10 +1022,10 @@ ExecuteTruncate(TruncateStmt *stmt) } /* - * In CASCADE mode, suck in all referencing relations as well. This + * In CASCADE mode, suck in all referencing relations as well. This * requires multiple iterations to find indirectly-dependent relations. At * each phase, we need to exclusive-lock new rels before looking for their - * dependencies, else we might miss something. Also, we check each rel as + * dependencies, else we might miss something. Also, we check each rel as * soon as we open it, to avoid a faux pas such as holding lock for a long * time on a rel we have no permissions for. */ @@ -1246,7 +1246,7 @@ ExecuteTruncate(TruncateStmt *stmt) } /* - * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate + * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate */ static void truncate_check_rel(Relation rel) @@ -1674,7 +1674,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, /* * Now copy the CHECK constraints of this parent, adjusting attnos - * using the completed newattno[] map. Identically named constraints + * using the completed newattno[] map. Identically named constraints * are merged if possible, else we throw error. */ if (constr && constr->num_check > 0) @@ -1735,7 +1735,7 @@ MergeAttributes(List *schema, List *supers, char relpersistence, /* * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing + * commit. That will prevent someone else from deleting or ALTERing * the parent before the child is committed. 
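
The seq_redo hunk just above explains building the complete new page contents in local workspace and copying them into the shared buffer in one memcpy, so a hot-standby backend reading the page concurrently never sees transient garbage. A standalone sketch of the same technique, with sizes and names invented (the real code also pallocs the local page for alignment reasons):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 8192

    /*
     * Rebuild a shared page without exposing a half-initialized state:
     * prepare the full new image privately, then overwrite the shared copy
     * with a single memcpy.  Only bytes that are supposed to change will
     * change, even transiently.
     */
    static void
    overwrite_page_in_place(char *shared_page, const char *tuple,
                            size_t tuple_len)
    {
        char   *localpage = malloc(PAGE_SIZE);

        memset(localpage, 0, PAGE_SIZE);        /* re-init the page image */
        memcpy(localpage, tuple, tuple_len);    /* place the new tuple */

        memcpy(shared_page, localpage, PAGE_SIZE);
        free(localpage);
    }

    int main(void)
    {
        char       *shared = calloc(1, PAGE_SIZE);
        const char  newtuple[] = "seq: last_value=42";

        overwrite_page_in_place(shared, newtuple, sizeof(newtuple));
        printf("%s\n", shared);
        free(shared);
        return 0;
    }
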
*/ heap_close(relation, NoLock); @@ -2243,7 +2243,7 @@ renameatt_internal(Oid myrelid, oldattname))); /* - * if the attribute is inherited, forbid the renaming. if this is a + * if the attribute is inherited, forbid the renaming. if this is a * top-level call to renameatt(), then expected_parents will be 0, so the * effect of this code will be to prohibit the renaming if the attribute * is inherited at all. if this is a recursive call to renameatt(), @@ -2547,7 +2547,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal) newrelname))); /* - * Update pg_class tuple with new relname. (Scribbling on reltup is OK + * Update pg_class tuple with new relname. (Scribbling on reltup is OK * because it's a copy...) */ namestrcpy(&(relform->relname), newrelname); @@ -2603,7 +2603,7 @@ RenameRelationInternal(Oid myrelid, const char *newrelname, bool is_internal) * We also reject these commands if there are any pending AFTER trigger events * for the rel. This is certainly necessary for the rewriting variants of * ALTER TABLE, because they don't preserve tuple TIDs and so the pending - * events would try to fetch the wrong tuples. It might be overly cautious + * events would try to fetch the wrong tuples. It might be overly cautious * in other cases, but again it seems better to err on the side of paranoia. * * REINDEX calls this with "rel" referencing the index to be rebuilt; here @@ -2659,23 +2659,23 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode) * 3. Scan table(s) to check new constraints, and optionally recopy * the data into new table(s). * Phase 3 is not performed unless one or more of the subcommands requires - * it. The intention of this design is to allow multiple independent + * it. The intention of this design is to allow multiple independent * updates of the table schema to be performed with only one pass over the * data. * - * ATPrepCmd performs phase 1. A "work queue" entry is created for + * ATPrepCmd performs phase 1. A "work queue" entry is created for * each table to be affected (there may be multiple affected tables if the * commands traverse a table inheritance hierarchy). Also we do preliminary * validation of the subcommands, including parse transformation of those * expressions that need to be evaluated with respect to the old table * schema. * - * ATRewriteCatalogs performs phase 2 for each affected table. (Note that + * ATRewriteCatalogs performs phase 2 for each affected table. (Note that * phases 2 and 3 normally do no explicit recursion, since phase 1 already * did it --- although some subcommands have to recurse in phase 2 instead.) * Certain subcommands need to be performed before others to avoid * unnecessary conflicts; for example, DROP COLUMN should come before - * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple + * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple * lists, one for each logical "pass" of phase 2. * * ATRewriteTables performs phase 3 for those tables that need it. @@ -2782,17 +2782,18 @@ AlterTableGetLockLevel(List *cmds) * to SELECT */ case AT_SetTableSpace: /* must rewrite heap */ case AT_AlterColumnType: /* must rewrite heap */ - case AT_AddOids: /* must rewrite heap */ + case AT_AddOids: /* must rewrite heap */ cmd_lockmode = AccessExclusiveLock; break; /* - * These subcommands may require addition of toast tables. If we - * add a toast table to a table currently being scanned, we + * These subcommands may require addition of toast tables. 
If + * we add a toast table to a table currently being scanned, we * might miss data added to the new toast table by concurrent * insert transactions. */ - case AT_SetStorage: /* may add toast tables, see ATRewriteCatalogs() */ + case AT_SetStorage:/* may add toast tables, see + * ATRewriteCatalogs() */ cmd_lockmode = AccessExclusiveLock; break; @@ -2808,12 +2809,12 @@ AlterTableGetLockLevel(List *cmds) /* * Subcommands that may be visible to concurrent SELECTs */ - case AT_DropColumn: /* change visible to SELECT */ + case AT_DropColumn: /* change visible to SELECT */ case AT_AddColumnToView: /* CREATE VIEW */ - case AT_DropOids: /* calls AT_DropColumn */ + case AT_DropOids: /* calls AT_DropColumn */ case AT_EnableAlwaysRule: /* may change SELECT rules */ case AT_EnableReplicaRule: /* may change SELECT rules */ - case AT_EnableRule: /* may change SELECT rules */ + case AT_EnableRule: /* may change SELECT rules */ case AT_DisableRule: /* may change SELECT rules */ cmd_lockmode = AccessExclusiveLock; break; @@ -2834,8 +2835,8 @@ AlterTableGetLockLevel(List *cmds) break; /* - * These subcommands affect write operations only. - * XXX Theoretically, these could be ShareRowExclusiveLock. + * These subcommands affect write operations only. XXX + * Theoretically, these could be ShareRowExclusiveLock. */ case AT_ColumnDefault: case AT_ProcessedConstraint: /* becomes AT_AddConstraint */ @@ -2872,9 +2873,9 @@ AlterTableGetLockLevel(List *cmds) * Cases essentially the same as CREATE INDEX. We * could reduce the lock strength to ShareLock if * we can work out how to allow concurrent catalog - * updates. - * XXX Might be set down to ShareRowExclusiveLock - * but requires further analysis. + * updates. XXX Might be set down to + * ShareRowExclusiveLock but requires further + * analysis. */ cmd_lockmode = AccessExclusiveLock; break; @@ -2883,10 +2884,9 @@ AlterTableGetLockLevel(List *cmds) /* * We add triggers to both tables when we add a * Foreign Key, so the lock level must be at least - * as strong as CREATE TRIGGER. - * XXX Might be set down to ShareRowExclusiveLock - * though trigger info is accessed by - * pg_get_triggerdef + * as strong as CREATE TRIGGER. XXX Might be set + * down to ShareRowExclusiveLock though trigger + * info is accessed by pg_get_triggerdef */ cmd_lockmode = AccessExclusiveLock; break; @@ -2902,8 +2902,8 @@ AlterTableGetLockLevel(List *cmds) * started before us will continue to see the old inheritance * behaviour, while queries started after we commit will see * new behaviour. No need to prevent reads or writes to the - * subtable while we hook it up though. - * Changing the TupDesc may be a problem, so keep highest lock. + * subtable while we hook it up though. Changing the TupDesc + * may be a problem, so keep highest lock. */ case AT_AddInherit: case AT_DropInherit: @@ -2912,9 +2912,9 @@ AlterTableGetLockLevel(List *cmds) /* * These subcommands affect implicit row type conversion. They - * have affects similar to CREATE/DROP CAST on queries. - * don't provide for invalidating parse trees as a result of - * such changes, so we keep these at AccessExclusiveLock. + * have affects similar to CREATE/DROP CAST on queries. don't + * provide for invalidating parse trees as a result of such + * changes, so we keep these at AccessExclusiveLock. */ case AT_AddOf: case AT_DropOf: @@ -2940,29 +2940,32 @@ AlterTableGetLockLevel(List *cmds) * updates. 
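
The AlterTableGetLockLevel commentary reflowed through these hunks assigns each ALTER TABLE subcommand its own lock mode, and the statement as a whole takes the strongest one. A hedged sketch of that selection, with a trimmed-down set of modes and subcommands; the enum ordering stands in for the real lock-strength comparison:

    #include <stdio.h>

    /* Lock modes in increasing strength (a small subset, for illustration). */
    typedef enum
    {
        NoLock,
        AccessShareLock,
        ShareUpdateExclusiveLock,
        ShareRowExclusiveLock,
        AccessExclusiveLock
    } LockMode;

    typedef enum { AT_SetStatistics, AT_ColumnDefault, AT_DropColumn } AlterType;

    static LockMode
    lock_for_subcommand(AlterType t)
    {
        switch (t)
        {
            case AT_SetStatistics:
                return ShareUpdateExclusiveLock;    /* uses MVCC catalog reads */
            case AT_ColumnDefault:
                /* affects writes only; XXX could be ShareRowExclusiveLock */
                return AccessExclusiveLock;
            case AT_DropColumn:
            default:
                return AccessExclusiveLock;         /* change visible to SELECT */
        }
    }

    /* The statement uses the strongest lock any of its subcommands needs. */
    static LockMode
    alter_table_lock_level(const AlterType *cmds, int ncmds)
    {
        LockMode    lockmode = NoLock;

        for (int i = 0; i < ncmds; i++)
        {
            LockMode    cmd_lockmode = lock_for_subcommand(cmds[i]);

            if (cmd_lockmode > lockmode)
                lockmode = cmd_lockmode;
        }
        return lockmode;
    }

    int main(void)
    {
        AlterType   cmds[] = { AT_SetStatistics, AT_DropColumn };

        printf("lock level = %d\n",
               alter_table_lock_level(cmds, 2));    /* AccessExclusiveLock */
        return 0;
    }
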
*/ case AT_SetStatistics: /* Uses MVCC in getTableAttrs() */ - case AT_ClusterOn: /* Uses MVCC in getIndexes() */ + case AT_ClusterOn: /* Uses MVCC in getIndexes() */ case AT_DropCluster: /* Uses MVCC in getIndexes() */ - case AT_SetOptions: /* Uses MVCC in getTableAttrs() */ + case AT_SetOptions: /* Uses MVCC in getTableAttrs() */ case AT_ResetOptions: /* Uses MVCC in getTableAttrs() */ cmd_lockmode = ShareUpdateExclusiveLock; break; - case AT_ValidateConstraint: /* Uses MVCC in getConstraints() */ + case AT_ValidateConstraint: /* Uses MVCC in + * getConstraints() */ cmd_lockmode = ShareUpdateExclusiveLock; break; /* * Rel options are more complex than first appears. Options * are set here for tables, views and indexes; for historical - * reasons these can all be used with ALTER TABLE, so we - * can't decide between them using the basic grammar. + * reasons these can all be used with ALTER TABLE, so we can't + * decide between them using the basic grammar. * * XXX Look in detail at each option to determine lock level, - * e.g. - * cmd_lockmode = GetRelOptionsLockLevel((List *) cmd->def); + * e.g. cmd_lockmode = GetRelOptionsLockLevel((List *) + * cmd->def); */ - case AT_SetRelOptions: /* Uses MVCC in getIndexes() and getTables() */ - case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and getTables() */ + case AT_SetRelOptions: /* Uses MVCC in getIndexes() and + * getTables() */ + case AT_ResetRelOptions: /* Uses MVCC in getIndexes() and + * getTables() */ cmd_lockmode = AccessExclusiveLock; break; @@ -3209,7 +3212,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, cmd->subtype = AT_ValidateConstraintRecurse; pass = AT_PASS_MISC; break; - case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */ + case AT_ReplicaIdentity: /* REPLICA IDENTITY ... */ ATSimplePermissions(rel, ATT_TABLE | ATT_MATVIEW); pass = AT_PASS_MISC; /* This command never recurses */ @@ -3258,7 +3261,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* * ATRewriteCatalogs * - * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are + * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are * dispatched in a "safe" execution order (designed to avoid unnecessary * conflicts). */ @@ -3604,8 +3607,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode) if (RelationIsUsedAsCatalogTable(OldHeap)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot rewrite table \"%s\" used as a catalog table", - RelationGetRelationName(OldHeap)))); + errmsg("cannot rewrite table \"%s\" used as a catalog table", + RelationGetRelationName(OldHeap)))); /* * Don't allow rewrite on temp tables of other backends ... their @@ -3856,7 +3859,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) { /* * All predicate locks on the tuples or pages are about to be made - * invalid, because we move tuples around. Promote them to + * invalid, because we move tuples around. Promote them to * relation locks. */ TransferPredicateLocksToHeapRelation(oldrel); @@ -3946,8 +3949,8 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode) HeapTupleSetOid(tuple, tupOid); /* - * Constraints might reference the tableoid column, so initialize - * t_tableOid before evaluating them. + * Constraints might reference the tableoid column, so + * initialize t_tableOid before evaluating them. 
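
The ATRewriteCatalogs "traffic cop" comment above dispatches phase-2 subcommands in pass order, so that, for example, DROP COLUMN runs before ADD COLUMN regardless of the order the user wrote them. A hedged sketch of that pass-grouped work queue, with a trimmed-down pass enum and invented struct names:

    #include <stdio.h>

    /* Logical passes of phase 2, in execution order (a reduced set). */
    typedef enum { AT_PASS_DROP, AT_PASS_ALTER_TYPE, AT_PASS_ADD_COL,
                   AT_PASS_MISC, AT_NUM_PASSES } AlterPass;

    typedef struct Cmd { const char *name; } Cmd;

    #define MAX_CMDS 8

    typedef struct WorkQueueEntry
    {
        Cmd     subcmds[AT_NUM_PASSES][MAX_CMDS];   /* grouped by pass */
        int     ncmds[AT_NUM_PASSES];
    } WorkQueueEntry;

    /* Phase 1: file each subcommand under its pass. */
    static void
    at_prep_cmd(WorkQueueEntry *tab, AlterPass pass, const char *name)
    {
        tab->subcmds[pass][tab->ncmds[pass]++] = (Cmd) { name };
    }

    /* Phase 2: run every pass in order, so drops precede adds. */
    static void
    at_rewrite_catalogs(WorkQueueEntry *tab)
    {
        for (int pass = 0; pass < AT_NUM_PASSES; pass++)
            for (int i = 0; i < tab->ncmds[pass]; i++)
                printf("pass %d: %s\n", pass, tab->subcmds[pass][i].name);
    }

    int main(void)
    {
        WorkQueueEntry  tab = { 0 };

        /* Queued in user order, executed in pass order. */
        at_prep_cmd(&tab, AT_PASS_ADD_COL, "ADD COLUMN b int");
        at_prep_cmd(&tab, AT_PASS_DROP, "DROP COLUMN a");
        at_rewrite_catalogs(&tab);  /* DROP runs first despite being second */
        return 0;
    }
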
*/ tuple->t_tableOid = RelationGetRelid(oldrel); } @@ -4404,7 +4407,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be * * Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it * isn't suitable, throw an error. Currently, we require that the type - * originated with CREATE TYPE AS. We could support any row type, but doing so + * originated with CREATE TYPE AS. We could support any row type, but doing so * would require handling a number of extra corner cases in the DDL commands. */ void @@ -4423,7 +4426,7 @@ check_of_type(HeapTuple typetuple) /* * Close the parent rel, but keep our AccessShareLock on it until xact - * commit. That will prevent someone else from deleting or ALTERing + * commit. That will prevent someone else from deleting or ALTERing * the type before the typed table creation/conversion commits. */ relation_close(typeRelation, NoLock); @@ -4882,7 +4885,7 @@ add_column_collation_dependency(Oid relid, int32 attnum, Oid collid) /* * ALTER TABLE SET WITH OIDS * - * Basically this is an ADD COLUMN for the special OID column. We have + * Basically this is an ADD COLUMN for the special OID column. We have * to cons up a ColumnDef node because the ADD COLUMN code needs one. */ static void @@ -5352,7 +5355,7 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue, LOCKMODE loc * * DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism, * because we have to decide at runtime whether to recurse or not depending - * on whether attinhcount goes to zero or not. (We can't check this in a + * on whether attinhcount goes to zero or not. (We can't check this in a * static pre-pass because it won't handle multiple inheritance situations * correctly.) */ @@ -5600,7 +5603,7 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel, /* * If TryReuseIndex() stashed a relfilenode for us, we used it for the new - * index instead of building from scratch. The DROP of the old edition of + * index instead of building from scratch. The DROP of the old edition of * this index will have scheduled the storage for deletion at commit, so * cancel that pending deletion. */ @@ -5642,7 +5645,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel, elog(ERROR, "index \"%s\" is not unique", indexName); /* - * Determine name to assign to constraint. We require a constraint to + * Determine name to assign to constraint. We require a constraint to * have the same name as the underlying index; therefore, use the index's * existing name as the default constraint name, and if the user * explicitly gives some other name for the constraint, rename the index @@ -5851,7 +5854,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Check if ONLY was specified with ALTER TABLE. If so, allow the - * contraint creation only if there are no children currently. Error out + * contraint creation only if there are no children currently. Error out * otherwise. */ if (!recurse && children != NIL) @@ -5883,7 +5886,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Add a foreign-key constraint to a single table * - * Subroutine for ATExecAddConstraint. Must already hold exclusive + * Subroutine for ATExecAddConstraint. Must already hold exclusive * lock on the rel, and have done appropriate validity checks for it. * We do permissions checks here, however. 
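
The DROP COLUMN comment reflowed above notes that recursion must be decided at runtime, depending on whether attinhcount goes to zero, since a static pre-pass cannot handle multiple inheritance. A hedged sketch of that runtime decision (ChildColumn and its fields are invented stand-ins for pg_attribute state):

    #include <stdio.h>

    typedef struct ChildColumn
    {
        const char *relname;
        int         attinhcount;    /* number of parents supplying this col */
        int         attislocal;     /* also defined locally in the child? */
    } ChildColumn;

    /*
     * One parent drops the column: decrement the inheritance count and only
     * now decide whether the child's copy goes away.  Under multiple
     * inheritance the answer depends on the current count.
     */
    static void
    drop_inherited_column(ChildColumn *col)
    {
        col->attinhcount--;
        if (col->attinhcount == 0 && !col->attislocal)
            printf("%s: column dropped\n", col->relname);
        else
            printf("%s: column kept (attinhcount=%d, attislocal=%d)\n",
                   col->relname, col->attinhcount, col->attislocal);
    }

    int main(void)
    {
        ChildColumn single = { "child_of_one", 1, 0 };
        ChildColumn diamond = { "child_of_two", 2, 0 };

        drop_inherited_column(&single);     /* dropped */
        drop_inherited_column(&diamond);    /* kept: another parent remains */
        return 0;
    }
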
*/ @@ -6022,7 +6025,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * * Note that we have to be careful about the difference between the actual * PK column type and the opclass' declared input type, which might be - * only binary-compatible with it. The declared opcintype is the right + * only binary-compatible with it. The declared opcintype is the right * thing to probe pg_amop with. */ if (numfks != numpks) @@ -6179,7 +6182,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, /* * Upon a change to the cast from the FK column to its pfeqop - * operand, revalidate the constraint. For this evaluation, a + * operand, revalidate the constraint. For this evaluation, a * binary coercion cast is equivalent to no cast at all. While * type implementors should design implicit casts with an eye * toward consistency of operations like equality, we cannot @@ -6197,7 +6200,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * Necessarily, the primary key column must then be of the domain * type. Since the constraint was previously valid, all values on * the foreign side necessarily exist on the primary side and in - * turn conform to the domain. Consequently, we need not treat + * turn conform to the domain. Consequently, we need not treat * domains specially here. * * Since we require that all collations share the same notion of @@ -6207,7 +6210,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * We need not directly consider the PK type. It's necessarily * binary coercible to the opcintype of the unique index column, * and ri_triggers.c will only deal with PK datums in terms of - * that opcintype. Changing the opcintype also changes pfeqop. + * that opcintype. Changing the opcintype also changes pfeqop. */ old_check_ok = (new_pathtype == old_pathtype && new_castfunc == old_castfunc && @@ -6300,14 +6303,14 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, */ static void ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, - bool recurse, bool recursing, LOCKMODE lockmode) + bool recurse, bool recursing, LOCKMODE lockmode) { Relation conrel; SysScanDesc scan; ScanKeyData key; HeapTuple contuple; Form_pg_constraint currcon = NULL; - Constraint *cmdcon = NULL; + Constraint *cmdcon = NULL; bool found = false; Assert(IsA(cmd->def, Constraint)); @@ -6374,8 +6377,8 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, heap_freetuple(copyTuple); /* - * Now we need to update the multiple entries in pg_trigger - * that implement the constraint. + * Now we need to update the multiple entries in pg_trigger that + * implement the constraint. */ tgrel = heap_open(TriggerRelationId, RowExclusiveLock); @@ -6397,7 +6400,7 @@ ATExecAlterConstraint(Relation rel, AlterTableCmd *cmd, CatalogUpdateIndexes(tgrel, copyTuple); InvokeObjectPostAlterHook(TriggerRelationId, - HeapTupleGetOid(tgtuple), 0); + HeapTupleGetOid(tgtuple), 0); heap_freetuple(copyTuple); } @@ -6619,10 +6622,10 @@ transformColumnNameList(Oid relId, List *colList, * transformFkeyGetPrimaryKey - * * Look up the names, attnums, and types of the primary key attributes - * for the pkrel. Also return the index OID and index opclasses of the + * for the pkrel. Also return the index OID and index opclasses of the * index supporting the primary key. * - * All parameters except pkrel are output parameters. Also, the function + * All parameters except pkrel are output parameters. Also, the function * return value is the number of attributes in the primary key. 
* * Used when the column list in the REFERENCES specification is omitted. @@ -6662,7 +6665,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, if (indexStruct->indisprimary && IndexIsValid(indexStruct)) { /* - * Refuse to use a deferrable primary key. This is per SQL spec, + * Refuse to use a deferrable primary key. This is per SQL spec, * and there would be a lot of interesting semantic problems if we * tried to allow it. */ @@ -7592,7 +7595,7 @@ ATPrepAlterColumnType(List **wqueue, tab->relkind == RELKIND_FOREIGN_TABLE) { /* - * For composite types, do this check now. Tables will check it later + * For composite types, do this check now. Tables will check it later * when the table is being rewritten. */ find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL); @@ -7601,7 +7604,7 @@ ATPrepAlterColumnType(List **wqueue, ReleaseSysCache(tuple); /* - * The recursion case is handled by ATSimpleRecursion. However, if we are + * The recursion case is handled by ATSimpleRecursion. However, if we are * told not to recurse, there had better not be any child tables; else the * alter would put them out of step. */ @@ -7710,7 +7713,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * * We remove any implicit coercion steps at the top level of the old * default expression; this has been agreed to satisfy the principle of - * least surprise. (The conversion to the new column type should act like + * least surprise. (The conversion to the new column type should act like * it started from what the user sees as the stored expression, and the * implicit coercions aren't going to be shown.) */ @@ -7739,7 +7742,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * and record enough information to let us recreate the objects. * * The actual recreation does not happen here, but only after we have - * performed all the individual ALTER TYPE operations. We have to save + * performed all the individual ALTER TYPE operations. We have to save * the info before executing ALTER TYPE, though, else the deparser will * get confused. * @@ -7868,7 +7871,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, * used in the trigger's WHEN condition. The first case would * not require any extra work, but the second case would * require updating the WHEN expression, which will take a - * significant amount of new code. Since we can't easily tell + * significant amount of new code. Since we can't easily tell * which case applies, we punt for both. FIXME someday. */ ereport(ERROR, @@ -8144,24 +8147,24 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) /* * Re-parse the index and constraint definitions, and attach them to the - * appropriate work queue entries. We do this before dropping because in + * appropriate work queue entries. We do this before dropping because in * the case of a FOREIGN KEY constraint, we might not yet have exclusive * lock on the table the constraint is attached to, and we need to get * that before dropping. It's safe because the parser won't actually look * at the catalogs to detect the existing entry. * - * We can't rely on the output of deparsing to tell us which relation - * to operate on, because concurrent activity might have made the name + * We can't rely on the output of deparsing to tell us which relation to + * operate on, because concurrent activity might have made the name * resolve differently. 
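
The ATExecAlterColumnType hunk above describes removing implicit coercion steps at the top level of the old default expression before converting it, so the conversion starts from what the user sees as the stored expression. PostgreSQL has a real helper of this shape (strip_implicit_coercions() in nodeFuncs.c, which handles many node types); the following is only a toy illustration of the idea, with NodeTag values invented:

    #include <stdio.h>
    #include <stddef.h>

    typedef enum { T_Const, T_ImplicitCoercion, T_ExplicitCast } NodeTag;

    typedef struct Node
    {
        NodeTag      tag;
        struct Node *arg;           /* child expression, if any */
        const char  *desc;
    } Node;

    /*
     * Peel implicit coercions off the top of the tree.  Explicit casts are
     * part of what the user wrote, so they stay.
     */
    static Node *
    strip_implicit_coercions(Node *expr)
    {
        while (expr->tag == T_ImplicitCoercion)
            expr = expr->arg;
        return expr;
    }

    int main(void)
    {
        Node    c = { T_Const, NULL, "42" };
        Node    inner = { T_ImplicitCoercion, &c, "int4 -> int8" };
        Node    outer = { T_ImplicitCoercion, &inner, "int8 -> numeric" };

        printf("default starts from: %s\n",
               strip_implicit_coercions(&outer)->desc);
        return 0;
    }
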
Instead, we've got to use the OID of the - * constraint or index we're processing to figure out which relation - * to operate on. + * constraint or index we're processing to figure out which relation to + * operate on. */ forboth(oid_item, tab->changedConstraintOids, def_item, tab->changedConstraintDefs) { - Oid oldId = lfirst_oid(oid_item); - Oid relid; - Oid confrelid; + Oid oldId = lfirst_oid(oid_item); + Oid relid; + Oid confrelid; get_constraint_relation_oids(oldId, &relid, &confrelid); ATPostAlterTypeParse(oldId, relid, confrelid, @@ -8171,8 +8174,8 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode) forboth(oid_item, tab->changedIndexOids, def_item, tab->changedIndexDefs) { - Oid oldId = lfirst_oid(oid_item); - Oid relid; + Oid oldId = lfirst_oid(oid_item); + Oid relid; relid = IndexGetRelation(oldId, false); ATPostAlterTypeParse(oldId, relid, InvalidOid, @@ -8238,9 +8241,9 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, cmd)); else if (IsA(stmt, AlterTableStmt)) querytree_list = list_concat(querytree_list, - transformAlterTableStmt(oldRelId, + transformAlterTableStmt(oldRelId, (AlterTableStmt *) stmt, - cmd)); + cmd)); else querytree_list = lappend(querytree_list, stmt); } @@ -8925,13 +8928,13 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation, if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(view_query, true); + view_query_is_auto_updatable(view_query, true); if (view_updatable_error) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("WITH CHECK OPTION is supported only on auto-updatable views"), - errhint("%s", view_updatable_error))); + errmsg("WITH CHECK OPTION is supported only on auto-updatable views"), + errhint("%s", view_updatable_error))); } } @@ -9098,7 +9101,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode) /* Fetch the list of indexes on toast relation if necessary */ if (OidIsValid(reltoastrelid)) { - Relation toastRel = relation_open(reltoastrelid, lockmode); + Relation toastRel = relation_open(reltoastrelid, lockmode); + reltoastidxids = RelationGetIndexList(toastRel); relation_close(toastRel, lockmode); } @@ -9120,8 +9124,8 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode) FlushRelationBuffers(rel); /* - * Relfilenodes are not unique in databases across tablespaces, so we - * need to allocate a new one in the new tablespace. + * Relfilenodes are not unique in databases across tablespaces, so we need + * to allocate a new one in the new tablespace. */ newrelfilenode = GetNewRelFileNode(newTableSpace, NULL, rel->rd_rel->relpersistence); @@ -9236,9 +9240,9 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, forkNum)))); /* - * WAL-log the copied page. Unfortunately we don't know what kind of - * a page this is, so we have to log the full page including any - * unused space. + * WAL-log the copied page. Unfortunately we don't know what kind of a + * page this is, so we have to log the full page including any unused + * space. */ if (use_wal) log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false); @@ -9246,7 +9250,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, PageSetChecksumInplace(page, blkno); /* - * Now write the page. We say isTemp = true even if it's not a temp + * Now write the page. We say isTemp = true even if it's not a temp * rel, because there's no need for smgr to schedule an fsync for this * write; we'll do it ourselves below. 
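
The copy_relation_data hunks above copy a relation block by block, telling smgr not to fsync each write and issuing one fsync for the whole file before commit instead. A hedged standalone sketch of that pattern using plain POSIX I/O (the per-page WAL-logging and checksum steps are omitted, and copy_fork is an invented name):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    #define BLCKSZ 8192

    /*
     * Copy a file block by block.  No per-write fsync; a single fsync at
     * the end makes the whole copy durable, mirroring the "we'll do it
     * ourselves below" comment above.
     */
    static int
    copy_fork(const char *srcpath, const char *dstpath)
    {
        char    buf[BLCKSZ];
        int     src = open(srcpath, O_RDONLY);
        int     dst = open(dstpath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
        ssize_t n;

        if (src < 0 || dst < 0)
            return -1;
        while ((n = read(src, buf, BLCKSZ)) > 0)
            if (write(dst, buf, (size_t) n) != n)
                return -1;
        if (n < 0 || fsync(dst) != 0)   /* one fsync for the whole file */
            return -1;
        close(src);
        close(dst);
        return 0;
    }

    int main(int argc, char **argv)
    {
        if (argc != 3 || copy_fork(argv[1], argv[2]) != 0)
        {
            perror("copy_fork");
            return 1;
        }
        return 0;
    }
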
*/ @@ -9256,7 +9260,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst, pfree(buf); /* - * If the rel is WAL-logged, must fsync before commit. We use heap_sync + * If the rel is WAL-logged, must fsync before commit. We use heap_sync * to ensure that the toast table gets fsync'd too. (For a temp or * unlogged rel we don't care since the data will be gone after a crash * anyway.) @@ -9431,7 +9435,7 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode) MergeConstraintsIntoExisting(child_rel, parent_rel); /* - * OK, it looks valid. Make the catalog entries that show inheritance. + * OK, it looks valid. Make the catalog entries that show inheritance. */ StoreCatalogInheritance1(RelationGetRelid(child_rel), RelationGetRelid(parent_rel), @@ -9907,7 +9911,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode) * Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE * INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or * heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will - * be TypeRelationId). There's no convenient way to do this, so go trawling + * be TypeRelationId). There's no convenient way to do this, so go trawling * through pg_depend. */ static void @@ -10093,7 +10097,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode) /* * ALTER TABLE NOT OF * - * Detach a typed table from its originating type. Just clear reloftype and + * Detach a typed table from its originating type. Just clear reloftype and * remove the dependency. */ static void @@ -10155,7 +10159,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, */ pg_class = heap_open(RelationRelationId, RowExclusiveLock); pg_class_tuple = SearchSysCacheCopy1(RELOID, - ObjectIdGetDatum(RelationGetRelid(rel))); + ObjectIdGetDatum(RelationGetRelid(rel))); if (!HeapTupleIsValid(pg_class_tuple)) elog(ERROR, "cache lookup failed for relation \"%s\"", RelationGetRelationName(rel)); @@ -10191,8 +10195,8 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, } /* - * Clear the indisreplident flag from any index that had it previously, and - * set it for any index that should have it now. + * Clear the indisreplident flag from any index that had it previously, + * and set it for any index that should have it now. 
*/ pg_index = heap_open(IndexRelationId, RowExclusiveLock); foreach(index, RelationGetIndexList(rel)) @@ -10201,7 +10205,7 @@ relation_mark_replica_identity(Relation rel, char ri_type, Oid indexOid, bool dirty = false; pg_index_tuple = SearchSysCacheCopy1(INDEXRELID, - ObjectIdGetDatum(thisIndexOid)); + ObjectIdGetDatum(thisIndexOid)); if (!HeapTupleIsValid(pg_index_tuple)) elog(ERROR, "cache lookup failed for index %u", thisIndexOid); pg_index_form = (Form_pg_index) GETSTRUCT(pg_index_tuple); @@ -10261,7 +10265,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode } else if (stmt->identity_type == REPLICA_IDENTITY_INDEX) { - /* fallthrough */; + /* fallthrough */ ; } else elog(ERROR, "unexpected identity type %u", stmt->identity_type); @@ -10289,20 +10293,20 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode if (!indexRel->rd_am->amcanunique || !indexRel->rd_index->indisunique) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("cannot use non-unique index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); + errmsg("cannot use non-unique index \"%s\" as replica identity", + RelationGetRelationName(indexRel)))); /* Deferred indexes are not guaranteed to be always unique. */ if (!indexRel->rd_index->indimmediate) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use non-immediate index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); + errmsg("cannot use non-immediate index \"%s\" as replica identity", + RelationGetRelationName(indexRel)))); /* Expression indexes aren't supported. */ if (RelationGetIndexExpressions(indexRel) != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use expression index \"%s\" as replica identity", - RelationGetRelationName(indexRel)))); + errmsg("cannot use expression index \"%s\" as replica identity", + RelationGetRelationName(indexRel)))); /* Predicate indexes aren't supported. */ if (RelationGetIndexPredicate(indexRel) != NIL) ereport(ERROR, @@ -10319,7 +10323,7 @@ ATExecReplicaIdentity(Relation rel, ReplicaIdentityStmt *stmt, LOCKMODE lockmode /* Check index for nullable columns. */ for (key = 0; key < indexRel->rd_index->indnatts; key++) { - int16 attno = indexRel->rd_index->indkey.values[key]; + int16 attno = indexRel->rd_index->indkey.values[key]; Form_pg_attribute attr; /* Of the system columns, only oid is indexable. */ @@ -10878,7 +10882,7 @@ AtEOXact_on_commit_actions(bool isCommit) * Post-subcommit or post-subabort cleanup for ON COMMIT management. * * During subabort, we can immediately remove entries created during this - * subtransaction. During subcommit, just relabel entries marked during + * subtransaction. During subcommit, just relabel entries marked during * this subtransaction as being the parent's responsibility. */ void @@ -10922,7 +10926,7 @@ AtEOSubXact_on_commit_actions(bool isCommit, SubTransactionId mySubid, * This is intended as a callback for RangeVarGetRelidExtended(). It allows * the relation to be locked only if (1) it's a plain table, materialized * view, or TOAST table and (2) the current user is the owner (or the - * superuser). This meets the permission-checking needs of CLUSTER, REINDEX + * superuser). This meets the permission-checking needs of CLUSTER, REINDEX * TABLE, and REFRESH MATERIALIZED VIEW; we expose it here so that it can be * used by all. 
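
The ATExecReplicaIdentity hunks above enforce that an index named in REPLICA IDENTITY USING INDEX is unique, immediate (not deferrable), has no expression columns, no predicate, and no nullable columns. A hedged checklist sketch of those conditions (IndexInfo here is an invented plain struct, not the backend's type of the same name):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct IndexInfo
    {
        const char *name;
        bool        indisunique;
        bool        indimmediate;       /* not deferrable */
        bool        has_expressions;
        bool        has_predicate;      /* partial index */
        bool        has_nullable_cols;
    } IndexInfo;

    /* Boiled-down mirror of the validity checks in the hunks above. */
    static bool
    usable_as_replica_identity(const IndexInfo *idx, const char **why)
    {
        if (!idx->indisunique)
            { *why = "index is not unique"; return false; }
        if (!idx->indimmediate)
            { *why = "deferred indexes are not always unique"; return false; }
        if (idx->has_expressions)
            { *why = "expression indexes are not supported"; return false; }
        if (idx->has_predicate)
            { *why = "partial indexes are not supported"; return false; }
        if (idx->has_nullable_cols)
            { *why = "indexed columns must be NOT NULL"; return false; }
        return true;
    }

    int main(void)
    {
        IndexInfo   pkey = { "t_pkey", true, true, false, false, false };
        IndexInfo   part = { "t_partial", true, true, false, true, false };
        const char *why = "";

        printf("%s: %s\n", pkey.name,
               usable_as_replica_identity(&pkey, &why) ? "ok" : why);
        printf("%s: %s\n", part.name,
               usable_as_replica_identity(&part, &why) ? "ok" : why);
        return 0;
    }
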
*/ @@ -10939,7 +10943,7 @@ RangeVarCallbackOwnsTable(const RangeVar *relation, /* * If the relation does exist, check whether it's an index. But note that * the relation might have been dropped between the time we did the name - * lookup and now. In that case, there's nothing to do. + * lookup and now. In that case, there's nothing to do. */ relkind = get_rel_relkind(relId); if (!relkind) @@ -11105,8 +11109,8 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid, relkind != RELKIND_FOREIGN_TABLE) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table", - rv->relname))); + errmsg("\"%s\" is not a table, view, materialized view, sequence, or foreign table", + rv->relname))); ReleaseSysCache(tuple); } diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 357e6e19741..031be37a1e7 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -31,7 +31,7 @@ * To allow CREATE DATABASE to give a new database a default tablespace * that's different from the template database's default, we make the * provision that a zero in pg_class.reltablespace means the database's - * default tablespace. Without this, CREATE DATABASE would have to go in + * default tablespace. Without this, CREATE DATABASE would have to go in * and munge the system catalogs of the new database. * * @@ -281,7 +281,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) * reference the whole path here, but mkdir() uses the first two parts. */ if (strlen(location) + 1 + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + - OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH) + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1 + OIDCHARS > MAXPGPATH) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), errmsg("tablespace location \"%s\" is too long", @@ -488,7 +488,7 @@ DropTableSpace(DropTableSpaceStmt *stmt) * Not all files deleted? However, there can be lingering empty files * in the directories, left behind by for example DROP TABLE, that * have been scheduled for deletion at next checkpoint (see comments - * in mdunlink() for details). We could just delete them immediately, + * in mdunlink() for details). We could just delete them immediately, * but we can't tell them apart from important data files that we * mustn't delete. So instead, we force a checkpoint which will clean * out any lingering files, and try again. @@ -562,10 +562,10 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) linkloc = psprintf("pg_tblspc/%u", tablespaceoid); location_with_version_dir = psprintf("%s/%s", location, - TABLESPACE_VERSION_DIRECTORY); + TABLESPACE_VERSION_DIRECTORY); /* - * Attempt to coerce target directory to safe permissions. If this fails, + * Attempt to coerce target directory to safe permissions. If this fails, * it doesn't exist or has the wrong owner. */ if (chmod(location, S_IRWXU) != 0) @@ -666,7 +666,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid) * Attempt to remove filesystem infrastructure for the tablespace. * * 'redo' indicates we are redoing a drop from XLOG; in that case we should - * not throw an ERROR for problems, just LOG them. The worst consequence of + * not throw an ERROR for problems, just LOG them. 
The worst consequence of * not removing files here would be failure to release some disk space, which * does not justify throwing an error that would require manual intervention * to get the database running again. @@ -684,7 +684,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo) struct stat st; linkloc_with_version_dir = psprintf("pg_tblspc/%u/%s", tablespaceoid, - TABLESPACE_VERSION_DIRECTORY); + TABLESPACE_VERSION_DIRECTORY); /* * Check if the tablespace still contains any files. We try to rmdir each @@ -701,10 +701,10 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo) * * If redo is true then ENOENT is a likely outcome here, and we allow it * to pass without comment. In normal operation we still allow it, but - * with a warning. This is because even though ProcessUtility disallows + * with a warning. This is because even though ProcessUtility disallows * DROP TABLESPACE in a transaction block, it's possible that a previous * DROP failed and rolled back after removing the tablespace directories - * and/or symlink. We want to allow a new DROP attempt to succeed at + * and/or symlink. We want to allow a new DROP attempt to succeed at * removing the catalog entries (and symlink if still present), so we * should not give a hard error here. */ @@ -1119,8 +1119,8 @@ AlterTableSpaceMove(AlterTableSpaceMoveStmt *stmt) /* * Handle permissions-checking here since we are locking the tables - * and also to avoid doing a bunch of work only to fail part-way. - * Note that permissions will also be checked by AlterTableInternal(). + * and also to avoid doing a bunch of work only to fail part-way. Note + * that permissions will also be checked by AlterTableInternal(). * * Caller must be considered an owner on the table to move it. */ @@ -1179,7 +1179,7 @@ check_default_tablespace(char **newval, void **extra, GucSource source) { /* * If we aren't inside a transaction, we cannot do database access so - * cannot verify the name. Must accept the value on faith. + * cannot verify the name. Must accept the value on faith. */ if (IsTransactionState()) { @@ -1290,7 +1290,7 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source) /* * If we aren't inside a transaction, we cannot do database access so - * cannot verify the individual names. Must accept the list on faith. + * cannot verify the individual names. Must accept the list on faith. * Fortunately, there's then also no need to pass the data to fd.c. */ if (IsTransactionState()) diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 5f1ccf02c27..9bf0098b6cb 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -107,7 +107,7 @@ static void AfterTriggerSaveEvent(EState *estate, ResultRelInfo *relinfo, * * constraintOid, if nonzero, says that this trigger is being created * internally to implement that constraint. A suitable pg_depend entry will - * be made to link the trigger to that constraint. constraintOid is zero when + * be made to link the trigger to that constraint. constraintOid is zero when * executing a user-entered CREATE TRIGGER command. (For CREATE CONSTRAINT * TRIGGER, we build a pg_constraint entry internally.) * @@ -418,7 +418,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, if (funcrettype != TRIGGEROID) { /* - * We allow OPAQUE just so we can load old dump files. When we see a + * We allow OPAQUE just so we can load old dump files. When we see a * trigger function declared OPAQUE, change it to TRIGGER. 
*/ if (funcrettype == OPAQUEOID) @@ -440,7 +440,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * references one of the built-in RI_FKey trigger functions, assume it is * from a dump of a pre-7.3 foreign key constraint, and take steps to * convert this legacy representation into a regular foreign key - * constraint. Ugly, but necessary for loading old dump files. + * constraint. Ugly, but necessary for loading old dump files. */ if (stmt->isconstraint && !isInternal && list_length(stmt->args) >= 6 && @@ -503,7 +503,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, /* * If trigger is internally generated, modify the provided trigger name to - * ensure uniqueness by appending the trigger OID. (Callers will usually + * ensure uniqueness by appending the trigger OID. (Callers will usually * supply a simple constant trigger name in these cases.) */ if (isInternal) @@ -627,7 +627,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, int16 attnum; int j; - /* Lookup column name. System columns are not allowed */ + /* Lookup column name. System columns are not allowed */ attnum = attnameAttNum(rel, name, false); if (attnum == InvalidAttrNumber) ereport(ERROR, @@ -732,7 +732,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, else { /* - * User CREATE TRIGGER, so place dependencies. We make trigger be + * User CREATE TRIGGER, so place dependencies. We make trigger be * auto-dropped if its relation is dropped or if the FK relation is * dropped. (Auto drop is compatible with our pre-7.3 behavior.) */ @@ -801,7 +801,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString, * full-fledged foreign key constraints. * * The conversion is complex because a pre-7.3 foreign key involved three - * separate triggers, which were reported separately in dumps. While the + * separate triggers, which were reported separately in dumps. While the * single trigger on the referencing table adds no new information, we need * to know the trigger functions of both of the triggers on the referenced * table to build the constraint declaration. Also, due to lack of proper @@ -2038,7 +2038,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2113,7 +2113,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2503,7 +2503,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -2599,7 +2599,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo, if (newtuple != slottuple) { /* - * Return the modified tuple using the es_trig_tuple_slot. 
We assume + * Return the modified tuple using the es_trig_tuple_slot. We assume * the tuple was allocated in per-tuple memory context, and therefore * will go away by itself. The tuple table slot should not try to * clear it. @@ -3031,7 +3031,7 @@ typedef SetConstraintStateData *SetConstraintState; * Although this is mutable state, we can keep it in AfterTriggerSharedData * because all instances of the same type of event in a given event list will * be fired at the same time, if they were queued between the same firing - * cycles. So we need only ensure that ats_firing_id is zero when attaching + * cycles. So we need only ensure that ats_firing_id is zero when attaching * a new event to an existing AfterTriggerSharedData record. */ typedef uint32 TriggerFlags; @@ -3077,7 +3077,7 @@ typedef struct AfterTriggerEventDataOneCtid typedef struct AfterTriggerEventDataZeroCtids { TriggerFlags ate_flags; /* status bits and offset to shared data */ -} AfterTriggerEventDataZeroCtids; +} AfterTriggerEventDataZeroCtids; #define SizeofTriggerEvent(evt) \ (((evt)->ate_flags & AFTER_TRIGGER_TUP_BITS) == AFTER_TRIGGER_2CTID ? \ @@ -3092,7 +3092,7 @@ typedef struct AfterTriggerEventDataZeroCtids /* * To avoid palloc overhead, we keep trigger events in arrays in successively- * larger chunks (a slightly more sophisticated version of an expansible - * array). The space between CHUNK_DATA_START and freeptr is occupied by + * array). The space between CHUNK_DATA_START and freeptr is occupied by * AfterTriggerEventData records; the space between endfree and endptr is * occupied by AfterTriggerSharedData records. */ @@ -3134,7 +3134,7 @@ typedef struct AfterTriggerEventList * * firing_counter is incremented for each call of afterTriggerInvokeEvents. * We mark firable events with the current firing cycle's ID so that we can - * tell which ones to work on. This ensures sane behavior if a trigger + * tell which ones to work on. This ensures sane behavior if a trigger * function chooses to do SET CONSTRAINTS: the inner SET CONSTRAINTS will * only fire those events that weren't already scheduled for firing. * @@ -3142,7 +3142,7 @@ typedef struct AfterTriggerEventList * This is saved and restored across failed subtransactions. * * events is the current list of deferred events. This is global across - * all subtransactions of the current transaction. In a subtransaction + * all subtransactions of the current transaction. In a subtransaction * abort, we know that the events added by the subtransaction are at the * end of the list, so it is relatively easy to discard them. The event * list chunks themselves are stored in event_cxt. @@ -3174,12 +3174,12 @@ typedef struct AfterTriggerEventList * which we similarly use to clean up at subtransaction abort. * * firing_stack is a stack of copies of subtransaction-start-time - * firing_counter. We use this to recognize which deferred triggers were + * firing_counter. We use this to recognize which deferred triggers were * fired (or marked for firing) within an aborted subtransaction. * * We use GetCurrentTransactionNestLevel() to determine the correct array * index in these stacks. maxtransdepth is the number of allocated entries in - * each stack. (By not keeping our own stack pointer, we can avoid trouble + * each stack. (By not keeping our own stack pointer, we can avoid trouble * in cases where errors during subxact abort cause multiple invocations * of AfterTriggerEndSubXact() at the same nesting depth.) 
*/ @@ -3490,7 +3490,7 @@ afterTriggerRestoreEventList(AfterTriggerEventList *events, * single trigger function. * * Frequently, this will be fired many times in a row for triggers of - * a single relation. Therefore, we cache the open relation and provide + * a single relation. Therefore, we cache the open relation and provide * fmgr lookup cache space at the caller level. (For triggers fired at * the end of a query, we can even piggyback on the executor's state.) * @@ -3566,6 +3566,7 @@ AfterTriggerExecute(AfterTriggerEvent event, } /* fall through */ case AFTER_TRIGGER_FDW_REUSE: + /* * Using ExecMaterializeSlot() rather than ExecFetchSlotTuple() * ensures that tg_trigtuple does not reference tuplestore memory. @@ -4093,7 +4094,7 @@ AfterTriggerFireDeferred(void) } /* - * Run all the remaining triggers. Loop until they are all gone, in case + * Run all the remaining triggers. Loop until they are all gone, in case * some trigger queues more for us to do. */ while (afterTriggerMarkEvents(events, NULL, false)) @@ -4156,7 +4157,7 @@ AfterTriggerBeginSubXact(void) int my_level = GetCurrentTransactionNestLevel(); /* - * Ignore call if the transaction is in aborted state. (Probably + * Ignore call if the transaction is in aborted state. (Probably * shouldn't happen?) */ if (afterTriggers == NULL) @@ -4235,7 +4236,7 @@ AfterTriggerEndSubXact(bool isCommit) CommandId subxact_firing_id; /* - * Ignore call if the transaction is in aborted state. (Probably + * Ignore call if the transaction is in aborted state. (Probably * unneeded) */ if (afterTriggers == NULL) @@ -4378,7 +4379,7 @@ SetConstraintStateCopy(SetConstraintState origstate) } /* - * Add a per-trigger item to a SetConstraintState. Returns possibly-changed + * Add a per-trigger item to a SetConstraintState. Returns possibly-changed * pointer to the state object (it will change if we have to repalloc). */ static SetConstraintState @@ -4463,7 +4464,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) * First, identify all the named constraints and make a list of their * OIDs. Since, unlike the SQL spec, we allow multiple constraints of * the same name within a schema, the specifications are not - * necessarily unique. Our strategy is to target all matching + * necessarily unique. Our strategy is to target all matching * constraints within the first search-path schema that has any * matches, but disregard matches in schemas beyond the first match. * (This is a bit odd but it's the historical behavior.) @@ -4489,7 +4490,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) /* * If we're given the schema name with the constraint, look only - * in that schema. If given a bare constraint name, use the + * in that schema. If given a bare constraint name, use the * search path to find the first matching constraint. */ if (constraint->schemaname) @@ -4593,7 +4594,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) /* * Silently skip triggers that are marked as non-deferrable in - * pg_trigger. This is not an error condition, since a + * pg_trigger. This is not an error condition, since a * deferrable RI constraint may have some non-deferrable * actions. */ @@ -4664,7 +4665,7 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt) /* * Make sure a snapshot has been established in case trigger - * functions need one. Note that we avoid setting a snapshot if + * functions need one. Note that we avoid setting a snapshot if * we don't find at least one trigger that has to be fired now. 
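The "loop until they are all gone" behavior of AfterTriggerFireDeferred, together with the firing-id marking described earlier, can be sketched standalone. The toy queue and all names here are invented; the only assumption carried over is that a fired trigger may append new events:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAXEVENTS 16

    /* Toy deferred-event queue: firing id 0 means "not yet scheduled". */
    static unsigned event_firing[MAXEVENTS];
    static bool     event_done[MAXEVENTS];
    static int      nevents = 3;        /* three events queued at commit */
    static unsigned firing_counter = 1;

    /* Mark every unfired, unscheduled event with a fresh firing id. */
    static bool mark_events(unsigned firing_id)
    {
        bool found = false;
        int  i;

        for (i = 0; i < nevents; i++)
        {
            if (!event_done[i] && event_firing[i] == 0)
            {
                event_firing[i] = firing_id;
                found = true;
            }
        }
        return found;
    }

    /* Fire everything carrying the given id; firing may queue more events. */
    static void invoke_events(unsigned firing_id)
    {
        int i;

        for (i = 0; i < nevents; i++)
        {
            if (!event_done[i] && event_firing[i] == firing_id)
            {
                event_done[i] = true;
                printf("fired event %d in cycle %u\n", i, firing_id);
                if (i == 1 && nevents < MAXEVENTS)
                    nevents++;          /* this trigger queued a new event */
            }
        }
    }

    int main(void)
    {
        /* Loop until marking finds nothing new, exactly because a fired
         * trigger may have queued more work for us to do. */
        while (mark_events(firing_counter))
        {
            invoke_events(firing_counter);
            firing_counter++;
        }
        return 0;
    }

Because each cycle only fires events stamped with its own id, a nested SET CONSTRAINTS started from inside a trigger cannot re-fire events that an outer cycle already claimed.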
* This is so that BEGIN; SET CONSTRAINTS ...; SET TRANSACTION * ISOLATION LEVEL SERIALIZABLE; ... works properly. (If we are @@ -4724,7 +4725,7 @@ AfterTriggerPendingOnRel(Oid relid) AfterTriggerShared evtshared = GetTriggerSharedData(event); /* - * We can ignore completed events. (Even if a DONE flag is rolled + * We can ignore completed events. (Even if a DONE flag is rolled * back by subxact abort, it's OK because the effects of the TRUNCATE * or whatever must get rolled back too.) */ @@ -4765,7 +4766,7 @@ AfterTriggerPendingOnRel(Oid relid) * be fired for an event. * * NOTE: this is called whenever there are any triggers associated with - * the event (even if they are disabled). This function decides which + * the event (even if they are disabled). This function decides which * triggers actually need to be queued. * ---------- */ diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index c1ee69b3233..f377c193719 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -514,8 +514,8 @@ DefineType(List *names, List *parameters) analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid); /* - * Check permissions on functions. We choose to require the creator/owner - * of a type to also own the underlying functions. Since creating a type + * Check permissions on functions. We choose to require the creator/owner + * of a type to also own the underlying functions. Since creating a type * is tantamount to granting public execute access on the functions, the * minimum sane check would be for execute-with-grant-option. But we * don't have a way to make the type go away if the grant option is @@ -552,7 +552,7 @@ DefineType(List *names, List *parameters) * now have TypeCreate do all the real work. * * Note: the pg_type.oid is stored in user tables as array elements (base - * types) in ArrayType and in composite types in DatumTupleFields. This + * types) in ArrayType and in composite types in DatumTupleFields. This * oid must be preserved by binary upgrades. */ typoid = @@ -725,7 +725,7 @@ DefineDomain(CreateDomainStmt *stmt) get_namespace_name(domainNamespace)); /* - * Check for collision with an existing type name. If there is one and + * Check for collision with an existing type name. If there is one and * it's an autogenerated array, we can rename it out of the way. */ old_type_oid = GetSysCacheOid2(TYPENAMENSP, @@ -1076,7 +1076,7 @@ DefineEnum(CreateEnumStmt *stmt) get_namespace_name(enumNamespace)); /* - * Check for collision with an existing type name. If there is one and + * Check for collision with an existing type name. If there is one and * it's an autogenerated array, we can rename it out of the way. */ old_type_oid = GetSysCacheOid2(TYPENAMENSP, @@ -1193,7 +1193,7 @@ AlterEnum(AlterEnumStmt *stmt, bool isTopLevel) /* * Ordinarily we disallow adding values within transaction blocks, because * we can't cope with enum OID values getting into indexes and then having - * their defining pg_enum entries go away. However, it's okay if the enum + * their defining pg_enum entries go away. However, it's okay if the enum * type was created in the current transaction, since then there can be no * such indexes that wouldn't themselves go away on rollback. (We support * this case because pg_dump --binary-upgrade needs it.) We test this by @@ -1515,7 +1515,7 @@ DefineRange(CreateRangeStmt *stmt) * impossible to define a polymorphic constructor; we have to generate new * constructor functions explicitly for each range type. 
* - * We actually define 4 functions, with 0 through 3 arguments. This is just + * We actually define 4 functions, with 0 through 3 arguments. This is just * to offer more convenience for the user. */ static void @@ -2277,7 +2277,7 @@ AlterDomainNotNull(List *names, bool notNull) /* * In principle the auxiliary information for this * error should be errdatatype(), but errtablecol() - * seems considerably more useful in practice. Since + * seems considerably more useful in practice. Since * this code only executes in an ALTER DOMAIN command, * the client should already know which domain is in * question. @@ -2300,7 +2300,7 @@ AlterDomainNotNull(List *names, bool notNull) } /* - * Okay to update pg_type row. We can scribble on typTup because it's a + * Okay to update pg_type row. We can scribble on typTup because it's a * copy. */ typTup->typnotnull = notNull; @@ -2488,7 +2488,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) /* * Since all other constraint types throw errors, this must be a check - * constraint. First, process the constraint expression and add an entry + * constraint. First, process the constraint expression and add an entry * to pg_constraint. */ @@ -2674,7 +2674,7 @@ validateDomainConstraint(Oid domainoid, char *ccbin) /* * In principle the auxiliary information for this error * should be errdomainconstraint(), but errtablecol() - * seems considerably more useful in practice. Since this + * seems considerably more useful in practice. Since this * code only executes in an ALTER DOMAIN command, the * client should already know which domain is in question, * and which constraint too. @@ -2857,7 +2857,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode) continue; /* - * Okay, add column to result. We store the columns in column-number + * Okay, add column to result. We store the columns in column-number * order; this is just a hack to improve predictability of regression * test output ... */ @@ -2944,7 +2944,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid, /* * Set up a CoerceToDomainValue to represent the occurrence of VALUE in - * the expression. Note that it will appear to have the type of the base + * the expression. Note that it will appear to have the type of the base * type, not the domain. This seems correct since within the check * expression, we should not assume the input value can be considered a * member of the domain. @@ -3317,7 +3317,7 @@ AlterTypeOwner(List *names, Oid newOwnerId, ObjectType objecttype) /* * If it's a composite type, invoke ATExecChangeOwner so that we fix - * up the pg_class entry properly. That will call back to + * up the pg_class entry properly. That will call back to * AlterTypeOwnerInternal to take care of the pg_type entry(s). */ if (typTup->typtype == TYPTYPE_COMPOSITE) @@ -3464,7 +3464,7 @@ AlterTypeNamespace_oid(Oid typeOid, Oid nspOid, ObjectAddresses *objsMoved) * Caller must have already checked privileges. * * The function automatically recurses to process the type's array type, - * if any. isImplicitArray should be TRUE only when doing this internal + * if any. isImplicitArray should be TRUE only when doing this internal * recursion (outside callers must never try to move an array type directly). 
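The recursion rule for array types stated above is easy to misread, so a small sketch may help. TypeRowSketch and move_type_sketch are invented stand-ins, not catalog code:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy catalog row: a type may own one auto-generated array type. */
    typedef struct TypeRowSketch {
        const char           *name;
        int                   nsp;    /* namespace id */
        struct TypeRowSketch *array_type;
    } TypeRowSketch;

    /* Move a type to a new namespace, then recurse once into its implicit
     * array type; outside callers always start from the base type. */
    static void move_type_sketch(TypeRowSketch *t, int new_nsp,
                                 bool is_implicit_array)
    {
        t->nsp = new_nsp;
        printf("moved %s%s\n", t->name,
               is_implicit_array ? " (implicit array)" : "");
        if (t->array_type != NULL)
            move_type_sketch(t->array_type, new_nsp, true);
    }

    int main(void)
    {
        TypeRowSketch arr = { "_mytype", 1, NULL };
        TypeRowSketch base = { "mytype", 1, &arr };

        move_type_sketch(&base, 2, false);
        return 0;
    }

Keeping the base type and its array type in the same namespace is an invariant, which is why only the internal recursion is allowed to touch the array type directly.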
* * If errorOnTableType is TRUE, the function errors out if the type is diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 7f5b8473d81..d3a2044191b 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -995,7 +995,7 @@ DropRole(DropRoleStmt *stmt) ReleaseSysCache(tuple); /* - * Remove role from the pg_auth_members table. We have to remove all + * Remove role from the pg_auth_members table. We have to remove all * tuples that show it as either a role or a member. * * XXX what about grantor entries? Maybe we should do one heap scan. @@ -1091,7 +1091,7 @@ RenameRole(const char *oldname, const char *newname) * XXX Client applications probably store the session user somewhere, so * renaming it could cause confusion. On the other hand, there may not be * an actual problem besides a little confusion, so think about this and - * decide. Same for SET ROLE ... we don't restrict renaming the current + * decide. Same for SET ROLE ... we don't restrict renaming the current * effective userid, though. */ @@ -1347,7 +1347,7 @@ AddRoleMems(const char *rolename, Oid roleid, /* * Check permissions: must have createrole or admin option on the role to - * be changed. To mess with a superuser role, you gotta be superuser. + * be changed. To mess with a superuser role, you gotta be superuser. */ if (superuser_arg(roleid)) { @@ -1493,7 +1493,7 @@ DelRoleMems(const char *rolename, Oid roleid, /* * Check permissions: must have createrole or admin option on the role to - * be changed. To mess with a superuser role, you gotta be superuser. + * be changed. To mess with a superuser role, you gotta be superuser. */ if (superuser_arg(roleid)) { diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index ded1841dc65..3d2c73902c6 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -381,18 +381,18 @@ get_rel_oids(Oid relid, const RangeVar *vacrel) * * The output parameters are: * - oldestXmin is the cutoff value used to distinguish whether tuples are - * DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum). + * DEAD or RECENTLY_DEAD (see HeapTupleSatisfiesVacuum). * - freezeLimit is the Xid below which all Xids are replaced by - * FrozenTransactionId during vacuum. + * FrozenTransactionId during vacuum. * - xidFullScanLimit (computed from table_freeze_age parameter) - * represents a minimum Xid value; a table whose relfrozenxid is older than - * this will have a full-table vacuum applied to it, to freeze tuples across - * the whole table. Vacuuming a table younger than this value can use a - * partial scan. + * represents a minimum Xid value; a table whose relfrozenxid is older than + * this will have a full-table vacuum applied to it, to freeze tuples across + * the whole table. Vacuuming a table younger than this value can use a + * partial scan. * - multiXactCutoff is the value below which all MultiXactIds are removed from - * Xmax. + * Xmax. * - mxactFullScanLimit is a value against which a table's relminmxid value is - * compared to produce a full-table vacuum, as with xidFullScanLimit. + * compared to produce a full-table vacuum, as with xidFullScanLimit. * * xidFullScanLimit and mxactFullScanLimit can be passed as NULL if caller is * not interested. @@ -417,9 +417,9 @@ vacuum_set_xid_limits(Relation rel, MultiXactId safeMxactLimit; /* - * We can always ignore processes running lazy vacuum. This is because we + * We can always ignore processes running lazy vacuum. 
This is because we * use these values only for deciding which tuples we must keep in the - * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to + * tables. Since lazy vacuum doesn't write its XID anywhere, it's safe to * ignore it. In theory it could be problematic to ignore lazy vacuums in * a full vacuum, but keep in mind that only one vacuum process can be * working on a particular table at any time, and that each vacuum is @@ -566,7 +566,7 @@ vacuum_set_xid_limits(Relation rel, * If we scanned the whole relation then we should just use the count of * live tuples seen; but if we did not, we should not trust the count * unreservedly, especially not in VACUUM, which may have scanned a quite - * nonrandom subset of the table. When we have only partial information, + * nonrandom subset of the table. When we have only partial information, * we take the old value of pg_class.reltuples as a measurement of the * tuple density in the unscanned pages. * @@ -712,7 +712,7 @@ vac_update_relstats(Relation relation, /* * If we have discovered that there are no indexes, then there's no - * primary key either. This could be done more thoroughly... + * primary key either. This could be done more thoroughly... */ if (pgcform->relhaspkey && !hasindex) { @@ -772,7 +772,7 @@ vac_update_relstats(Relation relation, * truncate pg_clog and pg_multixact. * * We violate transaction semantics here by overwriting the database's - * existing pg_database tuple with the new value. This is reasonably + * existing pg_database tuple with the new value. This is reasonably * safe since the new value is correct whether or not this transaction * commits. As with vac_update_relstats, this avoids leaving dead tuples * behind after a VACUUM. @@ -892,7 +892,7 @@ vac_update_datfrozenxid(void) * Also update the XID wrap limit info maintained by varsup.c. * * The passed XID is simply the one I just wrote into my pg_database - * entry. It's used to initialize the "min" calculation. + * entry. It's used to initialize the "min" calculation. * * This routine is only invoked when we've managed to change our * DB's datfrozenxid entry, or we found that the shared XID-wrap-limit @@ -976,7 +976,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti) /* * Update the wrap limit for GetNewTransactionId and creation of new * MultiXactIds. Note: these functions will also signal the postmaster - * for an(other) autovac cycle if needed. XXX should we avoid possibly + * for an(other) autovac cycle if needed. XXX should we avoid possibly * signalling twice? */ SetTransactionIdLimit(frozenXID, oldestxid_datoid); @@ -988,7 +988,7 @@ vac_truncate_clog(TransactionId frozenXID, MultiXactId minMulti) * vacuum_rel() -- vacuum one heap relation * * Doing one heap at a time incurs extra overhead, since we need to - * check that the heap exists again just before we vacuum it. The + * check that the heap exists again just before we vacuum it. The * reason that we do this is so that vacuuming can be spread across * many small transactions. Otherwise, two-phase locking would require * us to lock the entire database during one pass of the vacuum cleaner. @@ -1045,7 +1045,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) } /* - * Check for user-requested abort. Note we want this to be inside a + * Check for user-requested abort. Note we want this to be inside a * transaction, so xact.c doesn't issue useless WARNING. 
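The cutoff arithmetic that vacuum_set_xid_limits documents above can be pictured with plain 64-bit counters. The real code uses 32-bit XIDs with wraparound-aware comparisons, which this sketch deliberately ignores; the names and values are invented:

    #include <stdint.h>
    #include <stdio.h>

    /* Schematic freeze-limit computation: back off freeze_min_age from the
     * oldest xmin, but never drop below a floor derived from the wraparound
     * safety margin. Plain integers stand in for TransactionIds. */
    static int64_t freeze_limit_sketch(int64_t oldest_xmin,
                                       int64_t freeze_min_age,
                                       int64_t safe_limit)
    {
        int64_t limit = oldest_xmin - freeze_min_age;

        if (limit < safe_limit)     /* implausibly old: fall back to floor */
            limit = safe_limit;
        return limit;
    }

    int main(void)
    {
        printf("freezeLimit = %lld\n",
               (long long) freeze_limit_sketch(1000000, 200000, 900000));
        return 0;
    }

In this run the raw cutoff (800000) precedes the safe floor, so the floor wins; the real code additionally issues a warning in that case, since it usually means an old prepared transaction or replication slot is holding back the horizon.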
*/ CHECK_FOR_INTERRUPTS(); @@ -1092,7 +1092,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) * * We allow the user to vacuum a table if he is superuser, the table * owner, or the database owner (but in the latter case, only if it's not - * a shared relation). pg_class_ownercheck includes the superuser case. + * a shared relation). pg_class_ownercheck includes the superuser case. * * Note we choose to treat permissions failure as a WARNING and keep * trying to vacuum the rest of the DB --- is this appropriate? @@ -1220,7 +1220,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) /* * If the relation has a secondary toast rel, vacuum that too while we * still hold the session lock on the master table. Note however that - * "analyze" will not get done on the toast table. This is good, because + * "analyze" will not get done on the toast table. This is good, because * the toaster always uses hardcoded index access and statistics are * totally unimportant for toast relations. */ @@ -1239,7 +1239,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound) /* * Open all the vacuumable indexes of the given relation, obtaining the - * specified kind of lock on each. Return an array of Relation pointers for + * specified kind of lock on each. Return an array of Relation pointers for * the indexes into *Irel, and the number of indexes into *nindexes. * * We consider an index vacuumable if it is marked insertable (IndexIsReady). @@ -1289,7 +1289,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode, } /* - * Release the resources acquired by vac_open_indexes. Optionally release + * Release the resources acquired by vac_open_indexes. Optionally release * the locks (say NoLock to keep 'em). */ void diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 3870df606b7..b4abeed5ac9 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -473,7 +473,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * Before entering the main loop, establish the invariant that * next_not_all_visible_block is the next block number >= blkno that's not * all-visible according to the visibility map, or nblocks if there's no - * such block. Also, we set up the skipping_all_visible_blocks flag, + * such block. Also, we set up the skipping_all_visible_blocks flag, * which is needed because we need hysteresis in the decision: once we've * started skipping blocks, we may as well skip everything up to the next * not-all-visible block. @@ -706,10 +706,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * It's possible that another backend has extended the heap, * initialized the page, and then failed to WAL-log the page * due to an ERROR. Since heap extension is not WAL-logged, - * recovery might try to replay our record setting the - * page all-visible and find that the page isn't initialized, - * which will cause a PANIC. To prevent that, check whether - * the page has been previously WAL-logged, and if not, do that + * recovery might try to replay our record setting the page + * all-visible and find that the page isn't initialized, which + * will cause a PANIC. To prevent that, check whether the + * page has been previously WAL-logged, and if not, do that * now. 
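The hysteresis decision described in the lazy_scan_heap hunk above can be shown in miniature. Here the visibility map is faked with a boolean array, and the threshold is an arbitrary stand-in for SKIP_PAGES_THRESHOLD:

    #include <stdbool.h>
    #include <stdio.h>

    #define NBLOCKS        12
    #define SKIP_THRESHOLD 3    /* only skip runs longer than this */

    static bool all_visible[NBLOCKS] = {
        true, true, true, true, false,      /* long run: worth skipping */
        true, true, false,                  /* short run: read it anyway */
        true, true, true, true
    };

    /* Next block >= blkno that is not all-visible, or NBLOCKS if none. */
    static int next_not_all_visible(int blkno)
    {
        while (blkno < NBLOCKS && all_visible[blkno])
            blkno++;
        return blkno;
    }

    int main(void)
    {
        int  next_nav = next_not_all_visible(0);
        bool skipping = (next_nav >= SKIP_THRESHOLD);
        int  blkno;

        for (blkno = 0; blkno < NBLOCKS; blkno++)
        {
            if (blkno == next_nav)
            {
                /* decide once per run whether the next run is skippable */
                next_nav = next_not_all_visible(blkno + 1);
                skipping = (next_nav - blkno > SKIP_THRESHOLD);
            }
            else if (skipping)
                continue;               /* skip the whole all-visible run */
            printf("scanning block %d\n", blkno);
        }
        return 0;
    }

The run of blocks 0 through 3 is skipped, while the short all-visible run at blocks 5 and 6 is read anyway: once the decision is made per run, it sticks, which is the hysteresis the comment asks for.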
*/ if (RelationNeedsWAL(onerel) && @@ -834,8 +834,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * NB: Like with per-tuple hint bits, we can't set the * PD_ALL_VISIBLE flag if the inserter committed * asynchronously. See SetHintBits for more info. Check - * that the tuple is hinted xmin-committed because - * of that. + * that the tuple is hinted xmin-committed because of + * that. */ if (all_visible) { @@ -972,7 +972,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, /* * It should never be the case that the visibility map page is set * while the page-level bit is clear, but the reverse is allowed - * (if checksums are not enabled). Regardless, set both bits + * (if checksums are not enabled). Regardless, set both bits * so that we get back in sync. * * NB: If the heap page is all-visible but the VM bit is not set, @@ -1034,8 +1034,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, /* * If we remembered any tuples for deletion, then the page will be * visited again by lazy_vacuum_heap, which will compute and record - * its post-compaction free space. If not, then we're done with this - * page, so remember its free space as-is. (This path will always be + * its post-compaction free space. If not, then we're done with this + * page, so remember its free space as-is. (This path will always be * taken if there are no indexes.) */ if (vacrelstats->num_dead_tuples == prev_dead_count) @@ -1635,9 +1635,9 @@ static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks) { long maxtuples; - int vac_work_mem = IsAutoVacuumWorkerProcess() && - autovacuum_work_mem != -1 ? - autovacuum_work_mem : maintenance_work_mem; + int vac_work_mem = IsAutoVacuumWorkerProcess() && + autovacuum_work_mem != -1 ? + autovacuum_work_mem : maintenance_work_mem; if (vacrelstats->hasindex) { diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 18133242f73..f299738d66b 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -176,7 +176,7 @@ check_datestyle(char **newval, void **extra, GucSource source) } /* - * Prepare the canonical string to return. GUC wants it malloc'd. + * Prepare the canonical string to return. GUC wants it malloc'd. */ result = (char *) malloc(32); if (!result) @@ -257,7 +257,7 @@ check_timezone(char **newval, void **extra, GucSource source) if (pg_strncasecmp(*newval, "interval", 8) == 0) { /* - * Support INTERVAL 'foo'. This is for SQL spec compliance, not + * Support INTERVAL 'foo'. This is for SQL spec compliance, not * because it has any actual real-world usefulness. */ const char *valueptr = *newval; @@ -281,7 +281,7 @@ check_timezone(char **newval, void **extra, GucSource source) /* * Try to parse it. XXX an invalid interval format will result in - * ereport(ERROR), which is not desirable for GUC. We did what we + * ereport(ERROR), which is not desirable for GUC. We did what we * could to guard against this in flatten_set_variable_args, but a * string coming in from postgresql.conf might contain anything. */ @@ -466,7 +466,7 @@ show_log_timezone(void) * We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and * we also always allow changes from read-write to read-only. However, * read-only may be changed to read-write only when in a top-level transaction - * that has not yet taken an initial snapshot. Can't do it in a hot standby + * that has not yet taken an initial snapshot. Can't do it in a hot standby * slave, either.
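Stepping back to the lazy_space_alloc hunk above: the shape of that assignment, and what the chosen budget buys, can be shown with stand-in GUC variables (the values here are invented):

    #include <stdio.h>

    /* Stand-ins for the GUCs; -1 means "fall back to maintenance_work_mem". */
    static int autovacuum_work_mem = -1;       /* in KB */
    static int maintenance_work_mem = 65536;   /* in KB */

    static int is_autovacuum_worker = 1;

    int main(void)
    {
        /* Same shape as the assignment in the hunk: an autovacuum worker
         * honors autovacuum_work_mem when it is set; everyone else (and
         * workers without an override) uses maintenance_work_mem. */
        int  vac_work_mem = (is_autovacuum_worker && autovacuum_work_mem != -1) ?
                            autovacuum_work_mem : maintenance_work_mem;
        long maxtuples = (long) vac_work_mem * 1024L / 6;  /* ~6 bytes per TID */

        printf("vac_work_mem = %d KB, room for about %ld dead TIDs\n",
               vac_work_mem, maxtuples);
        return 0;
    }

The budget caps the dead-tuple TID array; when it fills, the scan has to pause, clean the indexes, and resume, so a separate autovacuum knob lets background workers use a smaller budget than a foreground VACUUM.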
* * If we are not in a transaction at all, just allow the change; it means @@ -627,7 +627,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source) * * We can't roll back the random sequence on error, and we don't want * config file reloads to affect it, so we only want interactive SET SEED - * commands to set it. We use the "extra" storage to ensure that rollbacks + * commands to set it. We use the "extra" storage to ensure that rollbacks * don't try to do the operation again. */ @@ -903,7 +903,7 @@ const char * show_role(void) { /* - * Check whether SET ROLE is active; if not return "none". This is a + * Check whether SET ROLE is active; if not return "none". This is a * kluge to deal with the fact that SET SESSION AUTHORIZATION logically * resets SET ROLE to NONE, but we cannot set the GUC role variable from * assign_session_authorization (because we haven't got enough info to diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index bc085666fbd..683621c35e5 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -52,7 +52,7 @@ validateWithCheckOption(char *value) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid value for \"check_option\" option"), - errdetail("Valid values are \"local\", and \"cascaded\"."))); + errdetail("Valid values are \"local\", and \"cascaded\"."))); } } @@ -344,11 +344,11 @@ UpdateRangeTableOfViewParse(Oid viewOid, Query *viewParse) *rt_entry2; /* - * Make a copy of the given parsetree. It's not so much that we don't + * Make a copy of the given parsetree. It's not so much that we don't * want to scribble on our input, it's that the parser has a bad habit of * outputting multiple links to the same subtree for constructs like * BETWEEN, and we mustn't have OffsetVarNodes increment the varno of a - * Var node twice. copyObject will expand any multiply-referenced subtree + * Var node twice. copyObject will expand any multiply-referenced subtree * into multiple copies. */ viewParse = (Query *) copyObject(viewParse); @@ -460,13 +460,13 @@ DefineView(ViewStmt *stmt, const char *queryString) } /* - * If the check option is specified, look to see if the view is - * actually auto-updatable or not. + * If the check option is specified, look to see if the view is actually + * auto-updatable or not. */ if (check_option) { const char *view_updatable_error = - view_query_is_auto_updatable(viewParse, true); + view_query_is_auto_updatable(viewParse, true); if (view_updatable_error) ereport(ERROR, @@ -513,7 +513,7 @@ DefineView(ViewStmt *stmt, const char *queryString) /* * If the user didn't explicitly ask for a temporary view, check whether - * we need one implicitly. We allow TEMP to be inserted automatically as + * we need one implicitly. We allow TEMP to be inserted automatically as * long as the CREATE command is consistent with that --- no explicit * schema name. */ diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 8c01a63500d..640964c5b7c 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -317,7 +317,7 @@ ExecMarkPos(PlanState *node) * * NOTE: the semantics of this are that the first ExecProcNode following * the restore operation will yield the same tuple as the first one following - * the mark operation. It is unspecified what happens to the plan node's + * the mark operation. It is unspecified what happens to the plan node's * result TupleTableSlot. 
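The mark/restore contract stated above is easiest to see on a toy scanner; ScanSketch is invented and stands in for a whole executor node:

    #include <stdio.h>

    /* Toy scan with mark/restore: the first fetch after restore_pos must
     * return the same row as the first fetch after mark_pos did. */
    typedef struct {
        const int *rows;
        int        nrows;
        int        pos;       /* next row to return */
        int        markpos;   /* position saved by mark_pos */
    } ScanSketch;

    static int scan_next(ScanSketch *s)
    {
        return (s->pos < s->nrows) ? s->rows[s->pos++] : -1;
    }

    static void mark_pos(ScanSketch *s)    { s->markpos = s->pos; }
    static void restore_pos(ScanSketch *s) { s->pos = s->markpos; }

    int main(void)
    {
        const int  rows[] = {10, 20, 30, 40};
        ScanSketch s = { rows, 4, 0, 0 };

        scan_next(&s);                                  /* consume 10 */
        mark_pos(&s);
        printf("after mark:    %d\n", scan_next(&s));   /* 20 */
        scan_next(&s);                                  /* 30 */
        restore_pos(&s);
        printf("after restore: %d\n", scan_next(&s));   /* 20 again */
        return 0;
    }

Note that only the stream position is guaranteed; as the comment says, whatever tuple happens to sit in the node's result slot after a restore is deliberately unspecified.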
(In most cases the result slot is unchanged by * a restore, but the node may choose to clear it or to load it with the * restored-to tuple.) Hence the caller should discard any previously @@ -397,7 +397,7 @@ ExecSupportsMarkRestore(NodeTag plantype) /* * T_Result only supports mark/restore if it has a child plan that * does, so we do not have enough information to give a really - * correct answer. However, for current uses it's enough to + * correct answer. However, for current uses it's enough to * always say "false", because this routine is not asked about * gating Result plans, only base-case Results. */ diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c index 32d0718ec59..7ff3e1ece1a 100644 --- a/src/backend/executor/execCurrent.c +++ b/src/backend/executor/execCurrent.c @@ -142,7 +142,7 @@ execCurrentOf(CurrentOfExpr *cexpr, /* * This table didn't produce the cursor's current row; some other - * inheritance child of the same parent must have. Signal caller to + * inheritance child of the same parent must have. Signal caller to * do nothing on this table. */ return false; diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index a9acd5b535d..45d6477c2e7 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -52,7 +52,7 @@ * * Initialize the Junk filter. * - * The source targetlist is passed in. The output tuple descriptor is + * The source targetlist is passed in. The output tuple descriptor is * built from the non-junk tlist entries, plus the passed specification * of whether to include room for an OID or not. * An optional resultSlot can be passed as well. diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 886c75125d2..072c7df0ada 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -19,7 +19,7 @@ * ExecutorRun accepts direction and count arguments that specify whether * the plan is to be executed forwards, backwards, and for how many tuples. * In some cases ExecutorRun may be called multiple times to process all - * the tuples for a plan. It is also acceptable to stop short of executing + * the tuples for a plan. It is also acceptable to stop short of executing * the whole plan (but only if it is a SELECT). * * ExecutorFinish must be called after the final ExecutorRun call and @@ -329,12 +329,12 @@ standard_ExecutorRun(QueryDesc *queryDesc, * ExecutorFinish * * This routine must be called after the last ExecutorRun call. - * It performs cleanup such as firing AFTER triggers. It is + * It performs cleanup such as firing AFTER triggers. It is * separate from ExecutorEnd because EXPLAIN ANALYZE needs to * include these actions in the total runtime. * * We provide a function hook variable that lets loadable plugins - * get control when ExecutorFinish is called. Such a plugin would + * get control when ExecutorFinish is called. Such a plugin would * normally call standard_ExecutorFinish(). * * ---------------------------------------------------------------- @@ -565,7 +565,7 @@ ExecCheckRTEPerms(RangeTblEntry *rte) * userid to check as: current user unless we have a setuid indication. * * Note: GetUserId() is presently fast enough that there's no harm in - * calling it separately for each RTE. If that stops being true, we could + * calling it separately for each RTE. If that stops being true, we could * call it once in ExecCheckRTPerms and pass the userid down from there. * But for now, no need for the extra clutter. 
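The direction-and-count contract described in the execMain header above amounts to the following shape of loop; this is a sketch with an invented tuple source, not the executor's code:

    #include <stdbool.h>
    #include <stdio.h>

    static int source[] = {1, 2, 3, 4, 5};
    static int srcpos = 0;

    /* Stand-in for pulling one tuple from the plan tree. */
    static bool next_tuple(int *out)
    {
        if (srcpos >= 5)
            return false;
        *out = source[srcpos++];
        return true;
    }

    /* Run the "plan" for up to count tuples; count 0 means "no limit".
     * Stopping short like this is allowed only for a SELECT. */
    static void run_sketch(long count)
    {
        long processed = 0;
        int  tup;

        while (next_tuple(&tup))
        {
            printf("returned tuple %d\n", tup);
            processed++;
            if (count > 0 && processed >= count)
                break;
        }
    }

    int main(void)
    {
        run_sketch(3);      /* caller asked for just three rows */
        return 0;
    }

The caller may come back later and ask for more; that resumability is why the run step is split off from the finish and end steps in the first place.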
*/ @@ -1184,7 +1184,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, * if so it doesn't matter which one we pick.) However, it is sometimes * necessary to fire triggers on other relations; this happens mainly when an * RI update trigger queues additional triggers on other relations, which will - * be processed in the context of the outer query. For efficiency's sake, + * be processed in the context of the outer query. For efficiency's sake, * we want to have a ResultRelInfo for those triggers too; that can avoid * repeated re-opening of the relation. (It also provides a way for EXPLAIN * ANALYZE to report the runtimes of such triggers.) So we make additional @@ -1221,7 +1221,7 @@ ExecGetTriggerResultRel(EState *estate, Oid relid) /* * Open the target relation's relcache entry. We assume that an * appropriate lock is still held by the backend from whenever the trigger - * event got queued, so we need take no new lock here. Also, we need not + * event got queued, so we need take no new lock here. Also, we need not * recheck the relkind, so no need for CheckValidResultRel. */ rel = heap_open(relid, NoLock); @@ -1327,7 +1327,7 @@ ExecPostprocessPlan(EState *estate) /* * Run any secondary ModifyTable nodes to completion, in case the main - * query did not fetch all rows from them. (We do this to ensure that + * query did not fetch all rows from them. (We do this to ensure that * such nodes have predictable results.) */ foreach(lc, estate->es_auxmodifytables) @@ -1639,7 +1639,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, TupleTableSlot *slot, EState *estate) { ExprContext *econtext; - ListCell *l1, *l2; + ListCell *l1, + *l2; /* * We will use the EState's per-tuple context for evaluating constraint @@ -1655,7 +1656,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, l2, resultRelInfo->ri_WithCheckOptionExprs) { WithCheckOption *wco = (WithCheckOption *) lfirst(l1); - ExprState *wcoExpr = (ExprState *) lfirst(l2); + ExprState *wcoExpr = (ExprState *) lfirst(l2); /* * WITH CHECK OPTION checks are intended to ensure that the new tuple @@ -1667,8 +1668,8 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, if (!ExecQual((List *) wcoExpr, econtext, false)) ereport(ERROR, (errcode(ERRCODE_WITH_CHECK_OPTION_VIOLATION), - errmsg("new row violates WITH CHECK OPTION for view \"%s\"", - wco->viewname), + errmsg("new row violates WITH CHECK OPTION for view \"%s\"", + wco->viewname), errdetail("Failing row contains %s.", ExecBuildSlotValueDescription(slot, RelationGetDescr(resultRelInfo->ri_RelationDesc), @@ -1681,7 +1682,7 @@ ExecWithCheckOptions(ResultRelInfo *resultRelInfo, * * This is intentionally very similar to BuildIndexValueDescription, but * unlike that function, we truncate long field values (to at most maxfieldlen - * bytes). That seems necessary here since heap field values could be very + * bytes). That seems necessary here since heap field values could be very * long, whereas index entries typically aren't so wide. * * Also, unlike the case with index entries, we need to be prepared to ignore @@ -1875,7 +1876,7 @@ EvalPlanQual(EState *estate, EPQState *epqstate, *tid = copyTuple->t_self; /* - * Need to run a recheck subquery. Initialize or reinitialize EPQ state. + * Need to run a recheck subquery. Initialize or reinitialize EPQ state. */ EvalPlanQualBegin(epqstate, estate); @@ -1958,7 +1959,7 @@ EvalPlanQualFetch(EState *estate, Relation relation, int lockmode, /* * If xmin isn't what we're expecting, the slot must have been - * recycled and reused for an unrelated tuple. 
This implies that + * recycled and reused for an unrelated tuple. This implies that * the latest version of the row was deleted, so we need do * nothing. (Should be safe to examine xmin without getting * buffer's content lock, since xmin never changes in an existing @@ -2199,7 +2200,7 @@ EvalPlanQualGetTuple(EPQState *epqstate, Index rti) /* * Fetch the current row values for any non-locked relations that need - * to be scanned by an EvalPlanQual operation. origslot must have been set + * to be scanned by an EvalPlanQual operation. origslot must have been set * to contain the current result row (top-level row) that we need to recheck. */ void @@ -2428,7 +2429,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) /* * Each EState must have its own es_epqScanDone state, but if we have - * nested EPQ checks they should share es_epqTuple arrays. This allows + * nested EPQ checks they should share es_epqTuple arrays. This allows * sub-rechecks to inherit the values being examined by an outer recheck. */ estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool)); @@ -2485,7 +2486,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree) * * This is a cut-down version of ExecutorEnd(); basically we want to do most * of the normal cleanup, but *not* close result relations (which we are - * just sharing from the outer query). We do, however, have to close any + * just sharing from the outer query). We do, however, have to close any * trigger target relations that got opened, since those are not shared. * (There probably shouldn't be any of the latter, but just in case...) */ diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c index c5ecd185b8c..c0189eb5a12 100644 --- a/src/backend/executor/execProcnode.c +++ b/src/backend/executor/execProcnode.c @@ -52,7 +52,7 @@ * * ExecInitNode() notices that it is looking at a nest loop and * as the code below demonstrates, it calls ExecInitNestLoop(). * Eventually this calls ExecInitNode() on the right and left subplans - * and so forth until the entire plan is initialized. The result + * and so forth until the entire plan is initialized. The result * of ExecInitNode() is a plan state tree built with the same structure * as the underlying plan tree. * @@ -575,7 +575,7 @@ MultiExecProcNode(PlanState *node) * at 'node'. * * After this operation, the query plan will not be able to be - * processed any further. This should be called only after + * processed any further. This should be called only after * the query plan has been fully executed. * ---------------------------------------------------------------- */ diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 833c4ed6a4e..f162e92fc71 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -26,7 +26,7 @@ * ExecProject() is used to make tuple projections. Rather than * trying to speed it up, the execution plan should be pre-processed * to facilitate attribute sharing between nodes wherever possible, - * instead of doing needless copying. -cim 5/31/91 + * instead of doing needless copying. -cim 5/31/91 * * During expression evaluation, we check_stack_depth only in * ExecMakeFunctionResult (and substitute routines) rather than at every @@ -201,7 +201,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext, * * Note: for notational simplicity we declare these functions as taking the * specific type of ExprState that they work on.
This requires casting when - * assigning the function pointer in ExecInitExpr. Be careful that the + * assigning the function pointer in ExecInitExpr. Be careful that the * function signature is declared correctly, because the cast suppresses * automatic checking! * @@ -236,7 +236,7 @@ static Datum ExecEvalCurrentOfExpr(ExprState *exprstate, ExprContext *econtext, * The caller should already have switched into the temporary memory * context econtext->ecxt_per_tuple_memory. The convenience entry point * ExecEvalExprSwitchContext() is provided for callers who don't prefer to - * do the switch in an outer loop. We do not do the switch in these routines + * do the switch in an outer loop. We do not do the switch in these routines * because it'd be a waste of cycles during nested expression evaluation. * ---------------------------------------------------------------- */ @@ -366,7 +366,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate, * We might have a nested-assignment situation, in which the * refassgnexpr is itself a FieldStore or ArrayRef that needs to * obtain and modify the previous value of the array element or slice - * being replaced. If so, we have to extract that value from the + * being replaced. If so, we have to extract that value from the * array and pass it down via the econtext's caseValue. It's safe to * reuse the CASE mechanism because there cannot be a CASE between * here and where the value would be needed, and an array assignment @@ -439,7 +439,7 @@ ExecEvalArrayRef(ArrayRefExprState *astate, /* * For assignment to varlena arrays, we handle a NULL original array * by substituting an empty (zero-dimensional) array; insertion of the - * new element will result in a singleton array value. It does not + * new element will result in a singleton array value. It does not * matter whether the new element is NULL. */ if (*isNull) @@ -829,11 +829,11 @@ ExecEvalWholeRowVar(WholeRowVarExprState *wrvstate, ExprContext *econtext, * We really only care about numbers of attributes and data types. * Also, we can ignore type mismatch on columns that are dropped in * the destination type, so long as (1) the physical storage matches - * or (2) the actual column value is NULL. Case (1) is helpful in + * or (2) the actual column value is NULL. Case (1) is helpful in * some cases involving out-of-date cached plans, while case (2) is * expected behavior in situations such as an INSERT into a table with * dropped columns (the planner typically generates an INT4 NULL - * regardless of the dropped column type). If we find a dropped + * regardless of the dropped column type). If we find a dropped * column and cannot verify that case (1) holds, we have to use * ExecEvalWholeRowSlow to check (2) for each row. */ @@ -1491,7 +1491,7 @@ ExecEvalFuncArgs(FunctionCallInfo fcinfo, * ExecPrepareTuplestoreResult * * Subroutine for ExecMakeFunctionResult: prepare to extract rows from a - * tuplestore function result. We must set up a funcResultSlot (unless + * tuplestore function result. We must set up a funcResultSlot (unless * already done in a previous call cycle) and verify that the function * returned the expected tuple descriptor. */ @@ -1536,7 +1536,7 @@ ExecPrepareTuplestoreResult(FuncExprState *fcache, } /* - * If function provided a tupdesc, cross-check it. We only really need to + * If function provided a tupdesc, cross-check it. We only really need to * do this for functions returning RECORD, but might as well do it always. 
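The warning above about casts suppressing signature checking deserves a concrete picture. In this sketch the evaluator keeps the generic signature and downcasts its argument internally; the backend instead declares each evaluator with its specific state type and casts the function pointer when storing it, which is precisely why a wrong signature there would go unnoticed by the compiler. All names are invented:

    #include <stdio.h>

    typedef struct ExprStateSketch ExprStateSketch;
    typedef double (*EvalFuncSketch) (ExprStateSketch *state);

    struct ExprStateSketch {
        EvalFuncSketch evalfunc;    /* how to evaluate this node */
        double         constval;
    };

    /* A "subtype": the generic state must be the first member, so a
     * pointer to the whole struct is also a pointer to the base. */
    typedef struct {
        ExprStateSketch base;
        double          addend;
    } PlusStateSketch;

    /* Generic signature, explicit downcast in the body. */
    static double eval_plus(ExprStateSketch *state)
    {
        PlusStateSketch *ps = (PlusStateSketch *) state;

        return ps->base.constval + ps->addend;
    }

    int main(void)
    {
        PlusStateSketch ps = { { eval_plus, 40.0 }, 2.0 };

        printf("%g\n", ps.base.evalfunc(&ps.base));
        return 0;
    }

Either idiom dispatches through one stored pointer per node; the cost of the pointer-cast variant is exactly the lost compile-time check that the comment warns about.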
*/ if (resultDesc) @@ -1719,7 +1719,7 @@ restart: if (fcache->func.fn_retset || hasSetArg) { /* - * We need to return a set result. Complain if caller not ready to + * We need to return a set result. Complain if caller not ready to * accept one. */ if (isDone == NULL) @@ -2046,7 +2046,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, /* * Normally the passed expression tree will be a FuncExprState, since the * grammar only allows a function call at the top level of a table - * function reference. However, if the function doesn't return set then + * function reference. However, if the function doesn't return set then * the planner might have replaced the function call via constant-folding * or inlining. So if we see any other kind of expression node, execute * it via the general ExecEvalExpr() code; the only difference is that we @@ -2085,7 +2085,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, * * Note: ideally, we'd do this in the per-tuple context, but then the * argument values would disappear when we reset the context in the - * inner loop. So do it in caller context. Perhaps we should make a + * inner loop. So do it in caller context. Perhaps we should make a * separate context just to hold the evaluated arguments? */ argDone = ExecEvalFuncArgs(&fcinfo, fcache->args, econtext); @@ -2171,7 +2171,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, * Can't do anything very useful with NULL rowtype values. For a * function returning set, we consider this a protocol violation * (but another alternative would be to just ignore the result and - * "continue" to get another row). For a function not returning + * "continue" to get another row). For a function not returning * set, we fall out of the loop; we'll cons up an all-nulls result * row below. */ @@ -2305,7 +2305,7 @@ no_function_result: } /* - * If function provided a tupdesc, cross-check it. We only really need to + * If function provided a tupdesc, cross-check it. We only really need to * do this for functions returning RECORD, but might as well do it always. */ if (rsinfo.setDesc) @@ -2483,7 +2483,7 @@ ExecEvalDistinct(FuncExprState *fcache, * * Evaluate "scalar op ANY/ALL (array)". The operator always yields boolean, * and we combine the results across all array elements using OR and AND - * (for ANY and ALL respectively). Of course we short-circuit as soon as + * (for ANY and ALL respectively). Of course we short-circuit as soon as * the result is known. */ static Datum @@ -2670,7 +2670,7 @@ ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, * qualification to conjunctive normal form. If we ever get * an AND to evaluate, we can be sure that it's not a top-level * clause in the qualification, but appears lower (as a function - * argument, for example), or in the target list. Not that you + * argument, for example), or in the target list. Not that you * need to know this, mind you... * ---------------------------------------------------------------- */ @@ -2801,7 +2801,7 @@ ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, /* ---------------------------------------------------------------- * ExecEvalConvertRowtype * - * Evaluate a rowtype coercion operation. This may require + * Evaluate a rowtype coercion operation. This may require * rearranging field positions. * ---------------------------------------------------------------- */ @@ -2930,7 +2930,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, /* * if we have a true test, then we return the result, since the case - * statement is satisfied. 
A NULL result from the test is not + * statement is satisfied. A NULL result from the test is not * considered true. */ if (DatumGetBool(clause_value) && !*isNull) @@ -3144,7 +3144,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, * If all items were null or empty arrays, return an empty array; * otherwise, if some were and some weren't, raise error. (Note: we * must special-case this somehow to avoid trying to generate a 1-D - * array formed from empty arrays. It's not ideal...) + * array formed from empty arrays. It's not ideal...) */ if (haveempty) { @@ -4315,7 +4315,7 @@ ExecEvalExprSwitchContext(ExprState *expression, * ExecInitExpr: prepare an expression tree for execution * * This function builds and returns an ExprState tree paralleling the given - * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr + * Expr node tree. The ExprState tree can then be handed to ExecEvalExpr * for execution. Because the Expr tree itself is read-only as far as * ExecInitExpr and ExecEvalExpr are concerned, several different executions * of the same plan tree can occur concurrently. @@ -4326,9 +4326,9 @@ ExecEvalExprSwitchContext(ExprState *expression, * * Any Aggref, WindowFunc, or SubPlan nodes found in the tree are added to the * lists of such nodes held by the parent PlanState. Otherwise, we do very - * little initialization here other than building the state-node tree. Any + * little initialization here other than building the state-node tree. Any * nontrivial work associated with initializing runtime info for a node should - * happen during the first actual evaluation of that node. (This policy lets + * happen during the first actual evaluation of that node. (This policy lets * us avoid work if the node is never actually evaluated.) * * Note: there is no ExecEndExpr function; we assume that any resource @@ -5133,7 +5133,7 @@ ExecQual(List *qual, ExprContext *econtext, bool resultForNull) oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory); /* - * Evaluate the qual conditions one at a time. If we find a FALSE result, + * Evaluate the qual conditions one at a time. If we find a FALSE result, * we can stop evaluating and return FALSE --- the AND result must be * FALSE. Also, if we find a NULL result when resultForNull is FALSE, we * can stop and return FALSE --- the AND result must be FALSE or NULL in @@ -5292,7 +5292,7 @@ ExecTargetList(List *targetlist, else { /* - * We have some done and some undone sets. Restart the done ones + * We have some done and some undone sets. Restart the done ones * so that we can deliver a tuple (if possible). */ foreach(tl, targetlist) diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index 5e4538fa5e9..869abbecbd8 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -30,7 +30,7 @@ static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, Tuple * ExecScanFetch -- fetch next potential tuple * * This routine is concerned with substituting a test tuple if we are - * inside an EvalPlanQual recheck. If we aren't, just execute + * inside an EvalPlanQual recheck. If we aren't, just execute * the access method's next-tuple routine. */ static inline TupleTableSlot * @@ -155,7 +155,7 @@ ExecScan(ScanState *node, ResetExprContext(econtext); /* - * get a tuple from the access method. Loop until we obtain a tuple that + * get a tuple from the access method. Loop until we obtain a tuple that * passes the qualification. 
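The qual-evaluation rule spelled out above (stop at the first FALSE, and let resultForNull decide what a NULL conjunct means) reduces to a few lines; TriValueSketch is invented here to stand in for a Datum-plus-isnull pair:

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum { TV_FALSE, TV_TRUE, TV_NULL } TriValueSketch;

    /* AND the quals together with three-valued logic: FALSE wins outright,
     * and NULL counts as whatever result_for_null says. */
    static bool exec_qual_sketch(const TriValueSketch *quals, int n,
                                 bool result_for_null)
    {
        int i;

        for (i = 0; i < n; i++)
        {
            if (quals[i] == TV_FALSE)
                return false;           /* the AND must be FALSE */
            if (quals[i] == TV_NULL && !result_for_null)
                return false;           /* FALSE or NULL: treat as failure */
        }
        return true;
    }

    int main(void)
    {
        TriValueSketch quals[] = { TV_TRUE, TV_NULL, TV_TRUE };

        /* WHERE-style: NULL fails the row; constraint-style: NULL passes. */
        printf("resultForNull=false: %d\n", exec_qual_sketch(quals, 3, false));
        printf("resultForNull=true:  %d\n", exec_qual_sketch(quals, 3, true));
        return 0;
    }

The two settings mirror SQL itself: a WHERE clause discards rows whose condition is unknown, while a CHECK constraint accepts them.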
*/ for (;;) diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 928b5e3178a..66515f71a25 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -4,7 +4,7 @@ * Routines dealing with TupleTableSlots. These are used for resource * management associated with tuples (eg, releasing buffer pins for * tuples in disk buffers, or freeing the memory occupied by transient - * tuples). Slots also provide access abstraction that lets us implement + * tuples). Slots also provide access abstraction that lets us implement * "virtual" tuples to reduce data-copying overhead. * * Routines dealing with the type information for tuples. Currently, @@ -261,7 +261,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ ExecClearTuple(slot); /* - * Release any old descriptor. Also release old Datum/isnull arrays if + * Release any old descriptor. Also release old Datum/isnull arrays if * present (we don't bother to check if they could be re-used). */ if (slot->tts_tupleDescriptor) @@ -311,7 +311,7 @@ ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ * Another case where it is 'false' is when the referenced tuple is held * in a tuple table slot belonging to a lower-level executor Proc node. * In this case the lower-level slot retains ownership and responsibility - * for eventually releasing the tuple. When this method is used, we must + * for eventually releasing the tuple. When this method is used, we must * be certain that the upper-level Proc node will lose interest in the tuple * sooner than the lower-level one does! If you're not certain, copy the * lower-level tuple with heap_copytuple and let the upper-level table @@ -650,7 +650,7 @@ ExecFetchSlotTuple(TupleTableSlot *slot) * Fetch the slot's minimal physical tuple. * * If the slot contains a virtual tuple, we convert it to minimal - * physical form. The slot retains ownership of the minimal tuple. + * physical form. The slot retains ownership of the minimal tuple. * If it contains a regular tuple we convert to minimal form and store * that in addition to the regular tuple (not instead of, because * callers may hold pointers to Datums within the regular tuple). @@ -829,7 +829,7 @@ ExecCopySlot(TupleTableSlot *dstslot, TupleTableSlot *srcslot) * ExecInit{Result,Scan,Extra}TupleSlot * * These are convenience routines to initialize the specified slot - * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot + * in nodes inheriting the appropriate state. ExecInitExtraTupleSlot * is used for initializing special-purpose slots. * -------------------------------- */ @@ -1147,7 +1147,7 @@ BuildTupleFromCStrings(AttInMetadata *attinmeta, char **values) * code would have no way to obtain a tupledesc for the tuple. * * Note that if we do build a new tuple, it's palloc'd in the current - * memory context. Beware of code that changes context between the initial + * memory context. Beware of code that changes context between the initial * heap_form_tuple/etc call and calling HeapTuple(Header)GetDatum. 
* * For performance-critical callers, it could be worthwhile to take extra diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index fc71d852bed..d5e1273e91c 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -105,7 +105,7 @@ CreateExecutorState(void) * Initialize all fields of the Executor State structure */ estate->es_direction = ForwardScanDirection; - estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */ + estate->es_snapshot = InvalidSnapshot; /* caller must initialize this */ estate->es_crosscheck_snapshot = InvalidSnapshot; /* no crosscheck */ estate->es_range_table = NIL; estate->es_plannedstmt = NULL; @@ -342,7 +342,7 @@ CreateStandaloneExprContext(void) * any previously computed pass-by-reference expression result will go away! * * If isCommit is false, we are being called in error cleanup, and should - * not call callbacks but only release memory. (It might be better to call + * not call callbacks but only release memory. (It might be better to call * the callbacks and pass the isCommit flag to them, but that would require * more invasive code changes than currently seems justified.) * @@ -371,7 +371,7 @@ FreeExprContext(ExprContext *econtext, bool isCommit) * ReScanExprContext * * Reset an expression context in preparation for a rescan of its - * plan node. This requires calling any registered shutdown callbacks, + * plan node. This requires calling any registered shutdown callbacks, * since any partially complete set-returning-functions must be canceled. * * Note we make no assumption about the caller's memory context. @@ -412,7 +412,7 @@ MakePerTupleExprContext(EState *estate) /* ---------------- * ExecAssignExprContext * - * This initializes the ps_ExprContext field. It is only necessary + * This initializes the ps_ExprContext field. It is only necessary * to do this for nodes which use ExecQual or ExecProject * because those routines require an econtext. Other nodes that * don't have to evaluate expressions don't need to do this. @@ -458,7 +458,7 @@ ExecAssignResultTypeFromTL(PlanState *planstate) /* * ExecTypeFromTL needs the parse-time representation of the tlist, not a - * list of ExprStates. This is good because some plan nodes don't bother + * list of ExprStates. This is good because some plan nodes don't bother * to set up planstate->targetlist ... */ tupDesc = ExecTypeFromTL(planstate->plan->targetlist, hasoid); @@ -486,7 +486,7 @@ ExecGetResultType(PlanState *planstate) * the given tlist should be a list of ExprState nodes, not Expr nodes. * * inputDesc can be NULL, but if it is not, we check to see whether simple - * Vars in the tlist match the descriptor. It is important to provide + * Vars in the tlist match the descriptor. It is important to provide * inputDesc for relation-scan plan nodes, as a cross check that the relation * hasn't been changed since the plan was made. At higher levels of a plan, * there is no need to recheck. @@ -692,7 +692,7 @@ ExecAssignProjectionInfo(PlanState *planstate, * * However ... there is no particular need to do it during ExecEndNode, * because FreeExecutorState will free any remaining ExprContexts within - * the EState. Letting FreeExecutorState do it allows the ExprContexts to + * the EState. Letting FreeExecutorState do it allows the ExprContexts to * be freed in reverse order of creation, rather than order of creation as * will happen if we delete them here, which saves O(N^2) work in the list * cleanup inside FreeExprContext. 
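The O(N^2) remark above is about list bookkeeping, and a toy registry shows it directly; CtxSketch is invented, and the counts printed are the number of links walked while unregistering:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy registry: contexts are pushed on a singly-linked list at creation,
     * so deleting newest-first always unlinks the head, while deleting
     * oldest-first must walk the list every time. */
    typedef struct CtxSketch {
        struct CtxSketch *next;
    } CtxSketch;

    static CtxSketch *head;

    static CtxSketch *create_ctx(void)
    {
        CtxSketch *c = malloc(sizeof(CtxSketch));

        if (c == NULL)
            exit(1);
        c->next = head;             /* newest first */
        head = c;
        return c;
    }

    static void free_ctx(CtxSketch *target, long *steps)
    {
        CtxSketch **link = &head;

        while (*link != target)     /* walk to the node's predecessor link */
        {
            (*steps)++;
            link = &(*link)->next;
        }
        *link = target->next;
        free(target);
    }

    int main(void)
    {
        enum { N = 1000 };
        static CtxSketch *made[N];
        long steps;
        int  i;

        for (i = 0; i < N; i++)
            made[i] = create_ctx();
        steps = 0;
        for (i = 0; i < N; i++)         /* creation order: quadratic */
            free_ctx(made[i], &steps);
        printf("oldest-first walked %ld links\n", steps);

        for (i = 0; i < N; i++)
            made[i] = create_ctx();
        steps = 0;
        for (i = N - 1; i >= 0; i--)    /* reverse order: O(1) per free */
            free_ctx(made[i], &steps);
        printf("newest-first walked %ld links\n", steps);
        return 0;
    }

Letting the owner free everything in reverse registration order, as the comment recommends, gets the cheap case for free without maintaining a doubly-linked list.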
@@ -712,7 +712,7 @@ ExecFreeExprContext(PlanState *planstate) * the following scan type support functions are for * those nodes which are stubborn and return tuples in * their Scan tuple slot instead of their Result tuple - * slot. Lucky for us, these nodes do not do projections + * slot. Lucky for us, these nodes do not do projections so we don't have to worry about getting the ProjectionInfo * right for them... -cim 6/3/91 * ---------------------------------------------------------------- */ @@ -1111,7 +1111,7 @@ ExecInsertIndexTuples(TupleTableSlot *slot, /* * If the index has an associated exclusion constraint, check that. * This is simpler than the process for uniqueness checks since we - * always insert first and then check. If the constraint is deferred, + * always insert first and then check. If the constraint is deferred, * we check now anyway, but don't throw error on violation; instead * we'll queue a recheck event. * @@ -1295,7 +1295,7 @@ retry: /* * If an in-progress transaction is affecting the visibility of this - * tuple, we need to wait for it to complete and then recheck. For + * tuple, we need to wait for it to complete and then recheck. For * simplicity we do rechecking by just restarting the whole scan --- * this case probably doesn't happen often enough to be worth trying * harder, and anyway we don't want to hold any index internal locks @@ -1357,7 +1357,7 @@ retry: /* * Check existing tuple's index values to see if it really matches the - * exclusion condition against the new_values. Returns true if conflict. + * exclusion condition against the new_values. Returns true if conflict. */ static bool index_recheck_constraint(Relation index, Oid *constr_procs, diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index f0a89d23b87..4d112604bb7 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -47,7 +47,7 @@ typedef struct } DR_sqlfunction; /* - * We have an execution_state record for each query in a function. Each + * We have an execution_state record for each query in a function. Each * record contains a plantree for its query. If the query is currently in * F_EXEC_RUN state then there's a QueryDesc too. * @@ -466,7 +466,7 @@ sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo, * Set up the per-query execution_state records for a SQL function. * * The input is a List of Lists of parsed and rewritten, but not planned, - * querytrees. The sublist structure denotes the original query boundaries. + * querytrees. The sublist structure denotes the original query boundaries. */ static List * init_execution_state(List *queryTree_list, @@ -590,7 +590,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) bool isNull; /* - * Create memory context that holds all the SQLFunctionCache data. It + * Create memory context that holds all the SQLFunctionCache data. It * must be a child of whatever context holds the FmgrInfo. */ fcontext = AllocSetContextCreate(finfo->fn_mcxt, @@ -602,7 +602,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) oldcontext = MemoryContextSwitchTo(fcontext); /* - * Create the struct proper, link it to fcontext and fn_extra. Once this + * Create the struct proper, link it to fcontext and fn_extra. Once this * is done, we'll be able to recover the memory after failure, even if the * FmgrInfo is long-lived.
*/ @@ -672,7 +672,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) fcache->src = TextDatumGetCString(tmp); /* - * Parse and rewrite the queries in the function text. Use sublists to + * Parse and rewrite the queries in the function text. Use sublists to * keep track of the original query boundaries. But we also build a * "flat" list of the rewritten queries to pass to check_sql_fn_retval. * This is because the last canSetTag query determines the result type @@ -712,7 +712,7 @@ init_sql_fcache(FmgrInfo *finfo, Oid collation, bool lazyEvalOK) * any polymorphic arguments. * * Note: we set fcache->returnsTuple according to whether we are returning - * the whole tuple result or just a single column. In the latter case we + * the whole tuple result or just a single column. In the latter case we * clear returnsTuple because we need not act different from the scalar * result case, even if it's a rowtype column. (However, we have to force * lazy eval mode in that case; otherwise we'd need extra code to expand @@ -944,7 +944,7 @@ postquel_get_single_result(TupleTableSlot *slot, /* * Set up to return the function value. For pass-by-reference datatypes, * be sure to allocate the result in resultcontext, not the current memory - * context (which has query lifespan). We can't leave the data in the + * context (which has query lifespan). We can't leave the data in the * TupleTableSlot because we intend to clear the slot before returning. */ oldcontext = MemoryContextSwitchTo(resultcontext); @@ -1052,7 +1052,7 @@ fmgr_sql(PG_FUNCTION_ARGS) /* * Switch to context in which the fcache lives. This ensures that our * tuplestore etc will have sufficient lifetime. The sub-executor is - * responsible for deleting per-tuple information. (XXX in the case of a + * responsible for deleting per-tuple information. (XXX in the case of a * long-lived FmgrInfo, this policy represents more memory leakage, but * it's not entirely clear where to keep stuff instead.) */ @@ -1106,7 +1106,7 @@ fmgr_sql(PG_FUNCTION_ARGS) * suspend execution before completion is if we are returning a row from a * lazily-evaluated SELECT. So, when first entering this loop, we'll * either start a new query (and push a fresh snapshot) or re-establish - * the active snapshot from the existing query descriptor. If we need to + * the active snapshot from the existing query descriptor. If we need to * start a new query in a subsequent execution of the loop, either we need * a fresh snapshot (and pushed_snapshot is false) or the existing * snapshot is on the active stack and we can just bump its command ID. @@ -1162,7 +1162,7 @@ fmgr_sql(PG_FUNCTION_ARGS) * Break from loop if we didn't shut down (implying we got a * lazily-evaluated row). Otherwise we'll press on till the whole * function is done, relying on the tuplestore to keep hold of the - * data to eventually be returned. This is necessary since an + * data to eventually be returned. This is necessary since an * INSERT/UPDATE/DELETE RETURNING that sets the result might be * followed by additional rule-inserted commands, and we want to * finish doing all those commands before we return anything. @@ -1184,7 +1184,7 @@ fmgr_sql(PG_FUNCTION_ARGS) /* * Flush the current snapshot so that we will take a new one for - * the new query list. This ensures that new snaps are taken at + * the new query list. This ensures that new snaps are taken at * original-query boundaries, matching the behavior of interactive * execution. 
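The fmgr_sql comments above describe the snapshot discipline: push a fresh snapshot at each original-query boundary, and within a boundary just bump the command ID of the snapshot already on the active stack, so that rule-inserted commands see earlier commands' effects. A toy model of that rule, with counters standing in for real snapshots (not the actual PushActiveSnapshot machinery):

#include <stdio.h>

static int  snapshot_serial = 0;
static int  command_id = 0;

static void
push_fresh_snapshot(void)
{
    snapshot_serial++;
    command_id = 0;
    printf("new snapshot %d\n", snapshot_serial);
}

static void
bump_command_id(void)
{
    command_id++;
    printf("snapshot %d, command id %d\n", snapshot_serial, command_id);
}

int
main(void)
{
    /* two original queries; the first was rewritten into two commands */
    int         commands_per_query[] = {2, 1};

    for (int q = 0; q < 2; q++)
    {
        push_fresh_snapshot();  /* original-query boundary */
        for (int c = 1; c < commands_per_query[q]; c++)
            bump_command_id();  /* rule-added command, same snapshot */
    }
    return 0;
}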
*/ @@ -1242,7 +1242,7 @@ fmgr_sql(PG_FUNCTION_ARGS) else if (fcache->lazyEval) { /* - * We are done with a lazy evaluation. Clean up. + * We are done with a lazy evaluation. Clean up. */ tuplestore_clear(fcache->tstore); @@ -1266,8 +1266,8 @@ fmgr_sql(PG_FUNCTION_ARGS) else { /* - * We are done with a non-lazy evaluation. Return whatever is in - * the tuplestore. (It is now caller's responsibility to free the + * We are done with a non-lazy evaluation. Return whatever is in + * the tuplestore. (It is now caller's responsibility to free the * tuplestore when done.) */ rsi->returnMode = SFRM_Materialize; @@ -1379,7 +1379,7 @@ sql_exec_error_callback(void *arg) /* * Try to determine where in the function we failed. If there is a query - * with non-null QueryDesc, finger it. (We check this rather than looking + * with non-null QueryDesc, finger it. (We check this rather than looking * for F_EXEC_RUN state, so that errors during ExecutorStart or * ExecutorEnd are blamed on the appropriate query; see postquel_start and * postquel_end.) @@ -1671,7 +1671,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, * the function that's calling it. * * XXX Note that if rettype is RECORD, the IsBinaryCoercible check - * will succeed for any composite restype. For the moment we rely on + * will succeed for any composite restype. For the moment we rely on * runtime type checking to catch any discrepancy, but it'd be nice to * do better at parse time. */ @@ -1717,7 +1717,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList, /* * Verify that the targetlist matches the return tuple type. We scan * the non-deleted attributes to ensure that they match the datatypes - * of the non-resjunk columns. For deleted attributes, insert NULL + * of the non-resjunk columns. For deleted attributes, insert NULL * result columns if the caller asked for that. */ tupnatts = tupdesc->natts; diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 186c319a3a2..09ff03543df 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -25,7 +25,7 @@ * The agg's first input type and transtype must be the same in this case! * * If transfunc is marked "strict" then NULL input_values are skipped, - * keeping the previous transvalue. If transfunc is not strict then it + * keeping the previous transvalue. If transfunc is not strict then it * is called for every input tuple and must deal with NULL initcond * or NULL input_values for itself. * @@ -66,7 +66,7 @@ * it is completely forbidden for functions to modify pass-by-ref inputs, * but in the aggregate case we know the left input is either the initial * transition value or a previous function result, and in either case its - * value need not be preserved. See int8inc() for an example. Notice that + * value need not be preserved. See int8inc() for an example. Notice that * advance_transition_function() is coded to avoid a data copy step when * the previous transition value pointer is returned. Also, some * transition functions want to store working state in addition to the @@ -132,14 +132,14 @@ typedef struct AggStatePerAggData Aggref *aggref; /* - * Nominal number of arguments for aggregate function. For plain aggs, - * this excludes any ORDER BY expressions. For ordered-set aggs, this + * Nominal number of arguments for aggregate function. For plain aggs, + * this excludes any ORDER BY expressions. For ordered-set aggs, this * counts both the direct and aggregated (ORDER BY) arguments. 
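The nodeAgg.c header above spells out strict-transfn semantics: NULL inputs are skipped, and with a NULL initcond the first non-NULL input is auto-substituted as the transition value, which is what the noTransValue flag described further down tracks. A standalone sketch of that rule for a sum-like aggregate (toy types, not the real AggStatePerGroupData):

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    long        transValue;
    bool        transValueIsNull;
    bool        noTransValue;   /* still waiting for first non-NULL input */
} PerGroupState;

static long
sum_step(long state, long newval)
{
    return state + newval;      /* stand-in for a strict transfn */
}

static void
advance(PerGroupState *st, long value, bool isnull)
{
    if (isnull)
        return;                 /* strict: skip NULL inputs entirely */
    if (st->noTransValue)
    {
        /* auto-substitute the first non-NULL input as the initial state */
        st->transValue = value;
        st->transValueIsNull = false;
        st->noTransValue = false;
        return;
    }
    st->transValue = sum_step(st->transValue, value);
}

int
main(void)
{
    PerGroupState st = {0, true, true}; /* NULL initcond */
    long        vals[] = {4, 0, 7};
    bool        nulls[] = {false, true, false};

    for (int i = 0; i < 3; i++)
        advance(&st, vals[i], nulls[i]);
    printf("result: %ld\n", st.transValue);     /* 11; the NULL was skipped */
    return 0;
}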
*/ int numArguments; /* - * Number of aggregated input columns. This includes ORDER BY expressions + * Number of aggregated input columns. This includes ORDER BY expressions * in both the plain-agg and ordered-set cases. Ordered-set direct args * are not counted, though. */ @@ -153,7 +153,7 @@ typedef struct AggStatePerAggData int numTransInputs; /* - * Number of arguments to pass to the finalfn. This is always at least 1 + * Number of arguments to pass to the finalfn. This is always at least 1 * (the transition state value) plus any ordered-set direct args. If the * finalfn wants extra args then we pass nulls corresponding to the * aggregated input columns. @@ -216,7 +216,7 @@ typedef struct AggStatePerAggData transtypeByVal; /* - * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but + * Stuff for evaluation of inputs. We used to just use ExecEvalExpr, but * with the addition of ORDER BY we now need at least a slot for passing * data to the sort object, which requires a tupledesc, so we might as * well go whole hog and use ExecProject too. @@ -236,7 +236,7 @@ typedef struct AggStatePerAggData * input tuple group and updated for each input tuple. * * For a simple (non DISTINCT/ORDER BY) aggregate, we just feed the input - * values straight to the transition function. If it's DISTINCT or + * values straight to the transition function. If it's DISTINCT or * requires ORDER BY, we pass the input values into a Tuplesort object; * then at completion of the input tuple group, we scan the sorted values, * eliminate duplicates if needed, and run the transition function on the @@ -279,7 +279,7 @@ typedef struct AggStatePerGroupData /* * Note: noTransValue initially has the same value as transValueIsNull, - * and if true both are cleared to false at the same time. They are not + * and if true both are cleared to false at the same time. They are not * the same though: if transfn later returns a NULL, we want to keep that * NULL and not auto-replace it with a later input value. Only the first * non-NULL input will be auto-substituted. @@ -289,7 +289,7 @@ typedef struct AggStatePerGroupData /* * To implement hashed aggregation, we need a hashtable that stores a * representative tuple and an array of AggStatePerGroup structs for each - * distinct set of GROUP BY column values. We compute the hash key from + * distinct set of GROUP BY column values. We compute the hash key from * the GROUP BY columns. */ typedef struct AggHashEntryData *AggHashEntry; @@ -416,7 +416,7 @@ initialize_aggregates(AggState *aggstate, * * The new values (and null flags) have been preloaded into argument positions * 1 and up in peraggstate->transfn_fcinfo, so that we needn't copy them again - * to pass to the transition function. We also expect that the static fields + * to pass to the transition function. We also expect that the static fields * of the fcinfo are already initialized; that was done by ExecInitAgg(). * * It doesn't matter which memory context this is called in. @@ -495,7 +495,7 @@ advance_transition_function(AggState *aggstate, /* * If pass-by-ref datatype, must copy the new value into aggcontext and - * pfree the prior transValue. But if transfn returned a pointer to its + * pfree the prior transValue. But if transfn returned a pointer to its * first input, we don't need to do anything. */ if (!peraggstate->transtypeByVal && @@ -519,7 +519,7 @@ advance_transition_function(AggState *aggstate, } /* - * Advance all the aggregates for one input tuple. 
The input tuple + * Advance all the aggregates for one input tuple. The input tuple * has been stored in tmpcontext->ecxt_outertuple, so that it is accessible * to ExecEvalExpr. pergroup is the array of per-group structs to use * (this might be in a hashtable entry). @@ -609,7 +609,7 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup) /* * Run the transition function for a DISTINCT or ORDER BY aggregate * with only one input. This is called after we have completed - * entering all the input values into the sort object. We complete the + * entering all the input values into the sort object. We complete the * sort, read out the values in sorted order, and run the transition * function on each value (applying DISTINCT if appropriate). * @@ -705,7 +705,7 @@ process_ordered_aggregate_single(AggState *aggstate, /* * Run the transition function for a DISTINCT or ORDER BY aggregate * with more than one input. This is called after we have completed - * entering all the input values into the sort object. We complete the + * entering all the input values into the sort object. We complete the * sort, read out the values in sorted order, and run the transition * function on each value (applying DISTINCT if appropriate). * @@ -1070,9 +1070,9 @@ lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot) * the appropriate attribute for each aggregate function use (Aggref * node) appearing in the targetlist or qual of the node. The number * of tuples to aggregate over depends on whether grouped or plain - * aggregation is selected. In grouped aggregation, we produce a result + * aggregation is selected. In grouped aggregation, we produce a result * row for each group; in plain aggregation there's a single result row - * for the whole query. In either case, the value of each aggregate is + * for the whole query. In either case, the value of each aggregate is * stored in the expression context to be used when ExecProject evaluates * the result tuple. */ @@ -1097,7 +1097,7 @@ ExecAgg(AggState *node) } /* - * Exit if nothing left to do. (We must do the ps_TupFromTlist check + * Exit if nothing left to do. (We must do the ps_TupFromTlist check * first, because in some cases agg_done gets set before we emit the final * aggregate tuple, and we have to finish running SRFs for it.) */ @@ -1181,11 +1181,11 @@ agg_retrieve_direct(AggState *aggstate) /* * Clear the per-output-tuple context for each group, as well as * aggcontext (which contains any pass-by-ref transvalues of the old - * group). We also clear any child contexts of the aggcontext; some + * group). We also clear any child contexts of the aggcontext; some * aggregate functions store working state in such contexts. * * We use ReScanExprContext not just ResetExprContext because we want - * any registered shutdown callbacks to be called. That allows + * any registered shutdown callbacks to be called. That allows * aggregate functions to ensure they've cleaned up any non-memory * resources. */ @@ -1518,8 +1518,8 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) aggstate->hashtable = NULL; /* - * Create expression contexts. We need two, one for per-input-tuple - * processing and one for per-output-tuple processing. We cheat a little + * Create expression contexts. We need two, one for per-input-tuple + * processing and one for per-output-tuple processing. We cheat a little * by using ExecAssignExprContext() to build both. 
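The comments above describe the DISTINCT/ORDER BY path: all input values are fed into a sort object first, then read back in sorted order with duplicates eliminated before the transition function runs. A minimal standalone version of that sort-then-dedup flow, using qsort in place of a Tuplesort:

#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int         ia = *(const int *) a;
    int         ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

int
main(void)
{
    int         input[] = {5, 3, 5, 3, 9};
    int         n = 5;
    long        sum = 0;        /* transition state for sum(DISTINCT x) */

    qsort(input, n, sizeof(int), cmp_int);
    for (int i = 0; i < n; i++)
    {
        /* equal values are now adjacent, so duplicates are easy to skip */
        if (i > 0 && input[i] == input[i - 1])
            continue;
        sum += input[i];        /* run the "transition function" */
    }
    printf("sum(DISTINCT) = %ld\n", sum);       /* 17 */
    return 0;
}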
*/ ExecAssignExprContext(estate, &aggstate->ss.ps); @@ -1552,7 +1552,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * initialize child expressions * * Note: ExecInitExpr finds Aggrefs for us, and also checks that no aggs - * contain other agg calls in their arguments. This would make no sense + * contain other agg calls in their arguments. This would make no sense * under SQL semantics anyway (and it's forbidden by the spec). Because * that is true, we don't need to worry about evaluating the aggs in any * particular order. @@ -1599,7 +1599,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * This is not an error condition: we might be using the Agg node just * to do hash-based grouping. Even in the regular case, * constant-expression simplification could optimize away all of the - * Aggrefs in the targetlist and qual. So keep going, but force local + * Aggrefs in the targetlist and qual. So keep going, but force local * copy of numaggs positive so that palloc()s below don't choke. */ numaggs = 1; @@ -1760,7 +1760,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) } /* - * Get actual datatypes of the (nominal) aggregate inputs. These + * Get actual datatypes of the (nominal) aggregate inputs. These * could be different from the agg's declared input types, when the * agg accepts ANY or a polymorphic type. */ @@ -1852,7 +1852,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) * If the transfn is strict and the initval is NULL, make sure input * type and transtype are the same (or at least binary-compatible), so * that it's OK to use the first aggregated input value as the initial - * transValue. This should have been checked at agg definition time, + * transValue. This should have been checked at agg definition time, * but we must check again in case the transfn's strictness property * has been changed. */ @@ -1885,7 +1885,7 @@ ExecInitAgg(Agg *node, EState *estate, int eflags) /* * If we're doing either DISTINCT or ORDER BY for a plain agg, then we * have a list of SortGroupClause nodes; fish out the data in them and - * stick them into arrays. We ignore ORDER BY for an ordered-set agg, + * stick them into arrays. We ignore ORDER BY for an ordered-set agg, * however; the agg's transfn and finalfn are responsible for that. * * Note that by construction, if there is a DISTINCT clause then the @@ -2144,8 +2144,8 @@ ExecReScanAgg(AggState *node) * * The transition and/or final functions of an aggregate may want to verify * that they are being called as aggregates, rather than as plain SQL - * functions. They should use this function to do so. The return value - * is nonzero if being called as an aggregate, or zero if not. (Specific + * functions. They should use this function to do so. The return value + * is nonzero if being called as an aggregate, or zero if not. (Specific * nonzero values are AGG_CONTEXT_AGGREGATE or AGG_CONTEXT_WINDOW, but more * values could conceivably appear in future.) * diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c index 6185c1d0d14..ef121c420de 100644 --- a/src/backend/executor/nodeAppend.c +++ b/src/backend/executor/nodeAppend.c @@ -33,7 +33,7 @@ * / * Append -------+------+------+--- nil * / \ | | | - * nil nil ... ... ... + * nil nil ... ... ... 
* subplans * * Append nodes are currently used for unions, and to support diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c index 7d8a3f2c248..9b1e97578d0 100644 --- a/src/backend/executor/nodeBitmapHeapscan.c +++ b/src/backend/executor/nodeBitmapHeapscan.c @@ -5,7 +5,7 @@ * * NOTE: it is critical that this plan type only be used with MVCC-compliant * snapshots (ie, regular snapshots, not SnapshotAny or one of the other - * special snapshots). The reason is that since index and heap scans are + * special snapshots). The reason is that since index and heap scans are * decoupled, there can be no assurance that the index tuple prompting a * visit to a particular heap TID still exists when the visit is made. * Therefore the tuple might not exist anymore either (which is OK because @@ -340,7 +340,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres) /* * We must hold share lock on the buffer content while examining tuple - * visibility. Afterwards, however, the tuples we have found to be + * visibility. Afterwards, however, the tuples we have found to be * visible are guaranteed good as long as we hold the buffer pin. */ LockBuffer(buffer, BUFFER_LOCK_SHARE); diff --git a/src/backend/executor/nodeForeignscan.c b/src/backend/executor/nodeForeignscan.c index ac135d9ba86..9cc53459527 100644 --- a/src/backend/executor/nodeForeignscan.c +++ b/src/backend/executor/nodeForeignscan.c @@ -147,7 +147,7 @@ ExecInitForeignScan(ForeignScan *node, EState *estate, int eflags) scanstate->ss.ss_currentRelation = currentRelation; /* - * get the scan type from the relation descriptor. (XXX at some point we + * get the scan type from the relation descriptor. (XXX at some point we * might want to let the FDW editorialize on the scan tupdesc.) */ ExecAssignScanType(&scanstate->ss, RelationGetDescr(currentRelation)); diff --git a/src/backend/executor/nodeFunctionscan.c b/src/backend/executor/nodeFunctionscan.c index 0300941a525..da5d8c114db 100644 --- a/src/backend/executor/nodeFunctionscan.c +++ b/src/backend/executor/nodeFunctionscan.c @@ -232,7 +232,7 @@ FunctionNext(FunctionScanState *node) } /* - * If alldone, we just return the previously-cleared scanslot. Otherwise, + * If alldone, we just return the previously-cleared scanslot. Otherwise, * finish creating the virtual tuple. */ if (!alldone) @@ -449,8 +449,8 @@ ExecInitFunctionScan(FunctionScan *node, EState *estate, int eflags) * Create the combined TupleDesc * * If there is just one function without ordinality, the scan result - * tupdesc is the same as the function result tupdesc --- except that - * we may stuff new names into it below, so drop any rowtype label. + * tupdesc is the same as the function result tupdesc --- except that we + * may stuff new names into it below, so drop any rowtype label. */ if (scanstate->simple) { diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 95ed9bd9d0d..589b2f15099 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -365,7 +365,7 @@ ExecHashTableCreate(Hash *node, List *hashOperators, bool keepNulls) /* * Set up for skew optimization, if possible and there's a need for more - * than one batch. (In a one-batch join, there's no point in it.) + * than one batch. (In a one-batch join, there's no point in it.) 
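The ExecChooseHashTableSize hunk above requires nbuckets and nbatch to be powers of 2 so ExecHashGetBucketAndBatch can use bit masking rather than division, and starts the bucket search at 2^10 so the table is never really small. A standalone sketch of the bucket-count calculation (the work_mem/max_pointers clamping in the real function is omitted, and NTUP_PER_BUCKET here just follows the comment):

#include <stdio.h>

#define NTUP_PER_BUCKET 10

static int
next_pow2_buckets(double ntuples)
{
    int         nbuckets = (int) (ntuples / NTUP_PER_BUCKET);
    int         i = 1 << 10;    /* don't let the table get really small */

    while (i < nbuckets)
        i <<= 1;                /* safe as long as nbuckets <= INT_MAX / 2 */
    return i;
}

int
main(void)
{
    printf("%d\n", next_pow2_buckets(50000.0)); /* 8192 */
    printf("%d\n", next_pow2_buckets(100.0));   /* 1024, the floor */
    return 0;
}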
*/ if (nbatch > 1) ExecHashBuildSkewHash(hashtable, node, num_skew_mcvs); @@ -407,7 +407,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, /* * Estimate tupsize based on footprint of tuple in hashtable... note this - * does not allow for any palloc overhead. The manipulations of spaceUsed + * does not allow for any palloc overhead. The manipulations of spaceUsed * don't count palloc overhead either. */ tupsize = HJTUPLE_OVERHEAD + @@ -459,7 +459,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, /* * Set nbuckets to achieve an average bucket load of NTUP_PER_BUCKET when * memory is filled. Set nbatch to the smallest power of 2 that appears - * sufficient. The Min() steps limit the results so that the pointer + * sufficient. The Min() steps limit the results so that the pointer * arrays we'll try to allocate do not exceed work_mem. */ max_pointers = (work_mem * 1024L) / sizeof(void *); @@ -498,8 +498,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, /* * Both nbuckets and nbatch must be powers of 2 to make - * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate - * nbuckets to the next larger power of 2. We also force nbuckets to not + * ExecHashGetBucketAndBatch fast. We already fixed nbatch; now inflate + * nbuckets to the next larger power of 2. We also force nbuckets to not * be real small, by starting the search at 2^10. (Note: above we made * sure that nbuckets is not more than INT_MAX / 2, so this loop cannot * overflow, nor can the final shift to recalculate nbuckets.) @@ -817,7 +817,7 @@ ExecHashGetHashValue(HashJoinTable hashtable, * the hash support function as strict even if the operator is not. * * Note: currently, all hashjoinable operators must be strict since - * the hash index AM assumes that. However, it takes so little extra + * the hash index AM assumes that. However, it takes so little extra * code here to allow non-strict that we may as well do it. */ if (isNull) @@ -1237,7 +1237,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse) /* * While we have not hit a hole in the hashtable and have not hit * the desired bucket, we have collided with some previous hash - * value, so try the next bucket location. NB: this code must + * value, so try the next bucket location. NB: this code must * match ExecHashGetSkewBucket. */ bucket = hashvalue & (nbuckets - 1); @@ -1435,7 +1435,7 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable) * NOTE: this is not nearly as simple as it looks on the surface, because * of the possibility of collisions in the hashtable. Suppose that hash * values A and B collide at a particular hashtable entry, and that A was - * entered first so B gets shifted to a different table entry. If we were + * entered first so B gets shifted to a different table entry. If we were * to remove A first then ExecHashGetSkewBucket would mistakenly start * reporting that B is not in the hashtable, because it would hit the NULL * before finding B. However, we always remove entries in the reverse diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index 9c8398a9cf5..7eec3f333de 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -126,7 +126,7 @@ ExecHashJoin(HashJoinState *node) * check this when the outer relation's startup cost is less * than the projected cost of building the hash table. * Otherwise it's best to build the hash table first and see - * if the inner relation is empty. 
(When it's a left join, we + * if the inner relation is empty. (When it's a left join, we * should always make this check, since we aren't going to be * able to skip the join on the strength of an empty inner * relation anyway.) @@ -530,7 +530,7 @@ ExecInitHashJoin(HashJoin *node, EState *estate, int eflags) * tuple slot of the Hash node (which is our inner plan). we can do this * because Hash nodes don't return tuples via ExecProcNode() -- instead * the hash join node uses ExecScanHashBucket() to get at the contents of - * the hash table. -cim 6/9/91 + * the hash table. -cim 6/9/91 */ { HashState *hashstate = (HashState *) innerPlanState(hjstate); @@ -896,7 +896,7 @@ ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue, /* * ExecHashJoinGetSavedTuple - * read the next tuple from a batch file. Return NULL if no more. + * read the next tuple from a batch file. Return NULL if no more. * * On success, *hashvalue is set to the tuple's hash value, and the tuple * itself is stored in the given slot. diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c index 8d5c3544d56..c55723608d6 100644 --- a/src/backend/executor/nodeIndexonlyscan.c +++ b/src/backend/executor/nodeIndexonlyscan.c @@ -88,7 +88,7 @@ IndexOnlyNext(IndexOnlyScanState *node) * Note on Memory Ordering Effects: visibilitymap_test does not lock * the visibility map buffer, and therefore the result we read here * could be slightly stale. However, it can't be stale enough to - * matter. It suffices to show that (1) there is a read barrier + * matter. It suffices to show that (1) there is a read barrier * between the time we read the index TID and the time we test the * visibility map; and (2) there is a write barrier between the time * some other concurrent process clears the visibility map bit and the @@ -113,7 +113,7 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Only MVCC snapshots are supported here, so there should be no * need to keep following the HOT chain once a visible entry has - * been found. If we did want to allow that, we'd need to keep + * been found. If we did want to allow that, we'd need to keep * more state to remember not to call index_getnext_tid next time. */ if (scandesc->xs_continue_hot) @@ -122,7 +122,7 @@ IndexOnlyNext(IndexOnlyScanState *node) /* * Note: at this point we are holding a pin on the heap page, as * recorded in scandesc->xs_cbuf. We could release that pin now, - * but it's not clear whether it's a win to do so. The next index + * but it's not clear whether it's a win to do so. The next index * entry might require a visit to the same heap page. */ } diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index 4bde1e3afe1..2b89dc60f67 100644 --- a/src/backend/executor/nodeIndexscan.c +++ b/src/backend/executor/nodeIndexscan.c @@ -216,7 +216,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext, /* * For each run-time key, extract the run-time expression and evaluate - * it with respect to the current context. We then stick the result + * it with respect to the current context. We then stick the result * into the proper scan key. * * Note: the result of the eval could be a pass-by-ref value that's @@ -349,7 +349,7 @@ ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys) /* * Note we advance the rightmost array key most quickly, since it will * correspond to the lowest-order index column among the available - * qualifications. This is hypothesized to result in better locality of + * qualifications. 
This is hypothesized to result in better locality of * access in the index. */ for (j = numArrayKeys - 1; j >= 0; j--) diff --git a/src/backend/executor/nodeLimit.c b/src/backend/executor/nodeLimit.c index ba650471030..0c723ac224d 100644 --- a/src/backend/executor/nodeLimit.c +++ b/src/backend/executor/nodeLimit.c @@ -113,7 +113,7 @@ ExecLimit(LimitState *node) /* * The subplan is known to return no tuples (or not more than - * OFFSET tuples, in general). So we return no tuples. + * OFFSET tuples, in general). So we return no tuples. */ return NULL; diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index ae107961ba3..298d4b4d017 100644 --- a/src/backend/executor/nodeLockRows.c +++ b/src/backend/executor/nodeLockRows.c @@ -182,7 +182,7 @@ lnext: tuple.t_self = copyTuple->t_self; /* - * Need to run a recheck subquery. Initialize EPQ state if we + * Need to run a recheck subquery. Initialize EPQ state if we * didn't do so already. */ if (!epq_started) @@ -213,7 +213,7 @@ lnext: { /* * First, fetch a copy of any rows that were successfully locked - * without any update having occurred. (We do this in a separate pass + * without any update having occurred. (We do this in a separate pass * so as to avoid overhead in the common case where there are no * concurrent updates.) */ @@ -318,7 +318,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) /* * Locate the ExecRowMark(s) that this node is responsible for, and - * construct ExecAuxRowMarks for them. (InitPlan should already have + * construct ExecAuxRowMarks for them. (InitPlan should already have * built the global list of ExecRowMarks.) */ lrstate->lr_arowMarks = NIL; @@ -340,7 +340,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags) aerm = ExecBuildAuxRowMark(erm, outerPlan->targetlist); /* - * Only locking rowmarks go into our own list. Non-locking marks are + * Only locking rowmarks go into our own list. Non-locking marks are * passed off to the EvalPlanQual machinery. This is because we don't * want to bother fetching non-locked rows unless we actually have to * do an EPQ recheck. diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c index 13002bf9b46..4a632ee686f 100644 --- a/src/backend/executor/nodeMaterial.c +++ b/src/backend/executor/nodeMaterial.c @@ -185,7 +185,7 @@ ExecInitMaterial(Material *node, EState *estate, int eflags) /* * Tuplestore's interpretation of the flag bits is subtly different from * the general executor meaning: it doesn't think BACKWARD necessarily - * means "backwards all the way to start". If told to support BACKWARD we + * means "backwards all the way to start". If told to support BACKWARD we * must include REWIND in the tuplestore eflags, else tuplestore_trim * might throw away too much. */ diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c index 74fa40da74c..47ed068c7b7 100644 --- a/src/backend/executor/nodeMergeAppend.c +++ b/src/backend/executor/nodeMergeAppend.c @@ -32,7 +32,7 @@ * / * MergeAppend---+------+------+--- nil * / \ | | | - * nil nil ... ... ... + * nil nil ... ... ... 
* subplans */ diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 2a1b4ed8b66..bc036a30b0d 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -41,7 +41,7 @@ * * Therefore, rather than directly executing the merge join clauses, * we evaluate the left and right key expressions separately and then - * compare the columns one at a time (see MJCompare). The planner + * compare the columns one at a time (see MJCompare). The planner * passes us enough information about the sort ordering of the inputs * to allow us to determine how to make the comparison. We may use the * appropriate btree comparison function, since Postgres' only notion @@ -269,7 +269,7 @@ MJExamineQuals(List *mergeclauses, * input, since we assume mergejoin operators are strict. If the NULL * is in the first join column, and that column sorts nulls last, then * we can further conclude that no following tuple can match anything - * either, since they must all have nulls in the first column. However, + * either, since they must all have nulls in the first column. However, * that case is only interesting if we're not in FillOuter mode, else * we have to visit all the tuples anyway. * @@ -325,7 +325,7 @@ MJEvalOuterValues(MergeJoinState *mergestate) /* * MJEvalInnerValues * - * Same as above, but for the inner tuple. Here, we have to be prepared + * Same as above, but for the inner tuple. Here, we have to be prepared * to load data from either the true current inner, or the marked inner, * so caller must tell us which slot to load from. */ @@ -736,7 +736,7 @@ ExecMergeJoin(MergeJoinState *node) case MJEVAL_MATCHABLE: /* - * OK, we have the initial tuples. Begin by skipping + * OK, we have the initial tuples. Begin by skipping * non-matching tuples. */ node->mj_JoinState = EXEC_MJ_SKIP_TEST; @@ -1131,7 +1131,7 @@ ExecMergeJoin(MergeJoinState *node) * which means that all subsequent outer tuples will be * larger than our marked inner tuples. So we need not * revisit any of the marked tuples but can proceed to - * look for a match to the current inner. If there's + * look for a match to the current inner. If there's * no more inners, no more matches are possible. * ---------------- */ @@ -1522,7 +1522,7 @@ ExecInitMergeJoin(MergeJoin *node, EState *estate, int eflags) * For certain types of inner child nodes, it is advantageous to issue * MARK every time we advance past an inner tuple we will never return to. * For other types, MARK on a tuple we cannot return to is a waste of - * cycles. Detect which case applies and set mj_ExtraMarks if we want to + * cycles. Detect which case applies and set mj_ExtraMarks if we want to * issue "unnecessary" MARK calls. * * Currently, only Material wants the extra MARKs, and it will be helpful diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index fca7a2581f3..8ac60477fb8 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -30,7 +30,7 @@ * * If the query specifies RETURNING, then the ModifyTable returns a * RETURNING tuple after completing each row insert, update, or delete. - * It must be called again to continue the operation. Without RETURNING, + * It must be called again to continue the operation. Without RETURNING, * we just loop within the node until all the work is done, then * return NULL. This avoids useless call/return overhead. */ @@ -419,7 +419,7 @@ ldelete:; * proceed. 
We don't want to discard the original DELETE * while keeping the triggered actions based on its deletion; * and it would be no better to allow the original DELETE - * while discarding updates that it triggered. The row update + * while discarding updates that it triggered. The row update * carries some information that might be important according * to business rules; so throwing an error is the only safe * course. @@ -491,7 +491,7 @@ ldelete:; { /* * We have to put the target tuple into a slot, which means first we - * gotta fetch it. We can use the trigger tuple slot. + * gotta fetch it. We can use the trigger tuple slot. */ TupleTableSlot *rslot; HeapTupleData deltuple; @@ -549,7 +549,7 @@ ldelete:; * note: we can't run UPDATE queries with transactions * off because UPDATEs are actually INSERTs and our * scan will mistakenly loop forever, updating the tuple - * it just inserted.. This should be fixed but until it + * it just inserted.. This should be fixed but until it * is, we don't want to get stuck in an infinite loop * which corrupts your database.. * @@ -657,7 +657,7 @@ ExecUpdate(ItemPointer tupleid, * * If we generate a new candidate tuple after EvalPlanQual testing, we * must loop back here and recheck constraints. (We don't need to - * redo triggers, however. If there are any BEFORE triggers then + * redo triggers, however. If there are any BEFORE triggers then * trigger.c will have done heap_lock_tuple to lock the correct tuple, * so there's no need to do them again.) */ @@ -900,7 +900,7 @@ ExecModifyTable(ModifyTableState *node) /* * es_result_relation_info must point to the currently active result - * relation while we are within this ModifyTable node. Even though + * relation while we are within this ModifyTable node. Even though * ModifyTable nodes can't be nested statically, they can be nested * dynamically (since our subplan could include a reference to a modifying * CTE). So we have to save and restore the caller's value. @@ -916,7 +916,7 @@ ExecModifyTable(ModifyTableState *node) for (;;) { /* - * Reset the per-output-tuple exprcontext. This is needed because + * Reset the per-output-tuple exprcontext. This is needed because * triggers expect to use that context as workspace. It's a bit ugly * to do this below the top level of the plan, however. We might need * to rethink this later. @@ -973,6 +973,7 @@ ExecModifyTable(ModifyTableState *node) * ctid!! */ tupleid = &tuple_ctid; } + /* * Use the wholerow attribute, when available, to reconstruct * the old relation tuple. @@ -1105,7 +1106,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * call ExecInitNode on each of the plans to be executed and save the * results into the array "mt_plans". This is also a convenient place to * verify that the proposed target relations are valid and open their - * indexes for insertion of new index entries. Note we *must* set + * indexes for insertion of new index entries. Note we *must* set * estate->es_result_relation_info correctly while we initialize each * sub-plan; ExecContextForcesOids depends on that! */ @@ -1125,7 +1126,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * If there are indices on the result relation, open them and save * descriptors in the result relation info, so that we can add new - * index entries for the tuples we add/update. We need not do this + * index entries for the tuples we add/update. We need not do this * for a DELETE, however, since deletion doesn't affect indexes. 
Also, * inside an EvalPlanQual operation, the indexes might be open * already, since we share the resultrel state with the original @@ -1175,6 +1176,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) WithCheckOption *wco = (WithCheckOption *) lfirst(ll); ExprState *wcoExpr = ExecInitExpr((Expr *) wco->qual, mtstate->mt_plans[i]); + wcoExprs = lappend(wcoExprs, wcoExpr); } @@ -1194,7 +1196,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * Initialize result tuple slot and assign its rowtype using the first - * RETURNING list. We assume the rest will look the same. + * RETURNING list. We assume the rest will look the same. */ tupDesc = ExecTypeFromTL((List *) linitial(node->returningLists), false); @@ -1240,7 +1242,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) /* * If we have any secondary relations in an UPDATE or DELETE, they need to * be treated like non-locked relations in SELECT FOR UPDATE, ie, the - * EvalPlanQual mechanism needs to be told about them. Locate the + * EvalPlanQual mechanism needs to be told about them. Locate the * relevant ExecRowMarks. */ foreach(l, node->rowMarks) @@ -1281,7 +1283,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags) * attribute present --- no need to look first. * * If there are multiple result relations, each one needs its own junk - * filter. Note multiple rels are only possible for UPDATE/DELETE, so we + * filter. Note multiple rels are only possible for UPDATE/DELETE, so we * can't be fooled by some needing a filter and some not. * * This section of code is also a convenient place to verify that the diff --git a/src/backend/executor/nodeRecursiveunion.c b/src/backend/executor/nodeRecursiveunion.c index 32c859c1a20..de3d87a5d67 100644 --- a/src/backend/executor/nodeRecursiveunion.c +++ b/src/backend/executor/nodeRecursiveunion.c @@ -316,7 +316,7 @@ ExecReScanRecursiveUnion(RecursiveUnionState *node) /* * if chgParam of subnode is not null then plan will be re-scanned by - * first ExecProcNode. Because of above, we only have to do this to the + * first ExecProcNode. Because of above, we only have to do this to the * non-recursive term. */ if (outerPlan->chgParam == NULL) diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c index 1f32c938489..75f6ed98837 100644 --- a/src/backend/executor/nodeSetOp.c +++ b/src/backend/executor/nodeSetOp.c @@ -5,7 +5,7 @@ * * The input of a SetOp node consists of tuples from two relations, * which have been combined into one dataset, with a junk attribute added - * that shows which relation each tuple came from. In SETOP_SORTED mode, + * that shows which relation each tuple came from. In SETOP_SORTED mode, * the input has furthermore been sorted according to all the grouping * columns (ie, all the non-junk attributes). The SetOp node scans each * group of identical tuples to determine how many came from each input @@ -18,7 +18,7 @@ * relation is the left-hand one for EXCEPT, and tries to make the smaller * input relation come first for INTERSECT. We build a hash table in memory * with one entry for each group of identical tuples, and count the number of - * tuples in the group from each relation. After seeing all the input, we + * tuples in the group from each relation. After seeing all the input, we * scan the hashtable and generate the correct output using those counts. 
* We can avoid making hashtable entries for any tuples appearing only in the * second input relation, since they cannot result in any output. @@ -268,7 +268,7 @@ setop_retrieve_direct(SetOpState *setopstate) /* * Store the copied first input tuple in the tuple table slot reserved - * for it. The tuple will be deleted when it is cleared from the + * for it. The tuple will be deleted when it is cleared from the * slot. */ ExecStoreTuple(setopstate->grp_firstTuple, diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 49d193bbae9..5d02d9420b1 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -261,12 +261,12 @@ ExecScanSubPlan(SubPlanState *node, * semantics for ANY_SUBLINK or AND semantics for ALL_SUBLINK. * (ROWCOMPARE_SUBLINK doesn't allow multiple tuples from the subplan.) * NULL results from the combining operators are handled according to the - * usual SQL semantics for OR and AND. The result for no input tuples is + * usual SQL semantics for OR and AND. The result for no input tuples is * FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for * ROWCOMPARE_SUBLINK. * * For EXPR_SUBLINK we require the subplan to produce no more than one - * tuple, else an error is raised. If zero tuples are produced, we return + * tuple, else an error is raised. If zero tuples are produced, we return * NULL. Assuming we get a tuple, we just use its first column (there can * be only one non-junk column in this case). * @@ -409,7 +409,7 @@ ExecScanSubPlan(SubPlanState *node, else if (!found) { /* - * deal with empty subplan result. result/isNull were previously + * deal with empty subplan result. result/isNull were previously * initialized correctly for all sublink types except EXPR and * ROWCOMPARE; for those, return NULL. */ @@ -894,7 +894,7 @@ ExecInitSubPlan(SubPlan *subplan, PlanState *parent) * * This is called from ExecEvalParamExec() when the value of a PARAM_EXEC * parameter is requested and the param's execPlan field is set (indicating - * that the param has not yet been evaluated). This allows lazy evaluation + * that the param has not yet been evaluated). This allows lazy evaluation * of initplans: we don't run the subplan until/unless we need its output. * Note that this routine MUST clear the execPlan fields of the plan's * output parameters after evaluating them! @@ -1122,7 +1122,7 @@ ExecInitAlternativeSubPlan(AlternativeSubPlan *asplan, PlanState *parent) /* * Select the one to be used. For this, we need an estimate of the number * of executions of the subplan. We use the number of output rows - * expected from the parent plan node. This is a good estimate if we are + * expected from the parent plan node. This is a good estimate if we are * in the parent's targetlist, and an underestimate (but probably not by * more than a factor of 2) if we are in the qual. */ diff --git a/src/backend/executor/nodeSubqueryscan.c b/src/backend/executor/nodeSubqueryscan.c index c69534da770..3d7cce2c9ea 100644 --- a/src/backend/executor/nodeSubqueryscan.c +++ b/src/backend/executor/nodeSubqueryscan.c @@ -194,7 +194,7 @@ ExecReScanSubqueryScan(SubqueryScanState *node) /* * ExecReScan doesn't know about my subplan, so I have to do - * changed-parameter signaling myself. This is just as well, because the + * changed-parameter signaling myself. This is just as well, because the * subplan has its own memory context in which its chgParam state lives. 
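The ExecScanSubPlan comment above gives the combining semantics for sublinks: OR across rows for ANY_SUBLINK with the usual SQL three-valued NULL handling, and FALSE when the subplan produces no tuples at all. A small self-contained model of that rule:

#include <stdio.h>

typedef enum
{
    TRI_FALSE,
    TRI_TRUE,
    TRI_NULL
} TriBool;

static TriBool
tri_or(TriBool a, TriBool b)
{
    if (a == TRI_TRUE || b == TRI_TRUE)
        return TRI_TRUE;
    if (a == TRI_NULL || b == TRI_NULL)
        return TRI_NULL;
    return TRI_FALSE;
}

static const char *
show(TriBool v)
{
    return v == TRI_TRUE ? "true" : v == TRI_FALSE ? "false" : "null";
}

int
main(void)
{
    /* per-row results of "x = col" for three subquery rows */
    TriBool     rows[] = {TRI_FALSE, TRI_NULL, TRI_FALSE};
    TriBool     result = TRI_FALSE;     /* ANY over zero rows is false */

    for (int i = 0; i < 3; i++)
        result = tri_or(result, rows[i]);
    printf("ANY result: %s\n", show(result));   /* null, not false */
    return 0;
}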
*/ if (node->ss.ps.chgParam != NULL) diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 597a26018ad..ab3ec9735f4 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -4,7 +4,7 @@ * Routines to handle unique'ing of queries where appropriate * * Unique is a very simple node type that just filters out duplicate - * tuples from a stream of sorted tuples from its subplan. It's essentially + * tuples from a stream of sorted tuples from its subplan. It's essentially * a dumbed-down form of Group: the duplicate-removal functionality is * identical. However, Unique doesn't do projection nor qual checking, * so it's marginally more efficient for cases where neither is needed. diff --git a/src/backend/executor/nodeValuesscan.c b/src/backend/executor/nodeValuesscan.c index 3016a6b072a..83b1324abc5 100644 --- a/src/backend/executor/nodeValuesscan.c +++ b/src/backend/executor/nodeValuesscan.c @@ -215,7 +215,7 @@ ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags) planstate = &scanstate->ss.ps; /* - * Create expression contexts. We need two, one for per-sublist + * Create expression contexts. We need two, one for per-sublist * processing and one for execScan.c to use for quals and projections. We * cheat a little by using ExecAssignExprContext() to build both. */ diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 40a925331c9..a0470d3eab2 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -4,7 +4,7 @@ * routines to handle WindowAgg nodes. * * A WindowAgg node evaluates "window functions" across suitable partitions - * of the input tuple set. Any one WindowAgg works for just a single window + * of the input tuple set. Any one WindowAgg works for just a single window * specification, though it can evaluate multiple window functions sharing * identical window specifications. The input tuples are required to be * delivered in sorted order, with the PARTITION BY columns (if any) as @@ -14,7 +14,7 @@ * * Since window functions can require access to any or all of the rows in * the current partition, we accumulate rows of the partition into a - * tuplestore. The window functions are called using the WindowObject API + * tuplestore. The window functions are called using the WindowObject API * so that they can access those rows as needed. * * We also support using plain aggregate functions as window functions. @@ -280,7 +280,7 @@ advance_windowaggregate(WindowAggState *winstate, { /* * For a strict transfn, nothing happens when there's a NULL input; we - * just keep the prior transValue. Note transValueCount doesn't + * just keep the prior transValue. Note transValueCount doesn't * change either. */ for (i = 1; i <= numArguments; i++) @@ -330,7 +330,7 @@ advance_windowaggregate(WindowAggState *winstate, } /* - * OK to call the transition function. Set winstate->curaggcontext while + * OK to call the transition function. Set winstate->curaggcontext while * calling it, for possible use by AggCheckCallContext. */ InitFunctionCallInfoData(*fcinfo, &(peraggstate->transfn), @@ -362,7 +362,7 @@ advance_windowaggregate(WindowAggState *winstate, /* * If pass-by-ref datatype, must copy the new value into aggcontext and - * pfree the prior transValue. But if transfn returned a pointer to its + * pfree the prior transValue. But if transfn returned a pointer to its * first input, we don't need to do anything. 
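The nodeUnique.c header above describes essentially the whole algorithm: the subplan's output is sorted, so duplicates arrive adjacent and a tuple is returned only when it differs from the previously emitted one, with no projection or qual checking. The same logic over a plain sorted array:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    int         sorted[] = {1, 1, 2, 4, 4, 4, 7};
    int         n = 7;
    bool        have_prev = false;
    int         prev = 0;

    for (int i = 0; i < n; i++)
    {
        if (have_prev && sorted[i] == prev)
            continue;           /* duplicate of the last emitted tuple */
        printf("emit %d\n", sorted[i]);
        prev = sorted[i];
        have_prev = true;
    }
    return 0;
}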
*/ if (!peraggstate->transtypeByVal && @@ -485,7 +485,7 @@ advance_windowaggregate_base(WindowAggState *winstate, } /* - * OK to call the inverse transition function. Set + * OK to call the inverse transition function. Set * winstate->curaggcontext while calling it, for possible use by * AggCheckCallContext. */ @@ -513,7 +513,7 @@ advance_windowaggregate_base(WindowAggState *winstate, /* * If pass-by-ref datatype, must copy the new value into aggcontext and - * pfree the prior transValue. But if invtransfn returned a pointer to + * pfree the prior transValue. But if invtransfn returned a pointer to * its first input, we don't need to do anything. * * Note: the checks for null values here will never fire, but it seems @@ -827,7 +827,7 @@ eval_windowaggregates(WindowAggState *winstate) * * We assume that aggregates using the shared context always restart if * *any* aggregate restarts, and we may thus clean up the shared - * aggcontext if that is the case. Private aggcontexts are reset by + * aggcontext if that is the case. Private aggcontexts are reset by * initialize_windowaggregate() if their owning aggregate restarts. If we * aren't restarting an aggregate, we need to free any previously saved * result for it, else we'll leak memory. @@ -864,9 +864,9 @@ eval_windowaggregates(WindowAggState *winstate) * (i.e., frameheadpos) and aggregatedupto, while restarted aggregates * contain no rows. If there are any restarted aggregates, we must thus * begin aggregating anew at frameheadpos, otherwise we may simply - * continue at aggregatedupto. We must remember the old value of + * continue at aggregatedupto. We must remember the old value of * aggregatedupto to know how long to skip advancing non-restarted - * aggregates. If we modify aggregatedupto, we must also clear + * aggregates. If we modify aggregatedupto, we must also clear * agg_row_slot, per the loop invariant below. */ aggregatedupto_nonrestarted = winstate->aggregatedupto; @@ -881,7 +881,7 @@ eval_windowaggregates(WindowAggState *winstate) * Advance until we reach a row not in frame (or end of partition). * * Note the loop invariant: agg_row_slot is either empty or holds the row - * at position aggregatedupto. We advance aggregatedupto after processing + * at position aggregatedupto. We advance aggregatedupto after processing * a row. */ for (;;) @@ -1142,7 +1142,7 @@ spool_tuples(WindowAggState *winstate, int64 pos) /* * If the tuplestore has spilled to disk, alternate reading and writing - * becomes quite expensive due to frequent buffer flushes. It's cheaper + * becomes quite expensive due to frequent buffer flushes. It's cheaper * to force the entire partition to get spooled in one go. * * XXX this is a horrid kluge --- it'd be better to fix the performance @@ -1239,7 +1239,7 @@ release_partition(WindowAggState *winstate) * to our window framing rule * * The caller must have already determined that the row is in the partition - * and fetched it into a slot. This function just encapsulates the framing + * and fetched it into a slot. This function just encapsulates the framing * rules. */ static bool @@ -1341,7 +1341,7 @@ row_is_in_frame(WindowAggState *winstate, int64 pos, TupleTableSlot *slot) * * Uses the winobj's read pointer for any required fetches; hence, if the * frame mode is one that requires row comparisons, the winobj's mark must - * not be past the currently known frame head. Also uses the specified slot + * not be past the currently known frame head. Also uses the specified slot * for any required fetches. 
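The advance_windowaggregate_base hunks above concern inverse transition functions, which let a moving-frame aggregate retract rows from the front of the frame instead of restarting from frameheadpos for every output row. A toy moving sum showing the forward/inverse pairing (the ROWS frame spec here is an assumed example, and a real invtransfn can also refuse, forcing a restart):

#include <stdio.h>

static long
transfn(long state, int v)      /* forward transition: add a row */
{
    return state + v;
}

static long
invtransfn(long state, int v)   /* inverse transition: retract a row */
{
    return state - v;
}

int
main(void)
{
    int         part[] = {3, 1, 4, 1, 5, 9};
    int         n = 6;
    int         frame = 3;      /* ROWS BETWEEN 2 PRECEDING AND CURRENT ROW */
    long        state = 0;
    int         head = 0;       /* first row currently in the state */

    for (int cur = 0; cur < n; cur++)
    {
        state = transfn(state, part[cur]);      /* frame tail advanced */
        while (cur - head + 1 > frame)
            state = invtransfn(state, part[head++]);    /* head advanced */
        printf("row %d: moving sum = %ld\n", cur, state);
    }
    return 0;
}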
*/ static void @@ -1446,7 +1446,7 @@ update_frameheadpos(WindowObject winobj, TupleTableSlot *slot) * * Uses the winobj's read pointer for any required fetches; hence, if the * frame mode is one that requires row comparisons, the winobj's mark must - * not be past the currently known frame tail. Also uses the specified slot + * not be past the currently known frame tail. Also uses the specified slot * for any required fetches. */ static void @@ -1789,8 +1789,8 @@ ExecInitWindowAgg(WindowAgg *node, EState *estate, int eflags) winstate->ss.ps.state = estate; /* - * Create expression contexts. We need two, one for per-input-tuple - * processing and one for per-output-tuple processing. We cheat a little + * Create expression contexts. We need two, one for per-input-tuple + * processing and one for per-output-tuple processing. We cheat a little * by using ExecAssignExprContext() to build both. */ ExecAssignExprContext(estate, &winstate->ss.ps); @@ -2288,7 +2288,7 @@ initialize_peragg(WindowAggState *winstate, WindowFunc *wfunc, /* * Insist that forward and inverse transition functions have the same - * strictness setting. Allowing them to differ would require handling + * strictness setting. Allowing them to differ would require handling * more special cases in advance_windowaggregate and * advance_windowaggregate_base, for no discernible benefit. This should * have been checked at agg definition time, but we must check again in @@ -2467,7 +2467,7 @@ window_gettupleslot(WindowObject winobj, int64 pos, TupleTableSlot *slot) * requested amount of space. Subsequent calls just return the same chunk. * * Memory obtained this way is normally used to hold state that should be - * automatically reset for each new partition. If a window function wants + * automatically reset for each new partition. If a window function wants * to hold state across the whole query, fcinfo->fn_extra can be used in the * usual way for that. */ diff --git a/src/backend/executor/nodeWorktablescan.c b/src/backend/executor/nodeWorktablescan.c index 2138ce78cf2..94ecf754fb1 100644 --- a/src/backend/executor/nodeWorktablescan.c +++ b/src/backend/executor/nodeWorktablescan.c @@ -82,7 +82,7 @@ ExecWorkTableScan(WorkTableScanState *node) { /* * On the first call, find the ancestor RecursiveUnion's state via the - * Param slot reserved for it. (We can't do this during node init because + * Param slot reserved for it. (We can't do this during node init because * there are corner cases where we'll get the init call before the * RecursiveUnion does.) */ diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index e0325c4a7de..7ba1fd90663 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -256,7 +256,7 @@ AtEOSubXact_SPI(bool isCommit, SubTransactionId mySubid) } /* - * Pop the stack entry and reset global variables. Unlike + * Pop the stack entry and reset global variables. Unlike * SPI_finish(), we don't risk switching to memory contexts that might * be already gone. */ @@ -1306,7 +1306,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan, } /* - * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the + * Disallow SCROLL with SELECT FOR UPDATE. This is not redundant with the * check in transformDeclareCursorStmt because the cursor options might * not have come through there. */ @@ -1560,7 +1560,7 @@ SPI_plan_is_valid(SPIPlanPtr plan) /* * SPI_result_code_string --- convert any SPI return code to a string * - * This is often useful in error messages. 
Most callers will probably + * This is often useful in error messages. Most callers will probably * only pass negative (error-case) codes, but for generality we recognize * the success codes too. */ @@ -1630,7 +1630,7 @@ SPI_result_code_string(int code) * CachedPlanSources. * * This is exported so that pl/pgsql can use it (this beats letting pl/pgsql - * look directly into the SPIPlan for itself). It's not documented in + * look directly into the SPIPlan for itself). It's not documented in * spi.sgml because we'd just as soon not have too many places using this. */ List * @@ -1646,7 +1646,7 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan) * return NULL. Caller is responsible for doing ReleaseCachedPlan(). * * This is exported so that pl/pgsql can use it (this beats letting pl/pgsql - * look directly into the SPIPlan for itself). It's not documented in + * look directly into the SPIPlan for itself). It's not documented in * spi.sgml because we'd just as soon not have too many places using this. */ CachedPlan * @@ -2204,7 +2204,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, /* * The last canSetTag query sets the status values returned to the - * caller. Be careful to free any tuptables not returned, to + * caller. Be careful to free any tuptables not returned, to * avoid intratransaction memory leak. */ if (canSetTag) diff --git a/src/backend/executor/tstoreReceiver.c b/src/backend/executor/tstoreReceiver.c index 23f11360c3a..c15c99a1f4e 100644 --- a/src/backend/executor/tstoreReceiver.c +++ b/src/backend/executor/tstoreReceiver.c @@ -5,7 +5,7 @@ * a Tuplestore. * * Optionally, we can force detoasting (but not decompression) of out-of-line - * toasted values. This is to support cursors WITH HOLD, which must retain + * toasted values. This is to support cursors WITH HOLD, which must retain * data even if the underlying table is dropped. * * diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c index 9b9ba0a22e0..7d0309079da 100644 --- a/src/backend/lib/stringinfo.c +++ b/src/backend/lib/stringinfo.c @@ -99,7 +99,7 @@ appendStringInfo(StringInfo str, const char *fmt,...) * appendStringInfoVA * * Attempt to format text data under the control of fmt (an sprintf-style - * format string) and append it to whatever is already in str. If successful + * format string) and append it to whatever is already in str. If successful * return zero; if not (because there's not enough space), return an estimate * of the space needed, without modifying str. Typically the caller should * pass the return value to enlargeStringInfo() before trying again; see @@ -247,7 +247,7 @@ enlargeStringInfo(StringInfo str, int needed) int newlen; /* - * Guard against out-of-range "needed" values. Without this, we can get + * Guard against out-of-range "needed" values. Without this, we can get * an overflow or infinite loop in the following. */ if (needed < 0) /* should not happen */ diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c index 8fa9aa797f4..70b0b939823 100644 --- a/src/backend/libpq/auth.c +++ b/src/backend/libpq/auth.c @@ -49,7 +49,7 @@ static int recv_and_check_password_packet(Port *port, char **logdetail); /* Max size of username ident server can return */ #define IDENT_USERNAME_MAX 512 -/* Standard TCP port number for Ident service. Assigned by IANA */ +/* Standard TCP port number for Ident service. 
Assigned by IANA */ #define IDENT_PORT 113 static int ident_inet(hbaPort *port); @@ -677,7 +677,7 @@ recv_password_packet(Port *port) (errmsg("received password packet"))); /* - * Return the received string. Note we do not attempt to do any + * Return the received string. Note we do not attempt to do any * character-set conversion on it; since we don't yet know the client's * encoding, there wouldn't be much point. */ @@ -1387,7 +1387,7 @@ interpret_ident_response(const char *ident_response, /* * Talk to the ident server on host "remote_ip_addr" and find out who * owns the tcp connection from his port "remote_port" to port - * "local_port_addr" on host "local_ip_addr". Return the user name the + * "local_port_addr" on host "local_ip_addr". Return the user name the * ident server gives as "*ident_user". * * IP addresses and port numbers are in network byte order. @@ -1591,7 +1591,7 @@ auth_peer(hbaPort *port) { ereport(LOG, (errmsg("failed to look up local user id %ld: %s", - (long) uid, errno ? strerror(errno) : _("user does not exist")))); + (long) uid, errno ? strerror(errno) : _("user does not exist")))); return STATUS_ERROR; } @@ -2006,8 +2006,8 @@ CheckLDAPAuth(Port *port) attributes[1] = NULL; filter = psprintf("(%s=%s)", - attributes[0], - port->user_name); + attributes[0], + port->user_name); r = ldap_search_s(ldap, port->hba->ldapbasedn, @@ -2095,9 +2095,9 @@ CheckLDAPAuth(Port *port) } else fulluser = psprintf("%s%s%s", - port->hba->ldapprefix ? port->hba->ldapprefix : "", - port->user_name, - port->hba->ldapsuffix ? port->hba->ldapsuffix : ""); + port->hba->ldapprefix ? port->hba->ldapprefix : "", + port->user_name, + port->hba->ldapsuffix ? port->hba->ldapsuffix : ""); r = ldap_simple_bind_s(ldap, fulluser, passwd); ldap_unbind(ldap); diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c index 827d4c58886..4a6bcf5598f 100644 --- a/src/backend/libpq/be-fsstubs.c +++ b/src/backend/libpq/be-fsstubs.c @@ -768,7 +768,7 @@ lo_get_fragment_internal(Oid loOid, int64 offset, int32 nbytes) LargeObjectDesc *loDesc; int64 loSize; int64 result_length; - int total_read PG_USED_FOR_ASSERTS_ONLY; + int total_read PG_USED_FOR_ASSERTS_ONLY; bytea *result = NULL; /* @@ -870,7 +870,7 @@ lo_create_bytea(PG_FUNCTION_ARGS) Oid loOid = PG_GETARG_OID(0); bytea *str = PG_GETARG_BYTEA_PP(1); LargeObjectDesc *loDesc; - int written PG_USED_FOR_ASSERTS_ONLY; + int written PG_USED_FOR_ASSERTS_ONLY; CreateFSContext(); @@ -893,7 +893,7 @@ lo_put(PG_FUNCTION_ARGS) int64 offset = PG_GETARG_INT64(1); bytea *str = PG_GETARG_BYTEA_PP(2); LargeObjectDesc *loDesc; - int written PG_USED_FOR_ASSERTS_ONLY; + int written PG_USED_FOR_ASSERTS_ONLY; CreateFSContext(); diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 56ad6ab4247..59204cfe801 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -30,13 +30,13 @@ * impersonations. * * Another benefit of EDH is that it allows the backend and - * clients to use DSA keys. DSA keys can only provide digital + * clients to use DSA keys. DSA keys can only provide digital * signatures, not encryption, and are often acceptable in * jurisdictions where RSA keys are unacceptable. * * The downside to EDH is that it makes it impossible to * use ssldump(1) if there's a problem establishing an SSL - * session. In this case you'll need to temporarily disable + * session. In this case you'll need to temporarily disable * EDH by commenting out the callback. * * ... 
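(The PG_USED_FOR_ASSERTS_ONLY re-indentations a few hunks above touch variables whose only reader is an Assert(), so assert-disabled builds would otherwise warn about them being set but unused. A minimal standalone sketch of the idiom, with a stand-in definition of the macro — the real one lives in src/include/c.h — and a hypothetical write_bytes() helper invented for the example:)

    #include <assert.h>
    #include <stddef.h>

    /*
     * Stand-in for the real macro in src/include/c.h: on GCC-compatible
     * compilers it marks the variable as possibly unused, so builds
     * compiled without assertions do not warn about it.
     */
    #ifdef __GNUC__
    #define PG_USED_FOR_ASSERTS_ONLY __attribute__((unused))
    #else
    #define PG_USED_FOR_ASSERTS_ONLY
    #endif

    /* hypothetical helper: pretend to write buf, report bytes consumed */
    static int
    write_bytes(const char *buf, int len)
    {
        (void) buf;
        return len;
    }

    int
    main(void)
    {
        const char  buf[] = "hello";
        int         written PG_USED_FOR_ASSERTS_ONLY;

        written = write_bytes(buf, (int) sizeof(buf));
        assert(written == (int) sizeof(buf));   /* only reader of "written" */
        return 0;
    }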
@@ -119,7 +119,7 @@ char *SSLCipherSuites = NULL; char *SSLECDHCurve; /* GUC variable: if false, prefer client ciphers */ -bool SSLPreferServerCiphers; +bool SSLPreferServerCiphers; /* ------------------------------------------------------------ */ /* Hardcoded values */ diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index b070bfeda35..fd98c60ddb0 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -1758,7 +1758,7 @@ check_hba(hbaPort *port) * Read the config file and create a List of HbaLine records for the contents. * * The configuration is read into a temporary list, and if any parse error - * occurs the old list is kept in place and false is returned. Only if the + * occurs the old list is kept in place and false is returned. Only if the * whole file parses OK is the list replaced, and the function returns true. * * On a false result, caller will take care of reporting a FATAL error in case @@ -2244,7 +2244,7 @@ load_ident(void) /* * Determine what authentication method should be used when accessing database - * "database" from frontend "raddr", user "user". Return the method and + * "database" from frontend "raddr", user "user". Return the method and * an optional argument (stored in fields of *port), and STATUS_OK. * * If the file does not contain any entry matching the request, we return diff --git a/src/backend/libpq/md5.c b/src/backend/libpq/md5.c index e2c929fb526..90bc113681c 100644 --- a/src/backend/libpq/md5.c +++ b/src/backend/libpq/md5.c @@ -2,7 +2,7 @@ * md5.c * * Implements the MD5 Message-Digest Algorithm as specified in - * RFC 1321. This implementation is a simple one, in that it + * RFC 1321. This implementation is a simple one, in that it * needs every input byte to be buffered before doing any * calculations. I do not expect this file to be used for * general purpose MD5'ing of large amounts of data, only for diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index 0179451f080..605d8913b16 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -447,7 +447,7 @@ StreamServerPort(int family, char *hostName, unsigned short portNumber, /* * Note: This might fail on some OS's, like Linux older than * 2.4.21-pre3, that don't have the IPV6_V6ONLY socket option, and map - * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4 + * ipv4 addresses to ipv6. It will show ::ffff:ipv4 for all ipv4 * connections. */ err = bind(fd, addr->ai_addr, addr->ai_addrlen); @@ -692,6 +692,7 @@ StreamConnection(pgsocket server_fd, Port *port) } #ifdef WIN32 + /* * This is a Win32 socket optimization. The ideal size is 32k. * http://support.microsoft.com/kb/823764/EN-US/ @@ -1126,7 +1127,7 @@ pq_getmessage(StringInfo s, int maxlen) if (len > 0) { /* - * Allocate space for message. If we run out of room (ridiculously + * Allocate space for message. If we run out of room (ridiculously * large message), we will elog(ERROR), but we want to discard the * message body so as not to lose communication sync. */ diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c index ba9598a8c63..dfe3a646a1b 100644 --- a/src/backend/libpq/pqformat.c +++ b/src/backend/libpq/pqformat.c @@ -120,7 +120,7 @@ pq_sendbytes(StringInfo buf, const char *data, int datalen) * pq_sendcountedtext - append a counted text string (with character set conversion) * * The data sent to the frontend by this routine is a 4-byte count field - * followed by the string. The count includes itself or not, as per the + * followed by the string. 
The count includes itself or not, as per the * countincludesself flag (pre-3.0 protocol requires it to include itself). * The passed text string need not be null-terminated, and the data sent * to the frontend isn't either. diff --git a/src/backend/main/main.c b/src/backend/main/main.c index 1b9cbd1de36..4a563741e91 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -69,7 +69,7 @@ main(int argc, char *argv[]) /* * Remember the physical location of the initially given argv[] array for - * possible use by ps display. On some platforms, the argv[] storage must + * possible use by ps display. On some platforms, the argv[] storage must * be overwritten in order to set the process title for ps. In such cases * save_ps_display_args makes and returns a new copy of the argv[] array. * @@ -98,10 +98,10 @@ main(int argc, char *argv[]) MemoryContextInit(); /* - * Set up locale information from environment. Note that LC_CTYPE and + * Set up locale information from environment. Note that LC_CTYPE and * LC_COLLATE will be overridden later from pg_control if we are in an * already-initialized database. We set them here so that they will be - * available to fill pg_control during initdb. LC_MESSAGES will get set + * available to fill pg_control during initdb. LC_MESSAGES will get set * later during GUC option processing, but we set it here to allow startup * error messages to be localized. */ @@ -109,6 +109,7 @@ main(int argc, char *argv[]) set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("postgres")); #ifdef WIN32 + /* * Windows uses codepages rather than the environment, so we work around * that by querying the environment explicitly first for LC_COLLATE and @@ -202,6 +203,7 @@ main(int argc, char *argv[]) #endif #ifdef WIN32 + /* * Start our win32 signal implementation * @@ -227,9 +229,9 @@ main(int argc, char *argv[]) /* - * Place platform-specific startup hacks here. This is the right + * Place platform-specific startup hacks here. This is the right * place to put code that must be executed early in the launch of any new - * server process. Note that this code will NOT be executed when a backend + * server process. Note that this code will NOT be executed when a backend * or sub-bootstrap process is forked, unless we are in a fork/exec * environment (ie EXEC_BACKEND is defined). * diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c index 3a6d0fb236b..c927b7891f5 100644 --- a/src/backend/nodes/bitmapset.c +++ b/src/backend/nodes/bitmapset.c @@ -38,7 +38,7 @@ * where x's are unspecified bits. The two's complement negative is formed * by inverting all the bits and adding one. Inversion gives * yyyyyy01111 - * where each y is the inverse of the corresponding x. Incrementing gives + * where each y is the inverse of the corresponding x. Incrementing gives * yyyyyy10000 * and then ANDing with the original value gives * 00000010000 @@ -796,7 +796,7 @@ bms_join(Bitmapset *a, Bitmapset *b) /*---------- * bms_first_member - find and remove first member of a set * - * Returns -1 if set is empty. NB: set is destructively modified! + * Returns -1 if set is empty. NB: set is destructively modified! * * This is intended as support for iterating through the members of a set. * The typical pattern is diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 98ad91078ed..43530aa24a8 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -4,7 +4,7 @@ * Copy functions for Postgres tree nodes. 
* * NOTE: we currently support copying all node types found in parse and - * plan trees. We do not support copying executor state trees; there + * plan trees. We do not support copying executor state trees; there * is no need for that, and no point in maintaining all the code that * would be needed. We also do not support copying Path trees, mainly * because the circular linkages between RelOptInfo and Path nodes can't @@ -30,7 +30,7 @@ /* * Macros to simplify copying of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire the convention that the local variables in a Copy routine are * named 'newnode' and 'from'. */ @@ -1038,7 +1038,7 @@ _copyIntoClause(const IntoClause *from) /* * We don't need a _copyExpr because Expr is an abstract supertype which - * should never actually get instantiated. Also, since it has no common + * should never actually get instantiated. Also, since it has no common * fields except NodeTag, there's no need for a helper routine to factor * out copying the common fields... */ @@ -3300,7 +3300,7 @@ _copyReplicaIdentityStmt(const ReplicaIdentityStmt *from) } static AlterSystemStmt * -_copyAlterSystemStmt(const AlterSystemStmt * from) +_copyAlterSystemStmt(const AlterSystemStmt *from) { AlterSystemStmt *newnode = makeNode(AlterSystemStmt); diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index 9901d231cdb..2407cb73a38 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -11,7 +11,7 @@ * be handled easily in a simple depth-first traversal. * * Currently, in fact, equal() doesn't know how to compare Plan trees - * either. This might need to be fixed someday. + * either. This might need to be fixed someday. * * NOTE: it is intentional that parse location fields (in nodes that have * one) are not compared. This is because we want, for example, a variable @@ -34,8 +34,8 @@ /* - * Macros to simplify comparison of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * Macros to simplify comparison of different kinds of fields. Use these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire the convention that the local variables in an Equal routine are * named 'a' and 'b'. */ @@ -131,7 +131,7 @@ _equalIntoClause(const IntoClause *a, const IntoClause *b) /* * We don't need an _equalExpr because Expr is an abstract supertype which - * should never actually get instantiated. Also, since it has no common + * should never actually get instantiated. Also, since it has no common * fields except NodeTag, there's no need for a helper routine to factor * out comparing the common fields... */ @@ -764,9 +764,9 @@ static bool _equalPlaceHolderVar(const PlaceHolderVar *a, const PlaceHolderVar *b) { /* - * We intentionally do not compare phexpr. Two PlaceHolderVars with the + * We intentionally do not compare phexpr. Two PlaceHolderVars with the * same ID and levelsup should be considered equal even if the contained - * expressions have managed to mutate to different states. This will + * expressions have managed to mutate to different states. This will * happen during final plan construction when there are nested PHVs, since * the inner PHV will get replaced by a Param in some copies of the outer * PHV. 
Another way in which it can happen is that initplan sublinks @@ -1551,7 +1551,7 @@ _equalReplicaIdentityStmt(const ReplicaIdentityStmt *a, const ReplicaIdentityStm } static bool -_equalAlterSystemStmt(const AlterSystemStmt * a, const AlterSystemStmt * b) +_equalAlterSystemStmt(const AlterSystemStmt *a, const AlterSystemStmt *b) { COMPARE_NODE_FIELD(setstmt); diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c index aebc5b60c29..f32124bedff 100644 --- a/src/backend/nodes/list.c +++ b/src/backend/nodes/list.c @@ -796,7 +796,7 @@ list_union_oid(const List *list1, const List *list2) * "intersection" if list1 is known unique beforehand. * * This variant works on lists of pointers, and determines list - * membership via equal(). Note that the list1 member will be pointed + * membership via equal(). Note that the list1 member will be pointed * to in the result. */ List * @@ -988,7 +988,7 @@ list_append_unique_oid(List *list, Oid datum) * via equal(). * * This is almost the same functionality as list_union(), but list1 is - * modified in-place rather than being copied. Note also that list2's cells + * modified in-place rather than being copied. Note also that list2's cells * are not inserted in list1, so the analogy to list_concat() isn't perfect. */ List * diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c index 664670d82a8..da59c580b0e 100644 --- a/src/backend/nodes/makefuncs.c +++ b/src/backend/nodes/makefuncs.c @@ -535,7 +535,7 @@ makeDefElemExtended(char *nameSpace, char *name, Node *arg, * makeFuncCall - * * Initialize a FuncCall struct with the information every caller must - * supply. Any non-default parameters have to be inserted by the caller. + * supply. Any non-default parameters have to be inserted by the caller. */ FuncCall * makeFuncCall(List *name, List *args, int location) diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index 1e48a7f8890..5a98bfbc11e 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -239,7 +239,7 @@ exprType(const Node *expr) /* * exprTypmod - * returns the type-specific modifier of the expression's result type, - * if it can be determined. In many cases, it can't and we return -1. + * if it can be determined. In many cases, it can't and we return -1. */ int32 exprTypmod(const Node *expr) @@ -1543,8 +1543,8 @@ leftmostLoc(int loc1, int loc2) * * The walker routine should return "false" to continue the tree walk, or * "true" to abort the walk and immediately return "true" to the top-level - * caller. This can be used to short-circuit the traversal if the walker - * has found what it came for. "false" is returned to the top-level caller + * caller. This can be used to short-circuit the traversal if the walker + * has found what it came for. "false" is returned to the top-level caller * iff no invocation of the walker returned "true". * * The node types handled by expression_tree_walker include all those @@ -1582,7 +1582,7 @@ leftmostLoc(int loc1, int loc2) * * expression_tree_walker will handle SubPlan nodes by recursing normally * into the "testexpr" and the "args" list (which are expressions belonging to - * the outer plan). It will not touch the completed subplan, however. Since + * the outer plan). It will not touch the completed subplan, however. Since * there is no link to the original Query, it is not possible to recurse into * subselects of an already-planned expression tree. This is OK for current * uses, but may need to be revisited in future. 
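(The walker convention restated in the hunk above — return "false" to keep walking, "true" to abort and propagate "true" to the top-level caller — is the part most often remembered backwards. A self-contained toy rendering of the same contract on a two-child tree; Node, tree_walker and find_tag_walker here are invented for illustration and are not the backend's expression_tree_walker API:)

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-in for a parse-tree node. */
    typedef struct Node
    {
        int          tag;
        struct Node *left;
        struct Node *right;
    } Node;

    /*
     * Same convention as described above: a walker returns true to abort
     * the traversal; the traversal returns true iff some walker
     * invocation returned true, else false.
     */
    static bool
    tree_walker(Node *node, bool (*walker) (Node *, void *), void *context)
    {
        if (node == NULL)
            return false;
        if (walker(node, context))
            return true;        /* short-circuit the rest of the walk */
        if (tree_walker(node->left, walker, context))
            return true;
        return tree_walker(node->right, walker, context);
    }

    /* Example walker: aborts as soon as it finds the wanted tag. */
    static bool
    find_tag_walker(Node *node, void *context)
    {
        return node->tag == *(int *) context;
    }

    int
    main(void)
    {
        Node    leaf = {42, NULL, NULL};
        Node    root = {1, &leaf, NULL};
        int     wanted = 42;

        printf("found: %d\n", tree_walker(&root, find_tag_walker, &wanted));
        return 0;
    }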
@@ -2154,8 +2154,8 @@ expression_tree_mutator(Node *node, return (Node *) copyObject(node); case T_WithCheckOption: { - WithCheckOption *wco = (WithCheckOption *) node; - WithCheckOption *newnode; + WithCheckOption *wco = (WithCheckOption *) node; + WithCheckOption *newnode; FLATCOPY(newnode, wco, WithCheckOption); MUTATE(newnode->qual, wco->qual, Node *); @@ -2658,7 +2658,7 @@ expression_tree_mutator(Node *node, * This routine exists just to reduce the number of places that need to know * where all the expression subtrees of a Query are. Note it can be used * for starting a walk at top level of a Query regardless of whether the - * mutator intends to descend into subqueries. It is also useful for + * mutator intends to descend into subqueries. It is also useful for * descending into subqueries within a mutator. * * Some callers want to suppress mutating of certain items in the Query, @@ -2668,7 +2668,7 @@ expression_tree_mutator(Node *node, * indicated items. (More flag bits may be added as needed.) * * Normally the Query node itself is copied, but some callers want it to be - * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All + * modified in-place; they must pass QTW_DONT_COPY_QUERY in flags. All * modified substructure is safely copied in any case. */ Query * diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 10e81391b13..11c74860070 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -13,7 +13,7 @@ * NOTES * Every node type that can appear in stored rules' parsetrees *must* * have an output function defined here (as well as an input function - * in readfuncs.c). For use in debugging, we also provide output + * in readfuncs.c). For use in debugging, we also provide output * functions for nodes that appear in raw parsetrees, path, and plan trees. * These nodes however need not have input functions. * @@ -30,8 +30,8 @@ /* - * Macros to simplify output of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * Macros to simplify output of different kinds of fields. Use these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire conventions about the names of the local variables in an Out * routine. */ diff --git a/src/backend/nodes/params.c b/src/backend/nodes/params.c index 3916412dd16..b21d651f95b 100644 --- a/src/backend/nodes/params.c +++ b/src/backend/nodes/params.c @@ -27,7 +27,7 @@ * * Note: the intent of this function is to make a static, self-contained * set of parameter values. If dynamic parameter hooks are present, we - * intentionally do not copy them into the result. Rather, we forcibly + * intentionally do not copy them into the result. Rather, we forcibly * instantiate all available parameter values and copy the datum values. */ ParamListInfo diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c index 7a88a0d46a3..2c0edff6c1b 100644 --- a/src/backend/nodes/read.c +++ b/src/backend/nodes/read.c @@ -85,21 +85,21 @@ stringToNode(char *str) * Backslashes themselves must also be backslashed for consistency. * Any other character can be, but need not be, backslashed as well. * * If the resulting token is '<>' (with no backslash), it is returned - * as a non-NULL pointer to the token but with length == 0. Note that + * as a non-NULL pointer to the token but with length == 0. Note that * there is no other way to get a zero-length token. 
* * Returns a pointer to the start of the next token, and the length of the - * token (including any embedded backslashes!) in *length. If there are + * token (including any embedded backslashes!) in *length. If there are * no more tokens, NULL and 0 are returned. * * NOTE: this routine doesn't remove backslashes; the caller must do so * if necessary (see "debackslash"). * * NOTE: prior to release 7.0, this routine also had a special case to treat - * a token starting with '"' as extending to the next '"'. This code was + * a token starting with '"' as extending to the next '"'. This code was * broken, however, since it would fail to cope with a string containing an * embedded '"'. I have therefore removed this special case, and instead - * introduced rules for using backslashes to quote characters. Higher-level + * introduced rules for using backslashes to quote characters. Higher-level * code should add backslashes to a string constant to ensure it is treated * as a single token. */ @@ -259,7 +259,7 @@ nodeTokenType(char *token, int length) * Slightly higher-level reader. * * This routine applies some semantic knowledge on top of the purely - * lexical tokenizer pg_strtok(). It can read + * lexical tokenizer pg_strtok(). It can read * * Value token nodes (integers, floats, or strings); * * General nodes (via parseNodeString() from readfuncs.c); * * Lists of the above; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index ef1eae91bf7..1ec4f3c6956 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -12,7 +12,7 @@ * * NOTES * Path and Plan nodes do not have any readfuncs support, because we - * never have occasion to read them in. (There was once code here that + * never have occasion to read them in. (There was once code here that * claimed to read them, but it was broken as well as unused.) We * never read executor state trees, either. * @@ -34,7 +34,7 @@ /* * Macros to simplify reading of different kinds of fields. Use these - * wherever possible to reduce the chance for silly typos. Note that these + * wherever possible to reduce the chance for silly typos. Note that these * hard-wire conventions about the names of the local variables in a Read * routine. */ @@ -130,7 +130,7 @@ /* * NOTE: use atoi() to read values written with %d, or atoui() to read * values written with %u in outfuncs.c. An exception is OID values, - * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u, + * for which use atooid(). (As of 7.1, outfuncs.c writes OIDs as %u, * but this will probably change in the future.) */ #define atoui(x) ((unsigned int) strtoul((x), NULL, 10)) @@ -601,7 +601,7 @@ _readOpExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) @@ -632,7 +632,7 @@ _readDistinctExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. 
* (We don't currently support an ALTER OPERATOR command, but might * someday.) @@ -663,7 +663,7 @@ _readNullIfExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) @@ -694,7 +694,7 @@ _readScalarArrayOpExpr(void) /* * The opfuncid is stored in the textual format primarily for debugging * and documentation reasons. We want to always read it as zero to force - * it to be re-looked-up in the pg_operator entry. This ensures that + * it to be re-looked-up in the pg_operator entry. This ensures that * stored rules don't have hidden dependencies on operators' functions. * (We don't currently support an ALTER OPERATOR command, but might * someday.) diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c index df3ae93b1dc..a880c81cf1c 100644 --- a/src/backend/nodes/tidbitmap.c +++ b/src/backend/nodes/tidbitmap.c @@ -19,7 +19,7 @@ * of lossiness. In theory we could fall back to page ranges at some * point, but for now that seems useless complexity. * - * We also support the notion of candidate matches, or rechecking. This + * We also support the notion of candidate matches, or rechecking. This * means we know that a search need visit only some tuples on a page, * but we are not certain that all of those tuples are real matches. * So the eventual heap scan must recheck the quals for these tuples only, @@ -48,7 +48,7 @@ /* * The maximum number of tuples per page is not large (typically 256 with * 8K pages, or 1024 with 32K pages). So there's not much point in making - * the per-page bitmaps variable size. We just legislate that the size + * the per-page bitmaps variable size. We just legislate that the size * is this: */ #define MAX_TUPLES_PER_PAGE MaxHeapTuplesPerPage @@ -61,10 +61,10 @@ * for that page in the page table. * * We actually store both exact pages and lossy chunks in the same hash - * table, using identical data structures. (This is because dynahash.c's + * table, using identical data structures. (This is because dynahash.c's * memory management doesn't allow space to be transferred easily from one * hashtable to another.) Therefore it's best if PAGES_PER_CHUNK is the - * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we + * same as MAX_TUPLES_PER_PAGE, or at least not too different. But we * also want PAGES_PER_CHUNK to be a power of 2 to avoid expensive integer * remainder operations. So, define it like this: */ @@ -142,7 +142,7 @@ struct TIDBitmap /* * When iterating over a bitmap in sorted order, a TBMIterator is used to - * track our progress. There can be several iterators scanning the same + * track our progress. There can be several iterators scanning the same * bitmap concurrently. Note that the bitmap becomes read-only as soon as * any iterator is created. */ @@ -790,7 +790,7 @@ tbm_find_pageentry(const TIDBitmap *tbm, BlockNumber pageno) * * If new, the entry is marked as an exact (non-chunk) entry. * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. 
*/ static PagetableEntry * @@ -870,7 +870,7 @@ tbm_page_is_lossy(const TIDBitmap *tbm, BlockNumber pageno) /* * tbm_mark_page_lossy - mark the page number as lossily stored * - * This may cause the table to exceed the desired memory size. It is + * This may cause the table to exceed the desired memory size. It is * up to the caller to call tbm_lossify() at the next safe point if so. */ static void @@ -891,7 +891,7 @@ tbm_mark_page_lossy(TIDBitmap *tbm, BlockNumber pageno) chunk_pageno = pageno - bitno; /* - * Remove any extant non-lossy entry for the page. If the page is its own + * Remove any extant non-lossy entry for the page. If the page is its own * chunk header, however, we skip this and handle the case below. */ if (bitno != 0) @@ -956,7 +956,7 @@ tbm_lossify(TIDBitmap *tbm) * * Since we are called as soon as nentries exceeds maxentries, we should * push nentries down to significantly less than maxentries, or else we'll - * just end up doing this again very soon. We shoot for maxentries/2. + * just end up doing this again very soon. We shoot for maxentries/2. */ Assert(!tbm->iterating); Assert(tbm->status == TBM_HASH); diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c index 6ceb090e855..de2a6709dd3 100644 --- a/src/backend/optimizer/geqo/geqo_eval.c +++ b/src/backend/optimizer/geqo/geqo_eval.c @@ -82,11 +82,11 @@ geqo_eval(PlannerInfo *root, Gene *tour, int num_gene) * not already contain some entries. The newly added entries will be * recycled by the MemoryContextDelete below, so we must ensure that the * list is restored to its former state before exiting. We can do this by - * truncating the list to its original length. NOTE this assumes that any + * truncating the list to its original length. NOTE this assumes that any * added entries are appended at the end! * * We also must take care not to mess up the outer join_rel_hash, if there - * is one. We can do this by just temporarily setting the link to NULL. + * is one. We can do this by just temporarily setting the link to NULL. * (If we are dealing with enough join rels, which we very likely are, a * new hash table will get built and used locally.) * @@ -217,7 +217,7 @@ gimme_tree(PlannerInfo *root, Gene *tour, int num_gene) * Merge a "clump" into the list of existing clumps for gimme_tree. * * We try to merge the clump into some existing clump, and repeat if - * successful. When no more merging is possible, insert the clump + * successful. When no more merging is possible, insert the clump * into the list, preserving the list ordering rule (namely, that * clumps of larger size appear earlier). * @@ -268,7 +268,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) /* * Recursively try to merge the enlarged old_clump with - * others. When no further merge is possible, we'll reinsert + * others. When no further merge is possible, we'll reinsert * it into the list. */ return merge_clump(root, clumps, old_clump, force); @@ -279,7 +279,7 @@ merge_clump(PlannerInfo *root, List *clumps, Clump *new_clump, bool force) /* * No merging is possible, so add new_clump as an independent clump, in - * proper order according to size. We can be fast for the common case + * proper order according to size. We can be fast for the common case * where it has size 1 --- it should always go at the end. 
*/ if (clumps == NIL || new_clump->size == 1) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 5777cb2ff0c..41eaa2653ac 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -425,7 +425,7 @@ set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) * set_append_rel_size * Set size estimates for an "append relation" * - * The passed-in rel and RTE represent the entire append relation. The + * The passed-in rel and RTE represent the entire append relation. The * relation's contents are computed by appending together the output of * the individual member relations. Note that in the inheritance case, * the first member relation is actually the same table as is mentioned in @@ -489,7 +489,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * We have to copy the parent's targetlist and quals to the child, - * with appropriate substitution of variables. However, only the + * with appropriate substitution of variables. However, only the * baserestrictinfo quals are needed before we can check for * constraint exclusion; so do that first and then check to see if we * can disregard this child. @@ -553,7 +553,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * We have to make child entries in the EquivalenceClass data - * structures as well. This is needed either if the parent + * structures as well. This is needed either if the parent * participates in some eclass joins (because we will want to consider * inner-indexscan joins on the individual children) or if the parent * has useful pathkeys (because we should try to build MergeAppend @@ -594,7 +594,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, /* * Accumulate per-column estimates too. We need not do anything - * for PlaceHolderVars in the parent list. If child expression + * for PlaceHolderVars in the parent list. If child expression * isn't a Var, or we didn't record a width estimate for it, we * have to fall back on a datatype-based estimate. * @@ -670,7 +670,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * Generate access paths for each member relation, and remember the - * cheapest path for each one. Also, identify all pathkeys (orderings) + * cheapest path for each one. Also, identify all pathkeys (orderings) * and parameterizations (required_outer sets) available for the member * relations. */ @@ -720,7 +720,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, /* * Collect lists of all the available path orderings and - * parameterizations for all the children. We use these as a + * parameterizations for all the children. We use these as a * heuristic to indicate which sort orderings and parameterizations we * should build Append and MergeAppend paths for. */ @@ -806,7 +806,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, * so that not that many cases actually get considered here.) * * The Append node itself cannot enforce quals, so all qual checking must - * be done in the child paths. This means that to have a parameterized + * be done in the child paths. This means that to have a parameterized * Append path, we must have the exact same parameterization for each * child path; otherwise some children might be failing to check the * moved-down quals. To make them match up, we can try to increase the @@ -977,7 +977,7 @@ get_cheapest_parameterized_child_path(PlannerInfo *root, RelOptInfo *rel, * joinquals to be checked within the path's scan. 
However, some existing * paths might check the available joinquals already while others don't; * therefore, it's not clear which existing path will be cheapest after - * reparameterization. We have to go through them all and find out. + * reparameterization. We have to go through them all and find out. */ cheapest = NULL; foreach(lc, rel->pathlist) @@ -1103,7 +1103,7 @@ has_multiple_baserels(PlannerInfo *root) * * We don't currently support generating parameterized paths for subqueries * by pushing join clauses down into them; it seems too expensive to re-plan - * the subquery multiple times to consider different alternatives. So the + * the subquery multiple times to consider different alternatives. So the * subquery will have exactly one path. (The path will be parameterized * if the subquery contains LATERAL references, otherwise not.) Since there's * no freedom of action here, there's no need for a separate set_subquery_size @@ -1560,7 +1560,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist) * independent jointree items in the query. This is > 1. * * 'initial_rels' is a list of RelOptInfo nodes for each independent - * jointree item. These are the components to be joined together. + * jointree item. These are the components to be joined together. * Note that levels_needed == list_length(initial_rels). * * Returns the final level of join relations, i.e., the relation that is @@ -1576,7 +1576,7 @@ make_rel_from_joinlist(PlannerInfo *root, List *joinlist) * needed for these paths need have been instantiated. * * Note to plugin authors: the functions invoked during standard_join_search() - * modify root->join_rel_list and root->join_rel_hash. If you want to do more + * modify root->join_rel_list and root->join_rel_hash. If you want to do more * than one join-order search, you'll probably need to save and restore the * original states of those data structures. See geqo_eval() for an example. */ @@ -1675,7 +1675,7 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels) * column k is found to be unsafe to reference, we set unsafeColumns[k] to * TRUE, but we don't reject the subquery overall since column k might * not be referenced by some/all quals. The unsafeColumns[] array will be - * consulted later by qual_is_pushdown_safe(). It's better to do it this + * consulted later by qual_is_pushdown_safe(). It's better to do it this * way than to make the checks directly in qual_is_pushdown_safe(), because * when the subquery involves set operations we have to check the output * expressions in each arm of the set op. @@ -1768,7 +1768,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery, * check_output_expressions - check subquery's output expressions for safety * * There are several cases in which it's unsafe to push down an upper-level - * qual if it references a particular output column of a subquery. We check + * qual if it references a particular output column of a subquery. We check * each output column of the subquery and set unsafeColumns[k] to TRUE if * that column is unsafe for a pushed-down qual to reference. The conditions * checked here are: @@ -1786,7 +1786,7 @@ recurse_pushdown_safe(Node *setOp, Query *topquery, * of rows returned. (This condition is vacuous for DISTINCT, because then * there are no non-DISTINCT output columns, so we needn't check. But note * we are assuming that the qual can't distinguish values that the DISTINCT - * operator sees as equal. This is a bit shaky but we have no way to test + * operator sees as equal. 
This is a bit shaky but we have no way to test * for the case, and it's unlikely enough that we shouldn't refuse the * optimization just because it could theoretically happen.) */ @@ -1903,7 +1903,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual, /* * It would be unsafe to push down window function calls, but at least for - * the moment we could never see any in a qual anyhow. (The same applies + * the moment we could never see any in a qual anyhow. (The same applies * to aggregates, which we check for in pull_var_clause below.) */ Assert(!contain_window_function(qual)); diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c index efeea374c27..9b657fb21fd 100644 --- a/src/backend/optimizer/path/clausesel.c +++ b/src/backend/optimizer/path/clausesel.c @@ -58,7 +58,7 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause, * See clause_selectivity() for the meaning of the additional parameters. * * Our basic approach is to take the product of the selectivities of the - * subclauses. However, that's only right if the subclauses have independent + * subclauses. However, that's only right if the subclauses have independent * probabilities, and in reality they are often NOT independent. So, * we want to be smarter where we can. @@ -75,12 +75,12 @@ static void addRangeClause(RangeQueryClause **rqlist, Node *clause, * see that hisel is the fraction of the range below the high bound, while * losel is the fraction above the low bound; so hisel can be interpreted * directly as a 0..1 value but we need to convert losel to 1-losel before - * interpreting it as a value. Then the available range is 1-losel to hisel. + * interpreting it as a value. Then the available range is 1-losel to hisel. * However, this calculation double-excludes nulls, so really we need * hisel + losel + null_frac - 1.) * * If either selectivity is exactly DEFAULT_INEQ_SEL, we forget this equation - * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation + * and instead use DEFAULT_RANGE_INEQ_SEL. The same applies if the equation * yields an impossible (negative) result. * * A free side-effect is that we can recognize redundant inequalities such @@ -174,7 +174,7 @@ clauselist_selectivity(PlannerInfo *root, { /* * If it's not a "<" or ">" operator, just merge the - * selectivity in generically. But if it's the right oprrest, + * selectivity in generically. But if it's the right oprrest, * add the clause to rqlist for later processing. */ switch (get_oprrest(expr->opno)) @@ -459,14 +459,14 @@ treat_as_join_clause(Node *clause, RestrictInfo *rinfo, * nestloop join's inner relation --- varRelid should then be the ID of the * inner relation. * - * When varRelid is 0, all variables are treated as variables. This + * When varRelid is 0, all variables are treated as variables. This * is appropriate for ordinary join clauses and restriction clauses. * * jointype is the join type, if the clause is a join clause. Pass JOIN_INNER * if the clause isn't a join clause. * * sjinfo is NULL for a non-join clause, otherwise it provides additional - * context information about the join being performed. There are some + * context information about the join being performed. There are some * special cases: * 1. For a special (not INNER) join, sjinfo is always a member of * root->join_info_list. 
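(The range-pairing arithmetic described at the top of this file's hunks — hisel is the fraction below the high bound, 1-losel the fraction below the low bound, so hisel + losel - 1 is the in-range fraction, with a default when either input is itself a default guess or the result goes negative — is compact enough to restate. A rough standalone sketch, ignoring the null_frac refinement the comment also mentions; this is an illustration, not the planner's actual code path:)

    #include <stdio.h>

    #define DEFAULT_INEQ_SEL        0.3333333333333333
    #define DEFAULT_RANGE_INEQ_SEL  0.005

    /*
     * Combine the selectivity of "x > lo" (losel, fraction above the low
     * bound) and "x < hi" (hisel, fraction below the high bound) into one
     * range selectivity, per the rule described above.
     */
    static double
    range_selectivity(double losel, double hisel)
    {
        double      s2;

        if (losel == DEFAULT_INEQ_SEL || hisel == DEFAULT_INEQ_SEL)
            return DEFAULT_RANGE_INEQ_SEL;  /* don't trust default guesses */
        s2 = hisel + losel - 1.0;
        if (s2 <= 0.0)
            return DEFAULT_RANGE_INEQ_SEL;  /* impossible (negative) result */
        return s2;
    }

    int
    main(void)
    {
        /* 80% of rows below hi, 70% above lo => 50% inside the range */
        printf("%.3f\n", range_selectivity(0.7, 0.8));
        return 0;
    }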
@@ -501,7 +501,7 @@ clause_selectivity(PlannerInfo *root, /* * If the clause is marked pseudoconstant, then it will be used as a * gating qual and should not affect selectivity estimates; hence - * return 1.0. The only exception is that a constant FALSE may be + * return 1.0. The only exception is that a constant FALSE may be * taken as having selectivity 0.0, since it will surely mean no rows * out of the plan. This case is simple enough that we need not * bother caching the result. @@ -520,11 +520,11 @@ clause_selectivity(PlannerInfo *root, /* * If possible, cache the result of the selectivity calculation for - * the clause. We can cache if varRelid is zero or the clause + * the clause. We can cache if varRelid is zero or the clause * contains only vars of that relid --- otherwise varRelid will affect * the result, so mustn't cache. Outer join quals might be examined * with either their join's actual jointype or JOIN_INNER, so we need - * two cache variables to remember both cases. Note: we assume the + * two cache variables to remember both cases. Note: we assume the * result won't change if we are switching the input relations or * considering a unique-ified case, so we only need one cache variable * for all non-JOIN_INNER cases. @@ -685,7 +685,7 @@ clause_selectivity(PlannerInfo *root, /* * This is not an operator, so we guess at the selectivity. THIS IS A * HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE ABLE TO HAVE - * SELECTIVITIES THEMSELVES. -- JMH 7/9/92 + * SELECTIVITIES THEMSELVES. -- JMH 7/9/92 */ s1 = (Selectivity) 0.3333333; } diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 326794acb85..848065ee7b2 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -24,7 +24,7 @@ * * Obviously, taking constants for these values is an oversimplification, * but it's tough enough to get any useful estimates even at this level of - * detail. Note that all of these parameters are user-settable, in case + * detail. Note that all of these parameters are user-settable, in case * the default values are drastically off for a particular platform. * * seq_page_cost and random_page_cost can also be overridden for an individual @@ -493,7 +493,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count) * computed for us by query_planner. * * Caller is expected to have ensured that tuples_fetched is greater than zero - * and rounded to integer (see clamp_row_est). The result will likewise be + * and rounded to integer (see clamp_row_est). The result will likewise be * greater than zero and integral. */ double @@ -694,7 +694,7 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, /* * For small numbers of pages we should charge spc_random_page_cost * apiece, while if nearly all the table's pages are being read, it's more - * appropriate to charge spc_seq_page_cost apiece. The effect is + * appropriate to charge spc_seq_page_cost apiece. The effect is * nonlinear, too. For lack of a better idea, interpolate like this to * determine the cost per page. */ @@ -769,7 +769,7 @@ cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec) * Estimate the cost of a BitmapAnd node * * Note that this considers only the costs of index scanning and bitmap - * creation, not the eventual heap access. In that sense the object isn't + * creation, not the eventual heap access. 
In that sense the object isn't * truly a Path, but it has enough path-like properties (costs in particular) * to warrant treating it as one. We don't bother to set the path rows field, * however. @@ -828,7 +828,7 @@ cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root) /* * We estimate OR selectivity on the assumption that the inputs are * non-overlapping, since that's often the case in "x IN (list)" type - * situations. Of course, we clamp to 1.0 at the end. + * situations. Of course, we clamp to 1.0 at the end. * * The runtime cost of the BitmapOr itself is estimated at 100x * cpu_operator_cost for each tbm_union needed. Probably too small, @@ -917,7 +917,7 @@ cost_tidscan(Path *path, PlannerInfo *root, /* * We must force TID scan for WHERE CURRENT OF, because only nodeTidscan.c - * understands how to do it correctly. Therefore, honor enable_tidscan + * understands how to do it correctly. Therefore, honor enable_tidscan * only when CURRENT OF isn't present. Also note that cost_qual_eval * counts a CurrentOfExpr as having startup cost disable_cost, which we * subtract off here; that's to prevent other plan types such as seqscan @@ -1036,7 +1036,7 @@ cost_functionscan(Path *path, PlannerInfo *root, * * Currently, nodeFunctionscan.c always executes the functions to * completion before returning any rows, and caches the results in a - * tuplestore. So the function eval cost is all startup cost, and per-row + * tuplestore. So the function eval cost is all startup cost, and per-row * costs are minimal. * * XXX in principle we ought to charge tuplestore spill costs if the @@ -1108,7 +1108,7 @@ cost_valuesscan(Path *path, PlannerInfo *root, * * Note: this is used for both self-reference and regular CTEs; the * possible cost differences are below the threshold of what we could - * estimate accurately anyway. Note that the costs of evaluating the + * estimate accurately anyway. Note that the costs of evaluating the * referenced CTE query are added into the final plan as initplan costs, * and should NOT be counted here. */ @@ -1202,7 +1202,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm) * If the total volume exceeds sort_mem, we switch to a tape-style merge * algorithm. There will still be about t*log2(t) tuple comparisons in * total, but we will also need to write and read each tuple once per - * merge pass. We expect about ceil(logM(r)) merge passes where r is the + * merge pass. We expect about ceil(logM(r)) merge passes where r is the * number of initial runs formed and M is the merge order used by tuplesort.c. * Since the average initial run should be about twice sort_mem, we have * disk traffic = 2 * relsize * ceil(logM(p / (2*sort_mem))) @@ -1216,7 +1216,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm) * accesses (XXX can't we refine that guess?) * * By default, we charge two operator evals per tuple comparison, which should - * be in the right ballpark in most cases. The caller can tweak this by + * be in the right ballpark in most cases. The caller can tweak this by * specifying nonzero comparison_cost; typically that's used for any extra * work that has to be done to prepare the inputs to the comparison operators. * @@ -1340,7 +1340,7 @@ cost_sort(Path *path, PlannerInfo *root, * Determines and returns the cost of a MergeAppend node. * * MergeAppend merges several pre-sorted input streams, using a heap that - * at any given instant holds the next tuple from each stream. If there + * at any given instant holds the next tuple from each stream. 
If there * are N streams, we need about N*log2(N) tuple comparisons to construct * the heap at startup, and then for each output tuple, about log2(N) * comparisons to delete the top heap entry and another log2(N) comparisons @@ -1499,7 +1499,7 @@ cost_agg(Path *path, PlannerInfo *root, * group otherwise. We charge cpu_tuple_cost for each output tuple. * * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the - * same total CPU cost, but AGG_SORTED has lower startup cost. If the + * same total CPU cost, but AGG_SORTED has lower startup cost. If the * input path is already sorted appropriately, AGG_SORTED should be * preferred (since it has no risk of memory overflow). This will happen * as long as the computed total costs are indeed exactly equal --- but if @@ -2107,10 +2107,10 @@ initial_cost_mergejoin(PlannerInfo *root, JoinCostWorkspace *workspace, * Unlike other costsize functions, this routine makes one actual decision: * whether we should materialize the inner path. We do that either because * the inner path can't support mark/restore, or because it's cheaper to - * use an interposed Material node to handle mark/restore. When the decision + * use an interposed Material node to handle mark/restore. When the decision * is cost-based it would be logically cleaner to build and cost two separate * paths with and without that flag set; but that would require repeating most - * of the cost calculations, which are not all that cheap. Since the choice + * of the cost calculations, which are not all that cheap. Since the choice * will not affect output pathkeys or startup cost, only total cost, there is * no possibility of wanting to keep both paths. So it seems best to make * the decision here and record it in the path's materialize_inner field. @@ -2174,7 +2174,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, qp_qual_cost.per_tuple -= merge_qual_cost.per_tuple; /* - * Get approx # tuples passing the mergequals. We use approx_tuple_count + * Get approx # tuples passing the mergequals. We use approx_tuple_count * here because we need an estimate done with JOIN_INNER semantics. */ mergejointuples = approx_tuple_count(root, &path->jpath, mergeclauses); @@ -2188,7 +2188,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, * estimated approximately as size of merge join output minus size of * inner relation. Assume that the distinct key values are 1, 2, ..., and * denote the number of values of each key in the outer relation as m1, - * m2, ...; in the inner relation, n1, n2, ... Then we have + * m2, ...; in the inner relation, n1, n2, ... Then we have * * size of join = m1 * n1 + m2 * n2 + ... * @@ -2199,7 +2199,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, * This equation works correctly for outer tuples having no inner match * (nk = 0), but not for inner tuples having no outer match (mk = 0); we * are effectively subtracting those from the number of rescanned tuples, - * when we should not. Can we do better without expensive selectivity + * when we should not. Can we do better without expensive selectivity * computations? * * The whole issue is moot if we are working from a unique-ified outer @@ -2219,7 +2219,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, /* * Decide whether we want to materialize the inner input to shield it from - * mark/restore and performing re-fetches. Our cost model for regular + * mark/restore and performing re-fetches. 
Our cost model for regular * re-fetches is that a re-fetch costs the same as an original fetch, * which is probably an overestimate; but on the other hand we ignore the * bookkeeping costs of mark/restore. Not clear if it's worth developing @@ -2312,7 +2312,7 @@ final_cost_mergejoin(PlannerInfo *root, MergePath *path, /* * For each tuple that gets through the mergejoin proper, we charge * cpu_tuple_cost plus the cost of evaluating additional restriction - * clauses that are to be applied at the join. (This is pessimistic since + * clauses that are to be applied at the join. (This is pessimistic since * not all of the quals may get evaluated at each tuple.) * * Note: we could adjust for SEMI/ANTI joins skipping some qual @@ -2464,7 +2464,7 @@ initial_cost_hashjoin(PlannerInfo *root, JoinCostWorkspace *workspace, * If inner relation is too big then we will need to "batch" the join, * which implies writing and reading most of the tuples to disk an extra * time. Charge seq_page_cost per page, since the I/O should be nice and - * sequential. Writing the inner rel counts as startup cost, all the rest + * sequential. Writing the inner rel counts as startup cost, all the rest * as run cost. */ if (numbatches > 1) @@ -2695,7 +2695,7 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path, /* * For each tuple that gets through the hashjoin proper, we charge * cpu_tuple_cost plus the cost of evaluating additional restriction - * clauses that are to be applied at the join. (This is pessimistic since + * clauses that are to be applied at the join. (This is pessimistic since * not all of the quals may get evaluated at each tuple.) */ startup_cost += qp_qual_cost.startup; @@ -2748,7 +2748,7 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan) { /* * Otherwise we will be rescanning the subplan output on each - * evaluation. We need to estimate how much of the output we will + * evaluation. We need to estimate how much of the output we will * actually need to scan. NOTE: this logic should agree with the * tuple_fraction estimates used by make_subplan() in * plan/subselect.c. @@ -2796,10 +2796,10 @@ cost_subplan(PlannerInfo *root, SubPlan *subplan, Plan *plan) /* * cost_rescan * Given a finished Path, estimate the costs of rescanning it after - * having done so the first time. For some Path types a rescan is + * having done so the first time. For some Path types a rescan is * cheaper than an original scan (if no parameters change), and this * function embodies knowledge about that. The default is to return - * the same costs stored in the Path. (Note that the cost estimates + * the same costs stored in the Path. (Note that the cost estimates * actually stored in Paths are always for first scans.) * * This function is not currently intended to model effects such as rescans @@ -2840,7 +2840,7 @@ cost_rescan(PlannerInfo *root, Path *path, { /* * These plan types materialize their final result in a - * tuplestore or tuplesort object. So the rescan cost is only + * tuplestore or tuplesort object. So the rescan cost is only * cpu_tuple_cost per tuple, unless the result is large enough * to spill to disk. */ @@ -2865,8 +2865,8 @@ cost_rescan(PlannerInfo *root, Path *path, { /* * These plan types not only materialize their results, but do - * not implement qual filtering or projection. So they are - * even cheaper to rescan than the ones above. We charge only + * not implement qual filtering or projection. So they are + * even cheaper to rescan than the ones above. 
We charge only * cpu_operator_cost per tuple. (Note: keep that in sync with * the run_cost charge in cost_sort, and also see comments in * cost_material before you change it.) @@ -3007,7 +3007,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context) * evaluation of AND/OR? Probably *not*, because that would make the * results depend on the clause ordering, and we are not in any position * to expect that the current ordering of the clauses is the one that's - * going to end up being used. The above per-RestrictInfo caching would + * going to end up being used. The above per-RestrictInfo caching would * not mix well with trying to re-order clauses anyway. * * Another issue that is entirely ignored here is that if a set-returning @@ -3129,7 +3129,7 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context) else if (IsA(node, AlternativeSubPlan)) { /* - * Arbitrarily use the first alternative plan for costing. (We should + * Arbitrarily use the first alternative plan for costing. (We should * certainly only include one alternative, and we don't yet have * enough information to know which one the executor is most likely to * use.) @@ -3273,13 +3273,13 @@ compute_semi_anti_join_factors(PlannerInfo *root, /* * jselec can be interpreted as the fraction of outer-rel rows that have * any matches (this is true for both SEMI and ANTI cases). And nselec is - * the fraction of the Cartesian product that matches. So, the average + * the fraction of the Cartesian product that matches. So, the average * number of matches for each outer-rel row that has at least one match is * nselec * inner_rows / jselec. * * Note: it is correct to use the inner rel's "rows" count here, even * though we might later be considering a parameterized inner path with - * fewer rows. This is because we have included all the join clauses in + * fewer rows. This is because we have included all the join clauses in * the selectivity estimate. */ if (jselec > 0) /* protect against zero divide */ @@ -3607,7 +3607,7 @@ calc_joinrel_size_estimate(PlannerInfo *root, double nrows; /* - * Compute joinclause selectivity. Note that we are only considering + * Compute joinclause selectivity. Note that we are only considering * clauses that become restriction clauses at this join level; we are not * double-counting them because they were not considered in estimating the * sizes of the component rels. @@ -3665,7 +3665,7 @@ calc_joinrel_size_estimate(PlannerInfo *root, * * If we are doing an outer join, take that into account: the joinqual * selectivity has to be clamped using the knowledge that the output must - * be at least as large as the non-nullable input. However, any + * be at least as large as the non-nullable input. However, any * pushed-down quals are applied after the outer join, so their * selectivity applies fully. * @@ -3736,7 +3736,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel) /* * Compute per-output-column width estimates by examining the subquery's - * targetlist. For any output that is a plain Var, get the width estimate + * targetlist. For any output that is a plain Var, get the width estimate * that was made while planning the subquery. Otherwise, we leave it to * set_rel_width to fill in a datatype-based default estimate. 
*/ @@ -3755,7 +3755,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel) * The subquery could be an expansion of a view that's had columns * added to it since the current query was parsed, so that there are * non-junk tlist columns in it that don't correspond to any column - * visible at our query level. Ignore such columns. + * visible at our query level. Ignore such columns. */ if (te->resno < rel->min_attr || te->resno > rel->max_attr) continue; @@ -3904,7 +3904,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan) * of estimating baserestrictcost, so we set that, and we also set up width * using what will be purely datatype-driven estimates from the targetlist. * There is no way to do anything sane with the rows value, so we just put - * a default estimate and hope that the wrapper can improve on it. The + * a default estimate and hope that the wrapper can improve on it. The * wrapper's GetForeignRelSize function will be called momentarily. * * The rel's targetlist and restrictinfo list must have been constructed @@ -4025,7 +4025,7 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel) { /* * We could be looking at an expression pulled up from a subquery, - * or a ROW() representing a whole-row child Var, etc. Do what we + * or a ROW() representing a whole-row child Var, etc. Do what we * can using the expression type information. */ int32 item_width; @@ -4132,7 +4132,7 @@ void set_default_effective_cache_size(void) { /* - * We let check_effective_cache_size() compute the actual setting. Note + * We let check_effective_cache_size() compute the actual setting. Note * that this call is a no-op if the user has supplied a setting (since * that will have a higher priority than PGC_S_DYNAMIC_DEFAULT). */ diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c index ac12f84fd5e..b7aff3775ee 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -74,7 +74,7 @@ static bool reconsider_full_join_clause(PlannerInfo *root, * * If below_outer_join is true, then the clause was found below the nullable * side of an outer join, so its sides might validly be both NULL rather than - * strictly equal. We can still deduce equalities in such cases, but we take + * strictly equal. We can still deduce equalities in such cases, but we take * care to mark an EquivalenceClass if it came from any such clauses. Also, * we have to check that both sides are either pseudo-constants or strict * functions of Vars, else they might not both go to NULL above the outer @@ -141,9 +141,9 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, collation); /* - * Reject clauses of the form X=X. These are not as redundant as they + * Reject clauses of the form X=X. These are not as redundant as they * might seem at first glance: assuming the operator is strict, this is - * really an expensive way to write X IS NOT NULL. So we must not risk + * really an expensive way to write X IS NOT NULL. So we must not risk * just losing the clause, which would be possible if there is already a * single-element EquivalenceClass containing X. The case is not common * enough to be worth contorting the EC machinery for, so just reject the @@ -187,14 +187,14 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, * Sweep through the existing EquivalenceClasses looking for matches to * item1 and item2. These are the possible outcomes: * - * 1. We find both in the same EC. 
The equivalence is already known, so + * 1. We find both in the same EC. The equivalence is already known, so * there's nothing to do. * * 2. We find both in different ECs. Merge the two ECs together. * * 3. We find just one. Add the other to its EC. * - * 4. We find neither. Make a new, two-entry EC. + * 4. We find neither. Make a new, two-entry EC. * * Note: since all ECs are built through this process or the similar * search in get_eclass_for_sort_expr(), it's impossible that we'd match @@ -294,7 +294,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, /* * We add ec2's items to ec1, then set ec2's ec_merged link to point - * to ec1 and remove ec2 from the eq_classes list. We cannot simply + * to ec1 and remove ec2 from the eq_classes list. We cannot simply * delete ec2 because that could leave dangling pointers in existing * PathKeys. We leave it behind with a link so that the merged EC can * be found. @@ -406,7 +406,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo, * Also, the expression's exposed collation must match the EC's collation. * This is important because in comparisons like "foo < bar COLLATE baz", * only one of the expressions has the correct exposed collation as we receive - * it from the parser. Forcing both of them to have it ensures that all + * it from the parser. Forcing both of them to have it ensures that all * variant spellings of such a construct behave the same. Again, we can * stick on a RelabelType to force the right exposed collation. (It might * work to not label the collation at all in EC members, but this is risky @@ -511,22 +511,22 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids, * single-member EquivalenceClass for it. * * expr is the expression, and nullable_relids is the set of base relids - * that are potentially nullable below it. We actually only care about + * that are potentially nullable below it. We actually only care about * the set of such relids that are used in the expression; but for caller * convenience, we perform that intersection step here. The caller need * only be sure that nullable_relids doesn't omit any nullable rels that * might appear in the expr. * * sortref is the SortGroupRef of the originating SortGroupClause, if any, - * or zero if not. (It should never be zero if the expression is volatile!) + * or zero if not. (It should never be zero if the expression is volatile!) * * If rel is not NULL, it identifies a specific relation we're considering * a path for, and indicates that child EC members for that relation can be - * considered. Otherwise child members are ignored. (Note: since child EC + * considered. Otherwise child members are ignored. (Note: since child EC * members aren't guaranteed unique, a non-NULL value means that there could * be more than one EC that matches the expression; if so it's order-dependent * which one you get. This is annoying but it only happens in corner cases, - * so for now we live with just reporting the first match. See also + * so for now we live with just reporting the first match. See also * generate_implied_equalities_for_column and match_pathkeys_to_index.) * * If create_it is TRUE, we'll build a new EquivalenceClass when there is no @@ -680,7 +680,7 @@ get_eclass_for_sort_expr(PlannerInfo *root, * * When an EC contains pseudoconstants, our strategy is to generate * "member = const1" clauses where const1 is the first constant member, for - * every other member (including other constants). 
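The "member = const1" strategy just described is easy to see in miniature. The sketch below uses a made-up stand-in for EquivalenceClass members, not the real planner structures:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct { const char *expr; bool is_const; } Member;

    int
    main(void)
    {
        Member  ec[] = {{"a.x", false}, {"42", true}, {"b.y", false}, {"42::int8", true}};
        int     n = 4, i, c1 = -1;

        for (i = 0; i < n; i++)     /* find the first constant member */
            if (ec[i].is_const) { c1 = i; break; }
        if (c1 >= 0)
            for (i = 0; i < n; i++) /* "member = const1" for everyone else */
                if (i != c1)
                    printf("%s = %s\n", ec[i].expr, ec[c1].expr);
        return 0;
    }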
If we are able to do this + * every other member (including other constants). If we are able to do this * then we don't need any "var = var" comparisons because we've successfully * constrained all the vars at their points of creation. If we fail to * generate any of these clauses due to lack of cross-type operators, we fall @@ -705,7 +705,7 @@ get_eclass_for_sort_expr(PlannerInfo *root, * "WHERE a.x = b.y AND b.y = a.z", the scheme breaks down if we cannot * generate "a.x = a.z" as a restriction clause for A.) In this case we mark * the EC "ec_broken" and fall back to regurgitating its original source - * RestrictInfos at appropriate times. We do not try to retract any derived + * RestrictInfos at appropriate times. We do not try to retract any derived * clauses already generated from the broken EC, so the resulting plan could * be poor due to bad selectivity estimates caused by redundant clauses. But * the correct solution to that is to fix the opfamilies ... @@ -968,8 +968,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root, * built any join RelOptInfos. * * An annoying special case for parameterized scans is that the inner rel can - * be an appendrel child (an "other rel"). In this case we must generate - * appropriate clauses using child EC members. add_child_rel_equivalences + * be an appendrel child (an "other rel"). In this case we must generate + * appropriate clauses using child EC members. add_child_rel_equivalences * must already have been done for the child rel. * * The results are sufficient for use in merge, hash, and plain nestloop join @@ -983,7 +983,7 @@ generate_base_implied_equalities_broken(PlannerInfo *root, * we consider different join paths, we avoid generating multiple copies: * whenever we select a particular pair of EquivalenceMembers to join, * we check to see if the pair matches any original clause (in ec_sources) - * or previously-built clause (in ec_derives). This saves memory and allows + * or previously-built clause (in ec_derives). This saves memory and allows * re-use of information cached in RestrictInfos. * * join_relids should always equal bms_union(outer_relids, inner_rel->relids). @@ -1079,7 +1079,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root, * First, scan the EC to identify member values that are computable at the * outer rel, at the inner rel, or at this relation but not in either * input rel. The outer-rel members should already be enforced equal, - * likewise for the inner-rel members. We'll need to create clauses to + * likewise for the inner-rel members. We'll need to create clauses to * enforce that any newly computable members are all equal to each other * as well as to at least one input member, plus enforce at least one * outer-rel member equal to at least one inner-rel member. @@ -1105,7 +1105,7 @@ generate_join_implied_equalities_normal(PlannerInfo *root, } /* - * First, select the joinclause if needed. We can equate any one outer + * First, select the joinclause if needed. We can equate any one outer * member to any one inner member, but we have to find a datatype * combination for which an opfamily member operator exists. If we have * choices, we prefer simple Var members (possibly with RelabelType) since @@ -1323,8 +1323,8 @@ create_join_clause(PlannerInfo *root, /* * Search to see if we already built a RestrictInfo for this pair of - * EquivalenceMembers. We can use either original source clauses or - * previously-derived clauses. The check on opno is probably redundant, + * EquivalenceMembers. 
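As a rough illustration of the member classification that generate_join_implied_equalities_normal performs (described a bit further up), here is a toy version in which relid bitmapsets are reduced to unsigned masks; the real code of course works on EquivalenceMember and Relids nodes:

    #include <stdio.h>

    typedef unsigned Relids;        /* toy stand-in for a relid bitmapset */

    static int is_subset(Relids a, Relids b) { return (a & ~b) == 0; }

    int
    main(void)
    {
        Relids  outer_relids = 0x3; /* rels 1+2 */
        Relids  inner_relids = 0x4; /* rel 3 */
        Relids  member_relids[] = {0x1, 0x4, 0x5};  /* per-member required rels */
        int     i;

        for (i = 0; i < 3; i++)
        {
            if (is_subset(member_relids[i], outer_relids))
                printf("member %d: computable at the outer rel\n", i);
            else if (is_subset(member_relids[i], inner_relids))
                printf("member %d: computable at the inner rel\n", i);
            else if (is_subset(member_relids[i], outer_relids | inner_relids))
                printf("member %d: newly computable at the join\n", i);
        }
        return 0;
    }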
We can use either original source clauses or + * previously-derived clauses. The check on opno is probably redundant, * but be safe ... */ foreach(lc, ec->ec_sources) @@ -1455,7 +1455,7 @@ create_join_clause(PlannerInfo *root, * * Outer join clauses that are marked outerjoin_delayed are special: this * condition means that one or both VARs might go to null due to a lower - * outer join. We can still push a constant through the clause, but only + * outer join. We can still push a constant through the clause, but only * if its operator is strict; and we *have to* throw the clause back into * regular joinclause processing. By keeping the strict join clause, * we ensure that any null-extended rows that are mistakenly generated due @@ -1649,7 +1649,7 @@ reconsider_outer_join_clause(PlannerInfo *root, RestrictInfo *rinfo, /* * Yes it does! Try to generate a clause INNERVAR = CONSTANT for each - * CONSTANT in the EC. Note that we must succeed with at least one + * CONSTANT in the EC. Note that we must succeed with at least one * constant before we can decide to throw away the outer-join clause. */ match = false; @@ -1938,8 +1938,8 @@ add_child_rel_equivalences(PlannerInfo *root, continue; /* - * No point in searching if parent rel not mentioned in eclass; but - * we can't tell that for sure if parent rel is itself a child. + * No point in searching if parent rel not mentioned in eclass; but we + * can't tell that for sure if parent rel is itself a child. */ if (parent_rel->reloptkind == RELOPT_BASEREL && !bms_is_subset(parent_rel->relids, cur_ec->ec_relids)) @@ -2055,7 +2055,7 @@ mutate_eclass_expressions(PlannerInfo *root, * is a redundant list of clauses equating the table/index column to each of * the other-relation values it is known to be equal to. Any one of * these clauses can be used to create a parameterized path, and there - * is no value in using more than one. (But it *is* worthwhile to create + * is no value in using more than one. (But it *is* worthwhile to create * a separate parameterized path for each one, since that leads to different * join orders.) * @@ -2102,12 +2102,12 @@ generate_implied_equalities_for_column(PlannerInfo *root, continue; /* - * Scan members, looking for a match to the target column. Note that + * Scan members, looking for a match to the target column. Note that * child EC members are considered, but only when they belong to the * target relation. (Unlike regular members, the same expression * could be a child member of more than one EC. Therefore, it's * potentially order-dependent which EC a child relation's target - * column gets matched to. This is annoying but it only happens in + * column gets matched to. This is annoying but it only happens in * corner cases, so for now we live with just reporting the first * match. See also get_eclass_for_sort_expr.) */ @@ -2186,7 +2186,7 @@ generate_implied_equalities_for_column(PlannerInfo *root, * a joinclause involving the two given relations. * * This is essentially a very cut-down version of - * generate_join_implied_equalities(). Note it's OK to occasionally say "yes" + * generate_join_implied_equalities(). Note it's OK to occasionally say "yes" * incorrectly. Hence we don't bother with details like whether the lack of a * cross-type operator might prevent the clause from actually being generated. */ @@ -2222,7 +2222,7 @@ have_relevant_eclass_joinclause(PlannerInfo *root, * OK as a possibly-overoptimistic heuristic. 
* * We don't test ec_has_const either, even though a const eclass won't - * generate real join clauses. This is because if we had "WHERE a.x = + * generate real join clauses. This is because if we had "WHERE a.x = * b.y and a.x = 42", it is worth considering a join between a and b, * since the join result is likely to be small even though it'll end * up being an unqualified nestloop. @@ -2279,7 +2279,7 @@ has_relevant_eclass_joinclause(PlannerInfo *root, RelOptInfo *rel1) * against the specified relation. * * This is just a heuristic test and doesn't have to be exact; it's better - * to say "yes" incorrectly than "no". Hence we don't bother with details + * to say "yes" incorrectly than "no". Hence we don't bother with details * like whether the lack of a cross-type operator might prevent the clause * from actually being generated. */ @@ -2300,7 +2300,7 @@ eclass_useful_for_merging(EquivalenceClass *eclass, /* * Note we don't test ec_broken; if we did, we'd need a separate code path - * to look through ec_sources. Checking the members anyway is OK as a + * to look through ec_sources. Checking the members anyway is OK as a * possibly-overoptimistic heuristic. */ diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index a912174fb00..42dcb111aeb 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -222,7 +222,7 @@ static Const *string_to_const(const char *str, Oid datatype); * Note: in cases involving LATERAL references in the relation's tlist, it's * possible that rel->lateral_relids is nonempty. Currently, we include * lateral_relids into the parameterization reported for each path, but don't - * take it into account otherwise. The fact that any such rels *must* be + * take it into account otherwise. The fact that any such rels *must* be * available as parameter sources perhaps should influence our choices of * index quals ... but for now, it doesn't seem worth troubling over. * In particular, comments below about "unparameterized" paths should be read @@ -270,7 +270,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) match_restriction_clauses_to_index(rel, index, &rclauseset); /* - * Build index paths from the restriction clauses. These will be + * Build index paths from the restriction clauses. These will be * non-parameterized paths. Plain paths go directly to add_path(), * bitmap paths are added to bitindexpaths to be handled below. */ @@ -278,10 +278,10 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) &bitindexpaths); /* - * Identify the join clauses that can match the index. For the moment - * we keep them separate from the restriction clauses. Note that this + * Identify the join clauses that can match the index. For the moment + * we keep them separate from the restriction clauses. Note that this * step finds only "loose" join clauses that have not been merged into - * EquivalenceClasses. Also, collect join OR clauses for later. + * EquivalenceClasses. Also, collect join OR clauses for later. */ MemSet(&jclauseset, 0, sizeof(jclauseset)); match_join_clauses_to_index(root, rel, index, @@ -343,9 +343,9 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel) /* * Likewise, if we found anything usable, generate BitmapHeapPaths for the - * most promising combinations of join bitmap index paths. Our strategy + * most promising combinations of join bitmap index paths. 
Our strategy * is to generate one such path for each distinct parameterization seen - * among the available bitmap index paths. This may look pretty + * among the available bitmap index paths. This may look pretty * expensive, but usually there won't be very many distinct * parameterizations. (This logic is quite similar to that in * consider_index_join_clauses, but we're working with whole paths not @@ -461,7 +461,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel, * * For simplicity in selecting relevant clauses, we represent each set of * outer rels as a maximum set of clause_relids --- that is, the indexed - * relation itself is also included in the relids set. considered_relids + * relation itself is also included in the relids set. considered_relids * lists all relids sets we've already tried. */ for (indexcol = 0; indexcol < index->ncolumns; indexcol++) @@ -550,7 +550,7 @@ consider_index_join_outer_rels(PlannerInfo *root, RelOptInfo *rel, /* * If this clause was derived from an equivalence class, the * clause list may contain other clauses derived from the same - * eclass. We should not consider that combining this clause with + * eclass. We should not consider that combining this clause with * one of those clauses generates a usefully different * parameterization; so skip if any clause derived from the same * eclass would already have been included when using oldrelids. @@ -633,9 +633,9 @@ get_join_index_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * Add applicable eclass join clauses. The clauses generated for each + * Add applicable eclass join clauses. The clauses generated for each * column are redundant (cf generate_implied_equalities_for_column), - * so we need at most one. This is the only exception to the general + * so we need at most one. This is the only exception to the general * rule of using all available index clauses. */ foreach(lc, eclauseset->indexclauses[indexcol]) @@ -722,7 +722,7 @@ bms_equal_any(Relids relids, List *relids_list) * bitmap indexpaths are added to *bitindexpaths for later processing. * * This is a fairly simple frontend to build_index_paths(). Its reason for - * existence is mainly to handle ScalarArrayOpExpr quals properly. If the + * existence is mainly to handle ScalarArrayOpExpr quals properly. If the * index AM supports them natively, we should just include them in simple * index paths. If not, we should exclude them while building simple index * paths, and then make a separate attempt to include them in bitmap paths. @@ -736,7 +736,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, ListCell *lc; /* - * Build simple index paths using the clauses. Allow ScalarArrayOpExpr + * Build simple index paths using the clauses. Allow ScalarArrayOpExpr * clauses only if the index AM supports them natively. */ indexpaths = build_index_paths(root, rel, @@ -748,7 +748,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, * Submit all the ones that can form plain IndexScan plans to add_path. (A * plain IndexPath can represent either a plain IndexScan or an * IndexOnlyScan, but for our purposes here that distinction does not - * matter. However, some of the indexes might support only bitmap scans, + * matter. However, some of the indexes might support only bitmap scans, * and those we mustn't submit to add_path here.) * * Also, pick out the ones that are usable as bitmap scans. 
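Two of the ideas named above, bms_equal_any() and the one-bitmap-heap-path-per-distinct-parameterization strategy, can be sketched together. Everything here is a reduced stand-in (masks for bitmapsets, an array for the considered list), not the actual implementation:

    #include <stdio.h>
    #include <stdbool.h>

    typedef unsigned Relids;

    /* true if "relids" equals any member of the list */
    static bool
    bms_equal_any(Relids relids, const Relids *list, int n)
    {
        int i;

        for (i = 0; i < n; i++)
            if (list[i] == relids)
                return true;
        return false;
    }

    int
    main(void)
    {
        Relids  path_param[] = {0x2, 0x2, 0x6, 0x2};    /* per-path outer rels */
        Relids  considered_relids[8];
        int     nseen = 0, i;

        for (i = 0; i < 4; i++)
            if (!bms_equal_any(path_param[i], considered_relids, nseen))
            {
                considered_relids[nseen++] = path_param[i];
                printf("one BitmapHeapPath for parameterization 0x%x\n",
                       path_param[i]);
            }
        return 0;
    }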
For that, we @@ -792,7 +792,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel, * We return a list of paths because (1) this routine checks some cases * that should cause us to not generate any IndexPath, and (2) in some * cases we want to consider both a forward and a backward scan, so as - * to obtain both sort orders. Note that the paths are just returned + * to obtain both sort orders. Note that the paths are just returned * to the caller and not immediately fed to add_path(). * * At top level, useful_predicate should be exactly the index's predOK flag @@ -975,7 +975,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * 3. Check if an index-only scan is possible. If we're not building + * 3. Check if an index-only scan is possible. If we're not building * plain indexscans, this isn't relevant since bitmap scans don't support * index data retrieval anyway. */ @@ -1080,13 +1080,13 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel, continue; /* - * Ignore partial indexes that do not match the query. If a partial + * Ignore partial indexes that do not match the query. If a partial * index is marked predOK then we know it's OK. Otherwise, we have to * test whether the added clauses are sufficient to imply the * predicate. If so, we can use the index in the current context. * * We set useful_predicate to true iff the predicate was proven using - * the current set of clauses. This is needed to prevent matching a + * the current set of clauses. This is needed to prevent matching a * predOK index to an arm of an OR, which would be a legal but * pointlessly inefficient plan. (A better plan will be generated by * just scanning the predOK index alone, no OR.) @@ -1256,7 +1256,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel, * Given a nonempty list of bitmap paths, AND them into one path. * * This is a nontrivial decision since we can legally use any subset of the - * given path set. We want to choose a good tradeoff between selectivity + * given path set. We want to choose a good tradeoff between selectivity * and cost of computing the bitmap. * * The result is either a single one of the inputs, or a BitmapAndPath @@ -1283,12 +1283,12 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * In theory we should consider every nonempty subset of the given paths. * In practice that seems like overkill, given the crude nature of the * estimates, not to mention the possible effects of higher-level AND and - * OR clauses. Moreover, it's completely impractical if there are a large + * OR clauses. Moreover, it's completely impractical if there are a large * number of paths, since the work would grow as O(2^N). * * As a heuristic, we first check for paths using exactly the same sets of * WHERE clauses + index predicate conditions, and reject all but the - * cheapest-to-scan in any such group. This primarily gets rid of indexes + * cheapest-to-scan in any such group. This primarily gets rid of indexes * that include the interesting columns but also irrelevant columns. (In * situations where the DBA has gone overboard on creating variant * indexes, this can make for a very large reduction in the number of @@ -1308,7 +1308,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * costsize.c and clausesel.c aren't very smart about redundant clauses. * They will usually double-count the redundant clauses, producing a * too-small selectivity that makes a redundant AND step look like it - * reduces the total cost. 
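The clause-set grouping heuristic described above, keeping only the cheapest-to-scan path among those using exactly the same clauses, looks roughly like this in miniature (clause sets are bitmasks and cost ties are ignored; the real choose_bitmap_and is considerably more careful):

    #include <stdio.h>

    typedef struct { unsigned clauseset; double cost; } BPath;

    int
    main(void)
    {
        /* toy inputs: each path's WHERE-clause+predicate set, plus its cost */
        BPath   paths[] = {{0x3, 10.0}, {0x3, 7.5}, {0x5, 4.0}, {0x3, 9.0}};
        int     n = 4, i, j;

        for (i = 0; i < n; i++)
        {
            int     cheapest = 1;

            for (j = 0; j < n; j++)
                if (paths[j].clauseset == paths[i].clauseset &&
                    paths[j].cost < paths[i].cost)
                    cheapest = 0;   /* a cheaper path uses the same clauses */
            if (cheapest)
                printf("keep path %d (set 0x%x, cost %.1f)\n",
                       i, paths[i].clauseset, paths[i].cost);
        }
        return 0;
    }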
Perhaps someday that code will be smarter and + * reduces the total cost. Perhaps someday that code will be smarter and * we can remove this limitation. (But note that this also defends * against flat-out duplicate input paths, which can happen because * match_join_clauses_to_index will find the same OR join clauses that @@ -1316,7 +1316,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) * of.) * * For the same reason, we reject AND combinations in which an index - * predicate clause duplicates another clause. Here we find it necessary + * predicate clause duplicates another clause. Here we find it necessary * to be even stricter: we'll reject a partial index if any of its * predicate clauses are implied by the set of WHERE clauses and predicate * clauses used so far. This covers cases such as a condition "x = 42" @@ -1379,7 +1379,7 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths) /* * For each surviving index, consider it as an "AND group leader", and see * whether adding on any of the later indexes results in an AND path with - * cheaper total cost than before. Then take the cheapest AND group. + * cheaper total cost than before. Then take the cheapest AND group. */ for (i = 0; i < npaths; i++) { @@ -1711,7 +1711,7 @@ find_indexpath_quals(Path *bitmapqual, List **quals, List **preds) /* * find_list_position * Return the given node's position (counting from 0) in the given - * list of nodes. If it's not equal() to any existing list member, + * list of nodes. If it's not equal() to any existing list member, * add it at the end, and return that position. */ static int @@ -1817,7 +1817,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index) * Since we produce parameterized paths before we've begun to generate join * relations, it's impossible to predict exactly how many times a parameterized * path will be iterated; we don't know the size of the relation that will be - * on the outside of the nestloop. However, we should try to account for + * on the outside of the nestloop. However, we should try to account for * multiple iterations somehow in costing the path. The heuristic embodied * here is to use the rowcount of the smallest other base relation needed in * the join clauses used by the path. (We could alternatively consider the @@ -2032,7 +2032,7 @@ match_clause_to_index(IndexOptInfo *index, * doesn't involve a volatile function or a Var of the index's relation. * In particular, Vars belonging to other relations of the query are * accepted here, since a clause of that form can be used in a - * parameterized indexscan. It's the responsibility of higher code levels + * parameterized indexscan. It's the responsibility of higher code levels * to manage restriction and join clauses appropriately. * * Note: we do need to check for Vars of the index's relation on the @@ -2056,7 +2056,7 @@ match_clause_to_index(IndexOptInfo *index, * It is also possible to match RowCompareExpr clauses to indexes (but * currently, only btree indexes handle this). In this routine we will * report a match if the first column of the row comparison matches the - * target index column. This is sufficient to guarantee that some index + * target index column. This is sufficient to guarantee that some index * condition can be constructed from the RowCompareExpr --- whether the * remaining columns match the index too is considered in * adjust_rowcompare_for_index(). 
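find_list_position(), mentioned above, has a small enough contract to show in full. This standalone sketch uses strcmp() where the planner uses equal(), and a fixed-size array with no overflow check:

    #include <stdio.h>
    #include <string.h>

    static const char *nodes[16];
    static int      nnodes = 0;

    /* return the node's 0-based position, appending it if not present */
    static int
    find_list_position(const char *node)
    {
        int i;

        for (i = 0; i < nnodes; i++)
            if (strcmp(nodes[i], node) == 0)
                return i;
        nodes[nnodes] = node;
        return nnodes++;
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
               find_list_position("qualA"),
               find_list_position("qualB"),
               find_list_position("qualA"));    /* prints 0 1 0 */
        return 0;
    }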
@@ -2094,7 +2094,7 @@ match_clause_to_indexcol(IndexOptInfo *index, bool plain_op; /* - * Never match pseudoconstants to indexes. (Normally this could not + * Never match pseudoconstants to indexes. (Normally this could not * happen anyway, since a pseudoconstant clause couldn't contain a Var, * but what if someone builds an expression index on a constant? It's not * totally unreasonable to do so with a partial index, either.) @@ -2378,7 +2378,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys, * We allow any column of the index to match each pathkey; they * don't have to match left-to-right as you might expect. This is * correct for GiST, which is the sole existing AM supporting - * amcanorderbyop. We might need different logic in future for + * amcanorderbyop. We might need different logic in future for * other implementations. */ for (indexcol = 0; indexcol < index->ncolumns; indexcol++) @@ -2429,7 +2429,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys, * Note that we currently do not consider the collation of the ordering * operator's result. In practical cases the result type will be numeric * and thus have no collation, and it's not very clear what to match to - * if it did have a collation. The index's collation should match the + * if it did have a collation. The index's collation should match the * ordering operator's input collation, not its result. * * If successful, return 'clause' as-is if the indexkey is on the left, @@ -2679,7 +2679,7 @@ ec_member_matches_indexcol(PlannerInfo *root, RelOptInfo *rel, * if it is true. * 2. A list of expressions in this relation, and a corresponding list of * equality operators. The caller must have already checked that the operators - * represent equality. (Note: the operators could be cross-type; the + * represent equality. (Note: the operators could be cross-type; the * expressions should correspond to their RHS inputs.) * * The caller need only supply equality conditions arising from joins; @@ -2868,7 +2868,7 @@ match_index_to_operand(Node *operand, int indkey; /* - * Ignore any RelabelType node above the operand. This is needed to be + * Ignore any RelabelType node above the operand. This is needed to be * able to apply indexscanning in binary-compatible-operator cases. Note: * we can assume there is at most one RelabelType node; * eval_const_expressions() will have simplified if more than one. @@ -2935,10 +2935,10 @@ match_index_to_operand(Node *operand, * indexscan machinery. The key idea is that these operators allow us * to derive approximate indexscan qual clauses, such that any tuples * that pass the operator clause itself must also satisfy the simpler - * indexscan condition(s). Then we can use the indexscan machinery + * indexscan condition(s). Then we can use the indexscan machinery * to avoid scanning as much of the table as we'd otherwise have to, * while applying the original operator as a qpqual condition to ensure - * we deliver only the tuples we want. (In essence, we're using a regular + * we deliver only the tuples we want. (In essence, we're using a regular * index as if it were a lossy index.) * * An example of what we're doing is @@ -2952,7 +2952,7 @@ match_index_to_operand(Node *operand, * * Another thing that we do with this machinery is to provide special * smarts for "boolean" indexes (that is, indexes on boolean columns - * that support boolean equality). We can transform a plain reference + * that support boolean equality). 
We can transform a plain reference * to the indexkey into "indexkey = true", or "NOT indexkey" into * "indexkey = false", so as to make the expression indexable using the * regular index operators. (As of Postgres 8.1, we must do this here @@ -3374,7 +3374,7 @@ expand_indexqual_opclause(RestrictInfo *rinfo, Oid opfamily, Oid idxcollation) /* * LIKE and regex operators are not members of any btree index opfamily, * but they can be members of opfamilies for more exotic index types such - * as GIN. Therefore, we should only do expansion if the operator is + * as GIN. Therefore, we should only do expansion if the operator is * actually not in the opfamily. But checking that requires a syscache * lookup, so it's best to first see if the operator is one we are * interested in. @@ -3492,7 +3492,7 @@ expand_indexqual_rowcompare(RestrictInfo *rinfo, * column matches) or a simple OpExpr (if the first-column match is all * there is). In these cases the modified clause is always "<=" or ">=" * even when the original was "<" or ">" --- this is necessary to match all - * the rows that could match the original. (We are essentially building a + * the rows that could match the original. (We are essentially building a * lossy version of the row comparison when we do this.) * * *indexcolnos receives an integer list of the index column numbers (zero diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index a9961161dbc..be54f3de0ba 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -107,7 +107,7 @@ add_paths_to_joinrel(PlannerInfo *root, /* * If it's SEMI or ANTI join, compute correction factors for cost - * estimation. These will be the same for all paths. + * estimation. These will be the same for all paths. */ if (jointype == JOIN_SEMI || jointype == JOIN_ANTI) compute_semi_anti_join_factors(root, outerrel, innerrel, @@ -122,7 +122,7 @@ add_paths_to_joinrel(PlannerInfo *root, * to the parameter source rel instead of joining to the other input rel. * This restriction reduces the number of parameterized paths we have to * deal with at higher join levels, without compromising the quality of - * the resulting plan. We express the restriction as a Relids set that + * the resulting plan. We express the restriction as a Relids set that * must overlap the parameterization of any proposed join path. */ foreach(lc, root->join_info_list) @@ -155,7 +155,7 @@ add_paths_to_joinrel(PlannerInfo *root, * However, when a LATERAL subquery is involved, we have to be a bit * laxer, because there will simply not be any paths for the joinrel that * aren't parameterized by whatever the subquery is parameterized by, - * unless its parameterization is resolved within the joinrel. Hence, add + * unless its parameterization is resolved within the joinrel. Hence, add * to param_source_rels anything that is laterally referenced in either * input and is not in the join already. */ @@ -208,7 +208,7 @@ add_paths_to_joinrel(PlannerInfo *root, /* * 1. Consider mergejoin paths where both relations must be explicitly - * sorted. Skip this if we can't mergejoin. + * sorted. Skip this if we can't mergejoin. */ if (mergejoin_allowed) sort_inner_and_outer(root, joinrel, outerrel, innerrel, @@ -233,7 +233,7 @@ add_paths_to_joinrel(PlannerInfo *root, /* * 3. Consider paths where the inner relation need not be explicitly - * sorted. This includes mergejoins only (nestloops were already built in + * sorted. 
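The boolean-index rewrite described at the start of this hunk can be illustrated at the string level; the real transformation builds "indexkey = true/false" OpExpr nodes rather than text, so treat this purely as a demonstration of the idea:

    #include <stdio.h>
    #include <string.h>

    static void
    rewrite(const char *clause)
    {
        if (strncmp(clause, "NOT ", 4) == 0)
            printf("%s  =>  %s = false\n", clause, clause + 4);
        else
            printf("%s  =>  %s = true\n", clause, clause);
    }

    int
    main(void)
    {
        rewrite("indexkey");        /* plain reference to the index column */
        rewrite("NOT indexkey");    /* negated reference */
        return 0;
    }

Either way the result is an equality on the index column, which the regular index operators can handle.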
This includes mergejoins only (nestloops were already built in * match_unsorted_outer). * * Diked out as redundant 2/13/2000 -- tgl. There isn't any really @@ -507,7 +507,7 @@ try_hashjoin_path(PlannerInfo *root, * We already know that the clause is a binary opclause referencing only the * rels in the current join. The point here is to check whether it has the * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr", - * rather than mixing outer and inner vars on either side. If it matches, + * rather than mixing outer and inner vars on either side. If it matches, * we set the transient flag outer_is_left to identify which side is which. */ static inline bool @@ -572,7 +572,7 @@ sort_inner_and_outer(PlannerInfo *root, * sort. * * This function intentionally does not consider parameterized input - * paths, except when the cheapest-total is parameterized. If we did so, + * paths, except when the cheapest-total is parameterized. If we did so, * we'd have a combinatorial explosion of mergejoin paths of dubious * value. This interacts with decisions elsewhere that also discriminate * against mergejoins with parameterized inputs; see comments in @@ -619,7 +619,7 @@ sort_inner_and_outer(PlannerInfo *root, * * Actually, it's not quite true that every mergeclause ordering will * generate a different path order, because some of the clauses may be - * partially redundant (refer to the same EquivalenceClasses). Therefore, + * partially redundant (refer to the same EquivalenceClasses). Therefore, * what we do is convert the mergeclause list to a list of canonical * pathkeys, and then consider different orderings of the pathkeys. * @@ -713,7 +713,7 @@ sort_inner_and_outer(PlannerInfo *root, * cheapest-total inner-indexscan path (if any), and one on the * cheapest-startup inner-indexscan path (if different). * - * We also consider mergejoins if mergejoin clauses are available. We have + * We also consider mergejoins if mergejoin clauses are available. We have * two ways to generate the inner path for a mergejoin: sort the cheapest * inner path, or use an inner path that is already suitably ordered for the * merge. If we have several mergeclauses, it could be that there is no inner @@ -845,8 +845,8 @@ match_unsorted_outer(PlannerInfo *root, /* * If we need to unique-ify the outer path, it's pointless to consider - * any but the cheapest outer. (XXX we don't consider parameterized - * outers, nor inners, for unique-ified cases. Should we?) + * any but the cheapest outer. (XXX we don't consider parameterized + * outers, nor inners, for unique-ified cases. Should we?) */ if (save_jointype == JOIN_UNIQUE_OUTER) { @@ -887,7 +887,7 @@ match_unsorted_outer(PlannerInfo *root, { /* * Consider nestloop joins using this outer path and various - * available paths for the inner relation. We consider the + * available paths for the inner relation. We consider the * cheapest-total paths for each available parameterization of the * inner relation, including the unparameterized case. */ @@ -1042,7 +1042,7 @@ match_unsorted_outer(PlannerInfo *root, /* * Look for an inner path ordered well enough for the first - * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified + * 'sortkeycnt' innersortkeys. NB: trialsortkeys list is modified * destructively, which is why we made a copy... 
*/ trialsortkeys = list_truncate(trialsortkeys, sortkeycnt); diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index 05eaef525d5..610892890f5 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -213,7 +213,7 @@ join_search_one_level(PlannerInfo *root, int level) /*---------- * When special joins are involved, there may be no legal way - * to make an N-way join for some values of N. For example consider + * to make an N-way join for some values of N. For example consider * * SELECT ... FROM t1 WHERE * x IN (SELECT ... FROM t2,t3 WHERE ...) AND @@ -337,7 +337,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, ListCell *l; /* - * Ensure output params are set on failure return. This is just to + * Ensure output params are set on failure return. This is just to * suppress uninitialized-variable warnings from overly anal compilers. */ *sjinfo_p = NULL; @@ -345,7 +345,7 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, /* * If we have any special joins, the proposed join might be illegal; and - * in any case we have to determine its join type. Scan the join info + * in any case we have to determine its join type. Scan the join info * list for conflicts. */ match_sjinfo = NULL; @@ -609,7 +609,7 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2) /* * If it's a plain inner join, then we won't have found anything in - * join_info_list. Make up a SpecialJoinInfo so that selectivity + * join_info_list. Make up a SpecialJoinInfo so that selectivity * estimation functions will know what's being joined. */ if (sjinfo == NULL) @@ -916,7 +916,7 @@ have_join_order_restriction(PlannerInfo *root, * * Essentially, this tests whether have_join_order_restriction() could * succeed with this rel and some other one. It's OK if we sometimes - * say "true" incorrectly. (Therefore, we don't bother with the relatively + * say "true" incorrectly. (Therefore, we don't bother with the relatively * expensive has_legal_joinclause test.) */ static bool @@ -1027,7 +1027,7 @@ is_dummy_rel(RelOptInfo *rel) * dummy. * * Also, when called during GEQO join planning, we are in a short-lived - * memory context. We must make sure that the dummy path attached to a + * memory context. We must make sure that the dummy path attached to a * baserel survives the GEQO cycle, else the baserel is trashed for future * GEQO cycles. On the other hand, when we are marking a joinrel during GEQO, * we don't want the dummy path to clutter the main planning context. Upshot diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 9179c61cbdb..5d953dfb45a 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -46,7 +46,7 @@ static bool right_merge_direction(PlannerInfo *root, PathKey *pathkey); * entry if there's not one already. * * Note that this function must not be used until after we have completed - * merging EquivalenceClasses. (We don't try to enforce that here; instead, + * merging EquivalenceClasses. (We don't try to enforce that here; instead, * equivclass.c will complain if a merge occurs after root->canon_pathkeys * has become nonempty.) */ @@ -120,7 +120,7 @@ make_canonical_pathkey(PlannerInfo *root, * * Both the given pathkey and the list members must be canonical for this * to work properly, but that's okay since we no longer ever construct any - * non-canonical pathkeys. 
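The make_join_rel fallback above, fabricating a SpecialJoinInfo for a plain inner join, amounts to filling in defaults when the join_info_list search comes up empty. The struct below is a cut-down, assumed stand-in for the real SpecialJoinInfo:

    #include <stdio.h>

    typedef unsigned Relids;
    typedef enum { JOIN_INNER, JOIN_LEFT, JOIN_FULL } JoinType;
    typedef struct { JoinType jointype; Relids min_lefthand, min_righthand; } SJInfo;

    int
    main(void)
    {
        SJInfo *sjinfo = NULL;      /* nothing found in join_info_list */
        SJInfo  fake;

        if (sjinfo == NULL)
        {
            fake.jointype = JOIN_INNER;     /* it's a plain inner join */
            fake.min_lefthand = 0x3;        /* rel1's relids (assumed) */
            fake.min_righthand = 0x4;       /* rel2's relids (assumed) */
            sjinfo = &fake;
        }
        printf("jointype=%d left=0x%x right=0x%x\n",
               sjinfo->jointype, sjinfo->min_lefthand, sjinfo->min_righthand);
        return 0;
    }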
(Note: the notion of a pathkey *list* being + * non-canonical pathkeys. (Note: the notion of a pathkey *list* being * canonical includes the additional requirement of no redundant entries, * which is exactly what we are checking for here.) * @@ -162,7 +162,7 @@ pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys) * * If rel is not NULL, it identifies a specific relation we're considering * a path for, and indicates that child EC members for that relation can be - * considered. Otherwise child members are ignored. (See the comments for + * considered. Otherwise child members are ignored. (See the comments for * get_eclass_for_sort_expr.) * * create_it is TRUE if we should create any missing EquivalenceClass @@ -192,7 +192,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root, /* * EquivalenceClasses need to contain opfamily lists based on the family * membership of mergejoinable equality operators, which could belong to - * more than one opfamily. So we have to look up the opfamily's equality + * more than one opfamily. So we have to look up the opfamily's equality * operator and get its membership. */ equality_op = get_opfamily_member(opfamily, @@ -355,7 +355,7 @@ get_cheapest_path_for_pathkeys(List *paths, List *pathkeys, /* * Since cost comparison is a lot cheaper than pathkey comparison, do - * that first. (XXX is that still true?) + * that first. (XXX is that still true?) */ if (matched_path != NULL && compare_path_costs(matched_path, path, cost_criterion) <= 0) @@ -397,7 +397,7 @@ get_cheapest_fractional_path_for_pathkeys(List *paths, /* * Since cost comparison is a lot cheaper than pathkey comparison, do - * that first. (XXX is that still true?) + * that first. (XXX is that still true?) */ if (matched_path != NULL && compare_fractional_path_costs(matched_path, path, fraction) <= 0) @@ -555,7 +555,7 @@ build_expression_pathkey(PlannerInfo *root, /* * convert_subquery_pathkeys * Build a pathkeys list that describes the ordering of a subquery's - * result, in the terms of the outer query. This is essentially a + * result, in the terms of the outer query. This is essentially a * task of conversion. * * 'rel': outer query's RelOptInfo for the subquery relation. @@ -608,7 +608,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, /* * Note: it might look funny to be setting sortref = 0 for a - * reference to a volatile sub_eclass. However, the + * reference to a volatile sub_eclass. However, the * expression is *not* volatile in the outer query: it's just * a Var referencing whatever the subquery emitted. (IOW, the * outer query isn't going to re-execute the volatile @@ -645,7 +645,7 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel, /* * Otherwise, the sub_pathkey's EquivalenceClass could contain * multiple elements (representing knowledge that multiple items - * are effectively equal). Each element might match none, one, or + * are effectively equal). Each element might match none, one, or * more of the output columns that are visible to the outer query. * This means we may have multiple possible representations of the * sub_pathkey in the context of the outer query. Ideally we @@ -873,7 +873,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root, * right sides. * * Note this is called before EC merging is complete, so the links won't - * necessarily point to canonical ECs. Before they are actually used for + * necessarily point to canonical ECs. 
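The "cost check first, pathkeys second" ordering noted twice above is the whole trick in get_cheapest_path_for_pathkeys. A toy rendition, with the expensive pathkey test reduced to an integer comparison:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct { double total_cost; int sort_ec; } Path;

    static bool
    pathkeys_match(const Path *p, int wanted_ec) { return p->sort_ec == wanted_ec; }

    int
    main(void)
    {
        Path    paths[] = {{100.0, 1}, {80.0, 2}, {90.0, 1}};
        Path   *matched = NULL;
        int     i;

        for (i = 0; i < 3; i++)
        {
            /* cheap test first: skip anything not cheaper than the best so far */
            if (matched && matched->total_cost <= paths[i].total_cost)
                continue;
            if (pathkeys_match(&paths[i], 1))   /* expensive test second */
                matched = &paths[i];
        }
        if (matched)
            printf("cheapest matching path costs %.1f\n", matched->total_cost);
        return 0;
    }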
Before they are actually used for * anything, update_mergeclause_eclasses must be called to ensure that * they've been updated to point to canonical ECs. */ @@ -1007,7 +1007,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, * It's possible that multiple matching clauses might have different * ECs on the other side, in which case the order we put them into our * result makes a difference in the pathkeys required for the other - * input path. However this routine hasn't got any info about which + * input path. However this routine hasn't got any info about which * order would be best, so we don't worry about that. * * It's also possible that the selected mergejoin clauses produce @@ -1038,7 +1038,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, /* * If we didn't find a mergeclause, we're done --- any additional - * sort-key positions in the pathkeys are useless. (But we can still + * sort-key positions in the pathkeys are useless. (But we can still * mergejoin if we found at least one mergeclause.) */ if (matched_restrictinfos == NIL) @@ -1070,7 +1070,7 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root, * Returns a pathkeys list that can be applied to the outer relation. * * Since we assume here that a sort is required, there is no particular use - * in matching any available ordering of the outerrel. (joinpath.c has an + * in matching any available ordering of the outerrel. (joinpath.c has an * entirely separate code path for considering sort-free mergejoins.) Rather, * it's interesting to try to match the requested query_pathkeys so that a * second output sort may be avoided; and failing that, we try to list "more @@ -1401,7 +1401,7 @@ pathkeys_useful_for_merging(PlannerInfo *root, RelOptInfo *rel, List *pathkeys) /* * If we didn't find a mergeclause, we're done --- any additional - * sort-key positions in the pathkeys are useless. (But we can still + * sort-key positions in the pathkeys are useless. (But we can still * mergejoin if we found at least one mergeclause.) */ if (matched) @@ -1431,7 +1431,7 @@ right_merge_direction(PlannerInfo *root, PathKey *pathkey) pathkey->pk_opfamily == query_pathkey->pk_opfamily) { /* - * Found a matching query sort column. Prefer this pathkey's + * Found a matching query sort column. Prefer this pathkey's * direction iff it matches. Note that we ignore pk_nulls_first, * which means that a sort might be needed anyway ... but we still * want to prefer only one of the two possible directions, and we @@ -1507,13 +1507,13 @@ truncate_useless_pathkeys(PlannerInfo *root, * useful according to truncate_useless_pathkeys(). * * This is a cheap test that lets us skip building pathkeys at all in very - * simple queries. It's OK to err in the direction of returning "true" when + * simple queries. It's OK to err in the direction of returning "true" when * there really aren't any usable pathkeys, but erring in the other direction * is bad --- so keep this in sync with the routines above! * * We could make the test more complex, for example checking to see if any of * the joinclauses are really mergejoinable, but that likely wouldn't win - * often enough to repay the extra cycles. Queries with neither a join nor + * often enough to repay the extra cycles. Queries with neither a join nor * a sort are reasonably common, though, so this much work seems worthwhile. 
*/ bool diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c index a751a7d36cd..a31d67493bb 100644 --- a/src/backend/optimizer/path/tidpath.c +++ b/src/backend/optimizer/path/tidpath.c @@ -19,7 +19,7 @@ * representation all the way through to execution. * * There is currently no special support for joins involving CTID; in - * particular nothing corresponding to best_inner_indexscan(). Since it's + * particular nothing corresponding to best_inner_indexscan(). Since it's * not very useful to store TIDs of one table in another table, there * doesn't seem to be enough use-case to justify adding a lot of code * for that. @@ -57,7 +57,7 @@ static List *TidQualFromRestrictinfo(List *restrictinfo, int varno); * or * pseudoconstant = CTID * - * We check that the CTID Var belongs to relation "varno". That is probably + * We check that the CTID Var belongs to relation "varno". That is probably * redundant considering this is only applied to restriction clauses, but * let's be safe. */ diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c index 523a1e75f89..129fc3dfae6 100644 --- a/src/backend/optimizer/plan/analyzejoins.c +++ b/src/backend/optimizer/plan/analyzejoins.c @@ -40,7 +40,7 @@ static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved); * Check for relations that don't actually need to be joined at all, * and remove them from the query. * - * We are passed the current joinlist and return the updated list. Other + * We are passed the current joinlist and return the updated list. Other * data structures that have to be updated are accessible via "root". */ List * @@ -90,7 +90,7 @@ restart: * Restart the scan. This is necessary to ensure we find all * removable joins independently of ordering of the join_info_list * (note that removal of attr_needed bits may make a join appear - * removable that did not before). Also, since we just deleted the + * removable that did not before). Also, since we just deleted the * current list cell, we'd have to have some kluge to continue the * list scan anyway. */ @@ -107,7 +107,7 @@ restart: * We already know that the clause is a binary opclause referencing only the * rels in the current join. The point here is to check whether it has the * form "outerrel_expr op innerrel_expr" or "innerrel_expr op outerrel_expr", - * rather than mixing outer and inner vars on either side. If it matches, + * rather than mixing outer and inner vars on either side. If it matches, * we set the transient flag outer_is_left to identify which side is which. */ static inline bool @@ -154,7 +154,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo) /* * Currently, we only know how to remove left joins to a baserel with - * unique indexes. We can check most of these criteria pretty trivially + * unique indexes. We can check most of these criteria pretty trivially * to avoid doing useless extra work. But checking whether any of the * indexes are unique would require iterating over the indexlist, so for * now we just make sure there are indexes of some sort or other. If none @@ -203,7 +203,7 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo) * actually references some inner-rel attributes; but the correct check * for that is relatively expensive, so we first check against ph_eval_at, * which must mention the inner rel if the PHV uses any inner-rel attrs as - * non-lateral references. Note that if the PHV's syntactic scope is just + * non-lateral references. 
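The TID-qual shape accepted by tidpath.c above, "CTID = pseudoconstant" or "pseudoconstant = CTID" with the Var belonging to the right varno, can be sketched with an invented operand representation:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct { bool is_ctid_var; int varno; } Operand;

    static bool
    is_tid_qual(Operand l, Operand r, int rel_varno)
    {
        /* CTID = pseudoconstant */
        if (l.is_ctid_var && l.varno == rel_varno && !r.is_ctid_var)
            return true;
        /* pseudoconstant = CTID */
        if (r.is_ctid_var && r.varno == rel_varno && !l.is_ctid_var)
            return true;
        return false;
    }

    int
    main(void)
    {
        Operand ctid = {true, 1};
        Operand konst = {false, 0};

        printf("%d %d\n",
               is_tid_qual(ctid, konst, 1),     /* 1: CTID of rel 1 = const */
               is_tid_qual(konst, konst, 1));   /* 0: no CTID Var at all */
        return 0;
    }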
Note that if the PHV's syntactic scope is just * the inner rel, we can't drop the rel even if the PHV is variable-free. */ foreach(l, root->placeholder_list) diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 784805fbf43..4b641a2ca1f 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -171,7 +171,7 @@ static Material *make_material(Plan *lefttree); /* * create_plan * Creates the access plan for a query by recursively processing the - * desired tree of pathnodes, starting at the node 'best_path'. For + * desired tree of pathnodes, starting at the node 'best_path'. For * every pathnode found, we create a corresponding plan node containing * appropriate id, target list, and qualification information. * @@ -286,7 +286,7 @@ create_scan_plan(PlannerInfo *root, Path *best_path) /* * For table scans, rather than using the relation targetlist (which is * only those Vars actually needed by the query), we prefer to generate a - * tlist containing all Vars in order. This will allow the executor to + * tlist containing all Vars in order. This will allow the executor to * optimize away projection of the table tuples, if possible. (Note that * planner.c may replace the tlist we generate here, forcing projection to * occur.) @@ -523,7 +523,7 @@ use_physical_tlist(PlannerInfo *root, RelOptInfo *rel) * * If the plan node immediately above a scan would prefer to get only * needed Vars and not a physical tlist, it must call this routine to - * undo the decision made by use_physical_tlist(). Currently, Hash, Sort, + * undo the decision made by use_physical_tlist(). Currently, Hash, Sort, * and Material nodes want this, so they don't have to store useless columns. */ static void @@ -654,7 +654,7 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path) /* * * Expensive function pullups may have pulled local predicates * into - * this path node. Put them in the qpqual of the plan node. * JMH, + * this path node. Put them in the qpqual of the plan node. * JMH, * 6/15/92 */ if (get_loc_restrictinfo(best_path) != NIL) @@ -1170,10 +1170,10 @@ create_indexscan_plan(PlannerInfo *root, /* * The qpqual list must contain all restrictions not automatically handled * by the index, other than pseudoconstant clauses which will be handled - * by a separate gating plan node. All the predicates in the indexquals + * by a separate gating plan node. All the predicates in the indexquals * will be checked (either by the index itself, or by nodeIndexscan.c), * but if there are any "special" operators involved then they must be - * included in qpqual. The upshot is that qpqual must contain + * included in qpqual. The upshot is that qpqual must contain * scan_clauses minus whatever appears in indexquals. * * In normal cases simple pointer equality checks will be enough to spot @@ -1310,15 +1310,15 @@ create_bitmap_scan_plan(PlannerInfo *root, /* * The qpqual list must contain all restrictions not automatically handled * by the index, other than pseudoconstant clauses which will be handled - * by a separate gating plan node. All the predicates in the indexquals + * by a separate gating plan node. All the predicates in the indexquals * will be checked (either by the index itself, or by * nodeBitmapHeapscan.c), but if there are any "special" operators - * involved then they must be added to qpqual. The upshot is that qpqual + * involved then they must be added to qpqual. 
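The qpqual rule stated for create_indexscan_plan above ("qpqual must contain scan_clauses minus whatever appears in indexquals") is essentially a set difference. A toy version with clauses as strings and the pointer-equality check replaced by strcmp():

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        const char *scan_clauses[] = {"x > 10", "y = 'foo'", "z IS NOT NULL"};
        const char *indexquals[] = {"x > 10"};
        int         i, j;

        for (i = 0; i < 3; i++)
        {
            for (j = 0; j < 1; j++)
                if (strcmp(scan_clauses[i], indexquals[j]) == 0)
                    break;
            if (j == 1)             /* not handled by the index => qpqual */
                printf("qpqual: %s\n", scan_clauses[i]);
        }
        return 0;
    }

Each restriction thus gets checked exactly once, either by the index machinery or by the qpqual filter, never both.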
The upshot is that qpqual * must contain scan_clauses minus whatever appears in indexquals. * * This loop is similar to the comparable code in create_indexscan_plan(), * but with some differences because it has to compare the scan clauses to - * stripped (no RestrictInfos) indexquals. See comments there for more + * stripped (no RestrictInfos) indexquals. See comments there for more * info. * * In normal cases simple equal() checks will be enough to spot duplicate @@ -1363,7 +1363,7 @@ create_bitmap_scan_plan(PlannerInfo *root, /* * When dealing with special operators, we will at this point have - * duplicate clauses in qpqual and bitmapqualorig. We may as well drop + * duplicate clauses in qpqual and bitmapqualorig. We may as well drop * 'em from bitmapqualorig, since there's no point in making the tests * twice. */ @@ -1475,7 +1475,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * Here, we only detect qual-free subplans. A qual-free subplan would * cause us to generate "... OR true ..." which we may as well reduce - * to just "true". We do not try to eliminate redundant subclauses + * to just "true". We do not try to eliminate redundant subclauses * because (a) it's not as likely as in the AND case, and (b) we might * well be working with hundreds or even thousands of OR conditions, * perhaps from a long IN list. The performance of list_append_unique @@ -1571,7 +1571,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual, /* * We know that the index predicate must have been implied by the * query condition as a whole, but it may or may not be implied by - * the conditions that got pushed into the bitmapqual. Avoid + * the conditions that got pushed into the bitmapqual. Avoid * generating redundant conditions. */ if (!predicate_implied_by(list_make1(pred), ipath->indexclauses)) @@ -1954,14 +1954,14 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path, Assert(rte->rtekind == RTE_RELATION); /* - * Sort clauses into best execution order. We do this first since the FDW + * Sort clauses into best execution order. We do this first since the FDW * might have more info than we do and wish to adjust the ordering. */ scan_clauses = order_qual_clauses(root, scan_clauses); /* * Let the FDW perform its processing on the restriction clauses and - * generate the plan node. Note that the FDW might remove restriction + * generate the plan node. Note that the FDW might remove restriction * clauses that it intends to execute remotely, or even add more (if it * has selected some join clauses for remote use but also wants them * rechecked locally). @@ -2615,7 +2615,7 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root) * * Note that after doing this, we might have different * representations of the contents of the same PHV in different - * parts of the plan tree. This is OK because equal() will just + * parts of the plan tree. This is OK because equal() will just * match on phid/phlevelsup, so setrefs.c will still recognize an * upper-level reference to a lower-level copy of the same PHV. */ @@ -2793,7 +2793,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path) /* * Check to see if the indexkey is on the right; if so, commute - * the clause. The indexkey should be the side that refers to + * the clause. The indexkey should be the side that refers to * (only) the base relation. 
*/ if (!bms_equal(rinfo->left_relids, index->rel->relids)) @@ -2887,7 +2887,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path) * * This is a simplified version of fix_indexqual_references. The input does * not have RestrictInfo nodes, and we assume that indxpath.c already - * commuted the clauses to put the index keys on the left. Also, we don't + * commuted the clauses to put the index keys on the left. Also, we don't * bother to support any cases except simple OpExprs, since nothing else * is allowed for ordering operators. */ @@ -3126,7 +3126,7 @@ order_qual_clauses(PlannerInfo *root, List *clauses) /* * Sort. We don't use qsort() because it's not guaranteed stable for - * equal keys. The expected number of entries is small enough that a + * equal keys. The expected number of entries is small enough that a * simple insertion sort should be good enough. */ for (i = 1; i < nitems; i++) @@ -3771,7 +3771,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols, * prepare_sort_from_pathkeys * Prepare to sort according to given pathkeys * - * This is used to set up for both Sort and MergeAppend nodes. It calculates + * This is used to set up for both Sort and MergeAppend nodes. It calculates * the executor's representation of the sort key information, and adjusts the * plan targetlist if needed to add resjunk sort columns. * @@ -3784,7 +3784,7 @@ make_sort(PlannerInfo *root, Plan *lefttree, int numCols, * * We must convert the pathkey information into arrays of sort key column * numbers, sort operator OIDs, collation OIDs, and nulls-first flags, - * which is the representation the executor wants. These are returned into + * which is the representation the executor wants. These are returned into * the output parameters *p_numsortkeys etc. * * When looking for matches to an EquivalenceClass's members, we will only @@ -4229,7 +4229,7 @@ make_material(Plan *lefttree) * materialize_finished_plan: stick a Material node atop a completed plan * * There are a couple of places where we want to attach a Material node - * after completion of subquery_planner(). This currently requires hackery. + * after completion of subquery_planner(). This currently requires hackery. * Since subquery_planner has already run SS_finalize_plan on the subplan * tree, we have to kluge up parameter lists for the Material node. * Possibly this could be fixed by postponing SS_finalize_plan processing @@ -4435,7 +4435,7 @@ make_group(PlannerInfo *root, /* * distinctList is a list of SortGroupClauses, identifying the targetlist items - * that should be considered by the Unique filter. The input path must + * that should be considered by the Unique filter. The input path must * already be sorted accordingly. */ Unique * @@ -4453,7 +4453,7 @@ make_unique(Plan *lefttree, List *distinctList) /* * Charge one cpu_operator_cost per comparison per input tuple. We assume - * all columns get compared at most of the tuples. (XXX probably this is + * all columns get compared at most of the tuples. (XXX probably this is * an overestimate.) */ plan->total_cost += cpu_operator_cost * plan->plan_rows * numCols; diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index b57bfd21760..f88e493edb8 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -87,12 +87,12 @@ static void check_hashjoinable(RestrictInfo *restrictinfo); * appearing in the jointree. 
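
On the order_qual_clauses hunk above ("We don't use qsort() because it's not guaranteed stable for equal keys"): the loop in that function is an insertion sort, which keeps equal-cost clauses in their original order. A freestanding sketch of the same idea, with invented types:

#include <stdio.h>

typedef struct QualItem
{
	const char *clause;			/* stand-in for the qual expression */
	double		cost;			/* estimated evaluation cost */
} QualItem;

/*
 * Stable insertion sort by ascending cost: an element moves left only
 * past strictly more expensive neighbours, so ties keep input order.
 */
static void
sort_quals(QualItem *items, int nitems)
{
	for (int i = 1; i < nitems; i++)
	{
		QualItem	newitem = items[i];
		int			j = i;

		while (j > 0 && items[j - 1].cost > newitem.cost)
		{
			items[j] = items[j - 1];
			j--;
		}
		items[j] = newitem;
	}
}

int
main(void)
{
	QualItem	q[] = {{"expensive()", 100.0}, {"a = 1", 0.01},
						{"b = 2", 0.01}, {"cheapish()", 1.0}};

	sort_quals(q, 4);
	for (int i = 0; i < 4; i++)
		printf("%s (cost %g)\n", q[i].clause, q[i].cost);
	return 0;					/* "a = 1" still precedes "b = 2" */
}
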
* * The initial invocation must pass root->parse->jointree as the value of - * jtnode. Internally, the function recurses through the jointree. + * jtnode. Internally, the function recurses through the jointree. * * At the end of this process, there should be one baserel RelOptInfo for * every non-join RTE that is used in the query. Therefore, this routine * is the only place that should call build_simple_rel with reloptkind - * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build + * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build * "other rel" RelOptInfos for the members of any appendrels we find here.) */ void @@ -234,10 +234,10 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars, * means setting suitable where_needed values for them. * * Note that this only deals with lateral references in unflattened LATERAL - * subqueries. When we flatten a LATERAL subquery, its lateral references + * subqueries. When we flatten a LATERAL subquery, its lateral references * become plain Vars in the parent query, but they may have to be wrapped in * PlaceHolderVars if they need to be forced NULL by outer joins that don't - * also null the LATERAL subquery. That's all handled elsewhere. + * also null the LATERAL subquery. That's all handled elsewhere. * * This has to run before deconstruct_jointree, since it might result in * creation of PlaceHolderInfos. @@ -360,7 +360,7 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex) /* * We mark the Vars as being "needed" at the LATERAL RTE. This is a bit * of a cheat: a more formal approach would be to mark each one as needed - * at the join of the LATERAL RTE with its source RTE. But it will work, + * at the join of the LATERAL RTE with its source RTE. But it will work, * and it's much less tedious than computing a separate where_needed for * each Var. */ @@ -568,7 +568,7 @@ create_lateral_join_info(PlannerInfo *root) * add_lateral_info * Add a LateralJoinInfo to root->lateral_info_list, if needed * - * We suppress redundant list entries. The passed Relids are copied if saved. + * We suppress redundant list entries. The passed Relids are copied if saved. */ static void add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs) @@ -615,7 +615,7 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs) * deconstruct_jointree * Recursively scan the query's join tree for WHERE and JOIN/ON qual * clauses, and add these to the appropriate restrictinfo and joininfo - * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes + * lists belonging to base RelOptInfos. Also, add SpecialJoinInfo nodes * to root->join_info_list for any outer joins appearing in the query tree. * Return a "joinlist" data structure showing the join order decisions * that need to be made by make_one_rel(). @@ -632,9 +632,9 @@ add_lateral_info(PlannerInfo *root, Relids lhs, Relids rhs) * be evaluated at the lowest level where all the variables it mentions are * available. However, we cannot push a qual down into the nullable side(s) * of an outer join since the qual might eliminate matching rows and cause a - * NULL row to be incorrectly emitted by the join. Therefore, we artificially + * NULL row to be incorrectly emitted by the join. Therefore, we artificially * OR the minimum-relids of such an outer join into the required_relids of - * clauses appearing above it. This forces those clauses to be delayed until + * clauses appearing above it. 
This forces those clauses to be delayed until * application of the outer join (or maybe even higher in the join tree). */ List * @@ -755,7 +755,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join, *inner_join_rels = *qualscope; /* - * Try to process any quals postponed by children. If they need + * Try to process any quals postponed by children. If they need * further postponement, add them to my output postponed_qual_list. */ foreach(l, child_postponed_quals) @@ -807,7 +807,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join, * regard for whether this level is an outer join, which is correct. * Then we place our own join quals, which are restricted by lower * outer joins in any case, and are forced to this level if this is an - * outer join and they mention the outer side. Finally, if this is an + * outer join and they mention the outer side. Finally, if this is an * outer join, we create a join_info_list entry for the join. This * will prevent quals above us in the join tree that use those rels * from being pushed down below this level. (It's okay for upper @@ -897,7 +897,7 @@ deconstruct_recurse(PlannerInfo *root, Node *jtnode, bool below_outer_join, nullable_rels); /* - * Try to process any quals postponed by children. If they need + * Try to process any quals postponed by children. If they need * further postponement, add them to my output postponed_qual_list. * Quals that can be processed now must be included in my_quals, so * that they'll be handled properly in make_outerjoininfo. @@ -1059,7 +1059,7 @@ make_outerjoininfo(PlannerInfo *root, * complain if any nullable rel is FOR [KEY] UPDATE/SHARE. * * You might be wondering why this test isn't made far upstream in the - * parser. It's because the parser hasn't got enough info --- consider + * parser. It's because the parser hasn't got enough info --- consider * FOR UPDATE applied to a view. Only after rewriting and flattening do * we know whether the view contains an outer join. * @@ -1074,8 +1074,8 @@ make_outerjoininfo(PlannerInfo *root, (jointype == JOIN_FULL && bms_is_member(rc->rti, left_rels))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s cannot be applied to the nullable side of an outer join", LCS_asString(rc->strength)))); } @@ -1117,7 +1117,7 @@ make_outerjoininfo(PlannerInfo *root, min_lefthand = bms_intersect(clause_relids, left_rels); /* - * Similarly for required RHS. But here, we must also include any lower + * Similarly for required RHS. But here, we must also include any lower * inner joins, to ensure we don't try to commute with any of them. */ min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels), @@ -1169,7 +1169,7 @@ make_outerjoininfo(PlannerInfo *root, * Here, we have to consider that "our join condition" includes any * clauses that syntactically appeared above the lower OJ and below * ours; those are equivalent to degenerate clauses in our OJ and must - * be treated as such. Such clauses obviously can't reference our + * be treated as such. Such clauses obviously can't reference our * LHS, and they must be non-strict for the lower OJ's RHS (else * reduce_outer_joins would have reduced the lower OJ to a plain * join). 
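
The min_lefthand/min_righthand computations in the make_outerjoininfo hunk above are plain bitmapset arithmetic over relids. With small relid numbers a single machine word is enough to sketch the two quoted expressions (bms_intersect, and bms_int_members over a bms_union); the values here are made up:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t Relids;		/* one bit per range-table index */

#define REL(n) ((Relids) 1 << (n))

int
main(void)
{
	Relids		clause_relids = REL(1) | REL(3);	/* rels in the join clause */
	Relids		left_rels = REL(1) | REL(2);		/* syntactic LHS */
	Relids		right_rels = REL(3) | REL(4);		/* syntactic RHS */
	Relids		inner_join_rels = REL(3) | REL(5);	/* lower inner joins */

	/* min_lefthand = bms_intersect(clause_relids, left_rels) */
	Relids		min_lefthand = clause_relids & left_rels;

	/*
	 * min_righthand = bms_int_members(bms_union(clause_relids,
	 *                                           inner_join_rels), right_rels)
	 */
	Relids		min_righthand = (clause_relids | inner_join_rels) & right_rels;

	printf("min_lefthand  = %#llx\n", (unsigned long long) min_lefthand);
	printf("min_righthand = %#llx\n", (unsigned long long) min_righthand);
	return 0;
}
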
Hence the other ways in which we handle clauses within our @@ -1248,7 +1248,7 @@ make_outerjoininfo(PlannerInfo *root, * distribute_qual_to_rels * Add clause information to either the baserestrictinfo or joininfo list * (depending on whether the clause is a join) of each base relation - * mentioned in the clause. A RestrictInfo node is created and added to + * mentioned in the clause. A RestrictInfo node is created and added to * the appropriate list for each rel. Alternatively, if the clause uses a * mergejoinable operator and is not delayed by outer-join rules, enter * the left- and right-side expressions into the query's list of @@ -1313,7 +1313,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * In ordinary SQL, a WHERE or JOIN/ON clause can't reference any rels * that aren't within its syntactic scope; however, if we pulled up a * LATERAL subquery then we might find such references in quals that have - * been pulled up. We need to treat such quals as belonging to the join + * been pulled up. We need to treat such quals as belonging to the join * level that includes every rel they reference. Although we could make * pull_up_subqueries() place such quals correctly to begin with, it's * easier to handle it here. When we find a clause that contains Vars @@ -1357,10 +1357,10 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * gating Result plan node. We put such a clause into the regular * RestrictInfo lists for the moment, but eventually createplan.c will * pull it out and make a gating Result node immediately above whatever - * plan node the pseudoconstant clause is assigned to. It's usually best + * plan node the pseudoconstant clause is assigned to. It's usually best * to put a gating node as high in the plan tree as possible. If we are * not below an outer join, we can actually push the pseudoconstant qual - * all the way to the top of the tree. If we are below an outer join, we + * all the way to the top of the tree. If we are below an outer join, we * leave the qual at its original syntactic level (we could push it up to * just below the outer join, but that seems more complex than it's * worth). @@ -1414,7 +1414,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * Note: it is not immediately obvious that a simple boolean is enough * for this: if for some reason we were to attach a degenerate qual to * its original join level, it would need to be treated as an outer join - * qual there. However, this cannot happen, because all the rels the + * qual there. However, this cannot happen, because all the rels the * clause mentions must be in the outer join's min_righthand, therefore * the join it needs must be formed before the outer join; and we always * attach quals to the lowest level where they can be evaluated. But @@ -1448,7 +1448,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * We can't use such a clause to deduce equivalence (the left and * right sides might be unequal above the join because one of them has * gone to NULL) ... but we might be able to use it for more limited - * deductions, if it is mergejoinable. So consider adding it to the + * deductions, if it is mergejoinable. So consider adding it to the * lists of set-aside outer-join clauses. */ is_pushed_down = false; @@ -1478,7 +1478,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, else { /* - * Normal qual clause or degenerate outer-join clause. Either way, we + * Normal qual clause or degenerate outer-join clause. Either way, we * can mark it as pushed-down. 
*/ is_pushed_down = true; @@ -1598,7 +1598,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * * In all cases, it's important to initialize the left_ec and right_ec * fields of a mergejoinable clause, so that all possibly mergejoinable - * expressions have representations in EquivalenceClasses. If + * expressions have representations in EquivalenceClasses. If * process_equivalence is successful, it will take care of that; * otherwise, we have to call initialize_mergeclause_eclasses to do it. */ @@ -1674,7 +1674,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * For an is_pushed_down qual, we can evaluate the qual as soon as (1) we have * all the rels it mentions, and (2) we are at or above any outer joins that * can null any of these rels and are below the syntactic location of the - * given qual. We must enforce (2) because pushing down such a clause below + * given qual. We must enforce (2) because pushing down such a clause below * the OJ might cause the OJ to emit null-extended rows that should not have * been formed, or that should have been rejected by the clause. (This is * only an issue for non-strict quals, since if we can prove a qual mentioning @@ -1700,7 +1700,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause, * required relids overlap the LHS too) causes that OJ's delay_upper_joins * flag to be set TRUE. This will prevent any higher-level OJs from * being interchanged with that OJ, which would result in not having any - * correct place to evaluate the qual. (The case we care about here is a + * correct place to evaluate the qual. (The case we care about here is a * sub-select WHERE clause within the RHS of some outer join. The WHERE * clause must effectively be treated as a degenerate clause of that outer * join's condition. Rather than trying to match such clauses with joins @@ -1928,7 +1928,7 @@ distribute_restrictinfo_to_rels(PlannerInfo *root, * that provides all its variables. * * "nullable_relids" is the set of relids used in the expressions that are - * potentially nullable below the expressions. (This has to be supplied by + * potentially nullable below the expressions. (This has to be supplied by * caller because this function is used after deconstruct_jointree, so we * don't have knowledge of where the clause items came from.) * @@ -2098,7 +2098,7 @@ check_mergejoinable(RestrictInfo *restrictinfo) * info fields in the restrictinfo. * * Currently, we support hashjoin for binary opclauses where - * the operator is a hashjoinable operator. The arguments can be + * the operator is a hashjoinable operator. The arguments can be * anything --- as long as there are no volatile functions in them. */ static void diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c index 7937ff00e05..94ca92d78e7 100644 --- a/src/backend/optimizer/plan/planagg.c +++ b/src/backend/optimizer/plan/planagg.c @@ -10,9 +10,9 @@ * ORDER BY col ASC/DESC * LIMIT 1) * Given a suitable index on tab.col, this can be much faster than the - * generic scan-all-the-rows aggregation plan. We can handle multiple + * generic scan-all-the-rows aggregation plan. We can handle multiple * MIN/MAX aggregates by generating multiple subqueries, and their - * orderings can be different. However, if the query contains any + * orderings can be different. However, if the query contains any * non-optimizable aggregates, there's no point since we'll have to * scan all the rows anyway. 
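
check_hashjoinable above accepts any arguments "as long as there are no volatile functions in them". That kind of test is a short recursive walk over the expression tree; here is a toy version with an invented node shape (the real test is contain_volatile_functions over PostgreSQL's Node trees):

#include <stdio.h>
#include <stdbool.h>

typedef struct Expr
{
	bool		is_volatile;	/* true for nodes like random() */
	struct Expr *left;			/* children, NULL if absent */
	struct Expr *right;
} Expr;

/* Return true if any node in the tree is volatile. */
static bool
contains_volatile(const Expr *e)
{
	if (e == NULL)
		return false;
	if (e->is_volatile)
		return true;
	return contains_volatile(e->left) || contains_volatile(e->right);
}

int
main(void)
{
	Expr		vol = {true, NULL, NULL};	/* e.g. random() */
	Expr		col = {false, NULL, NULL};	/* a plain Var */
	Expr		plus = {false, &col, &vol}; /* col + random() */

	printf("hashjoinable args? %s\n",
		   contains_volatile(&plus) ? "no" : "yes");
	return 0;
}
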
* @@ -128,7 +128,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist) /* * Scan the tlist and HAVING qual to find all the aggregates and verify - * all are MIN/MAX aggregates. Stop as soon as we find one that isn't. + * all are MIN/MAX aggregates. Stop as soon as we find one that isn't. */ aggs_list = NIL; if (find_minmax_aggs_walker((Node *) tlist, &aggs_list)) @@ -163,7 +163,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist) * We can use either an ordering that gives NULLS FIRST or one that * gives NULLS LAST; furthermore there's unlikely to be much * performance difference between them, so it doesn't seem worth - * costing out both ways if we get a hit on the first one. NULLS + * costing out both ways if we get a hit on the first one. NULLS * FIRST is more likely to be available if the operator is a * reverse-sort operator, so try that first if reverse. */ diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index 3ea916f1661..93484a0cd59 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -33,7 +33,7 @@ * which may involve joins but not any fancier features. * * Since query_planner does not handle the toplevel processing (grouping, - * sorting, etc) it cannot select the best path by itself. Instead, it + * sorting, etc) it cannot select the best path by itself. Instead, it * returns the RelOptInfo for the top level of joining, and the caller * (grouping_planner) can choose one of the surviving paths for the rel. * Normally it would choose either the rel's cheapest path, or the cheapest @@ -63,7 +63,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * If the query has an empty join tree, then it's something easy like - * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly. + * "SELECT 2+2;" or "INSERT ... VALUES()". Fall through quickly. */ if (parse->jointree->fromlist == NIL) { @@ -129,7 +129,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * Examine the targetlist and join tree, adding entries to baserel * targetlists for all referenced Vars, and generating PlaceHolderInfo - * entries for all referenced PlaceHolderVars. Restrict and join clauses + * entries for all referenced PlaceHolderVars. Restrict and join clauses * are added to appropriate lists belonging to the mentioned relations. We * also build EquivalenceClasses for provably equivalent expressions. The * SpecialJoinInfo list is also built to hold information about join order @@ -153,7 +153,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * If we formed any equivalence classes, generate additional restriction - * clauses as appropriate. (Implied join clauses are formed on-the-fly + * clauses as appropriate. (Implied join clauses are formed on-the-fly * later.) */ generate_base_implied_equalities(root); @@ -168,14 +168,14 @@ query_planner(PlannerInfo *root, List *tlist, /* * Examine any "placeholder" expressions generated during subquery pullup. * Make sure that the Vars they need are marked as needed at the relevant - * join level. This must be done before join removal because it might + * join level. This must be done before join removal because it might * cause Vars or placeholders to be needed above a join when they weren't * so marked before. */ fix_placeholder_input_needed_levels(root); /* - * Remove any useless outer joins. Ideally this would be done during + * Remove any useless outer joins. 
Ideally this would be done during * jointree preprocessing, but the necessary information isn't available * until we've built baserel data structures and classified qual clauses. */ diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 0508d16902b..0f1e2e46802 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -192,7 +192,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) /* * We document cursor_tuple_fraction as simply being a fraction, which - * means the edge cases 0 and 1 have to be treated specially here. We + * means the edge cases 0 and 1 have to be treated specially here. We * convert 1 to 0 ("all the tuples") and 0 to a very small fraction. */ if (tuple_fraction >= 1.0) @@ -386,7 +386,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, } /* - * Preprocess RowMark information. We need to do this after subquery + * Preprocess RowMark information. We need to do this after subquery * pullup (so that all non-inherited RTEs are present) and before * inheritance expansion (so that the info is available for * expand_inherited_tables to examine and modify). @@ -506,7 +506,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse, * to execute that we're better off doing it only once per group, despite * the loss of selectivity. This is hard to estimate short of doing the * entire planning process twice, so we use a heuristic: clauses - * containing subplans are left in HAVING. Otherwise, we move or copy the + * containing subplans are left in HAVING. Otherwise, we move or copy the * HAVING clause into WHERE, in hopes of eliminating tuples before * aggregation instead of after. * @@ -916,8 +916,8 @@ inheritance_planner(PlannerInfo *root) subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ ); /* - * Planning may have modified the query result relation (if there - * were security barrier quals on the result RTE). + * Planning may have modified the query result relation (if there were + * security barrier quals on the result RTE). */ appinfo->child_relid = subroot.parse->resultRelation; @@ -940,7 +940,8 @@ inheritance_planner(PlannerInfo *root) else { List *tmp_rtable = NIL; - ListCell *cell1, *cell2; + ListCell *cell1, + *cell2; /* * Check to see if any of the original RTEs were turned into @@ -1108,7 +1109,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * If there's a top-level ORDER BY, assume we have to fetch all the - * tuples. This might be too simplistic given all the hackery below + * tuples. This might be too simplistic given all the hackery below * to possibly avoid the sort; but the odds of accurate estimates here * are pretty low anyway. */ @@ -1135,7 +1136,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * We should not need to call preprocess_targetlist, since we must be - * in a SELECT query node. Instead, use the targetlist returned by + * in a SELECT query node. Instead, use the targetlist returned by * plan_set_operations (since this tells whether it returned any * resjunk columns!), and transfer any sort key information from the * original tlist. 
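
The standard_planner hunk above notes that cursor_tuple_fraction's edge cases 0 and 1 need special treatment: 1 becomes 0 ("all the tuples") and 0 becomes a very small fraction. Sketched directly, with the small-fraction constant as a placeholder rather than a quote of the real value:

#include <stdio.h>

/*
 * Normalize a documented "fraction" setting: by convention 0.0 means
 * "fetch everything", so literal 1.0 collapses to that, and literal 0
 * is bumped to a tiny positive fraction so it still means "few rows".
 */
static double
normalize_tuple_fraction(double f)
{
	if (f >= 1.0)
		return 0.0;				/* all the tuples */
	if (f <= 0.0)
		return 1e-10;			/* placeholder for "a very small fraction" */
	return f;
}

int
main(void)
{
	printf("%g %g %g\n",
		   normalize_tuple_fraction(1.0),
		   normalize_tuple_fraction(0.0),
		   normalize_tuple_fraction(0.25));
	return 0;
}
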
@@ -1152,11 +1153,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) if (parse->rowMarks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - /*------ - translator: %s is a SQL row locking clause such as FOR UPDATE */ + /*------ + translator: %s is a SQL row locking clause such as FOR UPDATE */ errmsg("%s is not allowed with UNION/INTERSECT/EXCEPT", LCS_asString(((RowMarkClause *) - linitial(parse->rowMarks))->strength)))); + linitial(parse->rowMarks))->strength)))); /* * Calculate pathkeys that represent result ordering requirements @@ -1279,7 +1280,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * Generate the best unsorted and presorted paths for this Query (but - * note there may not be any presorted paths). We also generate (in + * note there may not be any presorted paths). We also generate (in * standard_qp_callback) pathkey representations of the query's sort * clause, distinct clause, etc. */ @@ -1314,7 +1315,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * In GROUP BY mode, an absolute LIMIT is relative to the number - * of groups not the number of tuples. If the caller gave us a + * of groups not the number of tuples. If the caller gave us a * fraction, keep it as-is. (In both cases, we are effectively * assuming that all the groups are about the same size.) */ @@ -1673,7 +1674,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * Furthermore, there cannot be any variables in either HAVING * or the targetlist, so we actually do not need the FROM * table at all! We can just throw away the plan-so-far and - * generate a Result node. This is a sufficiently unusual + * generate a Result node. This is a sufficiently unusual * corner case that it's not worth contorting the structure of * this routine to avoid having to generate the plan in the * first place. @@ -1717,14 +1718,14 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * The "base" targetlist for all steps of the windowing process is - * a flat tlist of all Vars and Aggs needed in the result. (In + * a flat tlist of all Vars and Aggs needed in the result. (In * some cases we wouldn't need to propagate all of these all the * way to the top, since they might only be needed as inputs to * WindowFuncs. It's probably not worth trying to optimize that * though.) We also add window partitioning and sorting * expressions to the base tlist, to ensure they're computed only * once at the bottom of the stack (that's critical for volatile - * functions). As we climb up the stack, we'll add outputs for + * functions). As we climb up the stack, we'll add outputs for * the WindowFuncs computed at each level. */ window_tlist = make_windowInputTargetList(root, @@ -1733,7 +1734,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) /* * The copyObject steps here are needed to ensure that each plan - * node has a separately modifiable tlist. (XXX wouldn't a + * node has a separately modifiable tlist. (XXX wouldn't a * shallow list copy do for that?) */ result_plan->targetlist = (List *) copyObject(window_tlist); @@ -2018,7 +2019,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) * * Once grouping_planner() has applied a general tlist to the topmost * scan/join plan node, any tlist eval cost for added-on nodes should be - * accounted for as we create those nodes. Presently, of the node types we + * accounted for as we create those nodes. 
Presently, of the node types we * can add on later, only Agg, WindowAgg, and Group project new tlists (the * rest just copy their input tuples) --- so make_agg(), make_windowagg() and * make_group() are responsible for calling this function to account for their @@ -2150,7 +2151,7 @@ preprocess_rowmarks(PlannerInfo *root) * insufficient because of rule substitution, query pullup, etc. */ CheckSelectLocking(parse, ((RowMarkClause *) - linitial(parse->rowMarks))->strength); + linitial(parse->rowMarks))->strength); } else { @@ -2184,7 +2185,7 @@ preprocess_rowmarks(PlannerInfo *root) /* * Currently, it is syntactically impossible to have FOR UPDATE et al - * applied to an update/delete target rel. If that ever becomes + * applied to an update/delete target rel. If that ever becomes * possible, we should drop the target from the PlanRowMark list. */ Assert(rc->rti != parse->resultRelation); @@ -2268,7 +2269,7 @@ preprocess_rowmarks(PlannerInfo *root) * preprocess_limit - do pre-estimation for LIMIT and/or OFFSET clauses * * We try to estimate the values of the LIMIT/OFFSET clauses, and pass the - * results back in *count_est and *offset_est. These variables are set to + * results back in *count_est and *offset_est. These variables are set to * 0 if the corresponding clause is not present, and -1 if it's present * but we couldn't estimate the value for it. (The "0" convention is OK * for OFFSET but a little bit bogus for LIMIT: effectively we estimate @@ -2277,7 +2278,7 @@ preprocess_rowmarks(PlannerInfo *root) * be passed to make_limit, which see if you change this code. * * The return value is the suitably adjusted tuple_fraction to use for - * planning the query. This adjustment is not overridable, since it reflects + * planning the query. This adjustment is not overridable, since it reflects * plan actions that grouping_planner() will certainly take, not assumptions * about context. */ @@ -2401,7 +2402,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, else if (*offset_est != 0 && tuple_fraction > 0.0) { /* - * We have an OFFSET but no LIMIT. This acts entirely differently + * We have an OFFSET but no LIMIT. This acts entirely differently * from the LIMIT case: here, we need to increase rather than decrease * the caller's tuple_fraction, because the OFFSET acts to cause more * tuples to be fetched instead of fewer. This only matters if we got @@ -2416,7 +2417,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, /* * If we have absolute counts from both caller and OFFSET, add them - * together; likewise if they are both fractional. If one is + * together; likewise if they are both fractional. If one is * fractional and the other absolute, we want to take the larger, and * we heuristically assume that's the fractional one. */ @@ -2457,7 +2458,7 @@ preprocess_limit(PlannerInfo *root, double tuple_fraction, * * If we have constant-zero OFFSET and constant-null LIMIT, we can skip adding * a Limit node. This is worth checking for because "OFFSET 0" is a common - * locution for an optimization fence. (Because other places in the planner + * locution for an optimization fence. (Because other places in the planner * merely check whether parse->limitOffset isn't NULL, it will still work as * an optimization fence --- we're just suppressing unnecessary run-time * overhead.) @@ -2700,7 +2701,7 @@ choose_hashed_grouping(PlannerInfo *root, /* * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY - * aggregates. 
(Doing so would imply storing *all* the input values in + * aggregates. (Doing so would imply storing *all* the input values in * the hash table, and/or running many sorts in parallel, either of which * seems like a certain loser.) We similarly don't support ordered-set * aggregates in hashed aggregation, but that case is included in the @@ -2840,7 +2841,7 @@ choose_hashed_grouping(PlannerInfo *root, * pass in the costs as individual variables.) * * But note that making the two choices independently is a bit bogus in - * itself. If the two could be combined into a single choice operation + * itself. If the two could be combined into a single choice operation * it'd probably be better, but that seems far too unwieldy to be practical, * especially considering that the combination of GROUP BY and DISTINCT * isn't very common in real queries. By separating them, we are giving @@ -2937,7 +2938,7 @@ choose_hashed_distinct(PlannerInfo *root, 0.0, work_mem, limit_tuples); /* - * Now for the GROUP case. See comments in grouping_planner about the + * Now for the GROUP case. See comments in grouping_planner about the * sorting choices here --- this code should match that code. */ sorted_p.startup_cost = sorted_startup_cost; @@ -3127,7 +3128,7 @@ make_subplanTargetList(PlannerInfo *root, * add them to the result tlist if not already present. (A Var used * directly as a GROUP BY item will be present already.) Note this * includes Vars used in resjunk items, so we are covering the needs of - * ORDER BY and window specifications. Vars used within Aggrefs will be + * ORDER BY and window specifications. Vars used within Aggrefs will be * pulled out here, too. */ non_group_vars = pull_var_clause((Node *) non_group_cols, @@ -3178,7 +3179,7 @@ get_grouping_column_index(Query *parse, TargetEntry *tle) * Locate grouping columns in the tlist chosen by create_plan. * * This is only needed if we don't use the sub_tlist chosen by - * make_subplanTargetList. We have to forget the column indexes found + * make_subplanTargetList. We have to forget the column indexes found * by that routine and re-locate the grouping exprs in the real sub_tlist. * We assume the grouping exprs are just Vars (see make_subplanTargetList). */ @@ -3209,11 +3210,11 @@ locate_grouping_columns(PlannerInfo *root, /* * The grouping column returned by create_plan might not have the same - * typmod as the original Var. (This can happen in cases where a + * typmod as the original Var. (This can happen in cases where a * set-returning function has been inlined, so that we now have more * knowledge about what it returns than we did when the original Var * was created.) So we can't use tlist_member() to search the tlist; - * instead use tlist_member_match_var. For safety, still check that + * instead use tlist_member_match_var. For safety, still check that * the vartype matches. */ if (!(groupexpr && IsA(groupexpr, Var))) @@ -3339,7 +3340,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists) * * When grouping_planner inserts one or more WindowAgg nodes into the plan, * this function computes the initial target list to be computed by the node - * just below the first WindowAgg. This list must contain all values needed + * just below the first WindowAgg. This list must contain all values needed * to evaluate the window functions, compute the final target list, and * perform any required final sort step. 
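
An assumption-laden sketch of the cost comparison behind choose_hashed_grouping/choose_hashed_distinct above: each candidate is judged at the fraction of output actually fetched, interpolating between startup and total cost. The interpolation formula here is a plausible stand-in, not a quote of the planner's costing code:

#include <stdio.h>

typedef struct PathCost
{
	double		startup_cost;	/* cost before the first tuple */
	double		total_cost;		/* cost to fetch every tuple */
} PathCost;

/* Cost of running the path until 'fraction' of its output is fetched. */
static double
cost_at_fraction(PathCost p, double fraction)
{
	if (fraction <= 0.0 || fraction >= 1.0)
		return p.total_cost;	/* fetching everything */
	return p.startup_cost + fraction * (p.total_cost - p.startup_cost);
}

int
main(void)
{
	PathCost	hashed = {500.0, 700.0};	/* big startup: build hash table */
	PathCost	sorted = {100.0, 900.0};	/* cheap start, dearer overall */
	double		f = 0.1;					/* caller wants 10% of the rows */

	printf("hashed: %g, sorted: %g -> prefer %s\n",
		   cost_at_fraction(hashed, f), cost_at_fraction(sorted, f),
		   cost_at_fraction(hashed, f) < cost_at_fraction(sorted, f)
		   ? "hashed" : "sorted");
	return 0;
}
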
If multiple WindowAggs are needed, * each intermediate one adds its window function results onto this tlist; @@ -3347,7 +3348,7 @@ select_active_windows(PlannerInfo *root, WindowFuncLists *wflists) * * This function is much like make_subplanTargetList, though not quite enough * like it to share code. As in that function, we flatten most expressions - * into their component variables. But we do not want to flatten window + * into their component variables. But we do not want to flatten window * PARTITION BY/ORDER BY clauses, since that might result in multiple * evaluations of them, which would be bad (possibly even resulting in * inconsistent answers, if they contain volatile functions). Also, we must @@ -3520,7 +3521,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, * This depends on the behavior of make_pathkeys_for_window()! * * We are given the target WindowClause and an array of the input column - * numbers associated with the resulting pathkeys. In the easy case, there + * numbers associated with the resulting pathkeys. In the easy case, there * are the same number of pathkey columns as partitioning + ordering columns * and we just have to copy some data around. However, it's possible that * some of the original partitioning + ordering columns were eliminated as @@ -3532,7 +3533,7 @@ make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc, * determine which keys are significant. * * The method used here is a bit brute-force: add the sort columns to a list - * one at a time and note when the resulting pathkey list gets longer. But + * one at a time and note when the resulting pathkey list gets longer. But * it's a sufficiently uncommon case that a faster way doesn't seem worth * the amount of code refactoring that'd be needed. *---------- diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 46affe7dad0..768c5c76704 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -145,7 +145,7 @@ static bool extract_query_dependencies_walker(Node *node, /* * set_plan_references * - * This is the final processing pass of the planner/optimizer. The plan + * This is the final processing pass of the planner/optimizer. The plan * tree is complete; we just have to adjust some representational details * for the convenience of the executor: * @@ -189,7 +189,7 @@ static bool extract_query_dependencies_walker(Node *node, * and root->glob->invalItems (for everything else). * * Notice that we modify Plan nodes in-place, but use expression_tree_mutator - * to process targetlist and qual expressions. We can assume that the Plan + * to process targetlist and qual expressions. We can assume that the Plan * nodes were just built by the planner and are not multiply referenced, but * it's not so safe to assume that for expression tree nodes. */ @@ -262,7 +262,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing) /* * If there are any dead subqueries, they are not referenced in the Plan * tree, so we must add RTEs contained in them to the flattened rtable - * separately. (If we failed to do this, the executor would not perform + * separately. (If we failed to do this, the executor would not perform * expected permission checks for tables mentioned in such subqueries.) 
* * Note: this pass over the rangetable can't be combined with the previous @@ -292,7 +292,7 @@ add_rtes_to_flat_rtable(PlannerInfo *root, bool recursing) /* * The subquery might never have been planned at all, if it * was excluded on the basis of self-contradictory constraints - * in our query level. In this case apply + * in our query level. In this case apply * flatten_unplanned_rtes. * * If it was planned but the plan is dummy, we assume that it @@ -591,7 +591,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) /* * These plan types don't actually bother to evaluate their * targetlists, because they just return their unmodified input - * tuples. Even though the targetlist won't be used by the + * tuples. Even though the targetlist won't be used by the * executor, we fix it up for possible use by EXPLAIN (not to * mention ease of debugging --- wrong varnos are very confusing). */ @@ -609,7 +609,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) /* * Like the plan types above, LockRows doesn't evaluate its - * tlist or quals. But we have to fix up the RT indexes in + * tlist or quals. But we have to fix up the RT indexes in * its rowmarks. */ set_dummy_tlist_references(plan, rtoffset); @@ -727,7 +727,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) * Set up the visible plan targetlist as being the same as * the first RETURNING list. This is for the use of * EXPLAIN; the executor won't pay any attention to the - * targetlist. We postpone this step until here so that + * targetlist. We postpone this step until here so that * we don't have to do set_returning_clause_references() * twice on identical targetlists. */ @@ -953,7 +953,7 @@ set_subqueryscan_references(PlannerInfo *root, else { /* - * Keep the SubqueryScan node. We have to do the processing that + * Keep the SubqueryScan node. We have to do the processing that * set_plan_references would otherwise have done on it. Notice we do * not do set_upper_references() here, because a SubqueryScan will * always have been created with correct references to its subplan's @@ -1425,7 +1425,7 @@ set_dummy_tlist_references(Plan *plan, int rtoffset) * * In most cases, subplan tlists will be "flat" tlists with only Vars, * so we try to optimize that case by extracting information about Vars - * in advance. Matching a parent tlist to a child is still an O(N^2) + * in advance. Matching a parent tlist to a child is still an O(N^2) * operation, but at least with a much smaller constant factor than plain * tlist_member() searches. * @@ -1870,7 +1870,7 @@ fix_upper_expr_mutator(Node *node, fix_upper_expr_context *context) * adjust any Vars that refer to other tables to reference junk tlist * entries in the top subplan's targetlist. Vars referencing the result * table should be left alone, however (the executor will evaluate them - * using the actual heap tuple, after firing triggers if any). In the + * using the actual heap tuple, after firing triggers if any). In the * adjusted RETURNING list, result-table Vars will have their original * varno (plus rtoffset), but Vars for other rels will have varno OUTER_VAR. * diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index a3f358377da..be92049ec4d 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -434,7 +434,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType, Node *result; /* - * Copy the source Query node. 
This is a quick and dirty kluge to resolve + * Copy the source Query node. This is a quick and dirty kluge to resolve * the fact that the parser can generate trees with multiple links to the * same sub-Query node, but the planner wants to scribble on the Query. * Try to clean this up when we do querytree redesign... @@ -459,7 +459,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType, * path/costsize.c. * * XXX If an ANY subplan is uncorrelated, build_subplan may decide to hash - * its output. In that case it would've been better to specify full + * its output. In that case it would've been better to specify full * retrieval. At present, however, we can only check hashability after * we've made the subplan :-(. (Determining whether it'll fit in work_mem * is the really hard part.) Therefore, we don't want to be too @@ -496,7 +496,7 @@ make_subplan(PlannerInfo *root, Query *orig_subquery, SubLinkType subLinkType, /* * If it's a correlated EXISTS with an unimportant targetlist, we might be * able to transform it to the equivalent of an IN and then implement it - * by hashing. We don't have enough information yet to tell which way is + * by hashing. We don't have enough information yet to tell which way is * likely to be better (it depends on the expected number of executions of * the EXISTS qual, and we are much too early in planning the outer query * to be able to guess that). So we generate both plans, if possible, and @@ -724,7 +724,7 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot, * Otherwise, we have the option to tack a Material node onto the top * of the subplan, to reduce the cost of reading it repeatedly. This * is pointless for a direct-correlated subplan, since we'd have to - * recompute its results each time anyway. For uncorrelated/undirect + * recompute its results each time anyway. For uncorrelated/undirect * correlated subplans, we add Material unless the subplan's top plan * node would materialize its output anyway. Also, if enable_material * is false, then the user does not want us to materialize anything @@ -750,10 +750,10 @@ build_subplan(PlannerInfo *root, Plan *plan, PlannerInfo *subroot, /* * A parameterless subplan (not initplan) should be prepared to handle - * REWIND efficiently. If it has direct parameters then there's no point + * REWIND efficiently. If it has direct parameters then there's no point * since it'll be reset on each scan anyway; and if it's an initplan then * there's no point since it won't get re-run without parameter changes - * anyway. The input of a hashed subplan doesn't need REWIND either. + * anyway. The input of a hashed subplan doesn't need REWIND either. */ if (splan->parParam == NIL && !isInitPlan && !splan->useHashTable) root->glob->rewindPlanIDs = bms_add_member(root->glob->rewindPlanIDs, @@ -850,7 +850,7 @@ generate_subquery_vars(PlannerInfo *root, List *tlist, Index varno) /* * convert_testexpr: convert the testexpr given by the parser into * actually executable form. This entails replacing PARAM_SUBLINK Params - * with Params or Vars representing the results of the sub-select. The + * with Params or Vars representing the results of the sub-select. The * nodes to be substituted are passed in as the List result from * generate_subquery_params or generate_subquery_vars. */ @@ -952,7 +952,7 @@ testexpr_is_hashable(Node *testexpr) * * The combining operators must be hashable and strict. The need for * hashability is obvious, since we want to use hashing. 
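
On the Material discussion in build_subplan above: a Material node caches its input's output so that repeated scans replay the cache instead of re-executing the subplan, which is exactly why it is pointless for a direct-correlated subplan whose parameters change on every rescan. A toy cache-and-replay version:

#include <stdio.h>
#include <stdbool.h>

#define NROWS 4

typedef struct Material
{
	bool		filled;			/* has the subplan been run yet? */
	int			rows[NROWS];	/* cached subplan output */
} Material;

/* The "subplan": pretend this is expensive to execute. */
static void
run_subplan(int *out)
{
	for (int i = 0; i < NROWS; i++)
		out[i] = i * i;
	printf("(subplan executed)\n");
}

/* Scan the materialized result, running the subplan only once. */
static void
scan(Material *m)
{
	if (!m->filled)
	{
		run_subplan(m->rows);
		m->filled = true;
	}
	for (int i = 0; i < NROWS; i++)
		printf("%d ", m->rows[i]);
	printf("\n");
}

int
main(void)
{
	Material	m = {false, {0}};

	scan(&m);					/* first scan: executes the subplan */
	scan(&m);					/* rescan: replays the cache */
	return 0;
}
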
Without - * strictness, behavior in the presence of nulls is too unpredictable. We + * strictness, behavior in the presence of nulls is too unpredictable. We * actually must assume even more than plain strictness: they can't yield * NULL for non-null inputs, either (see nodeSubplan.c). However, hash * indexes and hash joins assume that too. @@ -1060,7 +1060,7 @@ SS_process_ctes(PlannerInfo *root) } /* - * Copy the source Query node. Probably not necessary, but let's keep + * Copy the source Query node. Probably not necessary, but let's keep * this similar to make_subplan. */ subquery = (Query *) copyObject(cte->ctequery); @@ -1086,7 +1086,7 @@ SS_process_ctes(PlannerInfo *root) elog(ERROR, "unexpected outer reference in CTE query"); /* - * Make a SubPlan node for it. This is just enough unlike + * Make a SubPlan node for it. This is just enough unlike * build_subplan that we can't share code. * * Note plan_id, plan_name, and cost fields are set further down. @@ -1309,7 +1309,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink, /* * See if the subquery can be simplified based on the knowledge that it's - * being used in EXISTS(). If we aren't able to get rid of its + * being used in EXISTS(). If we aren't able to get rid of its * targetlist, we have to fail, because the pullup operation leaves us * with noplace to evaluate the targetlist. */ @@ -1358,9 +1358,9 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink, * what pull_up_subqueries has to go through. * * In fact, it's even easier than what convert_ANY_sublink_to_join has to - * do. The machinations of simplify_EXISTS_query ensured that there is + * do. The machinations of simplify_EXISTS_query ensured that there is * nothing interesting in the subquery except an rtable and jointree, and - * even the jointree FromExpr no longer has quals. So we can just append + * even the jointree FromExpr no longer has quals. So we can just append * the rtable to our own and use the FromExpr in our jointree. But first, * adjust all level-zero varnos in the subquery to account for the rtable * merger. @@ -1491,7 +1491,7 @@ simplify_EXISTS_query(Query *query) * * On success, the modified subselect is returned, and we store a suitable * upper-level test expression at *testexpr, plus a list of the subselect's - * output Params at *paramIds. (The test expression is already Param-ified + * output Params at *paramIds. (The test expression is already Param-ified * and hence need not go through convert_testexpr, which is why we have to * deal with the Param IDs specially.) * @@ -1654,7 +1654,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect, return NULL; /* - * Also reject sublinks in the stuff we intend to pull up. (It might be + * Also reject sublinks in the stuff we intend to pull up. (It might be * possible to support this, but doesn't seem worth the complication.) */ if (contain_subplans((Node *) leftargs)) @@ -1856,7 +1856,7 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context) * is needed for a bare List.) * * Anywhere within the top-level AND/OR clause structure, we can tell - * make_subplan() that NULL and FALSE are interchangeable. So isTopQual + * make_subplan() that NULL and FALSE are interchangeable. So isTopQual * propagates down in both cases. (Note that this is unlike the meaning * of "top level qual" used in most other places in Postgres.) 
*/ @@ -1962,7 +1962,7 @@ SS_finalize_plan(PlannerInfo *root, Plan *plan, bool attach_initplans) * Now determine the set of params that are validly referenceable in this * query level; to wit, those available from outer query levels plus the * output parameters of any local initPlans. (We do not include output - * parameters of regular subplans. Those should only appear within the + * parameters of regular subplans. Those should only appear within the * testexpr of SubPlan nodes, and are taken care of locally within * finalize_primnode. Likewise, special parameters that are generated by * nodes such as ModifyTable are handled within finalize_plan.) @@ -2138,7 +2138,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, /* * In a SubqueryScan, SS_finalize_plan has already been run on the * subplan by the inner invocation of subquery_planner, so there's - * no need to do it again. Instead, just pull out the subplan's + * no need to do it again. Instead, just pull out the subplan's * extParams list, which represents the params it needs from my * level and higher levels. */ @@ -2500,7 +2500,7 @@ finalize_primnode(Node *node, finalize_primnode_context *context) /* * Remove any param IDs of output parameters of the subplan that were - * referenced in the testexpr. These are not interesting for + * referenced in the testexpr. These are not interesting for * parameter change signaling since we always re-evaluate the subplan. * Note that this wouldn't work too well if there might be uses of the * same param IDs elsewhere in the plan, but that can't happen because @@ -2598,7 +2598,7 @@ SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan, /* Label the subplan for EXPLAIN purposes */ node->plan_name = psprintf("InitPlan %d (returns $%d)", - node->plan_id, prm->paramid); + node->plan_id, prm->paramid); return prm; } diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index 812e56d4c19..776fe426c3e 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -116,7 +116,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid); * * A clause "foo op ANY (sub-SELECT)" can be processed by pulling the * sub-SELECT up to become a rangetable entry and treating the implied - * comparisons as quals of a semijoin. However, this optimization *only* + * comparisons as quals of a semijoin. However, this optimization *only* * works at the top level of WHERE or a JOIN/ON clause, because we cannot * distinguish whether the ANY ought to return FALSE or NULL in cases * involving NULL inputs. Also, in an outer join's ON clause we can only @@ -133,7 +133,7 @@ static Node *find_jointree_node_for_rel(Node *jtnode, int relid); * transformations if any are found. * * This routine has to run before preprocess_expression(), so the quals - * clauses are not yet reduced to implicit-AND format. That means we need + * clauses are not yet reduced to implicit-AND format. That means we need * to recursively search through explicit AND clauses, which are * probably only binary ANDs. We stop as soon as we hit a non-AND item. */ @@ -287,7 +287,7 @@ pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode, /* * Although we could include the pulled-up subqueries in the returned * relids, there's no need since upper quals couldn't refer to their - * outputs anyway. But we *do* need to include the join's own rtindex + * outputs anyway. 
But we *do* need to include the join's own rtindex * because we haven't yet collapsed join alias variables, so upper * levels would mistakenly think they couldn't use references to this * join. @@ -609,7 +609,7 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode) * * If this jointree node is within either side of an outer join, then * lowest_outer_join references the lowest such JoinExpr node; otherwise - * it is NULL. We use this to constrain the effects of LATERAL subqueries. + * it is NULL. We use this to constrain the effects of LATERAL subqueries. * * If this jointree node is within the nullable side of an outer join, then * lowest_nulling_outer_join references the lowest such JoinExpr node; @@ -759,7 +759,7 @@ pull_up_subqueries_recurse(PlannerInfo *root, Node *jtnode, * Attempt to pull up a single simple subquery. * * jtnode is a RangeTblRef that has been tentatively identified as a simple - * subquery by pull_up_subqueries. We return the replacement jointree node, + * subquery by pull_up_subqueries. We return the replacement jointree node, * or jtnode itself if we determine that the subquery can't be pulled up after * all. * @@ -792,7 +792,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * Create a PlannerInfo data structure for this subquery. * * NOTE: the next few steps should match the first processing in - * subquery_planner(). Can we refactor to avoid code duplication, or + * subquery_planner(). Can we refactor to avoid code duplication, or * would that just make things uglier? */ subroot = makeNode(PlannerInfo); @@ -842,7 +842,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, /* * Now we must recheck whether the subquery is still simple enough to pull - * up. If not, abandon processing it. + * up. If not, abandon processing it. * * We don't really need to recheck all the conditions involved, but it's * easier just to keep this "if" looking the same as the one in @@ -859,7 +859,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * Give up, return unmodified RangeTblRef. * * Note: The work we just did will be redone when the subquery gets - * planned on its own. Perhaps we could avoid that by storing the + * planned on its own. Perhaps we could avoid that by storing the * modified subquery back into the rangetable, but I'm not gonna risk * it now. */ @@ -900,7 +900,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * non-nullable items and lateral references may have to be turned into * PlaceHolderVars. If we are dealing with an appendrel member then * anything that's not a simple Var has to be turned into a - * PlaceHolderVar. Set up required context data for pullup_replace_vars. + * PlaceHolderVar. Set up required context data for pullup_replace_vars. */ rvcontext.root = root; rvcontext.targetlist = subquery->targetList; @@ -925,7 +925,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * replace any of the jointree structure. (This'd be a lot cleaner if we * could use query_tree_mutator.) We have to use PHVs in the targetList, * returningList, and havingQual, since those are certainly above any - * outer join. replace_vars_in_jointree tracks its location in the + * outer join. replace_vars_in_jointree tracks its location in the * jointree and uses PHVs or not appropriately. 
*/ parse->targetList = (List *) @@ -1084,7 +1084,7 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, * Pull up a single simple UNION ALL subquery. * * jtnode is a RangeTblRef that has been identified as a simple UNION ALL - * subquery by pull_up_subqueries. We pull up the leaf subqueries and + * subquery by pull_up_subqueries. We pull up the leaf subqueries and * build an "append relation" for the union set. The result value is just * jtnode, since we don't actually need to change the query jointree. */ @@ -1098,7 +1098,7 @@ pull_up_simple_union_all(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte) /* * Make a modifiable copy of the subquery's rtable, so we can adjust - * upper-level Vars in it. There are no such Vars in the setOperations + * upper-level Vars in it. There are no such Vars in the setOperations * tree proper, so fixing the rtable should be sufficient. */ rtable = copyObject(subquery->rtable); @@ -1370,7 +1370,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery that has any set-returning functions in its - * targetlist. Otherwise we might well wind up inserting set-returning + * targetlist. Otherwise we might well wind up inserting set-returning * functions into places where they mustn't go, such as quals of higher * queries. */ @@ -1379,7 +1379,7 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery that has any volatile functions in its - * targetlist. Otherwise we might introduce multiple evaluations of these + * targetlist. Otherwise we might introduce multiple evaluations of these * functions, if they get copied to multiple places in the upper query, * leading to surprising results. (Note: the PlaceHolderVar mechanism * doesn't quite guarantee single evaluation; else we could pull up anyway @@ -1609,7 +1609,7 @@ replace_vars_in_jointree(Node *jtnode, /* * If the RangeTblRef refers to a LATERAL subquery (that isn't the * same subquery we're pulling up), it might contain references to the - * target subquery, which we must replace. We drive this from the + * target subquery, which we must replace. We drive this from the * jointree scan, rather than a scan of the rtable, for a couple of * reasons: we can avoid processing no-longer-referenced RTEs, and we * can use the appropriate setting of need_phvs depending on whether @@ -1770,7 +1770,7 @@ pullup_replace_vars_callback(Var *var, /* * Insert PlaceHolderVar if needed. Notice that we are wrapping one * PlaceHolderVar around the whole RowExpr, rather than putting one - * around each element of the row. This is because we need the + * around each element of the row. This is because we need the * expression to yield NULL, not ROW(NULL,NULL,...) when it is forced * to null by an outer join. */ @@ -1872,7 +1872,7 @@ pullup_replace_vars_callback(Var *var, /* * Cache it if possible (ie, if the attno is in range, which it - * probably always should be). We can cache the value even if we + * probably always should be). We can cache the value even if we * decided we didn't need a PHV, since this result will be * suitable for any request that has need_phvs. */ @@ -1915,7 +1915,7 @@ pullup_replace_vars_subquery(Query *query, * * If a query's setOperations tree consists entirely of simple UNION ALL * operations, flatten it into an append relation, which we can process more - * intelligently than the general setops case. Otherwise, do nothing. + * intelligently than the general setops case. Otherwise, do nothing. 
* * In most cases, this can succeed only for a top-level query, because for a * subquery in FROM, the parent query's invocation of pull_up_subqueries would @@ -2027,7 +2027,7 @@ flatten_simple_union_all(PlannerInfo *root) * SELECT ... FROM a LEFT JOIN b ON (a.x = b.y) WHERE b.y IS NULL; * If the join clause is strict for b.y, then only null-extended rows could * pass the upper WHERE, and we can conclude that what the query is really - * specifying is an anti-semijoin. We change the join type from JOIN_LEFT + * specifying is an anti-semijoin. We change the join type from JOIN_LEFT * to JOIN_ANTI. The IS NULL clause then becomes redundant, and must be * removed to prevent bogus selectivity calculations, but we leave it to * distribute_qual_to_rels to get rid of such clauses. @@ -2267,7 +2267,7 @@ reduce_outer_joins_pass2(Node *jtnode, /* * See if we can reduce JOIN_LEFT to JOIN_ANTI. This is the case if * the join's own quals are strict for any var that was forced null by - * higher qual levels. NOTE: there are other ways that we could + * higher qual levels. NOTE: there are other ways that we could * detect an anti-join, in particular if we were to check whether Vars * coming from the RHS must be non-null because of table constraints. * That seems complicated and expensive though (in particular, one @@ -2425,7 +2425,7 @@ reduce_outer_joins_pass2(Node *jtnode, * pulled-up relid, and change them to reference the replacement relid(s). * * NOTE: although this has the form of a walker, we cheat and modify the - * nodes in-place. This should be OK since the tree was copied by + * nodes in-place. This should be OK since the tree was copied by * pullup_replace_vars earlier. Avoid scribbling on the original values of * the bitmapsets, though, because expression_tree_mutator doesn't copy those. */ diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c index 812fbaddba9..2a24938d843 100644 --- a/src/backend/optimizer/prep/prepqual.c +++ b/src/backend/optimizer/prep/prepqual.c @@ -54,12 +54,12 @@ static Expr *process_duplicate_ors(List *orlist); * Although this can be invoked on its own, it's mainly intended as a helper * for eval_const_expressions(), and that context drives several design * decisions. In particular, if the input is already AND/OR flat, we must - * preserve that property. We also don't bother to recurse in situations + * preserve that property. We also don't bother to recurse in situations * where we can assume that lower-level executions of eval_const_expressions * would already have simplified sub-clauses of the input. * * The difference between this and a simple make_notclause() is that this - * tries to get rid of the NOT node by logical simplification. It's clearly + * tries to get rid of the NOT node by logical simplification. It's clearly * always a win if the NOT node can be eliminated altogether. However, our * use of DeMorgan's laws could result in having more NOT nodes rather than * fewer. We do that unconditionally anyway, because in WHERE clauses it's @@ -152,7 +152,7 @@ negate_clause(Node *node) * those properties. For example, if no direct child of * the given AND clause is an AND or a NOT-above-OR, then * the recursive calls of negate_clause() can't return any - * OR clauses. So we needn't call pull_ors() before + * OR clauses. So we needn't call pull_ors() before * building a new OR clause. Similarly for the OR case. 
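The negate_clause() hunks above describe pushing NOT inward with DeMorgan's laws and cancelling double negation. A minimal stand-alone sketch of that transform, using a toy Expr type rather than PostgreSQL's real Node trees:

/* Toy illustration of negate_clause()'s DeMorgan push-down; the Expr
 * type here is an invented stand-in, not the planner's structures. */
#include <stdio.h>
#include <stdlib.h>

typedef enum { E_VAR, E_NOT, E_AND, E_OR } ExprKind;

typedef struct Expr
{
    ExprKind    kind;
    char        name;           /* only meaningful for E_VAR */
    struct Expr *left;
    struct Expr *right;
} Expr;

static Expr *
mk(ExprKind kind, char name, Expr *l, Expr *r)
{
    Expr       *e = malloc(sizeof(Expr));

    e->kind = kind;
    e->name = name;
    e->left = l;
    e->right = r;
    return e;
}

/* Return the negation of e, pushing NOT down via DeMorgan's laws:
 * NOT (a AND b) => (NOT a) OR (NOT b), NOT (a OR b) => (NOT a) AND (NOT b),
 * and NOT NOT a => a.  Only a bare variable keeps an explicit NOT node. */
static Expr *
negate(Expr *e)
{
    switch (e->kind)
    {
        case E_NOT:
            return e->left;     /* double negation cancels */
        case E_AND:
            return mk(E_OR, 0, negate(e->left), negate(e->right));
        case E_OR:
            return mk(E_AND, 0, negate(e->left), negate(e->right));
        default:
            return mk(E_NOT, 0, e, NULL);
    }
}

static void
print_expr(const Expr *e)
{
    switch (e->kind)
    {
        case E_VAR:
            printf("%c", e->name);
            break;
        case E_NOT:
            printf("NOT ");
            print_expr(e->left);
            break;
        case E_AND:
        case E_OR:
            printf("(");
            print_expr(e->left);
            printf(e->kind == E_AND ? " AND " : " OR ");
            print_expr(e->right);
            printf(")");
            break;
    }
}

int
main(void)
{
    /* NOT (a AND (b OR NOT c)) => (NOT a) OR ((NOT b) AND c) */
    Expr       *a = mk(E_VAR, 'a', NULL, NULL);
    Expr       *b = mk(E_VAR, 'b', NULL, NULL);
    Expr       *c = mk(E_VAR, 'c', NULL, NULL);
    Expr       *q = mk(E_AND, 0, a, mk(E_OR, 0, b, mk(E_NOT, 0, c, NULL)));

    print_expr(negate(q));
    printf("\n");               /* prints (NOT a OR (NOT b AND c)) */
    return 0;
}

Note that, exactly as the comment above warns, the result can contain more NOT nodes than the input; the transform is applied anyway because AND/OR-of-NOTs is the more useful shape for WHERE-clause processing.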
*-------------------- */ @@ -293,7 +293,7 @@ canonicalize_qual(Expr *qual) /* * Pull up redundant subclauses in OR-of-AND trees. We do this only * within the top-level AND/OR structure; there's no point in looking - * deeper. Also remove any NULL constants in the top-level structure. + * deeper. Also remove any NULL constants in the top-level structure. */ newqual = find_duplicate_ors(qual); @@ -374,7 +374,7 @@ pull_ors(List *orlist) * * This may seem like a fairly useless activity, but it turns out to be * applicable to many machine-generated queries, and there are also queries - * in some of the TPC benchmarks that need it. This was in fact almost the + * in some of the TPC benchmarks that need it. This was in fact almost the * sole useful side-effect of the old prepqual code that tried to force * the query into canonical AND-of-ORs form: the canonical equivalent of * ((A AND B) OR (A AND C)) @@ -400,7 +400,7 @@ pull_ors(List *orlist) * results, so it's valid to treat NULL::boolean the same as FALSE and then * simplify AND/OR accordingly. * - * Returns the modified qualification. AND/OR flatness is preserved. + * Returns the modified qualification. AND/OR flatness is preserved. */ static Expr * find_duplicate_ors(Expr *qual) diff --git a/src/backend/optimizer/prep/prepsecurity.c b/src/backend/optimizer/prep/prepsecurity.c index 7daaa3349ed..dd7f9003a28 100644 --- a/src/backend/optimizer/prep/prepsecurity.c +++ b/src/backend/optimizer/prep/prepsecurity.c @@ -33,7 +33,7 @@ typedef struct Relation rel; /* RTE relation at rt_index */ List *targetlist; /* Targetlist for new subquery RTE */ List *colnames; /* Column names in subquery RTE */ - List *vars_processed; /* List of Vars already processed */ + List *vars_processed; /* List of Vars already processed */ } security_barrier_replace_vars_context; static void expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, @@ -43,7 +43,7 @@ static void security_barrier_replace_vars(Node *node, security_barrier_replace_vars_context *context); static bool security_barrier_replace_vars_walker(Node *node, - security_barrier_replace_vars_context *context); + security_barrier_replace_vars_context *context); /* @@ -97,6 +97,7 @@ expand_security_quals(PlannerInfo *root, List *tlist) if (rt_index == parse->resultRelation) { RangeTblEntry *newrte = copyObject(rte); + parse->rtable = lappend(parse->rtable, newrte); parse->resultRelation = list_length(parse->rtable); @@ -117,11 +118,11 @@ expand_security_quals(PlannerInfo *root, List *tlist) rte->modifiedCols = NULL; /* - * For the most part, Vars referencing the original relation should - * remain as they are, meaning that they pull OLD values from the - * expanded RTE. But in the RETURNING list and in any WITH CHECK - * OPTION quals, we want such Vars to represent NEW values, so - * change them to reference the new RTE. + * For the most part, Vars referencing the original relation + * should remain as they are, meaning that they pull OLD values + * from the expanded RTE. But in the RETURNING list and in any + * WITH CHECK OPTION quals, we want such Vars to represent NEW + * values, so change them to reference the new RTE. 
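Returning to the prepqual.c hunks above: find_duplicate_ors() pulls common factors out of OR-of-AND trees, turning ((A AND B) OR (A AND C)) into A AND (B OR C). A toy sketch of that extraction, modeling each OR arm as a bitmask of ANDed atoms (nothing here is the planner's actual list machinery):

/* Bitmask sketch of find_duplicate_ors()'s common-factor extraction. */
#include <stdio.h>

static void
print_arm(unsigned mask)
{
    int         first = 1;

    for (int i = 0; i < 8; i++)
        if (mask & (1u << i))
        {
            printf("%s%c", first ? "" : " AND ", 'A' + i);
            first = 0;
        }
    if (first)
        printf("TRUE");         /* empty conjunction */
}

int
main(void)
{
    /* (A AND B) OR (A AND C): arms as bitmasks of atoms */
    unsigned    arms[] = {0x3 /* A,B */ , 0x5 /* A,C */ };
    int         narms = 2;

    /* The extractable factor is the intersection of all the arms. */
    unsigned    common = arms[0];

    for (int i = 1; i < narms; i++)
        common &= arms[i];

    /* Print the equivalent "common AND (residual OR ...)" form. */
    print_arm(common);
    printf(" AND (");
    for (int i = 0; i < narms; i++)
    {
        if (i > 0)
            printf(" OR ");
        print_arm(arms[i] & ~common);
    }
    printf(")\n");              /* A AND (B OR C) */
    return 0;
}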
*/ ChangeVarNodes((Node *) parse->returningList, rt_index, parse->resultRelation, 0); @@ -141,7 +142,8 @@ expand_security_quals(PlannerInfo *root, List *tlist) */ while (rte->securityQuals != NIL) { - Node *qual = (Node *) linitial(rte->securityQuals); + Node *qual = (Node *) linitial(rte->securityQuals); + rte->securityQuals = list_delete_first(rte->securityQuals); ChangeVarNodes(qual, rt_index, 1, 0); @@ -160,14 +162,14 @@ static void expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, RangeTblEntry *rte, Node *qual) { - Query *parse = root->parse; - Oid relid = rte->relid; - Query *subquery; - RangeTblEntry *subrte; - RangeTblRef *subrtr; - PlanRowMark *rc; + Query *parse = root->parse; + Oid relid = rte->relid; + Query *subquery; + RangeTblEntry *subrte; + RangeTblRef *subrtr; + PlanRowMark *rc; security_barrier_replace_vars_context context; - ListCell *cell; + ListCell *cell; /* * There should only be 2 possible cases: @@ -182,6 +184,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, switch (rte->rtekind) { case RTE_RELATION: + /* * Turn the relation RTE into a security barrier subquery RTE, * moving all permissions checks down into the subquery. @@ -204,7 +207,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, rte->relid = InvalidOid; rte->subquery = subquery; rte->security_barrier = true; - rte->inh = false; /* must not be set for a subquery */ + rte->inh = false; /* must not be set for a subquery */ /* the permissions checks have now been moved down */ rte->requiredPerms = 0; @@ -219,9 +222,9 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, * Note that we can't push the user-defined quals down since they * may include untrusted functions and that means that we will * end up locking all rows which pass the securityQuals, even if - * those rows don't pass the user-defined quals. This is currently - * documented behavior, but it'd be nice to come up with a better - * solution some day. + * those rows don't pass the user-defined quals. This is + * currently documented behavior, but it'd be nice to come up with + * a better solution some day. */ rc = get_plan_rowmark(root->rowMarks, rt_index); if (rc != NULL) @@ -277,6 +280,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, break; case RTE_SUBQUERY: + /* * Build a new subquery that includes all the same columns as the * original subquery. @@ -288,8 +292,8 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, foreach(cell, rte->subquery->targetList) { - TargetEntry *tle; - Var *var; + TargetEntry *tle; + Var *var; tle = (TargetEntry *) lfirst(cell); var = makeVarFromTargetEntry(1, tle); @@ -333,7 +337,7 @@ expand_security_qual(PlannerInfo *root, List *tlist, int rt_index, * variable that needs to be exposed by the security barrier subquery RTE. * * NOTE: although this has the form of a walker, we cheat and modify the - * nodes in-place. The given expression tree should have been copied + * nodes in-place. The given expression tree should have been copied * earlier to ensure that no unwanted side-effects occur!
*/ static void @@ -355,7 +359,7 @@ security_barrier_replace_vars(Node *node, static bool security_barrier_replace_vars_walker(Node *node, - security_barrier_replace_vars_context *context) + security_barrier_replace_vars_context *context) { if (node == NULL) return false; @@ -405,7 +409,7 @@ security_barrier_replace_vars_walker(Node *node, Form_pg_attribute att_tup; att_tup = SystemAttributeDefinition(var->varattno, - context->rel->rd_rel->relhasoids); + context->rel->rd_rel->relhasoids); attname = NameStr(att_tup->attname); } else if (var->varattno == InvalidAttrNumber) diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c index ee773b834e9..4ab12e51df7 100644 --- a/src/backend/optimizer/prep/preptlist.c +++ b/src/backend/optimizer/prep/preptlist.c @@ -4,7 +4,7 @@ * Routines to preprocess the parse tree target list * * For INSERT and UPDATE queries, the targetlist must contain an entry for - * each attribute of the target relation in the correct order. For all query + * each attribute of the target relation in the correct order. For all query * types, we may need to add junk tlist entries for Vars used in the RETURNING * list and row ID information needed for SELECT FOR UPDATE locking and/or * EvalPlanQual checking. @@ -79,7 +79,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist) /* * Add necessary junk columns for rowmarked rels. These values are needed * for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual - * rechecking. See comments for PlanRowMark in plannodes.h. + * rechecking. See comments for PlanRowMark in plannodes.h. */ foreach(lc, root->rowMarks) { @@ -144,7 +144,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist) /* * If the query has a RETURNING list, add resjunk entries for any Vars * used in RETURNING that belong to other relations. We need to do this - * to make these Vars available for the RETURNING calculation. Vars that + * to make these Vars available for the RETURNING calculation. Vars that * belong to the result rel don't need to be added, because they will be * made to refer to the actual heap tuple. */ @@ -252,9 +252,9 @@ expand_targetlist(List *tlist, int command_type, * When generating a NULL constant for a dropped column, we label * it INT4 (any other guaranteed-to-exist datatype would do as * well). We can't label it with the dropped column's datatype - * since that might not exist anymore. It does not really matter + * since that might not exist anymore. It does not really matter * what we claim the type is, since NULL is NULL --- its - * representation is datatype-independent. This could perhaps + * representation is datatype-independent. This could perhaps * confuse code comparing the finished plan to the target * relation, however. */ @@ -336,7 +336,7 @@ expand_targetlist(List *tlist, int command_type, /* * The remaining tlist entries should be resjunk; append them all to the * end of the new tlist, making sure they have resnos higher than the last - * real attribute. (Note: although the rewriter already did such + * real attribute. (Note: although the rewriter already did such * renumbering, we have to do it again here in case we are doing an UPDATE * in a table with dropped columns, or an inheritance child table with * extra columns.) 
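The expand_targetlist() comment just above notes that resjunk entries must be renumbered past the last real attribute of the target relation. A small self-contained sketch of that renumbering, with a toy TLE struct standing in for PostgreSQL's TargetEntry:

/* Sketch of resjunk renumbering: junk tlist entries are appended after
 * the table's real attributes and get resnos above the last real column. */
#include <stdio.h>

typedef struct
{
    int         resno;
    int         resjunk;        /* nonzero for junk columns */
    const char *name;
} TLE;

int
main(void)
{
    /* An UPDATE tlist for a 3-column table, plus two junk entries whose
     * resnos are stale (as after dropped-column or inheritance cases). */
    TLE         tlist[] = {
        {1, 0, "id"}, {2, 0, "val"}, {3, 0, "ts"},
        {2, 1, "ctid"}, {3, 1, "returning_var"},
    };
    int         n = 5;
    int         attrno = 3;     /* last real attribute number */

    for (int i = 0; i < n; i++)
    {
        if (tlist[i].resjunk)
            tlist[i].resno = ++attrno;  /* renumber past the real attrs */
        printf("resno %d  %s%s\n", tlist[i].resno, tlist[i].name,
               tlist[i].resjunk ? "  (junk)" : "");
    }
    return 0;
}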
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c index cdf541d34d5..0410fddc546 100644 --- a/src/backend/optimizer/prep/prepunion.c +++ b/src/backend/optimizer/prep/prepunion.c @@ -6,14 +6,14 @@ * * There are two code paths in the planner for set-operation queries. * If a subquery consists entirely of simple UNION ALL operations, it - * is converted into an "append relation". Otherwise, it is handled + * is converted into an "append relation". Otherwise, it is handled * by the general code in this module (plan_set_operations and its * subroutines). There is some support code here for the append-relation * case, but most of the heavy lifting for that is done elsewhere, * notably in prepjointree.c and allpaths.c. * * There is also some code here to support planning of queries that use - * inheritance (SELECT FROM foo*). Inheritance trees are converted into + * inheritance (SELECT FROM foo*). Inheritance trees are converted into * append relations, and thenceforth share code with the UNION ALL case. * * @@ -577,7 +577,7 @@ generate_nonunion_plan(SetOperationStmt *op, PlannerInfo *root, * * The tlist for an Append plan isn't important as far as the Append is * concerned, but we must make it look real anyway for the benefit of the - * next plan level up. In fact, it has to be real enough that the flag + * next plan level up. In fact, it has to be real enough that the flag * column is shown as a variable not a constant, else setrefs.c will get * confused. */ @@ -970,7 +970,7 @@ generate_setop_tlist(List *colTypes, List *colCollations, * Ensure the tlist entry's exposed collation matches the set-op. This * is necessary because plan_set_operations() reports the result * ordering as a list of SortGroupClauses, which don't carry collation - * themselves but just refer to tlist entries. If we don't show the + * themselves but just refer to tlist entries. If we don't show the * right collation then planner.c might do the wrong thing in * higher-level queries. * @@ -1184,7 +1184,7 @@ generate_setop_grouplist(SetOperationStmt *op, List *targetlist) /* * expand_inherited_tables * Expand each rangetable entry that represents an inheritance set - * into an "append relation". At the conclusion of this process, + * into an "append relation". At the conclusion of this process, * the "inh" flag is set in all and only those RTEs that are append * relation parents. */ @@ -1216,7 +1216,7 @@ expand_inherited_tables(PlannerInfo *root) * Check whether a rangetable entry represents an inheritance set. * If so, add entries for all the child tables to the query's * rangetable, and build AppendRelInfo nodes for all the child tables - * and add them to root->append_rel_list. If not, clear the entry's + * and add them to root->append_rel_list. If not, clear the entry's * "inh" flag to prevent later code from looking for AppendRelInfos. * * Note that the original RTE is considered to represent the whole @@ -1527,7 +1527,7 @@ make_inh_translation_list(Relation oldrelation, Relation newrelation, * parent rel's attribute numbering to the child's. * * The only surprise here is that we don't translate a parent whole-row - * reference into a child whole-row reference. That would mean requiring + * reference into a child whole-row reference. That would mean requiring * permissions on all child columns, which is overly strict, since the * query is really only going to reference the inherited columns. Instead * we set the per-column bits for all inherited columns. 
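The make_inh_translation_list() comment above concerns mapping parent attribute numbers to child attribute numbers. A toy illustration of such a translation, using plain arrays and a hypothetical column layout rather than the planner's Var-list representation:

/* Sketch of parent-to-child attno translation for an inheritance child
 * whose columns are shifted by an extra leading column. */
#include <stdio.h>

int
main(void)
{
    /* parent attno -> child attno; index 0 unused (attnos are 1-based) */
    int         translation[] = {0, 2, 3, 4};
    int         parent_vars[] = {1, 3, 2};  /* attnos referenced by query */

    for (int i = 0; i < 3; i++)
        printf("parent attno %d -> child attno %d\n",
               parent_vars[i], translation[parent_vars[i]]);
    return 0;
}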
@@ -1708,6 +1708,7 @@ adjust_appendrel_attrs_mutator(Node *node, foreach(lc, fields) { Var *field = (Var *) lfirst(lc); + field->varlevelsup += context->sublevels_up; } rowexpr = makeNode(RowExpr); @@ -1887,7 +1888,7 @@ adjust_relid_set(Relids relids, Index oldrelid, Index newrelid) * * The expressions have already been fixed, but we have to make sure that * the target resnos match the child table (they may not, in the case of - * a column that was added after-the-fact by ALTER TABLE). In some cases + * a column that was added after-the-fact by ALTER TABLE). In some cases * this can force us to re-order the tlist to preserve resno ordering. * (We do all this work in special cases so that preptlist.c is fast for * the typical case.) diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 3f307e6464c..97dacaaac19 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -540,7 +540,7 @@ count_agg_clauses_walker(Node *node, count_agg_clauses_context *context) /* * If the transition type is pass-by-value then it doesn't add - * anything to the required size of the hashtable. If it is + * anything to the required size of the hashtable. If it is * pass-by-reference then we have to add the estimated size of the * value itself, plus palloc overhead. */ @@ -835,7 +835,7 @@ contain_subplans_walker(Node *node, void *context) * Recursively search for mutable functions within a clause. * * Returns true if any mutable function (or operator implemented by a - * mutable function) is found. This test is needed so that we don't + * mutable function) is found. This test is needed so that we don't * mistakenly think that something like "WHERE random() < 0.5" can be treated * as a constant qualification. * @@ -962,7 +962,7 @@ contain_mutable_functions_walker(Node *node, void *context) * invalid conversions of volatile expressions into indexscan quals. * * We will recursively look into Query nodes (i.e., SubLink sub-selects) - * but not into SubPlans. This is a bit odd, but intentional. If we are + * but not into SubPlans. This is a bit odd, but intentional. If we are * looking at a SubLink, we are probably deciding whether a query tree * transformation is safe, and a contained sub-select should affect that; * for example, duplicating a sub-select containing a volatile function @@ -1207,7 +1207,7 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) * The idea here is that the caller has verified that the expression contains * one or more Var or Param nodes (as appropriate for the caller's need), and * now wishes to prove that the expression result will be NULL if any of these - * inputs is NULL. If we return false, then the proof succeeded. + * inputs is NULL. If we return false, then the proof succeeded. */ bool contain_nonstrict_functions(Node *clause) @@ -1326,7 +1326,7 @@ contain_nonstrict_functions_walker(Node *node, void *context) * Recursively search for leaky functions within a clause. * * Returns true if any function call with side-effect may be present in the - * clause. Qualifiers from outside a security_barrier view should not + * clause. Qualifiers from outside a security_barrier view should not * be pushed down into the view, lest the contents of tuples intended to be * filtered out be revealed via side effects.
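Earlier in this clauses.c hunk, contain_mutable_functions() guards against treating something like "WHERE random() < 0.5" as a constant qualification. A toy demonstration of why that matters, using C's rand() to stand in for a volatile SQL function:

/* Folding a mutable qual to a single value changes per-row behavior. */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    srand(42);

    /* Correct: re-evaluate the "mutable" qual for every row. */
    int         kept = 0;

    for (int row = 0; row < 1000; row++)
        if (rand() < RAND_MAX / 2)
            kept++;
    printf("per-row evaluation kept %d of 1000 rows\n", kept);

    /* Wrong: fold the qual once, as if it were constant. */
    int         folded = (rand() < RAND_MAX / 2);

    printf("folded qual keeps %s rows\n", folded ? "all" : "no");
    return 0;
}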
*/ @@ -1465,7 +1465,7 @@ contain_leaky_functions_walker(Node *node, void *context) * * Returns the set of all Relids that are referenced in the clause in such * a way that the clause cannot possibly return TRUE if any of these Relids - * is an all-NULL row. (It is OK to err on the side of conservatism; hence + * is an all-NULL row. (It is OK to err on the side of conservatism; hence * the analysis here is simplistic.) * * The semantics here are subtly different from contain_nonstrict_functions: @@ -1571,7 +1571,7 @@ find_nonnullable_rels_walker(Node *node, bool top_level) * could be FALSE (hence not NULL). However, if *all* the * arms produce NULL then the result is NULL, so we can take * the intersection of the sets of nonnullable rels, just as - * for OR. Fall through to share code. + * for OR. Fall through to share code. */ /* FALL THRU */ case OR_EXPR: @@ -1779,7 +1779,7 @@ find_nonnullable_vars_walker(Node *node, bool top_level) * could be FALSE (hence not NULL). However, if *all* the * arms produce NULL then the result is NULL, so we can take * the intersection of the sets of nonnullable vars, just as - * for OR. Fall through to share code. + * for OR. Fall through to share code. */ /* FALL THRU */ case OR_EXPR: @@ -2049,7 +2049,7 @@ is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK) * variables of the current query level and no uses of volatile functions. * Such an expr is not necessarily a true constant: it can still contain * Params and outer-level Vars, not to mention functions whose results - * may vary from one statement to the next. However, the expr's value + * may vary from one statement to the next. However, the expr's value * will be constant over any one scan of the current query, so it can be * used as, eg, an indexscan key. * @@ -2255,7 +2255,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum, * expression tree, for example "2 + 2" => "4". More interestingly, * we can reduce certain boolean expressions even when they contain * non-constant subexpressions: "x OR true" => "true" no matter what - * the subexpression x is. (XXX We assume that no such subexpression + * the subexpression x is. (XXX We assume that no such subexpression * will have important side-effects, which is not necessarily a good * assumption in the presence of user-defined functions; do we need a * pg_proc flag that prevents discarding the execution of a function?) @@ -2268,7 +2268,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum, * * Whenever a function is eliminated from the expression by means of * constant-expression evaluation or inlining, we add the function to - * root->glob->invalItems. This ensures the plan is known to depend on + * root->glob->invalItems. This ensures the plan is known to depend on * such functions, even though they aren't referenced anymore. * * We assume that the tree has already been type-checked and contains @@ -2451,7 +2451,7 @@ eval_const_expressions_mutator(Node *node, /* * Code for op/func reduction is pretty bulky, so split it out - * as a separate function. Note: exprTypmod normally returns + * as a separate function. Note: exprTypmod normally returns * -1 for a FuncExpr, but not when the node is recognizably a * length coercion; we want to preserve the typmod in the * eventual Const if so. @@ -2495,7 +2495,7 @@ eval_const_expressions_mutator(Node *node, OpExpr *newexpr; /* - * Need to get OID of underlying function. Okay to scribble + * Need to get OID of underlying function. Okay to scribble * on input to this extent. 
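Looping back to the find_nonnullable_rels/vars logic above: for an AND, a row is rejected if any arm rejects it, so the per-arm nonnullable sets union; for an OR, every arm must reject it, so the sets intersect. A bitmask sketch of that set logic (bits stand in for relids; this is not PostgreSQL's Bitmapset API):

/* Union for AND, intersection for OR, over toy relid bitmasks. */
#include <stdio.h>

#define REL_A  (1u << 1)
#define REL_B  (1u << 2)
#define REL_C  (1u << 3)

int
main(void)
{
    /* Suppose arm1 is strict in rels {A,B} and arm2 is strict in {A,C}. */
    unsigned    arm1 = REL_A | REL_B;
    unsigned    arm2 = REL_A | REL_C;

    printf("AND of arms -> nonnullable rels mask 0x%x (union)\n",
           arm1 | arm2);        /* {A,B,C} */
    printf("OR  of arms -> nonnullable rels mask 0x%x (intersection)\n",
           arm1 & arm2);        /* {A} */
    return 0;
}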
*/ set_opfuncid(expr); @@ -2598,7 +2598,7 @@ eval_const_expressions_mutator(Node *node, /* (NOT okay to try to inline it, though!) */ /* - * Need to get OID of underlying function. Okay to + * Need to get OID of underlying function. Okay to * scribble on input to this extent. */ set_opfuncid((OpExpr *) expr); /* rely on struct @@ -2963,13 +2963,13 @@ eval_const_expressions_mutator(Node *node, * TRUE: drop all remaining alternatives * If the first non-FALSE alternative is a constant TRUE, * we can simplify the entire CASE to that alternative's - * expression. If there are no non-FALSE alternatives, + * expression. If there are no non-FALSE alternatives, * we simplify the entire CASE to the default result (ELSE). * * If we have a simple-form CASE with constant test * expression, we substitute the constant value for contained * CaseTestExpr placeholder nodes, so that we have the - * opportunity to reduce constant test conditions. For + * opportunity to reduce constant test conditions. For * example this allows * CASE 0 WHEN 0 THEN 1 ELSE 1/0 END * to reduce to 1 rather than drawing a divide-by-0 error. @@ -3191,7 +3191,7 @@ eval_const_expressions_mutator(Node *node, { /* * We can optimize field selection from a whole-row Var into a - * simple Var. (This case won't be generated directly by the + * simple Var. (This case won't be generated directly by the * parser, because ParseComplexProjection short-circuits it. * But it can arise while simplifying functions.) Also, we * can optimize field selection from a RowExpr construct. @@ -3449,7 +3449,7 @@ simplify_or_arguments(List *args, /* * Since the parser considers OR to be a binary operator, long OR lists * become deeply nested expressions. We must flatten these into long - * argument lists of a single OR operator. To avoid blowing out the stack + * argument lists of a single OR operator. To avoid blowing out the stack * with recursion of eval_const_expressions, we resort to some tenseness * here: we keep a list of not-yet-processed inputs, and handle flattening * of nested ORs by prepending to the to-do list instead of recursing. @@ -3497,7 +3497,7 @@ simplify_or_arguments(List *args, } /* - * OK, we have a const-simplified non-OR argument. Process it per + * OK, we have a const-simplified non-OR argument. Process it per * comments above. */ if (IsA(arg, Const)) @@ -3732,7 +3732,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod, * deliver a constant result, use a transform function to generate a * substitute node tree, or expand in-line the body of the function * definition (which only works for simple SQL-language functions, but - * that is a common case). Each case needs access to the function's + * that is a common case). Each case needs access to the function's * pg_proc tuple, so fetch it just once. * * Note: the allow_non_const flag suppresses both the second and third @@ -3770,7 +3770,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod, if (!newexpr && allow_non_const && OidIsValid(func_form->protransform)) { /* - * Build a dummy FuncExpr node containing the simplified arg list. We + * Build a dummy FuncExpr node containing the simplified arg list. We * use this approach to present a uniform interface to the transform * function regardless of how the function is actually being invoked. 
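The simplify_or_arguments() comment above explains flattening nested ORs with an explicit to-do list instead of recursion, so that arbitrarily deep OR nests cannot blow out the C stack. A stand-alone sketch of that worklist technique over a toy Node type:

/* Non-recursive flattening of nested ORs via an explicit stack. */
#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    int         is_or;          /* 1 = OR with two kids, 0 = leaf atom */
    char        atom;
    struct Node *kids[2];
} Node;

static Node *
leaf(char atom)
{
    Node       *n = calloc(1, sizeof(Node));

    n->atom = atom;
    return n;
}

static Node *
orn(Node *a, Node *b)
{
    Node       *n = calloc(1, sizeof(Node));

    n->is_or = 1;
    n->kids[0] = a;
    n->kids[1] = b;
    return n;
}

int
main(void)
{
    /* ((a OR b) OR (c OR d)) built as a nested binary tree */
    Node       *tree = orn(orn(leaf('a'), leaf('b')),
                           orn(leaf('c'), leaf('d')));
    Node       *todo[64];
    int         top = 0;

    todo[top++] = tree;
    printf("flat OR arms:");
    while (top > 0)
    {
        Node       *n = todo[--top];

        if (n->is_or)
        {
            /* push kids in reverse so output keeps left-to-right order */
            todo[top++] = n->kids[1];
            todo[top++] = n->kids[0];
        }
        else
            printf(" %c", n->atom);
    }
    printf("\n");               /* flat OR arms: a b c d */
    return 0;
}

Pushing the children of an OR back onto the worklist, rather than recursing into them, is exactly the "prepending to the to-do list" trick the comment describes.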
*/ @@ -3978,7 +3978,7 @@ fetch_function_defaults(HeapTuple func_tuple) * * It is possible for some of the defaulted arguments to be polymorphic; * therefore we can't assume that the default expressions have the correct - * data types already. We have to re-resolve polymorphics and do coercion + * data types already. We have to re-resolve polymorphics and do coercion * just like the parser did. * * This should be a no-op if there are no polymorphic arguments, @@ -4141,7 +4141,7 @@ evaluate_function(Oid funcid, Oid result_type, int32 result_typmod, * do not re-expand them. Also, if a parameter is used more than once * in the SQL-function body, we require it not to contain any volatile * functions (volatiles might deliver inconsistent answers) nor to be - * unreasonably expensive to evaluate. The expensiveness check not only + * unreasonably expensive to evaluate. The expensiveness check not only * prevents us from doing multiple evaluations of an expensive parameter * at runtime, but is a safety valve to limit growth of an expression due * to repeated inlining. @@ -4184,7 +4184,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * Forget it if the function is not SQL-language or has other showstopper - * properties. (The nargs check is just paranoia.) + * properties. (The nargs check is just paranoia.) */ if (funcform->prolang != SQLlanguageId || funcform->prosecdef || @@ -4262,7 +4262,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * We just do parsing and parse analysis, not rewriting, because rewriting * will not affect table-free-SELECT-only queries, which is all that we - * care about. Also, we can punt as soon as we detect more than one + * care about. Also, we can punt as soon as we detect more than one * command in the function body. */ raw_parsetree_list = pg_parse_query(src); @@ -4304,7 +4304,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * Make sure the function (still) returns what it's declared to. This * will raise an error if wrong, but that's okay since the function would - * fail at runtime anyway. Note that check_sql_fn_retval will also insert + * fail at runtime anyway. Note that check_sql_fn_retval will also insert * a RelabelType if needed to make the tlist expression match the declared * type of the function. * @@ -4349,7 +4349,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid, /* * We may be able to do it; there are still checks on parameter usage to * make, but those are most easily done in combination with the actual - * substitution of the inputs. So start building expression with inputs + * substitution of the inputs. So start building expression with inputs * substituted. */ usecounts = (int *) palloc0(funcform->pronargs * sizeof(int)); @@ -4549,7 +4549,7 @@ evaluate_expr(Expr *expr, Oid result_type, int32 result_typmod, fix_opfuncids((Node *) expr); /* - * Prepare expr for execution. (Note: we can't use ExecPrepareExpr + * Prepare expr for execution. (Note: we can't use ExecPrepareExpr * because it'd result in recursively invoking eval_const_expressions.) */ exprstate = ExecInitExpr(expr, NULL); @@ -4671,7 +4671,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) * Refuse to inline if the arguments contain any volatile functions or * sub-selects. Volatile functions are rejected because inlining may * result in the arguments being evaluated multiple times, risking a - * change in behavior. 
Sub-selects are rejected partly for implementation + * change in behavior. Sub-selects are rejected partly for implementation * reasons (pushing them down another level might change their behavior) * and partly because they're likely to be expensive and so multiple * evaluation would be bad. @@ -4698,7 +4698,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * Forget it if the function is not SQL-language or has other showstopper - * properties. In particular it mustn't be declared STRICT, since we + * properties. In particular it mustn't be declared STRICT, since we * couldn't enforce that. It also mustn't be VOLATILE, because that is * supposed to cause it to be executed with its own snapshot, rather than * sharing the snapshot of the calling query. (Rechecking proretset is @@ -4728,9 +4728,9 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * When we call eval_const_expressions below, it might try to add items to - * root->glob->invalItems. Since it is running in the temp context, those + * root->glob->invalItems. Since it is running in the temp context, those * items will be in that context, and will need to be copied out if we're - * successful. Temporarily reset the list so that we can keep those items + * successful. Temporarily reset the list so that we can keep those items * separate from the pre-existing list contents. */ saveInvalItems = root->glob->invalItems; @@ -4760,7 +4760,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * Run eval_const_expressions on the function call. This is necessary to * ensure that named-argument notation is converted to positional notation - * and any default arguments are inserted. It's a bit of overkill for the + * and any default arguments are inserted. It's a bit of overkill for the * arguments, since they'll get processed again later, but no harm will be * done. */ @@ -4812,7 +4812,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte) /* * Make sure the function (still) returns what it's declared to. This * will raise an error if wrong, but that's okay since the function would - * fail at runtime anyway. Note that check_sql_fn_retval will also insert + * fail at runtime anyway. Note that check_sql_fn_retval will also insert * RelabelType(s) and/or NULL columns if needed to make the tlist * expression(s) match the declared type of the function. * diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c index a6421580f9b..0418946d714 100644 --- a/src/backend/optimizer/util/joininfo.c +++ b/src/backend/optimizer/util/joininfo.c @@ -83,7 +83,7 @@ have_relevant_joinclause(PlannerInfo *root, * Add 'restrictinfo' to the joininfo list of each relation it requires. * * Note that the same copy of the restrictinfo node is linked to by all the - * lists it is in. This allows us to exploit caching of information about + * lists it is in. This allows us to exploit caching of information about * the restriction clause (but we must be careful that the information does * not depend on context). 
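The joininfo.c comment just above relies on every joininfo list linking to the same RestrictInfo copy, so cached per-clause information is computed once and seen everywhere. A minimal sketch of that node sharing, with an invented RInfo struct and a fake selectivity (not PostgreSQL's types):

/* One shared clause node, linked into two per-relation lists. */
#include <stdio.h>

typedef struct RInfo
{
    const char *clause;
    double      cached_sel;     /* -1 means "not yet computed" */
} RInfo;

static double
get_selectivity(RInfo *ri)
{
    if (ri->cached_sel < 0)
    {
        ri->cached_sel = 0.25;  /* pretend this was expensive to derive */
        printf("computed selectivity for %s\n", ri->clause);
    }
    return ri->cached_sel;
}

int
main(void)
{
    RInfo       shared = {"a.x = b.y", -1};
    RInfo      *joininfo_a[] = {&shared};   /* rel A's joininfo list */
    RInfo      *joininfo_b[] = {&shared};   /* rel B's joininfo list */

    /* Both relations consult the clause; the work happens only once. */
    printf("via A: %.2f\n", get_selectivity(joininfo_a[0]));
    printf("via B: %.2f\n", get_selectivity(joininfo_b[0]));
    return 0;
}

This is also why the caveat in the comment matters: whatever gets cached must not depend on which relation's list the node was reached through.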
* diff --git a/src/backend/optimizer/util/orclauses.c b/src/backend/optimizer/util/orclauses.c index e9fd47bffbe..9e954d0d35f 100644 --- a/src/backend/optimizer/util/orclauses.c +++ b/src/backend/optimizer/util/orclauses.c @@ -50,7 +50,7 @@ static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel, * * The added quals are partially redundant with the original OR, and therefore * would cause the size of the joinrel to be underestimated when it is finally - * formed. (This would be true of a full transformation to CNF as well; the + * formed. (This would be true of a full transformation to CNF as well; the * fault is not really in the transformation, but in clauselist_selectivity's * inability to recognize redundant conditions.) We can compensate for this * redundancy by changing the cached selectivity of the original OR clause, @@ -60,10 +60,10 @@ static void consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel, * and on the fact that the same RestrictInfo node will appear in every * joininfo list that might be used when the joinrel is formed. * And it doesn't work in cases where the size estimation is nonlinear - * (i.e., outer and IN joins). But it beats not doing anything. + * (i.e., outer and IN joins). But it beats not doing anything. * * We examine each base relation to see if join clauses associated with it - * contain extractable restriction conditions. If so, add those conditions + * contain extractable restriction conditions. If so, add those conditions * to the rel's baserestrictinfo and update the cached selectivities of the * join clauses. Note that the same join clause will be examined afresh * from the point of view of each baserel that participates in it, so its @@ -129,7 +129,7 @@ static bool is_safe_restriction_clause_for(RestrictInfo *rinfo, RelOptInfo *rel) { /* - * We want clauses that mention the rel, and only the rel. So in + * We want clauses that mention the rel, and only the rel. So in * particular pseudoconstant clauses can be rejected quickly. Then check * the clause's Var membership. */ @@ -168,7 +168,7 @@ extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel) * in those nodes to make is_safe_restriction_clause_for()'s checks * cheaper. We'll strip those nodes from the returned tree, though, * meaning that fresh ones will be built if the clause is accepted as a - * restriction clause. This might seem wasteful --- couldn't we re-use + * restriction clause. This might seem wasteful --- couldn't we re-use * the existing RestrictInfos? But that'd require assuming that * selectivity and other cached data is computed exactly the same way for * a restriction clause as for a join clause, which seems undesirable. @@ -193,7 +193,7 @@ extract_or_clause(RestrictInfo *or_rinfo, RelOptInfo *rel) if (restriction_is_or_clause(rinfo)) { /* - * Recurse to deal with nested OR. Note we *must* recurse + * Recurse to deal with nested OR. Note we *must* recurse * here, this isn't just overly-tense optimization: we * have to descend far enough to find and strip all * RestrictInfos in the expression. @@ -314,7 +314,7 @@ consider_new_or_clause(PlannerInfo *root, RelOptInfo *rel, SpecialJoinInfo sjinfo; /* - * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare + * Make up a SpecialJoinInfo for JOIN_INNER semantics. (Compare * approx_tuple_count() in costsize.c.) 
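The orclauses.c comments above describe compensating for the redundancy of an extracted OR restriction by adjusting the original OR clause's cached selectivity. A back-of-the-envelope sketch of the arithmetic idea, with made-up numbers (the real bookkeeping in consider_new_or_clause() is more involved):

/* If an extracted restriction with selectivity s_r already shrinks the
 * base rel, leaving the OR's cached selectivity at s_or double-counts;
 * scaling it to s_or / s_r makes the joinrel estimate come out as if
 * only the OR had been applied. */
#include <stdio.h>

int
main(void)
{
    double      rows = 100000.0;    /* base relation size */
    double      s_or = 0.10;        /* selectivity of the original OR */
    double      s_r = 0.40;         /* selectivity of extracted clause */

    double      naive = rows * s_r * s_or;          /* double-counted */
    double      fixed = rows * s_r * (s_or / s_r);  /* compensated */

    printf("target estimate: %.0f\n", rows * s_or);
    printf("naive (redundant) estimate: %.0f\n", naive);
    printf("compensated estimate: %.0f\n", fixed);
    return 0;
}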
*/ sjinfo.type = T_SpecialJoinInfo; diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index b79af7af4e0..4e05dcd2463 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -127,11 +127,11 @@ compare_fractional_path_costs(Path *path1, Path *path2, * * The fuzz_factor argument must be 1.0 plus delta, where delta is the * fraction of the smaller cost that is considered to be a significant - * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit + * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit * be 1% of the smaller cost. * * The two paths are said to have "equal" costs if both startup and total - * costs are fuzzily the same. Path1 is said to be better than path2 if + * costs are fuzzily the same. Path1 is said to be better than path2 if * it has fuzzily better startup cost and fuzzily no worse total cost, * or if it has fuzzily better total cost and fuzzily no worse startup cost. * Path2 is better than path1 if the reverse holds. Finally, if one path @@ -207,12 +207,12 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor, * * cheapest_total_path is normally the cheapest-total-cost unparameterized * path; but if there are no unparameterized paths, we assign it to be the - * best (cheapest least-parameterized) parameterized path. However, only + * best (cheapest least-parameterized) parameterized path. However, only * unparameterized paths are considered candidates for cheapest_startup_path, * so that will be NULL if there are no unparameterized paths. * * The cheapest_parameterized_paths list collects all parameterized paths - * that have survived the add_path() tournament for this relation. (Since + * that have survived the add_path() tournament for this relation. (Since * add_path ignores pathkeys and startup cost for a parameterized path, * these will be paths that have best total cost or best row count for their * parameterization.) cheapest_parameterized_paths always includes the @@ -431,7 +431,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path) p1_next = lnext(p1); /* - * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this + * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this * percentage need to be user-configurable?) */ costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01, @@ -607,7 +607,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path) * and have lower bounds for its costs. * * Note that we do not know the path's rowcount, since getting an estimate for - * that is too expensive to do before prechecking. We assume here that paths + * that is too expensive to do before prechecking. We assume here that paths * of a superset parameterization will generate fewer rows; if that holds, * then paths with different parameterizations cannot dominate each other * and so we can simply ignore existing paths of another parameterization. @@ -907,7 +907,7 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer) * Compute rows and costs as sums of subplan rows and costs. We charge * nothing extra for the Append itself, which perhaps is too optimistic, * but since it doesn't do any selection or projection, it is a pretty - * cheap node. If you change this, see also make_append(). + * cheap node. If you change this, see also make_append(). 
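The pathnode.c hunk above defines fuzzy cost comparison with fuzz_factor = 1.01, i.e. costs within 1% are treated as equal, and one path dominates only if it is fuzzily no worse on both startup and total cost. A stand-alone sketch of that convention, with a toy Path struct rather than the planner's:

/* Fuzzy two-dimensional cost comparison in the style described above. */
#include <stdio.h>

typedef struct
{
    double      startup_cost;
    double      total_cost;
} Path;

/* Returns +1 if p1 is better, -1 if p2 is better, 0 if fuzzily tied or
 * if each path wins on one of the two cost dimensions. */
static int
compare_costs_fuzzily(const Path *p1, const Path *p2, double fuzz)
{
    int         startup = 0,
                total = 0;

    if (p1->startup_cost > p2->startup_cost * fuzz)
        startup = -1;
    else if (p2->startup_cost > p1->startup_cost * fuzz)
        startup = +1;
    if (p1->total_cost > p2->total_cost * fuzz)
        total = -1;
    else if (p2->total_cost > p1->total_cost * fuzz)
        total = +1;

    if (startup >= 0 && total >= 0 && startup + total > 0)
        return +1;
    if (startup <= 0 && total <= 0 && startup + total < 0)
        return -1;
    return 0;
}

int
main(void)
{
    Path        a = {10.0, 100.0};
    Path        b = {10.05, 100.5};     /* within 1% on both costs */
    Path        c = {5.0, 120.0};       /* cheaper startup, dearer total */

    printf("a vs b: %d (tied)\n", compare_costs_fuzzily(&a, &b, 1.01));
    printf("a vs c: %d (neither dominates)\n",
           compare_costs_fuzzily(&a, &c, 1.01));
    return 0;
}

When neither path dominates, add_path() keeps both, since one may be preferable for fast-start plans and the other for full scans.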
*/ pathnode->path.rows = 0; pathnode->path.startup_cost = 0; @@ -1456,7 +1456,7 @@ translate_sub_tlist(List *tlist, int relid) * * colnos is an integer list of output column numbers (resno's). We are * interested in whether rows consisting of just these columns are certain - * to be distinct. "Distinctness" is defined according to whether the + * to be distinct. "Distinctness" is defined according to whether the * corresponding upper-level equality operators listed in opids would think * the values are distinct. (Note: the opids entries could be cross-type * operators, and thus not exactly the equality operators that the subquery @@ -1577,7 +1577,7 @@ query_is_distinct_for(Query *query, List *colnos, List *opids) * distinct_col_search - subroutine for query_is_distinct_for * * If colno is in colnos, return the corresponding element of opids, - * else return InvalidOid. (We expect colnos does not contain duplicates, + * else return InvalidOid. (We expect colnos does not contain duplicates, * so the result is well-defined.) */ static Oid @@ -1977,10 +1977,10 @@ create_hashjoin_path(PlannerInfo *root, /* * A hashjoin never has pathkeys, since its output ordering is - * unpredictable due to possible batching. XXX If the inner relation is + * unpredictable due to possible batching. XXX If the inner relation is * small enough, we could instruct the executor that it must not batch, * and then we could assume that the output inherits the outer relation's - * ordering, which might save a sort step. However there is considerable + * ordering, which might save a sort step. However there is considerable * downside if our estimate of the inner relation size is badly off. For * the moment we don't risk it. (Note also that if we wanted to take this * seriously, joinpath.c would have to consider many more paths for the @@ -2007,7 +2007,7 @@ create_hashjoin_path(PlannerInfo *root, * same parameterization level, ensuring that they all enforce the same set * of join quals (and thus that that parameterization can be attributed to * an append path built from such paths). Currently, only a few path types - * are supported here, though more could be added at need. We return NULL + * are supported here, though more could be added at need. We return NULL * if we can't reparameterize the given path. * * Note: we intentionally do not pass created paths to add_path(); it would @@ -2039,7 +2039,7 @@ reparameterize_path(PlannerInfo *root, Path *path, /* * We can't use create_index_path directly, and would not want * to because it would re-compute the indexqual conditions - * which is wasted effort. Instead we hack things a bit: + * which is wasted effort. Instead we hack things a bit: * flat-copy the path node, revise its param_info, and redo * the cost estimate. */ diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c index 1172d24b9a1..8d7c4feca46 100644 --- a/src/backend/optimizer/util/placeholder.c +++ b/src/backend/optimizer/util/placeholder.c @@ -60,7 +60,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels) * We build PlaceHolderInfos only for PHVs that are still present in the * simplified query passed to query_planner(). * - * Note: this should only be called after query_planner() has started. Also, + * Note: this should only be called after query_planner() has started. Also, * create_new_ph must not be TRUE after deconstruct_jointree begins, because * make_outerjoininfo assumes that we already know about all placeholders. 
*/ @@ -94,7 +94,7 @@ find_placeholder_info(PlannerInfo *root, PlaceHolderVar *phv, /* * Any referenced rels that are outside the PHV's syntactic scope are * LATERAL references, which should be included in ph_lateral but not in - * ph_eval_at. If no referenced rels are within the syntactic scope, + * ph_eval_at. If no referenced rels are within the syntactic scope, * force evaluation at the syntactic location. */ rels_used = pull_varnos((Node *) phv->phexpr); diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 73ba2f60b2d..b2becfa6765 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -427,12 +427,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths, * minimum size estimate of 10 pages. The idea here is to avoid * assuming a newly-created table is really small, even if it * currently is, because that may not be true once some data gets - * loaded into it. Once a vacuum or analyze cycle has been done + * loaded into it. Once a vacuum or analyze cycle has been done * on it, it's more reasonable to believe the size is somewhat * stable. * * (Note that this is only an issue if the plan gets cached and - * used again after the table has been filled. What we're trying + * used again after the table has been filled. What we're trying * to avoid is using a nestloop-type plan on a table that has * grown substantially since the plan was made. Normally, * autovacuum/autoanalyze will occur once enough inserts have @@ -441,7 +441,7 @@ estimate_rel_size(Relation rel, int32 *attr_widths, * such as temporary tables.) * * We approximate "never vacuumed" by "has relpages = 0", which - * means this will also fire on genuinely empty relations. Not + * means this will also fire on genuinely empty relations. Not * great, but fortunately that's a seldom-seen case in the real * world, and it shouldn't degrade the quality of the plan too * much anyway to err in this direction. @@ -786,7 +786,7 @@ relation_excluded_by_constraints(PlannerInfo *root, return false; /* - * OK to fetch the constraint expressions. Include "col IS NOT NULL" + * OK to fetch the constraint expressions. Include "col IS NOT NULL" * expressions for attnotnull columns, in case we can refute those. */ constraint_pred = get_relation_constraints(root, rte->relid, rel, true); @@ -834,7 +834,7 @@ relation_excluded_by_constraints(PlannerInfo *root, * Exception: if there are any dropped columns, we punt and return NIL. * Ideally we would like to handle the dropped-column case too. However this * creates problems for ExecTypeFromTL, which may be asked to build a tupdesc - * for a tlist that includes vars of no-longer-existent types. In theory we + * for a tlist that includes vars of no-longer-existent types. In theory we * could dig out the required info from the pg_attribute entries of the * relation, but that data is not readily available to ExecTypeFromTL. * For now, we don't apply the physical-tlist optimization when there are diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c index eadd2d5104a..9d61a4d71c2 100644 --- a/src/backend/optimizer/util/predtest.c +++ b/src/backend/optimizer/util/predtest.c @@ -133,7 +133,7 @@ predicate_implied_by(List *predicate_list, List *restrictinfo_list) /* * If either input is a single-element list, replace it with its lone - * member; this avoids one useless level of AND-recursion. We only need + * member; this avoids one useless level of AND-recursion. 
We only need * to worry about this at top level, since eval_const_expressions should * have gotten rid of any trivial ANDs or ORs below that. */ @@ -191,7 +191,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list) /* * If either input is a single-element list, replace it with its lone - * member; this avoids one useless level of AND-recursion. We only need + * member; this avoids one useless level of AND-recursion. We only need * to worry about this at top level, since eval_const_expressions should * have gotten rid of any trivial ANDs or ORs below that. */ @@ -225,7 +225,7 @@ predicate_refuted_by(List *predicate_list, List *restrictinfo_list) * OR-expr A => AND-expr B iff: A => each of B's components * OR-expr A => OR-expr B iff: each of A's components => any of B's * - * An "atom" is anything other than an AND or OR node. Notice that we don't + * An "atom" is anything other than an AND or OR node. Notice that we don't * have any special logic to handle NOT nodes; these should have been pushed * down or eliminated where feasible by prepqual.c. * @@ -658,7 +658,7 @@ predicate_refuted_by_recurse(Node *clause, Node *predicate) * We cannot make the stronger conclusion that B is refuted if B * implies A's arg; that would only prove that B is not-TRUE, not * that it's not NULL either. Hence use equal() rather than - * predicate_implied_by_recurse(). We could do the latter if we + * predicate_implied_by_recurse(). We could do the latter if we * ever had a need for the weak form of refutation. */ not_arg = extract_strong_not_arg(clause); @@ -820,7 +820,7 @@ predicate_classify(Node *clause, PredIterInfo info) } /* - * PredIterInfo routines for iterating over regular Lists. The iteration + * PredIterInfo routines for iterating over regular Lists. The iteration * state variable is the next ListCell to visit. */ static void @@ -1014,13 +1014,13 @@ arrayexpr_cleanup_fn(PredIterInfo info) * implies another: * * A simple and general way is to see if they are equal(); this works for any - * kind of expression. (Actually, there is an implied assumption that the + * kind of expression. (Actually, there is an implied assumption that the * functions in the expression are immutable, ie dependent only on their input * arguments --- but this was checked for the predicate by the caller.) * * When the predicate is of the form "foo IS NOT NULL", we can conclude that * the predicate is implied if the clause is a strict operator or function - * that has "foo" as an input. In this case the clause must yield NULL when + * that has "foo" as an input. In this case the clause must yield NULL when * "foo" is NULL, which we can take as equivalent to FALSE because we know * we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is * already known immutable, so the clause will certainly always fail.) @@ -1244,7 +1244,7 @@ list_member_strip(List *list, Expr *datum) * * The strategy numbers defined by btree indexes (see access/skey.h) are: * (1) < (2) <= (3) = (4) >= (5) > - * and in addition we use (6) to represent <>. <> is not a btree-indexable + * and in addition we use (6) to represent <>. <> is not a btree-indexable * operator, but we assume here that if an equality operator of a btree * opfamily has a negator operator, the negator behaves as <> for the opfamily. * (This convention is also known to get_op_btree_interpretation().) @@ -1328,7 +1328,7 @@ static const StrategyNumber BT_refute_table[6][6] = { * if not able to prove it. 
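The predtest.c hunks above describe proving implications between btree operator clauses by looking up, for each pair of strategy numbers, a comparison of the two constants that settles the question. This sketch checks the same property semantically, by brute force over a small integer domain; it demonstrates what the lookup tables assert, not how btree_predicate_proof() actually works:

/* Semantic check of "x op1 c1 implies x op2 c2" for btree strategies
 * (1) < (2) <= (3) = (4) >= (5) > plus (6) <>, over toy integers. */
#include <stdio.h>

typedef enum { LT, LE, EQ, GE, GT, NE } Strat;

static const char *opname[] = {"<", "<=", "=", ">=", ">", "<>"};

static int
apply(Strat op, int x, int c)
{
    switch (op)
    {
        case LT: return x < c;
        case LE: return x <= c;
        case EQ: return x == c;
        case GE: return x >= c;
        case GT: return x > c;
        case NE: return x != c;
    }
    return 0;
}

/* Does "x op1 c1" imply "x op2 c2" for every x in a test domain? */
static int
implies(Strat op1, int c1, Strat op2, int c2)
{
    for (int x = -100; x <= 100; x++)
        if (apply(op1, x, c1) && !apply(op2, x, c2))
            return 0;
    return 1;
}

int
main(void)
{
    printf("x %s 5 implies x %s 7: %s\n", opname[LT], opname[LE],
           implies(LT, 5, LE, 7) ? "yes" : "no");      /* yes */
    printf("x %s 5 implies x %s 5: %s\n", opname[EQ], opname[NE],
           implies(EQ, 5, NE, 5) ? "yes" : "no");      /* no */
    printf("x %s 9 implies x %s 5: %s\n", opname[GT], opname[NE],
           implies(GT, 9, NE, 5) ? "yes" : "no");      /* yes */
    return 0;
}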
* * What we look for here is binary boolean opclauses of the form - * "foo op constant", where "foo" is the same in both clauses. The operators + * "foo op constant", where "foo" is the same in both clauses. The operators * and constants can be different but the operators must be in the same btree * operator family. We use the above operator implication tables to * derive implications between nonidentical clauses. (Note: "foo" is known @@ -1418,7 +1418,7 @@ btree_predicate_proof(Expr *predicate, Node *clause, bool refute_it) /* * Check for matching subexpressions on the non-Const sides. We used to * only allow a simple Var, but it's about as easy to allow any - * expression. Remember we already know that the pred expression does not + * expression. Remember we already know that the pred expression does not * contain any non-immutable functions, so identical expressions should * yield identical results. */ @@ -1690,7 +1690,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it) * Last check: test_op must be immutable. * * Note that we require only the test_op to be immutable, not the - * original clause_op. (pred_op is assumed to have been checked + * original clause_op. (pred_op is assumed to have been checked * immutable by the caller.) Essentially we are assuming that the * opfamily is consistent even if it contains operators that are * merely stable. diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index 8ae8f551212..c938c2700f9 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -262,7 +262,7 @@ RelOptInfo * find_join_rel(PlannerInfo *root, Relids relids) { /* - * Switch to using hash lookup when list grows "too long". The threshold + * Switch to using hash lookup when list grows "too long". The threshold * is arbitrary and is known only here. */ if (!root->join_rel_hash && list_length(root->join_rel_list) > 32) @@ -448,7 +448,7 @@ build_join_rel(PlannerInfo *root, /* * Also, if dynamic-programming join search is active, add the new joinrel - * to the appropriate sublist. Note: you might think the Assert on number + * to the appropriate sublist. Note: you might think the Assert on number * of members should be for equality, but some of the level 1 rels might * have been joinrels already, so we can only assert <=. */ @@ -529,7 +529,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, * the join list need only be computed once for any join RelOptInfo. * The join list is fully determined by the set of rels making up the * joinrel, so we should get the same results (up to ordering) from any - * candidate pair of sub-relations. But the restriction list is whatever + * candidate pair of sub-relations. But the restriction list is whatever * is not handled in the sub-relations, so it depends on which * sub-relations are considered. * @@ -538,7 +538,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, * we put it into the joininfo list for the joinrel. Otherwise, * the clause is now a restrict clause for the joined relation, and we * return it to the caller of build_joinrel_restrictlist() to be stored in - * join paths made from this pair of sub-relations. (It will not need to + * join paths made from this pair of sub-relations. (It will not need to * be considered further up the join tree.) * * In many case we will find the same RestrictInfos in both input @@ -557,7 +557,7 @@ build_joinrel_tlist(PlannerInfo *root, RelOptInfo *joinrel, * * NB: Formerly, we made deep(!) 
copies of each input RestrictInfo to pass * up to the join relation. I believe this is no longer necessary, because - * RestrictInfo nodes are no longer context-dependent. Instead, just include + * RestrictInfo nodes are no longer context-dependent. Instead, just include * the original nodes in the lists made for the join relation. */ static List * @@ -577,7 +577,7 @@ build_joinrel_restrictlist(PlannerInfo *root, result = subbuild_joinrel_restrictlist(joinrel, inner_rel->joininfo, result); /* - * Add on any clauses derived from EquivalenceClasses. These cannot be + * Add on any clauses derived from EquivalenceClasses. These cannot be * redundant with the clauses in the joininfo lists, so don't bother * checking. */ @@ -945,7 +945,7 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, *restrict_clauses); /* - * And now we can build the ParamPathInfo. No point in saving the + * And now we can build the ParamPathInfo. No point in saving the * input-pair-dependent clause list, though. * * Note: in GEQO mode, we'll be called in a temporary memory context, but @@ -965,8 +965,8 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, * Get the ParamPathInfo for a parameterized path for an append relation. * * For an append relation, the rowcount estimate will just be the sum of - * the estimates for its children. However, we still need a ParamPathInfo - * to flag the fact that the path requires parameters. So this just creates + * the estimates for its children. However, we still need a ParamPathInfo + * to flag the fact that the path requires parameters. So this just creates * a suitable struct with zero ppi_rows (and no ppi_clauses either, since * the Append node isn't responsible for checking quals). */ diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c index 62de5905232..e861ce66576 100644 --- a/src/backend/optimizer/util/restrictinfo.c +++ b/src/backend/optimizer/util/restrictinfo.c @@ -210,7 +210,7 @@ make_restrictinfo_internal(Expr *clause, /* * Fill in all the cacheable fields with "not yet set" markers. None of - * these will be computed until/unless needed. Note in particular that we + * these will be computed until/unless needed. Note in particular that we * don't mark a binary opclause as mergejoinable or hashjoinable here; * that happens only if it appears in the right context (top level of a * joinclause list). diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c index 5e26f3b57e3..f1f1be1b7fe 100644 --- a/src/backend/optimizer/util/tlist.c +++ b/src/backend/optimizer/util/tlist.c @@ -26,7 +26,7 @@ /* * tlist_member * Finds the (first) member of the given tlist whose expression is