DefineCustomRealVariable("auto_explain.sample_rate",
"Fraction of queries to process.",
- NULL,
- &auto_explain_sample_rate,
- 1.0,
- 0.0,
- 1.0,
- PGC_SUSET,
- 0,
- NULL,
- NULL,
- NULL);
+ NULL,
+ &auto_explain_sample_rate,
+ 1.0,
+ 0.0,
+ 1.0,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
EmitWarningsOnPlaceholders("auto_explain");
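For reference, the call reindented above follows the standard custom-GUC registration pattern; below is a minimal, self-contained sketch of the same pattern for a hypothetical module (the module name, variable, and bounds are illustrative and not part of auto_explain; the parameter order matches the DefineCustomRealVariable calls shown in this diff).

#include "postgres.h"
#include "fmgr.h"
#include "utils/guc.h"

PG_MODULE_MAGIC;

void		_PG_init(void);

/* hypothetical GUC variable, for illustration only */
static double my_sample_fraction = 1.0;

void
_PG_init(void)
{
	DefineCustomRealVariable("my_module.sample_fraction",
							 "Fraction of events to process.",
							 "Valid range is 0.0 .. 1.0.",
							 &my_sample_fraction,
							 1.0,		/* boot value */
							 0.0,		/* minimum */
							 1.0,		/* maximum */
							 PGC_USERSET,
							 0,			/* flags */
							 NULL,		/* check hook */
							 NULL,		/* assign hook */
							 NULL);		/* show hook */

	EmitWarningsOnPlaceholders("my_module");
}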
explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
/*
- * For rate sampling, randomly choose top-level statement. Either
- * all nested statements will be explained or none will.
+ * For rate sampling, randomly choose top-level statement. Either all
+ * nested statements will be explained or none will.
*/
if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
current_query_sampled = (random() < auto_explain_sample_rate *
- MAX_RANDOM_VALUE);
+ MAX_RANDOM_VALUE);
if (auto_explain_enabled() && current_query_sampled)
{
typedef struct
{
BloomState blstate; /* bloom index state */
- MemoryContext tmpCtx; /* temporary memory context reset after
- * each tuple */
+ MemoryContext tmpCtx; /* temporary memory context reset after each
+ * tuple */
char data[BLCKSZ]; /* cached page */
int64 count; /* number of tuples in cached page */
-} BloomBuildState;
+} BloomBuildState;
/*
* Flush page cached in BloomBuildState.
bloomBuildCallback, (void *) &buildstate);
/*
- * There are could be some items in cached page. Flush this page
- * if needed.
+ * There could be some items left in the cached page. Flush this page
+ * if needed.
*/
if (buildstate.count > 0)
flushCachedPage(index, &buildstate);
/* Opaque for bloom pages */
typedef struct BloomPageOpaqueData
{
- OffsetNumber maxoff; /* number of index tuples on page */
- uint16 flags; /* see bit definitions below */
- uint16 unused; /* placeholder to force maxaligning of size
- * of BloomPageOpaqueData and to place
- * bloom_page_id exactly at the end of page
- */
- uint16 bloom_page_id; /* for identification of BLOOM indexes */
-} BloomPageOpaqueData;
+ OffsetNumber maxoff; /* number of index tuples on page */
+ uint16 flags; /* see bit definitions below */
+ uint16 unused; /* placeholder to force maxaligning of size of
+ * BloomPageOpaqueData and to place
+ * bloom_page_id exactly at the end of page */
+ uint16 bloom_page_id; /* for identification of BLOOM indexes */
+} BloomPageOpaqueData;
typedef BloomPageOpaqueData *BloomPageOpaque;
{
int32 vl_len_; /* varlena header (do not touch directly!) */
int bloomLength; /* length of signature in words (not bits!) */
- int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for each
- * index key */
-} BloomOptions;
+ int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for
+ * each index key */
+} BloomOptions;
/*
* FreeBlockNumberArray - array of block numbers sized so that metadata fill
uint16 nEnd;
BloomOptions opts;
FreeBlockNumberArray notFullPage;
-} BloomMetaPageData;
+} BloomMetaPageData;
/* Magic number to distinguish bloom pages among others */
#define BLOOM_MAGICK_NUMBER (0xDBAC0DED)
* precompute it
*/
Size sizeOfBloomTuple;
-} BloomState;
+} BloomState;
#define BloomPageGetFreeSpace(state, page) \
(BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \
{
ItemPointerData heapPtr;
BloomSignatureWord sign[FLEXIBLE_ARRAY_MEMBER];
-} BloomTuple;
+} BloomTuple;
#define BLOOMTUPLEHDRSZ offsetof(BloomTuple, sign)
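Because BloomTuple ends with a flexible array member, the header-size macro above is what lets a tuple's full size be derived from the signature length; a hedged sketch of that arithmetic (presumably what the precomputed sizeOfBloomTuple field caches; the helper name is illustrative, not part of bloom.h):

/* Illustration only: size of one bloom tuple whose signature is
 * "bloomLength" words long (cf. BloomOptions.bloomLength above). */
static inline Size
bloom_tuple_size(int bloomLength)
{
	return BLOOMTUPLEHDRSZ + sizeof(BloomSignatureWord) * bloomLength;
}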
/* Opaque data structure for bloom index scan */
typedef struct BloomScanOpaqueData
{
- BloomSignatureWord *sign; /* Scan signature */
+ BloomSignatureWord *sign; /* Scan signature */
BloomState state;
-} BloomScanOpaqueData;
+} BloomScanOpaqueData;
typedef BloomScanOpaqueData *BloomScanOpaque;
/* blutils.c */
extern void _PG_init(void);
extern Datum blhandler(PG_FUNCTION_ARGS);
-extern void initBloomState(BloomState * state, Relation index);
+extern void initBloomState(BloomState *state, Relation index);
extern void BloomFillMetapage(Relation index, Page metaPage);
extern void BloomInitMetapage(Relation index);
extern void BloomInitPage(Page page, uint16 flags);
extern Buffer BloomNewBuffer(Relation index);
-extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno);
-extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull);
-extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple);
+extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno);
+extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull);
+extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple);
/* blvalidate.c */
extern bool blvalidate(Oid opclassoid);
/* Kind of relation options for bloom index */
static relopt_kind bl_relopt_kind;
+
/* parse table for fillRelOptions */
static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
* October 1988, p. 1195.
*----------
*/
- int32 hi, lo, x;
+ int32 hi,
+ lo,
+ x;
/* Must be in [1, 0x7ffffffe] range at this point. */
hi = next / 127773;
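The hi/lo split above implements Schrage's method for the Park-Miller "minimal standard" generator cited in the comment; the following self-contained sketch uses the standard constants (a = 16807, q = 127773, r = 2836, m = 2^31 - 1) and is an illustration of the technique, not the file's exact code.

#include <stdint.h>

/* One step of the minimal standard PRNG: next = (16807 * next) mod (2^31 - 1),
 * computed without overflowing 32-bit arithmetic via Schrage's method. */
static int32_t
minimal_standard_next(int32_t next)
{
	int32_t		hi = next / 127773;		/* q */
	int32_t		lo = next % 127773;		/* next = hi * q + lo */
	int32_t		x = 16807 * lo - 2836 * hi;

	if (x < 0)
		x += 2147483647;				/* m = 2^31 - 1 */
	return x;
}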
/* Iterate over the tuples */
itup = itupPtr = BloomPageGetTuple(&state, page, FirstOffsetNumber);
itupEnd = BloomPageGetTuple(&state, page,
- OffsetNumberNext(BloomPageGetMaxOffset(page)));
+ OffsetNumberNext(BloomPageGetMaxOffset(page)));
while (itup < itupEnd)
{
/* Do we have to delete this tuple? */
}
Assert(itupPtr == BloomPageGetTuple(&state, page,
- OffsetNumberNext(BloomPageGetMaxOffset(page))));
+ OffsetNumberNext(BloomPageGetMaxOffset(page))));
/*
- * Add page to notFullPage list if we will not mark page as deleted and
- * there is a free space on it
+ * Add page to notFullPage list if we will not mark page as deleted
+ * and there is a free space on it
*/
if (BloomPageGetMaxOffset(page) != 0 &&
BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&
AcquireSampleRowsFunc *func,
BlockNumber *totalpages);
static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
/*
* Helper functions
/*
* fileIsForeignScanParallelSafe
- * Reading a file in a parallel worker should work just the same as
- * reading it in the leader, so mark scans safe.
+ * Reading a file in a parallel worker should work just the same as
+ * reading it in the leader, so mark scans safe.
*/
static bool
fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte)
+ RangeTblEntry *rte)
{
return true;
}
unsigned check;
/*
- * The number should come in this format: 978-0-000-00000-0
- * or may be an ISBN-13 number, 979-..., which does not have a short
- * representation. Do the short output version if possible.
+ * The number should come in this format: 978-0-000-00000-0 or may be an
+ * ISBN-13 number, 979-..., which does not have a short representation. Do
+ * the short output version if possible.
*/
if (strncmp("978-", isn, 4) == 0)
{
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("illegal character '%c' in t_bits string", str[off])));
+ errmsg("illegal character '%c' in t_bits string", str[off])));
if (off % 8 == 7)
bits[off / 8] = byte;
lp_offset == MAXALIGN(lp_offset) &&
lp_offset + lp_len <= raw_page_size)
{
- HeapTupleHeader tuphdr;
- bytea *tuple_data_bytea;
- int tuple_data_len;
+ HeapTupleHeader tuphdr;
+ bytea *tuple_data_bytea;
+ int tuple_data_len;
/* Extract information from the tuple header */
tuple_data_bytea = (bytea *) palloc(tuple_data_len + VARHDRSZ);
SET_VARSIZE(tuple_data_bytea, tuple_data_len + VARHDRSZ);
memcpy(VARDATA(tuple_data_bytea), (char *) tuphdr + tuphdr->t_hoff,
- tuple_data_len);
+ tuple_data_len);
values[13] = PointerGetDatum(tuple_data_bytea);
/*
*/
static Datum
tuple_data_split_internal(Oid relid, char *tupdata,
- uint16 tupdata_len, uint16 t_infomask,
- uint16 t_infomask2, bits8 *t_bits,
- bool do_detoast)
+ uint16 tupdata_len, uint16 t_infomask,
+ uint16 t_infomask2, bits8 *t_bits,
+ bool do_detoast)
{
- ArrayBuildState *raw_attrs;
- int nattrs;
- int i;
- int off = 0;
- Relation rel;
- TupleDesc tupdesc;
+ ArrayBuildState *raw_attrs;
+ int nattrs;
+ int i;
+ int off = 0;
+ Relation rel;
+ TupleDesc tupdesc;
/* Get tuple descriptor from relation OID */
rel = relation_open(relid, NoLock);
for (i = 0; i < nattrs; i++)
{
- Form_pg_attribute attr;
- bool is_null;
- bytea *attr_data = NULL;
+ Form_pg_attribute attr;
+ bool is_null;
+ bytea *attr_data = NULL;
attr = tupdesc->attrs[i];
is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
/*
- * Tuple header can specify less attributes than tuple descriptor
- * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
- * actually change tuples in pages, so attributes with numbers greater
- * than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
+ * The tuple header can specify fewer attributes than the tuple
+ * descriptor, as ALTER TABLE ADD COLUMN without the DEFAULT keyword
+ * does not actually change tuples in pages, so attributes with numbers
+ * greater than (t_infomask2 & HEAP_NATTS_MASK) should be treated as
+ * NULL.
*/
if (i >= (t_infomask2 & HEAP_NATTS_MASK))
is_null = true;
if (!is_null)
{
- int len;
+ int len;
if (attr->attlen == -1)
{
off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
tupdata + off);
+
/*
* As VARSIZE_ANY throws an exception if it can't properly
* detect the type of external storage in macros VARTAG_SIZE,
!VARATT_IS_EXTERNAL_ONDISK(tupdata + off) &&
!VARATT_IS_EXTERNAL_INDIRECT(tupdata + off))
ereport(ERROR,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
len = VARSIZE_ANY(tupdata + off);
}
if (tupdata_len != off)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("end of tuple reached without looking at all its data")));
+ errmsg("end of tuple reached without looking at all its data")));
return makeArrayResult(raw_attrs, CurrentMemoryContext);
}
Datum
tuple_data_split(PG_FUNCTION_ARGS)
{
- Oid relid;
- bytea *raw_data;
- uint16 t_infomask;
- uint16 t_infomask2;
- char *t_bits_str;
- bool do_detoast = false;
- bits8 *t_bits = NULL;
- Datum res;
+ Oid relid;
+ bytea *raw_data;
+ uint16 t_infomask;
+ uint16 t_infomask2;
+ char *t_bits_str;
+ bool do_detoast = false;
+ bits8 *t_bits = NULL;
+ Datum res;
relid = PG_GETARG_OID(0);
raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1);
*/
if (t_infomask & HEAP_HASNULL)
{
- int bits_str_len;
- int bits_len;
+ int bits_str_len;
+ int bits_len;
bits_len = (t_infomask2 & HEAP_NATTS_MASK) / 8 + 1;
if (!t_bits_str)
Datum
gin_trgm_triconsistent(PG_FUNCTION_ARGS)
{
- GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
+ GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
/* text *query = PG_GETARG_TEXT_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4);
- GinTernaryValue res = GIN_MAYBE;
+ GinTernaryValue res = GIN_MAYBE;
int32 i,
ntrue;
bool *boolcheck;
}
/*
- * See comment in gin_trgm_consistent() about * upper bound formula
+ * See comment in gin_trgm_consistent() about the upper bound
+ * formula.
*/
res = (nkeys == 0)
? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
- ? GIN_MAYBE : GIN_FALSE);
+ ? GIN_MAYBE : GIN_FALSE);
break;
case ILikeStrategyNumber:
#ifndef IGNORECASE
else
{
/*
- * As trigramsMatchGraph implements a monotonic boolean function,
- * promoting all GIN_MAYBE keys to GIN_TRUE will give a
- * conservative result.
+ * As trigramsMatchGraph implements a monotonic boolean
+ * function, promoting all GIN_MAYBE keys to GIN_TRUE will
+ * give a conservative result.
*/
boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
for (i = 0; i < nkeys; i++)
break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
- res = GIN_FALSE; /* keep compiler quiet */
+ res = GIN_FALSE; /* keep compiler quiet */
break;
}
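As context for the comment above about promoting GIN_MAYBE keys: for a monotonic boolean function, flipping any input from false to true can never flip the output from true to false, so evaluating with every MAYBE promoted to true yields a safe upper bound. A self-contained illustration of that idea follows (not pg_trgm code; all names here are hypothetical).

#include <stdbool.h>

/* Ternary key states, mirroring GIN_FALSE / GIN_TRUE / GIN_MAYBE. */
typedef enum {KEY_FALSE, KEY_TRUE, KEY_MAYBE} ternary_key;

/*
 * Evaluate a monotonic boolean function over ternary inputs by promoting
 * every MAYBE to true.  A false result proves the exact answer is false for
 * any resolution of the MAYBEs; a true result only means "maybe".
 */
static bool
monotonic_upper_bound(bool (*f) (const bool *keys, int nkeys),
					  const ternary_key *check, int nkeys, bool *scratch)
{
	int			i;

	for (i = 0; i < nkeys; i++)
		scratch[i] = (check[i] != KEY_FALSE);	/* MAYBE -> true */
	return f(scratch, nkeys);
}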
if (GIST_LEAF(entry))
{ /* all leafs contains orig trgm */
+
/*
* Prevent gcc optimizing the tmpsml variable using volatile
* keyword. Otherwise comparison of nlimit and tmpsml may give
*recheck = strategy == WordDistanceStrategyNumber;
if (GIST_LEAF(entry))
{ /* all leafs contains orig trgm */
+
/*
* Prevent gcc optimizing the sml variable using volatile
* keyword. Otherwise res can differ from the
* word_similarity_dist_op() function.
*/
float4 volatile sml = cnt_sml(qtrg, key, *recheck);
+
res = 1.0 - sml;
}
else if (ISALLTRUE(key))
PG_MODULE_MAGIC;
/* GUC variables */
-double similarity_threshold = 0.3f;
-double word_similarity_threshold = 0.6f;
+double similarity_threshold = 0.3f;
+double word_similarity_threshold = 0.6f;
void _PG_init(void);
/* Trigram with position */
typedef struct
{
- trgm trg;
- int index;
+ trgm trg;
+ int index;
} pos_trgm;
/*
{
/* Define custom GUC variables. */
DefineCustomRealVariable("pg_trgm.similarity_threshold",
- "Sets the threshold used by the %% operator.",
- "Valid range is 0.0 .. 1.0.",
- &similarity_threshold,
- 0.3,
- 0.0,
- 1.0,
- PGC_USERSET,
- 0,
- NULL,
- NULL,
- NULL);
+ "Sets the threshold used by the %% operator.",
+ "Valid range is 0.0 .. 1.0.",
+ &similarity_threshold,
+ 0.3,
+ 0.0,
+ 1.0,
+ PGC_USERSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
DefineCustomRealVariable("pg_trgm.word_similarity_threshold",
- "Sets the threshold used by the <%% operator.",
- "Valid range is 0.0 .. 1.0.",
- &word_similarity_threshold,
- 0.6,
- 0.0,
- 1.0,
- PGC_USERSET,
- 0,
- NULL,
- NULL,
- NULL);
+ "Sets the threshold used by the <%% operator.",
+ "Valid range is 0.0 .. 1.0.",
+ &word_similarity_threshold,
+ 0.6,
+ 0.0,
+ 1.0,
+ PGC_USERSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
}
/*
* Make array of positional trigrams from two trigram arrays trg1 and trg2.
*
* trg1: trigram array of search pattern, of length len1. trg1 is required
- * word which positions don't matter and replaced with -1.
+ * word whose positions don't matter and are replaced with -1.
* trg2: trigram array of text, of length len2. trg2 is haystack where we
- * search and have to store its positions.
+ * search and have to store its positions.
*
* Returns concatenated trigram array.
*/
make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
{
pos_trgm *result;
- int i, len = len1 + len2;
+ int i,
+ len = len1 + len2;
result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
static int
comp_ptrgm(const void *v1, const void *v2)
{
- const pos_trgm *p1 = (const pos_trgm *)v1;
- const pos_trgm *p2 = (const pos_trgm *)v2;
- int cmp;
+ const pos_trgm *p1 = (const pos_trgm *) v1;
+ const pos_trgm *p2 = (const pos_trgm *) v2;
+ int cmp;
cmp = CMPTRGM(p1->trg, p2->trg);
if (cmp != 0)
* len2: length of array "trg2" and array "trg2indexes".
* len: length of the array "found".
* check_only: if true then only check existence of similar search pattern in
- * text.
+ * text.
*
* Returns word similarity.
*/
for (i = 0; i < len2; i++)
{
/* Get index of next trigram */
- int trgindex = trg2indexes[i];
+ int trgindex = trg2indexes[i];
/* Update last position of this trigram */
if (lower >= 0 || found[trgindex])
/* Adjust lower bound if this trigram is present in required substring */
if (found[trgindex])
{
- int prev_lower,
- tmp_ulen2,
- tmp_lower,
- tmp_count;
+ int prev_lower,
+ tmp_ulen2,
+ tmp_lower,
+ tmp_count;
upper = i;
if (lower == -1)
prev_lower = lower;
for (tmp_lower = lower; tmp_lower <= upper; tmp_lower++)
{
- float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
- int tmp_trgindex;
+ float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
+ int tmp_trgindex;
if (smlr_tmp > smlr_cur)
{
lower = tmp_lower;
count = tmp_count;
}
+
/*
* if we only check that word similarity is greater than
- * pg_trgm.word_similarity_threshold we do not need to calculate
- * a maximum similarity.
+ * pg_trgm.word_similarity_threshold we do not need to
+ * calculate a maximum similarity.
*/
if (check_only && smlr_cur >= word_similarity_threshold)
break;
}
smlr_max = Max(smlr_max, smlr_cur);
+
/*
* if we only check that word similarity is greater than
* pg_trgm.word_similarity_threshold we do not need to calculate a
for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
{
- int tmp_trgindex;
+ int tmp_trgindex;
+
tmp_trgindex = trg2indexes[tmp_lower];
if (lastpos[tmp_trgindex] == tmp_lower)
lastpos[tmp_trgindex] = -1;
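The check_only comments above describe an early-exit optimisation: when the caller only needs to know whether the maximum similarity reaches pg_trgm.word_similarity_threshold, the scan over candidate windows can stop at the first window that reaches it. A small stand-alone sketch of that pattern (illustrative names only, not the pg_trgm code itself):

#include <stdbool.h>

/* Return the best similarity seen, stopping early in check_only mode once
 * the threshold is reached, since the exact maximum is then irrelevant. */
static float
best_similarity(const float *window_sml, int nwindows,
				float threshold, bool check_only)
{
	float		best = 0.0f;
	int			i;

	for (i = 0; i < nwindows; i++)
	{
		if (window_sml[i] > best)
			best = window_sml[i];
		if (check_only && best >= threshold)
			break;
	}
	return best;
}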
* str1: search pattern string, of length slen1 bytes.
* str2: text in which we are looking for a word, of length slen2 bytes.
* check_only: if true then only check existence of similar search pattern in
- * text.
+ * text.
*
* Returns word similarity.
*/
static float4
calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
- bool check_only)
+ bool check_only)
{
bool *found;
pos_trgm *ptrg;
protect_out_of_mem(slen1 + slen2);
/* Make positional trigrams */
- trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
- trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
+ trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
+ trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
len1 = generate_trgm_only(trg1, str1, slen1);
len2 = generate_trgm_only(trg2, str2, slen2);
{
if (i > 0)
{
- int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+ int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+
if (cmp != 0)
{
if (found[j])
/* Run iterative procedure to find maximum similarity with word */
result = iterate_word_similarity(trg2indexes, found, ulen1, len2, len,
- check_only);
+ check_only);
pfree(trg2indexes);
pfree(found);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- false);
+ VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
+ false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- true);
+ VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
+ true);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- true);
+ VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
+ true);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- false);
+ VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
+ false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- false);
+ VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
+ false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
typedef struct vbits
{
- BlockNumber next;
- BlockNumber count;
+ BlockNumber next;
+ BlockNumber count;
uint8 bits[FLEXIBLE_ARRAY_MEMBER];
} vbits;
if (SRF_IS_FIRSTCALL())
{
Oid relid = PG_GETARG_OID(0);
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
if (SRF_IS_FIRSTCALL())
{
Oid relid = PG_GETARG_OID(0);
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
{
Oid relid = PG_GETARG_OID(0);
Relation rel;
- BlockNumber nblocks;
- BlockNumber blkno;
+ BlockNumber nblocks;
+ BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
int64 all_visible = 0;
int64 all_frozen = 0;
collect_visibility_data(Oid relid, bool include_pd)
{
Relation rel;
- BlockNumber nblocks;
+ BlockNumber nblocks;
vbits *info;
- BlockNumber blkno;
+ BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
- BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
+ BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
rel = relation_open(relid, AccessShareLock);
nblocks = RelationGetNumberOfBlocks(rel);
- info = palloc0(offsetof(vbits, bits) + nblocks);
+ info = palloc0(offsetof(vbits, bits) + nblocks);
info->next = 0;
info->count = nblocks;
info->bits[blkno] |= (1 << 1);
/*
- * Page-level data requires reading every block, so only get it if
- * the caller needs it. Use a buffer access strategy, too, to prevent
+ * Page-level data requires reading every block, so only get it if the
+ * caller needs it. Use a buffer access strategy, too, to prevent
* cache-trashing.
*/
if (include_pd)
uint8 mode;
uint8 digest_algo;
uint8 salt[8];
- uint8 iter; /* encoded (one-octet) count */
+ uint8 iter; /* encoded (one-octet) count */
/* calculated: */
uint8 key[PGP_MAX_KEY];
uint8 key_len;
for (;;)
{
- PGresult *res;
+ PGresult *res;
while (PQisBusy(conn))
{
- int wc;
+ int wc;
/* Sleep until there's something to do */
wc = WaitLatchOrSocket(MyLatch,
/*
* If a command has been submitted to the remote server by
* using an asynchronous execution function, the command
- * might not have yet completed. Check to see if a command
- * is still being processed by the remote server, and if so,
- * request cancellation of the command.
+ * might not have yet completed. Check to see if a
+ * command is still being processed by the remote server,
+ * and if so, request cancellation of the command.
*/
if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
{
if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
ereport(WARNING,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("could not send cancel request: %s",
- errbuf)));
+ errmsg("could not send cancel request: %s",
+ errbuf)));
PQfreeCancel(cancel);
}
}
entry->have_error = true;
/*
- * If a command has been submitted to the remote server by using an
- * asynchronous execution function, the command might not have yet
- * completed. Check to see if a command is still being processed by
- * the remote server, and if so, request cancellation of the
- * command.
+ * If a command has been submitted to the remote server by using
+ * an asynchronous execution function, the command might not have
+ * yet completed. Check to see if a command is still being
+ * processed by the remote server, and if so, request cancellation
+ * of the command.
*/
if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
{
/*
* All other system attributes are fetched as 0, except for table OID,
* which is fetched as the local table OID. However, we must be
- * careful; the table could be beneath an outer join, in which case
- * it must go to NULL whenever the rest of the row does.
+ * careful; the table could be beneath an outer join, in which case it
+ * must go to NULL whenever the rest of the row does.
*/
- Oid fetchval = 0;
+ Oid fetchval = 0;
if (varattno == TableOidAttributeNumber)
{
0 - FirstLowInvalidHeapAttributeNumber);
/*
- * In case the whole-row reference is under an outer join then it has to
- * go NULL whenver the rest of the row goes NULL. Deparsing a join query
- * would always involve multiple relations, thus qualify_col would be
- * true.
+ * If the whole-row reference is under an outer join, it has to go to
+ * NULL whenever the rest of the row goes NULL. Deparsing a join query
+ * would always involve multiple relations, thus qualify_col would be
+ * true.
*/
if (qualify_col)
{
/* Complete the CASE WHEN statement started above. */
if (qualify_col)
- appendStringInfo(buf," END");
+ appendStringInfo(buf, " END");
heap_close(rel, NoLock);
bms_free(attrs_used);
}
else if (strcmp(def->defname, "fetch_size") == 0)
{
- int fetch_size;
+ int fetch_size;
- fetch_size = strtol(defGetString(def), NULL,10);
+ fetch_size = strtol(defGetString(def), NULL, 10);
if (fetch_size <= 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
/*
* Pull the other remote conditions from the joining relations into join
- * clauses or other remote clauses (remote_conds) of this relation wherever
- * possible. This avoids building subqueries at every join step, which is
- * not currently supported by the deparser logic.
+ * clauses or other remote clauses (remote_conds) of this relation
+ * wherever possible. This avoids building subqueries at every join step,
+ * which is not currently supported by the deparser logic.
*
* For an inner join, clauses from both the relations are added to the
- * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
- * outer side are added to remote_conds since those can be evaluated after
- * the join is evaluated. The clauses from inner side are added to the
- * joinclauses, since they need to evaluated while constructing the join.
+ * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+ * the outer side are added to remote_conds since those can be evaluated
+ * after the join is evaluated. The clauses from the inner side are
+ * added to the joinclauses, since they need to be evaluated while
+ * constructing the join.
*
- * For a FULL OUTER JOIN, the other clauses from either relation can not be
- * added to the joinclauses or remote_conds, since each relation acts as an
- * outer relation for the other. Consider such full outer join as
+ * For a FULL OUTER JOIN, the other clauses from either relation can not
+ * be added to the joinclauses or remote_conds, since each relation acts
+ * as an outer relation for the other. Consider such full outer join as
* unshippable because of the reasons mentioned above in this comment.
*
* The joining sides can not have local conditions, thus no need to test
ForeignServer *server;
UserMapping *user; /* only set in use_remote_estimate mode */
- int fetch_size; /* fetch size for this remote table */
+ int fetch_size; /* fetch size for this remote table */
/*
* Name of the relation while EXPLAINing ForeignScan. It is used for join
List *targetAttrs, List *returningList,
List **retrieved_attrs);
extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *targetlist,
- List *targetAttrs,
- List *remote_conds,
- List **params_list,
- List *returningList,
- List **retrieved_attrs);
+ Index rtindex, Relation rel,
+ List *targetlist,
+ List *targetAttrs,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
List *returningList,
List **retrieved_attrs);
extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *remote_conds,
- List **params_list,
- List *returningList,
- List **retrieved_attrs);
+ Index rtindex, Relation rel,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
List **retrieved_attrs);
if (nid == NID_undef)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unknown OpenSSL extension in certificate at position %d",
- call_cntr)));
+ errmsg("unknown OpenSSL extension in certificate at position %d",
+ call_cntr)));
values[0] = CStringGetTextDatum(OBJ_nid2sn(nid));
nulls[0] = false;
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
static void pg_decode_message(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, XLogRecPtr message_lsn,
- bool transactional, const char *prefix,
- Size sz, const char *message);
+ ReorderBufferTXN *txn, XLogRecPtr message_lsn,
+ bool transactional, const char *prefix,
+ Size sz, const char *message);
void
_PG_init(void)
{
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffer;
- BlockNumber regpgno;
+ BlockNumber regpgno;
Page page;
XLogRedoAction action;
"fillfactor",
"Packs table pages only to this percentage",
RELOPT_KIND_HEAP,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs btree index pages only to this percentage",
RELOPT_KIND_BTREE,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs hash index pages only to this percentage",
RELOPT_KIND_HASH,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs gist index pages only to this percentage",
RELOPT_KIND_GIST,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs spgist index pages only to this percentage",
RELOPT_KIND_SPGIST,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
},
LOCKMODE
AlterTableGetRelOptionsLockLevel(List *defList)
{
- LOCKMODE lockmode = NoLock;
- ListCell *cell;
+ LOCKMODE lockmode = NoLock;
+ ListCell *cell;
if (defList == NIL)
return AccessExclusiveLock;
foreach(cell, defList)
{
- DefElem *def = (DefElem *) lfirst(cell);
- int i;
+ DefElem *def = (DefElem *) lfirst(cell);
+ int i;
for (i = 0; relOpts[i]; i++)
{
int64 nDeletedHeapTuples = 0;
ginxlogDeleteListPages data;
Buffer buffers[GIN_NDELETE_AT_ONCE];
- BlockNumber freespace[GIN_NDELETE_AT_ONCE];
+ BlockNumber freespace[GIN_NDELETE_AT_ONCE];
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
bool inVacuum = (stats == NULL);
/*
- * We would like to prevent concurrent cleanup process. For
- * that we will lock metapage in exclusive mode using LockPage()
- * call. Nobody other will use that lock for metapage, so
- * we keep possibility of concurrent insertion into pending list
+ * We would like to prevent concurrent cleanup processes. For that we
+ * lock the metapage in exclusive mode using LockPage(). Nobody else
+ * will use that lock on the metapage, so concurrent insertion into the
+ * pending list remains possible.
*/
if (inVacuum)
{
/*
- * We are called from [auto]vacuum/analyze or
- * gin_clean_pending_list() and we would like to wait
- * concurrent cleanup to finish.
+ * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+ * and we would like to wait for concurrent cleanup to finish.
*/
LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
workMemory =
(IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
- autovacuum_work_mem : maintenance_work_mem;
+ autovacuum_work_mem : maintenance_work_mem;
}
else
{
/*
- * We are called from regular insert and if we see
- * concurrent cleanup just exit in hope that concurrent
- * process will clean up pending list.
+ * We are called from a regular insert; if we see concurrent cleanup,
+ * just exit in the hope that the concurrent process will clean up the
+ * pending list.
*/
if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
return;
Assert(!GinPageIsDeleted(page));
/*
- * Are we walk through the page which as we remember was a tail when we
- * start our cleanup? But if caller asks us to clean up whole pending
- * list then ignore old tail, we will work until list becomes empty.
+ * Have we reached the page that we remember was the tail when we
+ * started our cleanup? But if the caller asks us to clean up the whole
+ * pending list, ignore the old tail; we will work until the list
+ * becomes empty.
*/
if (blkno == blknoFinish && full_clean == false)
cleanupFinish = true;
* locking */
/*
- * remove read pages from pending list, at this point all
- * content of read pages is in regular structure
+ * remove the read pages from the pending list; at this point all of
+ * their content is in the regular index structure
*/
shiftList(index, metabuffer, blkno, fill_fsm, stats);
ReleaseBuffer(metabuffer);
/*
- * As pending list pages can have a high churn rate, it is
- * desirable to recycle them immediately to the FreeSpace Map when
- * ordinary backends clean the list.
+ * As pending list pages can have a high churn rate, it is desirable to
+ * recycle them immediately to the FreeSpace Map when ordinary backends
+ * clean the list.
*/
if (fsm_vac && fill_fsm)
IndexFreeSpaceMapVacuum(index);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
- errhint("GIN pending list cannot be cleaned up during recovery.")));
+ errhint("GIN pending list cannot be cleaned up during recovery.")));
/* Must be a GIN index */
if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
&htup->t_self);
/* If we've maxed out our available memory, dump everything to the index */
- if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
+ if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
{
ItemPointerData *list;
Datum key;
{
/* Yes, so initialize stats to zeroes */
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
/*
- * and cleanup any pending inserts */
+ * and cleanup any pending inserts
+ */
ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
false, stats);
}
gistvacuumpage(Relation rel, Page page, Buffer buffer)
{
OffsetNumber deletable[MaxIndexTuplesPerPage];
- int ndeletable = 0;
- OffsetNumber offnum, maxoff;
+ int ndeletable = 0;
+ OffsetNumber offnum,
+ maxoff;
Assert(GistPageIsLeaf(page));
static void
gistkillitems(IndexScanDesc scan)
{
- GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId iid;
- int i;
- bool killedsomething = false;
+ GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId iid;
+ int i;
+ bool killedsomething = false;
Assert(so->curBlkno != InvalidBlockNumber);
Assert(!XLogRecPtrIsInvalid(so->curPageLSN));
page = BufferGetPage(buffer);
/*
- * If page LSN differs it means that the page was modified since the last read.
- * killedItems could be not valid so LP_DEAD hints applying is not safe.
+ * If the page LSN differs, the page was modified since the last read.
+ * killedItems could no longer be valid, so applying LP_DEAD hints is
+ * not safe.
*/
- if(PageGetLSN(page) != so->curPageLSN)
+ if (PageGetLSN(page) != so->curPageLSN)
{
UnlockReleaseBuffer(buffer);
- so->numKilled = 0; /* reset counter */
+ so->numKilled = 0; /* reset counter */
return;
}
Assert(GistPageIsLeaf(page));
/*
- * Mark all killedItems as dead. We need no additional recheck,
- * because, if page was modified, pageLSN must have changed.
+ * Mark all killedItems as dead. We need no additional recheck, because,
+ * if page was modified, pageLSN must have changed.
*/
for (i = 0; i < so->numKilled; i++)
{
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- ItemId iid = PageGetItemId(page, i);
+ ItemId iid = PageGetItemId(page, i);
IndexTuple it;
bool match;
bool recheck;
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual.
*/
- if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
+ if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
continue;
it = (IndexTuple) PageGetItem(page, iid);
+
/*
* Must call gistindex_keytest in tempCxt, and clean up any leftover
* junk afterward.
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
- * sizeof(OffsetNumber));
+ * sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
- * sizeof(OffsetNumber));
+ * sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
ScanKey skey = scan->keyData + i;
/*
- * Copy consistent support function to ScanKey structure
- * instead of function implementing filtering operator.
+ * Copy consistent support function to ScanKey structure instead
+ * of function implementing filtering operator.
*/
fmgr_info_copy(&(skey->sk_func),
&(so->giststate->consistentFn[skey->sk_attno - 1]),
so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
/*
- * Copy distance support function to ScanKey structure
- * instead of function implementing ordering operator.
+ * Copy distance support function to ScanKey structure instead of
+ * function implementing ordering operator.
*/
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
{
BlockNumber page = InvalidBlockNumber;
BlockNumber sync_startpage = InvalidBlockNumber;
- BlockNumber report_page = InvalidBlockNumber;
+ BlockNumber report_page = InvalidBlockNumber;
ParallelHeapScanDesc parallel_scan;
Assert(scan->rs_parallel);
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{
Page page;
- BlockNumber blockNum = InvalidBlockNumber,
+ BlockNumber blockNum = InvalidBlockNumber,
firstBlock = InvalidBlockNumber;
int extraBlocks = 0;
int lockWaiters = 0;
return;
/*
- * It might seem like multiplying the number of lock waiters by as much
- * as 20 is too aggressive, but benchmarking revealed that smaller numbers
- * were insufficient. 512 is just an arbitrary cap to prevent pathological
- * results.
+ * It might seem like multiplying the number of lock waiters by as much as
+ * 20 is too aggressive, but benchmarking revealed that smaller numbers
+ * were insufficient. 512 is just an arbitrary cap to prevent
+ * pathological results.
*/
extraBlocks = Min(512, lockWaiters * 20);
}
/*
- * Updating the upper levels of the free space map is too expensive
- * to do for every block, but it's worth doing once at the end to make
- * sure that subsequent insertion activity sees all of those nifty free
- * pages we just inserted.
+ * Updating the upper levels of the free space map is too expensive to do
+ * for every block, but it's worth doing once at the end to make sure that
+ * subsequent insertion activity sees all of those nifty free pages we
+ * just inserted.
*
* Note that we're using the freespace value that was reported for the
* last block we added as if it were the freespace value for every block
}
/*
- * In addition to whatever extension we performed above, we always add
- * at least one block to satisfy our own request.
+ * In addition to whatever extension we performed above, we always add at
+ * least one block to satisfy our own request.
*
* XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is
OldestXmin = RecentGlobalXmin;
else
OldestXmin =
- TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
- relation);
+ TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
+ relation);
Assert(TransactionIdIsValid(OldestXmin));
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
Page page;
- uint8 *map;
+ uint8 *map;
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
page = BufferGetPage(vmBuf);
- map = (uint8 *)PageGetContents(page);
+ map = (uint8 *) PageGetContents(page);
LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
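As background for the macros used above: in the 9.6-era visibility map each heap block has two bits (all-visible and all-frozen), i.e. four heap blocks per map byte, which is what HEAPBLK_TO_MAPBYTE and HEAPBLK_TO_OFFSET index into. A hedged sketch of that addressing, simplified to a block offset within one map page (the helper name is illustrative, and 0x03 stands in for VISIBILITYMAP_VALID_BITS):

/* Extract the two visibility-map bits for a heap block, given its position
 * within a single map page. */
static uint8
vm_bits_for_block(const uint8 *map, uint32 blockInPage)
{
	uint32		mapByte = blockInPage / 4;			/* 4 heap blocks per byte */
	uint32		mapOffset = (blockInPage % 4) * 2;	/* 2 bits per heap block */

	return (map[mapByte] >> mapOffset) & 0x03;
}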
* Check for a conflict-in as we would if we were going to
* write to this page. We aren't actually going to write,
* but we want a chance to report SSI conflicts that would
- * otherwise be masked by this unique constraint violation.
+ * otherwise be masked by this unique constraint
+ * violation.
*/
CheckForSerializableConflictIn(rel, NULL, buf);
/*
* Check to see if we need to issue one final WAL record for this index,
- * which may be needed for correctness on a hot standby node when
- * non-MVCC index scans could take place.
+ * which may be needed for correctness on a hot standby node when non-MVCC
+ * index scans could take place.
*
* If the WAL is replayed in hot standby, the replay process needs to get
* cleanup locks on all index leaf pages, just as we've been doing here.
if (ndeletable > 0)
{
/*
- * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
- * information to the replay code to allow it to get a cleanup lock
- * on all pages between the previous lastBlockVacuumed and this page.
- * This ensures that WAL replay locks all leaf pages at some point,
- * which is important should non-MVCC scans be requested.
- * This is currently unused on standby, but we record it anyway, so
- * that the WAL contains the required information.
+ * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
+ * all information to the replay code to allow it to get a cleanup
+ * lock on all pages between the previous lastBlockVacuumed and
+ * this page. This ensures that WAL replay locks all leaf pages at
+ * some point, which is important should non-MVCC scans be
+ * requested. This is currently unused on standby, but we record
+ * it anyway, so that the WAL contains the required information.
*
* Since we can visit leaf pages out-of-order when recursing,
* replay might end up locking such pages an extra time, but it
xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
- * This section of code is thought to be no longer needed, after
- * analysis of the calling paths. It is retained to allow the code
- * to be reinstated if a flaw is revealed in that thinking.
+ * This section of code is thought to be no longer needed, after analysis
+ * of the calling paths. It is retained to allow the code to be reinstated
+ * if a flaw is revealed in that thinking.
*
* If we are running non-MVCC scans using this index we need to do some
* additional work to ensure correctness, which is known as a "pin scan"
* described in more detail in next paragraphs. We used to do the extra
- * work in all cases, whereas we now avoid that work in most cases.
- * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+ * work in all cases, whereas we now avoid that work in most cases. If
+ * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* additional work required for the pin scan.
*
* Avoiding this extra work is important since it requires us to touch
while (ptr < end)
{
- OffsetNumber offset,
- length;
+ OffsetNumber offset,
+ length;
memcpy(&offset, ptr, sizeof(offset));
ptr += sizeof(offset);
xl_logical_message *xlrec = (xl_logical_message *) rec;
appendStringInfo(buf, "%s message size %zu bytes",
- xlrec->transactional ? "transactional" : "nontransactional",
+ xlrec->transactional ? "transactional" : "nontransactional",
xlrec->message_size);
}
}
Oid dbId, Oid tsId,
bool relcacheInitFileInval)
{
- int i;
+ int i;
if (relcacheInitFileInval)
appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",
if (parsed.nmsgs > 0)
{
standby_desc_invalidations(
- buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
- XactCompletionRelcacheInitFileInval(parsed.xinfo));
+ buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
+ XactCompletionRelcacheInitFileInval(parsed.xinfo));
}
if (XactCompletionForceSyncCommit(parsed.xinfo))
const struct config_enum_entry wal_level_options[] = {
{"minimal", WAL_LEVEL_MINIMAL, false},
{"replica", WAL_LEVEL_REPLICA, false},
- {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
- {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
+ {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
+ {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"logical", WAL_LEVEL_LOGICAL, false},
{NULL, 0, false}
};
{
TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
- bool commitTsActive;
+ bool commitTsActive;
} CommitTimestampShared;
CommitTimestampShared *commitTsShared;
* No-op if the module is not active.
*
* An unlocked read here is fine, because in a standby (the only place
- * where the flag can change in flight) this routine is only called by
- * the recovery process, which is also the only process which can change
- * the flag.
+ * where the flag can change in flight) this routine is only called by the
+ * recovery process, which is also the only process which can change the
+ * flag.
*/
if (!commitTsShared->commitTsActive)
return;
int pageno;
/*
- * Nothing to do if module not enabled. Note we do an unlocked read of the
- * flag here, which is okay because this routine is only called from
+ * Nothing to do if module not enabled. Note we do an unlocked read of
+ * the flag here, which is okay because this routine is only called from
* GetNewTransactionId, which is never called in a standby.
*/
Assert(!InRecovery);
{
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId &&
- TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
+ TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
ShmemVariableCache->oldestCommitTsXid = oldestXact;
LWLockRelease(CommitTsLock);
}
Buffer buffer; /* registered buffer */
int flags; /* flags for this buffer */
int deltaLen; /* space consumed in delta field */
- char *image; /* copy of page image for modification,
- * do not do it in-place to have aligned
- * memory chunk */
+ char *image; /* copy of page image for modification, do not
+ * do it in-place to have aligned memory chunk */
char delta[MAX_DELTA_SIZE]; /* delta between page images */
} PageData;
char *oldest_datname = get_database_name(oldest_datoid);
/*
- * Immediately kick autovacuum into action as we're already
- * in ERROR territory.
+ * Immediately kick autovacuum into action as we're already in
+ * ERROR territory.
*/
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used",
"database with OID %u must be vacuumed before %d more multixact members are used",
- MultiXactState->offsetStopLimit - nextOffset + nmembers,
- MultiXactState->oldestMultiXactDB,
+ MultiXactState->offsetStopLimit - nextOffset + nmembers,
+ MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));
nworkers = 0;
/*
- * If we are running under serializable isolation, we can't use
- * parallel workers, at least not until somebody enhances that mechanism
- * to be parallel-aware.
+ * If we are running under serializable isolation, we can't use parallel
+ * workers, at least not until somebody enhances that mechanism to be
+ * parallel-aware.
*/
if (IsolationIsSerializable())
nworkers = 0;
}
/*
- * We can't finish transaction commit or abort until all of the
- * workers have exited. This means, in particular, that we can't respond
- * to interrupts at this stage.
+ * We can't finish transaction commit or abort until all of the workers
+ * have exited. This means, in particular, that we can't respond to
+ * interrupts at this stage.
*/
HOLD_INTERRUPTS();
WaitForParallelWorkersToExit(pcxt);
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid magic number in dynamic shared memory segment")));
+ errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
*/
/*
- * Join locking group. We must do this before anything that could try
- * to acquire a heavyweight lock, because any heavyweight locks acquired
- * to this point could block either directly against the parallel group
+ * Join locking group. We must do this before anything that could try to
+ * acquire a heavyweight lock, because any heavyweight locks acquired to
+ * this point could block either directly against the parallel group
* leader or against some process which in turn waits for a lock that
* conflicts with the parallel group leader, causing an undetected
* deadlock. (If we can't join the lock group, the leader has gone away,
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
+ sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
for (slotno = 0; slotno < nslots; slotno++)
{
LWLockInitialize(&shared->buffer_locks[slotno].lock,
- shared->lwlock_tranche_id);
+ shared->lwlock_tranche_id);
shared->page_buffer[slotno] = ptr;
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
startPage++;
/* must account for wraparound */
if (startPage > TransactionIdToPage(MaxTransactionId))
- startPage=0;
+ startPage = 0;
}
(void) ZeroSUBTRANSPage(startPage);
TimestampTz prepared_at; /* time of preparation */
/*
- * Note that we need to keep track of two LSNs for each GXACT.
- * We keep track of the start LSN because this is the address we must
- * use to read state data back from WAL when committing a prepared GXACT.
- * We keep track of the end LSN because that is the LSN we need to wait
- * for prior to commit.
+ * Note that we need to keep track of two LSNs for each GXACT. We keep
+ * track of the start LSN because this is the address we must use to read
+ * state data back from WAL when committing a prepared GXACT. We keep
+ * track of the end LSN because that is the LSN we need to wait for prior
+ * to commit.
*/
- XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
+ XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
Oid owner; /* ID of user that executed the xact */
hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels);
hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs,
&hdr.initfileinval);
- hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
+ hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
save_state_data(gxact->gid, hdr.gidlen);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating an XLog reading processor.")));
+ errdetail("Failed while allocating an XLog reading processor.")));
record = XLogReadRecord(xlogreader, lsn, &errormsg);
if (record == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read two-phase state from xlog at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (XLogRecGetRmid(xlogreader) != RM_XACT_ID ||
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("expected two-phase state data is not present in xlog at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (len != NULL)
*len = XLogRecGetDataLen(xlogreader);
- *buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader));
+ *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
XLogReaderFree(xlogreader);
xid = pgxact->xid;
/*
- * Read and validate 2PC state data.
- * State data will typically be stored in WAL files if the LSN is after the
- * last checkpoint record, or moved to disk if for some reason they have
- * lived for a long time.
+ * Read and validate 2PC state data. State data will typically be stored
+ * in WAL files if the LSN is after the last checkpoint record, or moved
+ * to disk if for some reason it has lived for a long time.
*/
if (gxact->ondisk)
buf = ReadTwoPhaseFile(xid, true);
TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();
/*
- * We are expecting there to be zero GXACTs that need to be
- * copied to disk, so we perform all I/O while holding
- * TwoPhaseStateLock for simplicity. This prevents any new xacts
- * from preparing while this occurs, which shouldn't be a problem
- * since the presence of long-lived prepared xacts indicates the
- * transaction manager isn't active.
+ * We are expecting there to be zero GXACTs that need to be copied to
+ * disk, so we perform all I/O while holding TwoPhaseStateLock for
+ * simplicity. This prevents any new xacts from preparing while this
+ * occurs, which shouldn't be a problem since the presence of long-lived
+ * prepared xacts indicates the transaction manager isn't active.
*
- * It's also possible to move I/O out of the lock, but on
- * every error we should check whether somebody committed our
- * transaction in different backend. Let's leave this optimisation
- * for future, if somebody will spot that this place cause
- * bottleneck.
+ * It's also possible to move I/O out of the lock, but on every error we
+ * would have to check whether somebody committed our transaction in a
+ * different backend. Let's leave this optimisation for the future, if
+ * somebody spots that this place causes a bottleneck.
*
- * Note that it isn't possible for there to be a GXACT with
- * a prepare_end_lsn set prior to the last checkpoint yet
- * is marked invalid, because of the efforts with delayChkpt.
+ * Note that it isn't possible for there to be a GXACT with a
+ * prepare_end_lsn set prior to the last checkpoint yet is marked invalid,
+ * because of the efforts with delayChkpt.
*/
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
gxact->prepare_end_lsn <= redo_horizon)
{
char *buf;
- int len;
+ int len;
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);
RecreateTwoPhaseFile(pgxact->xid, buf, len);
TwoPhaseFileHeader *hdr;
TransactionId *subxids;
GlobalTransaction gxact;
- const char *gid;
+ const char *gid;
int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16);
/*
* Transactions without an assigned xid can contain invalidation
* messages (e.g. explicit relcache invalidations or catcache
- * invalidations for inplace updates); standbys need to process
- * those. We can't emit a commit record without an xid, and we don't
- * want to force assigning an xid, because that'd be problematic for
- * e.g. vacuum. Hence we emit a bespoke record for the
- * invalidations. We don't want to use that in case a commit record is
- * emitted, so they happen synchronously with commits (besides not
- * wanting to emit more WAL recoreds).
+ * invalidations for inplace updates); standbys need to process those.
+ * We can't emit a commit record without an xid, and we don't want to
+ * force assigning an xid, because that'd be problematic for e.g.
+ * vacuum. Hence we emit a bespoke record for the invalidations. We
+ * don't want to use that in case a commit record is emitted, so they
+ * happen synchronously with commits (besides not wanting to emit more
+ * WAL records).
*/
if (nmsgs != 0)
{
LogStandbyInvalidations(nmsgs, invalMessages,
RelcacheInitFileInval);
- wrote_xlog = true; /* not strictly necessary */
+ wrote_xlog = true; /* not strictly necessary */
}
/*
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
* KnownAssignedXids machinery requires tracking every XID assignment. It
- * might be OK to skip it only when wal_level < replica, but for now
- * we don't.)
+ * might be OK to skip it only when wal_level < replica, but for now we
+ * don't.)
*
* However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG
/*
* If asked by the primary (because someone is waiting for a synchronous
- * commit = remote_apply), we will need to ask walreceiver to send a
- * reply immediately.
+ * commit = remote_apply), we will need to ask walreceiver to send a reply
+ * immediately.
*/
if (XactCompletionApplyFeedback(parsed->xinfo))
XLogRequestWalReceiverReply();
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
- "recovery_target_action",
- item->value),
+ errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
+ "recovery_target_action",
+ item->value),
errhint("Valid values are \"pause\", \"promote\", and \"shutdown\".")));
ereport(DEBUG2,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
- "recovery_target",
- item->value),
+ errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
+ "recovery_target",
+ item->value),
errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'",
}
/*
- * For Hot Standby, the WAL must be generated with 'replica' mode, and
- * we must have at least as many backend slots as the primary.
+ * For Hot Standby, the WAL must be generated with 'replica' mode, and we
+ * must have at least as many backend slots as the primary.
*/
if (ArchiveRecoveryRequested && EnableHotStandby)
{
* is no use of such file. There is no harm in retaining it, but it
* is better to get rid of the map file so that we don't have any
* redundant file in data directory and it will avoid any sort of
- * confusion. It seems prudent though to just rename the file out
- * of the way rather than delete it completely, also we ignore any
- * error that occurs in rename operation as even if map file is
- * present without backup_label file, it is harmless.
+ * confusion.  It seems prudent, though, to just rename the file out of
+ * the way rather than delete it completely; we also ignore any error that
+ * occurs during the rename, since even if the map file is present without
+ * a backup_label file, it is harmless.
*/
if (stat(TABLESPACE_MAP, &st) == 0)
{
unlink(TABLESPACE_MAP_OLD);
if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
ereport(LOG,
- (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
- TABLESPACE_MAP, BACKUP_LABEL_FILE),
- errdetail("File \"%s\" was renamed to \"%s\".",
- TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
+ (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
+ TABLESPACE_MAP, BACKUP_LABEL_FILE),
+ errdetail("File \"%s\" was renamed to \"%s\".",
+ TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
else
ereport(LOG,
- (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
- TABLESPACE_MAP, BACKUP_LABEL_FILE),
- errdetail("Could not rename file \"%s\" to \"%s\": %m.",
- TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
+ (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
+ TABLESPACE_MAP, BACKUP_LABEL_FILE),
+ errdetail("Could not rename file \"%s\" to \"%s\": %m.",
+ TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
}
/*
ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
- wasShutdown ? "TRUE" : "FALSE")));
+ wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u",
- checkPoint.nextXidEpoch, checkPoint.nextXid,
- checkPoint.nextOid)));
+ checkPoint.nextXidEpoch, checkPoint.nextXid,
+ checkPoint.nextOid)));
ereport(DEBUG1,
(errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u",
- checkPoint.nextMulti, checkPoint.nextMultiOffset)));
+ checkPoint.nextMulti, checkPoint.nextMultiOffset)));
ereport(DEBUG1,
- (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
- checkPoint.oldestXid, checkPoint.oldestXidDB)));
+ (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
+ checkPoint.oldestXid, checkPoint.oldestXidDB)));
ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId: %u, in database %u",
- checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
+ checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
ereport(DEBUG1,
(errmsg_internal("commit timestamp Xid oldest/newest: %u/%u",
- checkPoint.oldestCommitTsXid,
- checkPoint.newestCommitTsXid)));
+ checkPoint.oldestCommitTsXid,
+ checkPoint.newestCommitTsXid)));
if (!TransactionIdIsNormal(checkPoint.nextXid))
ereport(PANIC,
(errmsg("invalid next transaction ID")));
SpinLockRelease(&XLogCtl->info_lck);
/*
- * If rm_redo called XLogRequestWalReceiverReply, then we
- * wake up the receiver so that it notices the updated
+ * If rm_redo called XLogRequestWalReceiverReply, then we wake
+ * up the receiver so that it notices the updated
* lastReplayedEndRecPtr and sends a reply to the master.
*/
if (doRequestWalReceiverReply)
MemoryContext oldcontext;
/*
- * Label file and tablespace map file need to be long-lived, since they
- * are read in pg_stop_backup.
+ * Label file and tablespace map file need to be long-lived, since
+ * they are read in pg_stop_backup.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo();
MemoryContextSwitchTo(oldcontext);
startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file,
- dir, NULL, tblspc_map_file, false, true);
+ dir, NULL, tblspc_map_file, false, true);
nonexclusive_backup_running = true;
before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0);
* Note: different from CancelBackup which just cancels online backup mode.
*
* Note: this version is only called to stop an exclusive backup. The function
- * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
- * stop non-exclusive backups.
+ * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
+ * stop non-exclusive backups.
*
* Permission checking for this function is managed through the normal
* GRANT system.
errhint("Did you mean to use pg_stop_backup('f')?")));
/*
- * Exclusive backups were typically started in a different connection,
- * so don't try to verify that exclusive_backup_running is set in this one.
- * Actual verification that an exclusive backup is in fact running is handled
- * inside do_pg_stop_backup.
+ * Exclusive backups were typically started in a different connection, so
+ * don't try to verify that exclusive_backup_running is set in this one.
+ * Actual verification that an exclusive backup is in fact running is
+ * handled inside do_pg_stop_backup.
*/
stoppoint = do_pg_stop_backup(NULL, true, NULL);
Datum
pg_stop_backup_v2(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- Datum values[3];
- bool nulls[3];
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ Datum values[3];
+ bool nulls[3];
- bool exclusive = PG_GETARG_BOOL(0);
- XLogRecPtr stoppoint;
+ bool exclusive = PG_GETARG_BOOL(0);
+ XLogRecPtr stoppoint;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
errhint("Did you mean to use pg_stop_backup('t')?")));
/*
- * Stop the non-exclusive backup. Return a copy of the backup
- * label and tablespace map so they can be written to disk by
- * the caller.
+ * Stop the non-exclusive backup. Return a copy of the backup label
+ * and tablespace map so they can be written to disk by the caller.
*/
stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
nonexclusive_backup_running = false;
}
/* Stoppoint is included on both exclusive and nonexclusive backups */
- values[0] = LSNGetDatum(stoppoint);
+ values[0] = LSNGetDatum(stoppoint);
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
tuplestore_donestoring(tupstore);
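The declarations reindented above belong to the usual materialize-mode set-returning-function setup; a minimal sketch of that pattern as pg_stop_backup_v2 presumably uses it (exact ordering and messages are assumptions, the calls are the standard tuplestore/SRF API):

/* Sketch of the materialize-mode result setup */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
    ereport(ERROR,
            (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
             errmsg("set-valued function called in context that cannot accept a set")));

if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
    elog(ERROR, "return type must be a row type");

per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
oldcontext = MemoryContextSwitchTo(per_query_ctx);

tupstore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->returnMode = SFRM_Materialize;
rsinfo->setResult = tupstore;
rsinfo->setDesc = tupdesc;

MemoryContextSwitchTo(oldcontext);

/* ... stop the backup, fill values[] and nulls[] ... */

values[0] = LSNGetDatum(stoppoint);
tuplestore_putvalues(tupstore, tupdesc, values, nulls);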
if (total_len < SizeOfXLogRecord)
{
report_invalid_record(state,
- "invalid record length at %X/%X: wanted %u, got %u",
+ "invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, total_len);
goto err;
if (record->xl_tot_len < SizeOfXLogRecord)
{
report_invalid_record(state,
- "invalid record length at %X/%X: wanted %u, got %u",
+ "invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, record->xl_tot_len);
return false;
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized default ACL object type %c", objtype),
- errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
+ errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
}
/*
Form_pg_proc proc;
Oid transfn;
Oid finalfn = InvalidOid; /* can be omitted */
- Oid combinefn = InvalidOid; /* can be omitted */
+ Oid combinefn = InvalidOid; /* can be omitted */
Oid serialfn = InvalidOid; /* can be omitted */
- Oid deserialfn = InvalidOid; /* can be omitted */
+ Oid deserialfn = InvalidOid; /* can be omitted */
Oid mtransfn = InvalidOid; /* can be omitted */
Oid minvtransfn = InvalidOid; /* can be omitted */
Oid mfinalfn = InvalidOid; /* can be omitted */
/* handle the combinefn, if supplied */
if (aggcombinefnName)
{
- Oid combineType;
+ Oid combineType;
/*
- * Combine function must have 2 argument, each of which is the
- * trans type
+ * Combine function must have two arguments, each of which is the trans
+ * type
*/
fnArgs[0] = aggTransType;
fnArgs[1] = aggTransType;
if (combineType != aggTransType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of combine function %s is not %s",
- NameListToString(aggcombinefnName),
- format_type_be(aggTransType))));
+ errmsg("return type of combine function %s is not %s",
+ NameListToString(aggcombinefnName),
+ format_type_be(aggTransType))));
/*
* A combine function to combine INTERNAL states must accept nulls and
}
/*
- * Validate the serialization function, if present. We must ensure that the
- * return type of this function is the same as the specified serialType.
+ * Validate the serialization function, if present. We must ensure that
+ * the return type of this function is the same as the specified
+ * serialType.
*/
if (aggserialfnName)
{
if (rettype != aggSerialType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of serialization function %s is not %s",
- NameListToString(aggserialfnName),
- format_type_be(aggSerialType))));
+ errmsg("return type of serialization function %s is not %s",
+ NameListToString(aggserialfnName),
+ format_type_be(aggSerialType))));
}
/*
if (rettype != aggTransType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of deserialization function %s is not %s",
- NameListToString(aggdeserialfnName),
- format_type_be(aggTransType))));
+ errmsg("return type of deserialization function %s is not %s",
+ NameListToString(aggdeserialfnName),
+ format_type_be(aggTransType))));
}
/*
/*
* There's little point in having a serialization/deserialization
* function on aggregates that don't have an internal state, so let's
- * just disallow this as it may help clear up any confusion or needless
- * authoring of these functions.
+ * just disallow this as it may help clear up any confusion or
+ * needless authoring of these functions.
*/
if (transTypeId != INTERNALOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("a serialization type must only be specified when the aggregate transition data type is %s",
- format_type_be(INTERNALOID))));
+ format_type_be(INTERNALOID))));
serialTypeId = typenameTypeId(NULL, serialType);
/*
* We disallow INTERNAL serialType as the whole point of the
- * serialized types is to allow the aggregate state to be output,
- * and we cannot output INTERNAL. This check, combined with the one
- * above ensures that the trans type and serialization type are not the
+ * serialized types is to allow the aggregate state to be output, and
+ * we cannot output INTERNAL. This check, combined with the one above
+ * ensures that the trans type and serialization type are not the
* same.
*/
if (serialTypeId == INTERNALOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("aggregate serialization data type cannot be %s",
+ errmsg("aggregate serialization data type cannot be %s",
format_type_be(serialTypeId))));
/*
*/
if (serialfuncName != NIL)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("must specify serialization type when specifying serialization function")));
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("must specify serialization type when specifying serialization function")));
/* likewise for the deserialization function */
if (deserialfuncName != NIL)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("must specify serialization type when specifying deserialization function")));
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("must specify serialization type when specifying deserialization function")));
}
/*
mfinalfuncExtraArgs,
sortoperatorName, /* sort operator name */
transTypeId, /* transition data type */
- serialTypeId, /* serialization data type */
+ serialTypeId, /* serialization data type */
transSpace, /* transition space */
mtransTypeId, /* transition data type */
mtransSpace, /* transition space */
ObjectAddress
ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress)
{
- ObjectAddress address;
- ObjectAddress refAddr;
- Relation rel;
+ ObjectAddress address;
+ ObjectAddress refAddr;
+ Relation rel;
address =
get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname,
- stmt->objargs, &rel, AccessExclusiveLock, false);
+ stmt->objargs, &rel, AccessExclusiveLock, false);
/*
- * If a relation was involved, it would have been opened and locked.
- * We don't need the relation here, but we'll retain the lock until
- * commit.
+ * If a relation was involved, it would have been opened and locked. We
+ * don't need the relation here, but we'll retain the lock until commit.
*/
if (rel)
heap_close(rel, NoLock);
oldNspOid = DatumGetObjectId(namespace);
/*
- * If the object is already in the correct namespace, we don't need
- * to do anything except fire the object access hook.
+ * If the object is already in the correct namespace, we don't need to do
+ * anything except fire the object access hook.
*/
if (oldNspOid == nspOid)
{
/*
* get_am_type_oid
- * Worker for various get_am_*_oid variants
+ * Worker for various get_am_*_oid variants
*
* If missing_ok is false, throw an error if access method not found. If
* true, just return InvalidOid.
/*
* get_am_oid - given an access method name, look up its OID.
- * The type is not checked.
+ * The type is not checked.
*/
Oid
get_am_oid(const char *amname, bool missing_ok)
*/
if (!inh)
{
- BlockNumber relallvisible;
+ BlockNumber relallvisible;
visibilitymap_count(onerel, &relallvisible, NULL);
if (get_func_rettype(funcoid) != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("encoding conversion function %s must return type %s",
- NameListToString(func_name), "void")));
+ errmsg("encoding conversion function %s must return type %s",
+ NameListToString(func_name), "void")));
/* Check we have EXECUTE rights for the function */
aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
if (is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY FROM not supported with row-level security"),
+ errmsg("COPY FROM not supported with row-level security"),
errhint("Use INSERT statements instead.")));
/* Build target list */
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
+ errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
}
else if (list_length(rewritten) > 1)
{
- ListCell *lc;
+ ListCell *lc;
/* examine queries to determine which error message to issue */
foreach(lc, rewritten)
{
- Query *q = (Query *) lfirst(lc);
+ Query *q = (Query *) lfirst(lc);
if (q->querySource == QSRC_QUAL_INSTEAD_RULE)
ereport(ERROR,
if (q->querySource == QSRC_NON_INSTEAD_RULE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DO ALSO rules are not supported for the COPY")));
+ errmsg("DO ALSO rules are not supported for the COPY")));
}
ereport(ERROR,
query->commandType == CMD_DELETE);
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY query must have a RETURNING clause")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("COPY query must have a RETURNING clause")));
}
/* plan the query */
CreateExtensionStmt *ces;
ListCell *lc;
ObjectAddress addr;
- List *cascade_parents;
+ List *cascade_parents;
/* Check extension name validity before trying to cascade */
check_valid_extension_name(curreq);
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s",
- NameListToString((List *) handler->arg), "fdw_handler")));
+ NameListToString((List *) handler->arg), "fdw_handler")));
return handlerOid;
}
RelationGetRelationName(matviewRel));
/*
- * Check that there is a unique index with no WHERE clause on
- * one or more columns of the materialized view if CONCURRENTLY
- * is specified.
+ * Check that there is a unique index with no WHERE clause on one or more
+ * columns of the materialized view if CONCURRENTLY is specified.
*/
if (concurrent)
{
- List *indexoidlist = RelationGetIndexList(matviewRel);
- ListCell *indexoidscan;
+ List *indexoidlist = RelationGetIndexList(matviewRel);
+ ListCell *indexoidscan;
bool hasUniqueIndex = false;
foreach(indexoidscan, indexoidlist)
{
Oid indexoid = lfirst_oid(indexoidscan);
Relation indexRel;
- Form_pg_index indexStruct;
+ Form_pg_index indexStruct;
indexRel = index_open(indexoid, AccessShareLock);
indexStruct = indexRel->rd_index;
if (!hasUniqueIndex)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot refresh materialized view \"%s\" concurrently",
- quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
- RelationGetRelationName(matviewRel))),
+ errmsg("cannot refresh materialized view \"%s\" concurrently",
+ quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
+ RelationGetRelationName(matviewRel))),
errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view.")));
}
/*
* There must be at least one unique index on the matview.
*
- * ExecRefreshMatView() checks that after taking the exclusive lock on
- * the matview. So at least one unique index is guaranteed to exist here
+ * ExecRefreshMatView() checks that after taking the exclusive lock on the
+ * matview. So at least one unique index is guaranteed to exist here
* because the lock is still being held.
*/
Assert(foundUniqueIndex);
if (get_func_rettype(restrictionOid) != FLOAT8OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("restriction estimator function %s must return type %s",
- NameListToString(restrictionName), "float8")));
+ errmsg("restriction estimator function %s must return type %s",
+ NameListToString(restrictionName), "float8")));
/* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE);
if (get_func_rettype(joinOid) != FLOAT8OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("join estimator function %s must return type %s",
- NameListToString(joinName), "float8")));
+ errmsg("join estimator function %s must return type %s",
+ NameListToString(joinName), "float8")));
/* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE);
/* Must own relation. */
if (pg_class_ownercheck(relid, GetUserId()))
- noperm = false; /* user is allowed to modify this policy */
+ noperm = false; /* user is allowed to modify this policy */
else
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
*/
if (!noperm && num_roles > 0)
{
- int i, j;
+ int i,
+ j;
Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles);
Datum *role_oids;
char *qual_value;
Node *qual_expr;
- List *qual_parse_rtable = NIL;
+ List *qual_parse_rtable = NIL;
char *with_check_value;
Node *with_check_qual;
- List *with_check_parse_rtable = NIL;
+ List *with_check_parse_rtable = NIL;
Datum values[Natts_pg_policy];
bool isnull[Natts_pg_policy];
bool replaces[Natts_pg_policy];
/*
* All of the dependencies will be removed from the policy and then
- * re-added. In order to get them correct, we need to extract out
- * the expressions in the policy and construct a parsestate just
- * enough to build the range table(s) to then pass to
- * recordDependencyOnExpr().
+ * re-added. In order to get them correct, we need to extract out the
+ * expressions in the policy and construct a parsestate just enough to
+ * build the range table(s) to then pass to recordDependencyOnExpr().
*/
/* Get policy qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
- RelationGetDescr(pg_policy_rel), &attr_isnull);
+ RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull)
{
ParseState *qual_pstate;
/* Get WITH CHECK qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
- RelationGetDescr(pg_policy_rel), &attr_isnull);
+ RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull)
{
ParseState *with_check_pstate;
heap_close(pg_policy_rel, RowExclusiveLock);
- return(noperm || num_roles > 0);
+ return (noperm || num_roles > 0);
}
/*
/* Get policy command */
polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd,
- RelationGetDescr(pg_policy_rel),
- &polcmd_isnull);
+ RelationGetDescr(pg_policy_rel),
+ &polcmd_isnull);
Assert(!polcmd_isnull);
polcmd = DatumGetChar(polcmd_datum);
}
else
{
- Oid *roles;
+ Oid *roles;
Datum roles_datum;
bool attr_isnull;
ArrayType *policy_roles;
/*
- * We need to pull the set of roles this policy applies to from
- * what's in the catalog, so that we can recreate the dependencies
- * correctly for the policy.
+ * We need to pull the set of roles this policy applies to from what's
+ * in the catalog, so that we can recreate the dependencies correctly
+ * for the policy.
*/
roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
}
else
{
- Datum value_datum;
- bool attr_isnull;
+ Datum value_datum;
+ bool attr_isnull;
/*
* We need to pull the USING expression and build the range table for
- * the policy from what's in the catalog, so that we can recreate
- * the dependencies correctly for the policy.
+ * the policy from what's in the catalog, so that we can recreate the
+ * dependencies correctly for the policy.
*/
/* Check if the policy has a USING expr */
}
else
{
- Datum value_datum;
- bool attr_isnull;
+ Datum value_datum;
+ bool attr_isnull;
/*
* We need to pull the WITH CHECK expression and build the range table
if (funcrettype != LANGUAGE_HANDLEROID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type %s",
- NameListToString(funcname), "language_handler")));
+ errmsg("function %s must return type %s",
+ NameListToString(funcname), "language_handler")));
}
else
{
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type %s",
- NameListToString(stmt->plhandler), "language_handler")));
+ errmsg("function %s must return type %s",
+ NameListToString(stmt->plhandler), "language_handler")));
}
/* validate the inline function */
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
- * NOTE that this is cool only because we have ShareRowExclusiveLock on the
- * relation, so the trigger set won't be changing underneath us.
+ * NOTE that this is cool only because we have ShareRowExclusiveLock on
+ * the relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{
{
/* backwards-compatibility hack */
ereport(WARNING,
- (errmsg("changing return type of function %s from %s to %s",
- NameListToString(inputName), "opaque", typeName)));
+ (errmsg("changing return type of function %s from %s to %s",
+ NameListToString(inputName), "opaque", typeName)));
SetFunctionReturnType(inputOid, typoid);
}
else
{
/* backwards-compatibility hack */
ereport(WARNING,
- (errmsg("changing return type of function %s from %s to %s",
- NameListToString(outputName), "opaque", "cstring")));
+ (errmsg("changing return type of function %s from %s to %s",
+ NameListToString(outputName), "opaque", "cstring")));
SetFunctionReturnType(outputOid, CSTRINGOID);
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type output function %s must return type %s",
- NameListToString(outputName), "cstring")));
+ errmsg("type output function %s must return type %s",
+ NameListToString(outputName), "cstring")));
}
if (receiveOid)
{
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type send function %s must return type %s",
- NameListToString(sendName), "bytea")));
+ errmsg("type send function %s must return type %s",
+ NameListToString(sendName), "bytea")));
}
/*
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type analyze function %s must return type %s",
- NameListToString(procname), "boolean")));
+ errmsg("type analyze function %s must return type %s",
+ NameListToString(procname), "boolean")));
return procOid;
}
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
- * If it's a composite type, invoke ATExecChangeOwner so that we fix up the
- * pg_class entry properly. That will call back to AlterTypeOwnerInternal
- * to take care of the pg_type entry(s).
+ * If it's a composite type, invoke ATExecChangeOwner so that we fix up
+ * the pg_class entry properly. That will call back to
+ * AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to change bypassrls attribute")));
+ errmsg("must be superuser to change bypassrls attribute")));
}
else
{
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
- stmt->role),
- errdetail("Role names starting with \"pg_\" are reserved.")));
+ stmt->role),
+ errdetail("Role names starting with \"pg_\" are reserved.")));
/*
* Check the pg_authid relation to be certain the role doesn't already
if (rolspec->roletype != ROLESPEC_CSTRING)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot use special role specifier in DROP ROLE")));
+ errmsg("cannot use special role specifier in DROP ROLE")));
role = rolspec->rolename;
tuple = SearchSysCache1(AUTHNAME, PointerGetDatum(role));
errmsg("current user cannot be renamed")));
/*
- * Check that the user is not trying to rename a system role and
- * not trying to rename a role into the reserved "pg_" namespace.
+ * Check that the user is not trying to rename a system role and not
+ * trying to rename a role into the reserved "pg_" namespace.
*/
if (IsReservedName(NameStr(authform->rolname)))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
- NameStr(authform->rolname)),
- errdetail("Role names starting with \"pg_\" are reserved.")));
+ NameStr(authform->rolname)),
+ errdetail("Role names starting with \"pg_\" are reserved.")));
if (IsReservedName(newname))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
- newname),
- errdetail("Role names starting with \"pg_\" are reserved.")));
+ newname),
+ errdetail("Role names starting with \"pg_\" are reserved.")));
/* make sure the new name doesn't exist */
if (SearchSysCacheExists1(AUTHNAME, CStringGetDatum(newname)))
}
/*
- * If the all-visible page is turned out to be all-frozen but not marked,
- * we should so mark it. Note that all_frozen is only valid if all_visible
- * is true, so we must check both.
+ * If the all-visible page turns out to be all-frozen but is not marked
+ * as such, we should so mark it.  Note that all_frozen is only valid if
+ * all_visible is true, so we must check both.
*/
else if (all_visible_according_to_vm && all_visible && all_frozen &&
!VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable > 0 &&
(possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION) &&
old_snapshot_threshold < 0)
return true;
else
ReleaseSysCache(roleTup);
/*
- * Verify that session user is allowed to become this role, but
- * skip this in parallel mode, where we must blindly recreate the
- * parallel leader's state.
+ * Verify that session user is allowed to become this role, but skip
+ * this in parallel mode, where we must blindly recreate the parallel
+ * leader's state.
*/
if (!InitializingParallelWorker &&
!is_member_of_role(GetSessionUserId(), roleid))
return false;
/*
- * Parallel-aware nodes return a subset of the tuples in each worker,
- * and in general we can't expect to have enough bookkeeping state to
- * know which ones we returned in this worker as opposed to some other
- * worker.
+ * Parallel-aware nodes return a subset of the tuples in each worker, and
+ * in general we can't expect to have enough bookkeeping state to know
+ * which ones we returned in this worker as opposed to some other worker.
*/
if (node->parallel_aware)
return false;
{
TransactionId xwait;
ItemPointerData ctid_wait;
- XLTW_Oper reason_wait;
+ XLTW_Oper reason_wait;
Datum existing_values[INDEX_MAX_KEYS];
bool existing_isnull[INDEX_MAX_KEYS];
char *error_new;
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
- wco->polname, wco->relname)));
+ errmsg("new row violates row-level security policy \"%s\" for table \"%s\"",
+ wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy for table \"%s\"",
- wco->relname)));
+ errmsg("new row violates row-level security policy for table \"%s\"",
+ wco->relname)));
break;
case WCO_RLS_CONFLICT_CHECK:
if (wco->polname != NULL)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
- wco->polname, wco->relname)));
+ errmsg("new row violates row-level security policy \"%s\" (USING expression) for table \"%s\"",
+ wco->polname, wco->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
- wco->relname)));
+ errmsg("new row violates row-level security policy (USING expression) for table \"%s\"",
+ wco->relname)));
break;
default:
elog(ERROR, "unrecognized WCO kind: %u", wco->kind);
typedef struct ExecParallelEstimateContext
{
ParallelContext *pcxt;
- int nnodes;
+ int nnodes;
} ExecParallelEstimateContext;
/* Context object for ExecParallelInitializeDSM. */
{
ParallelContext *pcxt;
SharedExecutorInstrumentation *instrumentation;
- int nnodes;
+ int nnodes;
} ExecParallelInitializeDSMContext;
/* Helper functions that run in the parallel leader. */
static bool ExecParallelEstimate(PlanState *node,
ExecParallelEstimateContext *e);
static bool ExecParallelInitializeDSM(PlanState *node,
- ExecParallelInitializeDSMContext *d);
+ ExecParallelInitializeDSMContext *d);
static shm_mq_handle **ExecParallelSetupTupleQueues(ParallelContext *pcxt,
bool reinitialize);
static bool ExecParallelRetrieveInstrumentation(PlanState *planstate,
- SharedExecutorInstrumentation *instrumentation);
+ SharedExecutorInstrumentation *instrumentation);
/* Helper functions that run in the parallel worker. */
static void ParallelQueryMain(dsm_segment *seg, shm_toc *toc);
/* Estimate space for tuple queues. */
shm_toc_estimate_chunk(&pcxt->estimator,
- mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
+ mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
shm_toc_estimate_keys(&pcxt->estimator, 1);
/*
- * Give parallel-aware nodes a chance to add to the estimates, and get
- * a count of how many PlanState nodes there are.
+ * Give parallel-aware nodes a chance to add to the estimates, and get a
+ * count of how many PlanState nodes there are.
*/
e.pcxt = pcxt;
e.nnodes = 0;
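ExecParallelEstimate itself is not shown in this excerpt; a simplified sketch of how it walks the plan tree, using the planstate_tree_walker() helper declared above and counting nodes into e->nnodes (the SeqScan-only dispatch here is an abridgment):

/* Sketch: recursive walk over the PlanState tree */
static bool
ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
{
    if (planstate == NULL)
        return false;

    /* count this node */
    e->nnodes++;

    /* give parallel-aware scan nodes a chance to add to the estimate */
    if (planstate->plan->parallel_aware && IsA(planstate, SeqScanState))
        ExecSeqScanEstimate((SeqScanState *) planstate, e->pcxt);

    return planstate_tree_walker(planstate, ExecParallelEstimate, e);
}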
pei->tqueue = ExecParallelSetupTupleQueues(pcxt, false);
/*
- * If instrumentation options were supplied, allocate space for the
- * data. It only gets partially initialized here; the rest happens
- * during ExecParallelInitializeDSM.
+ * If instrumentation options were supplied, allocate space for the data.
+ * It only gets partially initialized here; the rest happens during
+ * ExecParallelInitializeDSM.
*/
if (estate->es_instrument)
{
Instrumentation *instrument;
- int i;
+ int i;
instrumentation = shm_toc_allocate(pcxt->toc, instrumentation_len);
instrumentation->instrument_options = estate->es_instrument;
*/
static bool
ExecParallelRetrieveInstrumentation(PlanState *planstate,
- SharedExecutorInstrumentation *instrumentation)
+ SharedExecutorInstrumentation *instrumentation)
{
Instrumentation *instrument;
- int i;
- int n;
- int ibytes;
- int plan_node_id = planstate->plan->plan_node_id;
+ int i;
+ int n;
+ int ibytes;
+ int plan_node_id = planstate->plan->plan_node_id;
/* Find the instrumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
void
ExecParallelFinish(ParallelExecutorInfo *pei)
{
- int i;
+ int i;
if (pei->finished)
return;
*/
static bool
ExecParallelReportInstrumentation(PlanState *planstate,
- SharedExecutorInstrumentation *instrumentation)
+ SharedExecutorInstrumentation *instrumentation)
{
- int i;
- int plan_node_id = planstate->plan->plan_node_id;
+ int i;
+ int plan_node_id = planstate->plan->plan_node_id;
Instrumentation *instrument;
InstrEndLoop(planstate->instrument);
/*
* If we shuffled the plan_node_id values in ps_instrument into sorted
- * order, we could use binary search here. This might matter someday
- * if we're pushing down sufficiently large plan trees. For now, do it
- * the slow, dumb way.
+ * order, we could use binary search here. This might matter someday if
+ * we're pushing down sufficiently large plan trees. For now, do it the
+ * slow, dumb way.
*/
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id)
stmt = queryTree->utilityStmt;
else
stmt = (Node *) pg_plan_query(queryTree,
- fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
- NULL);
+ fcache->readonly_func ? CURSOR_OPT_PARALLEL_OK : 0,
+ NULL);
/* Precheck all commands for validity in a function */
if (IsA(stmt, TransactionStmt))
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull);
static void finalize_partialaggregate(AggState *aggstate,
- AggStatePerAgg peragg,
- AggStatePerGroup pergroupstate,
- Datum *resultVal, bool *resultIsNull);
+ AggStatePerAgg peragg,
+ AggStatePerGroup pergroupstate,
+ Datum *resultVal, bool *resultIsNull);
static void prepare_projection_slot(AggState *aggstate,
TupleTableSlot *slot,
int currentSet);
if (OidIsValid(pertrans->deserialfn_oid))
{
/*
- * Don't call a strict deserialization function with NULL input.
- * A strict deserialization function and a null value means we skip
- * calling the combine function for this state. We assume that this
- * would be a waste of time and effort anyway so just skip it.
+ * Don't call a strict deserialization function with NULL input. A
+ * strict deserialization function and a null value means we skip
+ * calling the combine function for this state. We assume that
+ * this would be a waste of time and effort anyway so just skip
+ * it.
*/
if (pertrans->deserialfn.fn_strict && slot->tts_isnull[0])
continue;
else
{
- FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
- MemoryContext oldContext;
+ FunctionCallInfo dsinfo = &pertrans->deserialfn_fcinfo;
+ MemoryContext oldContext;
dsinfo->arg[0] = slot->tts_values[0];
dsinfo->argnull[0] = slot->tts_isnull[0];
AggStatePerGroup pergroupstate,
Datum *resultVal, bool *resultIsNull)
{
- AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
- MemoryContext oldContext;
+ AggStatePerTrans pertrans = &aggstate->pertrans[peragg->transno];
+ MemoryContext oldContext;
oldContext = MemoryContextSwitchTo(aggstate->ss.ps.ps_ExprContext->ecxt_per_tuple_memory);
/*
- * serialfn_oid will be set if we must serialize the input state
- * before calling the combine function on the state.
+ * serialfn_oid will be set if we must serialize the input state before
+ * calling the combine function on the state.
*/
if (OidIsValid(pertrans->serialfn_oid))
{
else
{
FunctionCallInfo fcinfo = &pertrans->serialfn_fcinfo;
+
fcinfo->arg[0] = pergroupstate->transValue;
fcinfo->argnull[0] = pergroupstate->transValueIsNull;
/* If result is pass-by-ref, make sure it is in the right context. */
if (!peragg->resulttypeByVal && !*resultIsNull &&
!MemoryContextContains(CurrentMemoryContext,
- DatumGetPointer(*resultVal)))
+ DatumGetPointer(*resultVal)))
*resultVal = datumCopy(*resultVal,
peragg->resulttypeByVal,
peragg->resulttypeLen);
*
* 1. An aggregate function appears more than once in query:
*
- * SELECT SUM(x) FROM ... HAVING SUM(x) > 0
+ * SELECT SUM(x) FROM ... HAVING SUM(x) > 0
*
- * Since the aggregates are the identical, we only need to calculate
- * the calculate it once. Both aggregates will share the same 'aggno'
- * value.
+ * Since the aggregates are identical, we only need to calculate it once.
+ * Both aggregates will share the same 'aggno' value.
*
* 2. Two different aggregate functions appear in the query, but the
- * aggregates have the same transition function and initial value, but
- * different final function:
+ * aggregates have the same transition function and initial value but a
+ * different final function:
*
- * SELECT SUM(x), AVG(x) FROM ...
+ * SELECT SUM(x), AVG(x) FROM ...
*
- * In this case we must create a new peragg for the varying aggregate,
- * and need to call the final functions separately, but can share the
- * same transition state.
+ * In this case we must create a new peragg for the varying aggregate,
+ * and need to call the final functions separately, but can share the
+ * same transition state.
*
* For either of these optimizations to be valid, the aggregate's
* arguments must be the same, including any modifiers such as ORDER BY,
*/
existing_transno = find_compatible_pertrans(aggstate, aggref,
transfn_oid, aggtranstype,
- serialfn_oid, deserialfn_oid,
- initValue, initValueIsNull,
+ serialfn_oid, deserialfn_oid,
+ initValue, initValueIsNull,
same_input_transnos);
if (existing_transno != -1)
{
/*
* The serialization and deserialization functions must match, if
* present, as we're unable to share the trans state for aggregates
- * which will serialize or deserialize into different formats. Remember
- * that these will be InvalidOid if they're not required for this agg
- * node.
+ * which will serialize or deserialize into different formats.
+ * Remember that these will be InvalidOid if they're not required for
+ * this agg node.
*/
if (aggserialfn != pertrans->serialfn_oid ||
aggdeserialfn != pertrans->deserialfn_oid)
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. outerPlan may also be NULL, in which case there
- * is nothing to rescan at all.
+ * first ExecProcNode. outerPlan may also be NULL, in which case there is
+ * nothing to rescan at all.
*/
if (outerPlan != NULL && outerPlan->chgParam == NULL)
ExecReScan(outerPlan);
/*
* Initialize the parallel context and workers on first execution. We do
* this on first execution rather than during node initialization, as it
- * needs to allocate large dynamic segment, so it is better to do if it
- * is really needed.
+ * needs to allocate a large dynamic segment, so it is better to do so
+ * only if it is really needed.
*/
if (!node->initialized)
{
Gather *gather = (Gather *) node->ps.plan;
/*
- * Sometimes we might have to run without parallelism; but if
- * parallel mode is active then we can try to fire up some workers.
+ * Sometimes we might have to run without parallelism; but if parallel
+ * mode is active then we can try to fire up some workers.
*/
if (gather->num_workers > 0 && IsInParallelMode())
{
}
else
{
- /* No workers? Then never mind. */
+ /* No workers? Then never mind. */
ExecShutdownGatherWorkers(node);
}
}
static HeapTuple
gather_readnext(GatherState *gatherstate)
{
- int waitpos = gatherstate->nextreader;
+ int waitpos = gatherstate->nextreader;
for (;;)
{
tup = TupleQueueReaderNext(reader, true, &readerdone);
/*
- * If this reader is done, remove it. If all readers are done,
- * clean up remaining worker state.
+ * If this reader is done, remove it. If all readers are done, clean
+ * up remaining worker state.
*/
if (readerdone)
{
/* Shut down tuple queue readers before shutting down workers. */
if (node->reader != NULL)
{
- int i;
+ int i;
for (i = 0; i < node->nreaders; ++i)
DestroyTupleQueueReader(node->reader[i]);
ExecReScanGather(GatherState *node)
{
/*
- * Re-initialize the parallel workers to perform rescan of relation.
- * We want to gracefully shutdown all the workers so that they
- * should be able to propagate any error or other information to master
- * backend before dying. Parallel context will be reused for rescan.
+ * Re-initialize the parallel workers to perform a rescan of the relation.
+ * We want to gracefully shut down all the workers so that they can
+ * propagate any error or other information to the master backend before
+ * dying.  The parallel context will be reused for the rescan.
*/
ExecShutdownGatherWorkers(node);
/*
* Note that it is possible that the target tuple has been modified in
* this session, after the above heap_lock_tuple. We choose to not error
- * out in that case, in line with ExecUpdate's treatment of similar
- * cases. This can happen if an UPDATE is triggered from within
- * ExecQual(), ExecWithCheckOptions() or ExecProject() above, e.g. by
- * selecting from a wCTE in the ON CONFLICT's SET.
+ * out in that case, in line with ExecUpdate's treatment of similar cases.
+ * This can happen if an UPDATE is triggered from within ExecQual(),
+ * ExecWithCheckOptions() or ExecProject() above, e.g. by selecting from a
+ * wCTE in the ON CONFLICT's SET.
*/
/* Execute UPDATE with projection */
/* Initialize the usesFdwDirectModify flag */
resultRelInfo->ri_usesFdwDirectModify = bms_is_member(i,
- node->fdwDirectModifyPlans);
+ node->fdwDirectModifyPlans);
/*
* Verify result relation is a valid target for the current operation
if (scandesc == NULL)
{
/*
- * We reach here if the scan is not parallel, or if we're executing
- * a scan that was intended to be parallel serially.
+ * We reach here if the scan is not parallel, or if we're executing a
+ * scan that was intended to be parallel serially.
*/
scandesc = heap_beginscan(node->ss.ss_currentRelation,
estate->es_snapshot,
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ss.ps.plan)->scanrelid,
+ ((SeqScan *) node->ss.ps.plan)->scanrelid,
eflags);
node->ss.ss_currentRelation = currentRelation;
scan = node->ss.ss_currentScanDesc;
if (scan != NULL)
- heap_rescan(scan, /* scan desc */
- NULL); /* new scan keys */
+ heap_rescan(scan, /* scan desc */
+ NULL); /* new scan keys */
ExecScanReScan((ScanState *) node);
}
ParallelContext *pcxt)
{
EState *estate = node->ss.ps.state;
- ParallelHeapScanDesc pscan;
+ ParallelHeapScanDesc pscan;
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
heap_parallelscan_initialize(pscan,
void
ExecSeqScanInitializeWorker(SeqScanState *node, shm_toc *toc)
{
- ParallelHeapScanDesc pscan;
+ ParallelHeapScanDesc pscan;
pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
node->ss.ss_currentScanDesc =
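The two hunks above are the leader and worker halves of the same handshake; a condensed sketch of how they pair up (the shm_toc_insert step is assumed from standard 9.6 shm_toc usage, the rest uses the functions visible in the hunks):

/* Leader (ExecSeqScanInitializeDSM): publish the parallel scan descriptor */
pscan = shm_toc_allocate(pcxt->toc, node->pscan_len);
heap_parallelscan_initialize(pscan,
                             node->ss.ss_currentRelation,
                             estate->es_snapshot);
shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, pscan);
node->ss.ss_currentScanDesc =
    heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);

/* Worker (ExecSeqScanInitializeWorker): attach to the same descriptor */
pscan = shm_toc_lookup(toc, node->ss.ps.plan->plan_node_id);
node->ss.ss_currentScanDesc =
    heap_beginscan_parallel(node->ss.ss_currentRelation, pscan);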
/* build expression trees using actual argument & result types */
build_aggregate_transfn_expr(inputTypes,
numArguments,
- 0, /* no ordered-set window functions yet */
- false, /* no variadic window functions yet */
+ 0, /* no ordered-set window functions yet */
+ false, /* no variadic window functions yet */
wfunc->wintype,
wfunc->inputcollid,
transfn_oid,
TQUEUE_REMAP_ARRAY, /* array */
TQUEUE_REMAP_RANGE, /* range */
TQUEUE_REMAP_RECORD /* composite type, named or anonymous */
-} RemapClass;
+} RemapClass;
typedef struct
{
int natts;
RemapClass mapping[FLEXIBLE_ARRAY_MEMBER];
-} RemapInfo;
+} RemapInfo;
typedef struct
{
char mode;
TupleDesc tupledesc;
RemapInfo *remapinfo;
-} TQueueDestReceiver;
+} TQueueDestReceiver;
typedef struct RecordTypemodMap
{
int remotetypmod;
int localtypmod;
-} RecordTypemodMap;
+} RecordTypemodMap;
struct TupleQueueReader
{
#define TUPLE_QUEUE_MODE_CONTROL 'c'
#define TUPLE_QUEUE_MODE_DATA 'd'
-static void tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype,
+static void tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype,
Datum value);
-static void tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value);
-static void tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value);
-static void tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value);
-static void tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
+static void tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value);
+static void tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value);
+static void tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value);
+static void tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc);
static void TupleQueueHandleControlMessage(TupleQueueReader *reader,
Size nbytes, char *data);
static HeapTuple TupleQueueHandleDataMessage(TupleQueueReader *reader,
Size nbytes, HeapTupleHeader data);
static HeapTuple TupleQueueRemapTuple(TupleQueueReader *reader,
- TupleDesc tupledesc, RemapInfo * remapinfo,
+ TupleDesc tupledesc, RemapInfo *remapinfo,
HeapTuple tuple);
static Datum TupleQueueRemap(TupleQueueReader *reader, RemapClass remapclass,
Datum value);
* Invoke the appropriate walker function based on the given RemapClass.
*/
static void
-tqueueWalk(TQueueDestReceiver * tqueue, RemapClass walktype, Datum value)
+tqueueWalk(TQueueDestReceiver *tqueue, RemapClass walktype, Datum value)
{
check_stack_depth();
* contained therein.
*/
static void
-tqueueWalkRecord(TQueueDestReceiver * tqueue, Datum value)
+tqueueWalkRecord(TQueueDestReceiver *tqueue, Datum value)
{
HeapTupleHeader tup;
Oid typeid;
* contained therein.
*/
static void
-tqueueWalkArray(TQueueDestReceiver * tqueue, Datum value)
+tqueueWalkArray(TQueueDestReceiver *tqueue, Datum value)
{
ArrayType *arr = DatumGetArrayTypeP(value);
Oid typeid = ARR_ELEMTYPE(arr);
* contained therein.
*/
static void
-tqueueWalkRange(TQueueDestReceiver * tqueue, Datum value)
+tqueueWalkRange(TQueueDestReceiver *tqueue, Datum value)
{
RangeType *range = DatumGetRangeType(value);
Oid typeid = RangeTypeGetOid(range);
* already done so previously.
*/
static void
-tqueueSendTypmodInfo(TQueueDestReceiver * tqueue, int typmod,
+tqueueSendTypmodInfo(TQueueDestReceiver *tqueue, int typmod,
TupleDesc tupledesc)
{
StringInfoData buf;
*/
static HeapTuple
TupleQueueRemapTuple(TupleQueueReader *reader, TupleDesc tupledesc,
- RemapInfo * remapinfo, HeapTuple tuple)
+ RemapInfo *remapinfo, HeapTuple tuple)
{
Datum *values;
bool *isnull;
retval = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
hostinfo, sizeof(hostinfo), NULL, 0,
- port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV);
+ port->hba->pam_use_hostname ? 0 : NI_NUMERICHOST | NI_NUMERICSERV);
if (retval != 0)
{
ereport(WARNING,
{
ereport(LOG,
(errmsg("pam_set_item(PAM_RHOST) failed: %s",
- pam_strerror(pamh, retval))));
+ pam_strerror(pamh, retval))));
pam_passwd = NULL;
return STATUS_ERROR;
}
static int
CheckBSDAuth(Port *port, char *user)
{
- char *passwd;
- int retval;
+ char *passwd;
+ int retval;
/* Send regular password request to client, and get the response */
sendAuthRequest(port, AUTH_REQ_PASSWORD);
radius_add_attribute(packet, RADIUS_NAS_IDENTIFIER, (unsigned char *) identifier, strlen(identifier));
/*
- * RADIUS password attributes are calculated as:
- * e[0] = p[0] XOR MD5(secret + Request Authenticator)
- * for the first group of 16 octets, and then:
- * e[i] = p[i] XOR MD5(secret + e[i-1])
- * for the following ones (if necessary)
+ * RADIUS password attributes are calculated as: e[0] = p[0] XOR
+ * MD5(secret + Request Authenticator) for the first group of 16 octets,
+ * and then: e[i] = p[i] XOR MD5(secret + e[i-1]) for the following ones
+ * (if necessary)
*/
encryptedpasswordlen = ((strlen(passwd) + RADIUS_VECTOR_LENGTH - 1) / RADIUS_VECTOR_LENGTH) * RADIUS_VECTOR_LENGTH;
cryptvector = palloc(strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH);
for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
{
memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer, RADIUS_VECTOR_LENGTH);
- /* .. and for subsequent iterations the result of the previous XOR (calculated below) */
+
+ /*
+ * .. and for subsequent iterations the result of the previous XOR
+ * (calculated below)
+ */
md5trailer = encryptedpassword + i;
if (!pg_md5_binary(cryptvector, strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH, encryptedpassword + i))
return STATUS_ERROR;
}
- for (j = i; j < i+RADIUS_VECTOR_LENGTH; j++)
+ for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++)
{
if (j < strlen(passwd))
encryptedpassword[j] = passwd[j] ^ encryptedpassword[j];
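Putting the comment's e[i] = p[i] XOR MD5(secret + ...) formula and the fragments above together, the whole obfuscation step looks roughly like this condensed sketch (variable names are the ones already visible in the hunks):

/* Sketch of the RFC 2865 User-Password encoding described above */
memcpy(cryptvector, port->hba->radiussecret, strlen(port->hba->radiussecret));

md5trailer = packet->vector;    /* first block uses the Request Authenticator */
for (i = 0; i < encryptedpasswordlen; i += RADIUS_VECTOR_LENGTH)
{
    memcpy(cryptvector + strlen(port->hba->radiussecret), md5trailer,
           RADIUS_VECTOR_LENGTH);

    if (!pg_md5_binary(cryptvector,
                       strlen(port->hba->radiussecret) + RADIUS_VECTOR_LENGTH,
                       encryptedpassword + i))
        return STATUS_ERROR;

    for (j = i; j < i + RADIUS_VECTOR_LENGTH; j++)
        encryptedpassword[j] ^= (j < strlen(passwd)) ? passwd[j] : '\0';

    md5trailer = encryptedpassword + i; /* e[i] feeds the next block's MD5 */
}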
(buf.st_uid == 0 && buf.st_mode & (S_IWGRP | S_IXGRP | S_IRWXO)))
ereport(FATAL,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("private key file \"%s\" has group or world access",
- ssl_key_file),
+ errmsg("private key file \"%s\" has group or world access",
+ ssl_key_file),
errdetail("File must have permissions u=rw (0600) or less if owned by the database user, or permissions u=rw,g=r (0640) or less if owned by root.")));
#endif
else
ereport(FATAL,
(errmsg("could not load SSL certificate revocation list file \"%s\": %s",
- ssl_crl_file, SSLerrmessage(ERR_get_error()))));
+ ssl_crl_file, SSLerrmessage(ERR_get_error()))));
}
}
port->ssl_in_use = true;
aloop:
+
/*
* Prepare to call SSL_get_error() by clearing thread's OpenSSL error
* queue. In general, the current thread's error queue must be empty
- * before the TLS/SSL I/O operation is attempted, or SSL_get_error()
- * will not work reliably. An extension may have failed to clear the
+ * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
+ * not work reliably. An extension may have failed to clear the
* per-thread error queue following another call to an OpenSSL I/O
* routine.
*/
/*
* Other clients of OpenSSL in the backend may fail to call
- * ERR_get_error(), but we always do, so as to not cause problems
- * for OpenSSL clients that don't call ERR_clear_error()
- * defensively. Be sure that this happens by calling now.
- * SSL_get_error() relies on the OpenSSL per-thread error queue
- * being intact, so this is the earliest possible point
- * ERR_get_error() may be called.
+ * ERR_get_error(), but we always do, so as to not cause problems for
+ * OpenSSL clients that don't call ERR_clear_error() defensively. Be
+ * sure that this happens by calling now. SSL_get_error() relies on
+ * the OpenSSL per-thread error queue being intact, so this is the
+ * earliest possible point ERR_get_error() may be called.
*/
ecode = ERR_get_error();
switch (err)
/* In blocking mode, wait until the socket is ready */
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{
- WaitEvent event;
+ WaitEvent event;
Assert(waitfor);
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
- WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1);
+ WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/*
* If the postmaster has died, it's not safe to continue running,
* because it is the postmaster's job to kill us if some other backend
* exists uncleanly. Moreover, we won't run very well in this state;
* helper processes like walwriter and the bgwriter will exit, so
- * performance may be poor. Finally, if we don't exit, pg_ctl will
- * be unable to restart the postmaster without manual intervention,
- * so no new connections can be accepted. Exiting clears the deck
- * for a postmaster restart.
+ * performance may be poor. Finally, if we don't exit, pg_ctl will be
+ * unable to restart the postmaster without manual intervention, so no
+ * new connections can be accepted. Exiting clears the deck for a
+ * postmaster restart.
*
- * (Note that we only make this check when we would otherwise sleep
- * on our latch. We might still continue running for a while if the
+ * (Note that we only make this check when we would otherwise sleep on
+ * our latch. We might still continue running for a while if the
* postmaster is killed in mid-query, or even through multiple queries
* if we never have to wait for read. We don't want to burn too many
* cycles checking for this very rare condition, and this should cause
if (event.events & WL_POSTMASTER_DEATH)
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
- errmsg("terminating connection due to unexpected postmaster exit")));
+ errmsg("terminating connection due to unexpected postmaster exit")));
/* Handle interrupt. */
if (event.events & WL_LATCH_SET)
if (n < 0 && !port->noblock && (errno == EWOULDBLOCK || errno == EAGAIN))
{
- WaitEvent event;
+ WaitEvent event;
Assert(waitfor);
ModifyWaitEvent(FeBeWaitSet, 0, waitfor, NULL);
- WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */, &event, 1);
+ WaitEventSetWait(FeBeWaitSet, -1 /* no timeout */ , &event, 1);
/* See comments in secure_read. */
if (event.events & WL_POSTMASTER_DEATH)
ereport(FATAL,
(errcode(ERRCODE_ADMIN_SHUTDOWN),
- errmsg("terminating connection due to unexpected postmaster exit")));
+ errmsg("terminating connection due to unexpected postmaster exit")));
/* Handle interrupt. */
if (event.events & WL_LATCH_SET)
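Both secure_read and secure_write use the blocking-mode wait shown in the two hunks above; the continuation, not included in this excerpt, is sketched below with the read-side interrupt call (the write side presumably uses ProcessClientWriteInterrupt):

/* Sketch of how the wait concludes before retrying the I/O */
if (event.events & WL_LATCH_SET)
{
    ResetLatch(MyLatch);
    ProcessClientReadInterrupt(true);   /* ProcessClientWriteInterrupt() on
                                         * the write side */
}

goto retry;                             /* loop back and retry the SSL or
                                         * raw socket call */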
if (PqCommReadingMsg)
ereport(FATAL,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("terminating connection because protocol synchronization was lost")));
+ errmsg("terminating connection because protocol synchronization was lost")));
PqCommReadingMsg = true;
}
/*
* If the message queue is already gone, just ignore the message. This
- * doesn't necessarily indicate a problem; for example, DEBUG messages
- * can be generated late in the shutdown sequence, after all DSMs have
- * already been detached.
+ * doesn't necessarily indicate a problem; for example, DEBUG messages can
+ * be generated late in the shutdown sequence, after all DSMs have already
+ * been detached.
*/
if (pq_mq == NULL)
return 0;
SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
#if defined(_M_AMD64) && _MSC_VER == 1800
+
/*
- * Avoid crashing in certain floating-point operations if
- * we were compiled for x64 with MS Visual Studio 2013 and
- * are running on Windows prior to 7/2008R2 SP1 on an
- * AVX2-capable CPU.
+ * Avoid crashing in certain floating-point operations if we were
+ * compiled for x64 with MS Visual Studio 2013 and are running on
+ * Windows prior to 7/2008R2 SP1 on an AVX2-capable CPU.
*
- * Ref: https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions
+ * Ref:
+ * https://connect.microsoft.com/VisualStudio/feedback/details/811093/visual-studio-2013-rtm-c-x64-code-generation-bug-for-avx2-instructions
*/
if (!IsWindows7SP1OrGreater())
{
_set_FMA3_enable(0);
}
-#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */
+#endif /* defined(_M_AMD64) && _MSC_VER == 1800 */
}
#endif /* WIN32 */
return true;
break;
case T_CustomScan:
- foreach (lc, ((CustomScanState *) planstate)->custom_ps)
+ foreach(lc, ((CustomScanState *) planstate)->custom_ps)
{
if (walker((PlanState *) lfirst(lc), context))
return true;
Size
EstimateParamListSpace(ParamListInfo paramLI)
{
- int i;
- Size sz = sizeof(int);
+ int i;
+ Size sz = sizeof(int);
if (paramLI == NULL || paramLI->numParams <= 0)
return sz;
typeOid = prm->ptype;
}
- sz = add_size(sz, sizeof(Oid)); /* space for type OID */
+ sz = add_size(sz, sizeof(Oid)); /* space for type OID */
sz = add_size(sz, sizeof(uint16)); /* space for pflags */
/* space for datum/isnull */
typByVal = true;
}
sz = add_size(sz,
- datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen));
+ datumEstimateSpace(prm->value, prm->isnull, typByVal, typLen));
}
return sz;
READ_BITMAPSET_FIELD(custom_relids);
/* Lookup CustomScanMethods by CustomName */
- token = pg_strtok(&length); /* skip methods: */
- token = pg_strtok(&length); /* CustomName */
+ token = pg_strtok(&length); /* skip methods: */
+ token = pg_strtok(&length); /* CustomName */
custom_name = nullable_string(token, length);
methods = GetCustomScanMethods(custom_name, false);
local_node->methods = methods;
{
const ExtensibleNodeMethods *methods;
ExtensibleNode *local_node;
- const char *extnodename;
+ const char *extnodename;
+
READ_TEMP_LOCALS();
- token = pg_strtok(&length); /* skip: extnodename */
- token = pg_strtok(&length); /* get extnodename */
+ token = pg_strtok(&length); /* skip: extnodename */
+ token = pg_strtok(&length); /* get extnodename */
extnodename = nullable_string(token, length);
if (!extnodename)
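The hunk above stops just before the interesting part of _readExtensibleNode; a sketch of the remainder, assuming the 9.6 extensible-node API (GetExtensibleNodeMethods plus a per-node nodeRead callback):

if (!extnodename)
    elog(ERROR, "extnodename has to be supplied");

methods = GetExtensibleNodeMethods(extnodename, false);

local_node = (ExtensibleNode *) newNode(methods->node_size,
                                        T_ExtensibleNode);
local_node->extnodename = extnodename;

/* deserialize the private fields defined by the extension */
methods->nodeRead(local_node);

READ_DONE();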
set_base_rel_consider_startup(root);
/*
- * Generate access paths for the base rels. set_base_rel_sizes also
- * sets the consider_parallel flag for each baserel, if appropriate.
+ * Generate access paths for the base rels. set_base_rel_sizes also sets
+ * the consider_parallel flag for each baserel, if appropriate.
*/
set_base_rel_sizes(root);
set_base_rel_pathlists(root);
/*
* set_base_rel_sizes
* Set the size estimates (rows and widths) for each base-relation entry.
- * Also determine whether to consider parallel paths for base relations.
+ * Also determine whether to consider parallel paths for base relations.
*
* We do this in a separate pass over the base rels so that rowcount
* estimates are available for parameterized path generation, and also so
switch (rte->rtekind)
{
case RTE_RELATION:
+
/*
* Currently, parallel workers can't access the leader's temporary
* tables. We could possibly relax this if we wrote all of its
*/
if (rte->tablesample != NULL)
{
- Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
+ Oid proparallel = func_parallel(rte->tablesample->tsmhandler);
if (proparallel != PROPARALLEL_SAFE)
return;
break;
case RTE_SUBQUERY:
+
/*
* Subplans currently aren't passed to workers. Even if they
- * were, the subplan might be using parallelism internally, and
- * we can't support nested Gather nodes at present. Finally,
- * we don't have a good way of knowing whether the subplan
- * involves any parallel-restricted operations. It would be
- * nice to relax this restriction some day, but it's going to
- * take a fair amount of work.
+ * were, the subplan might be using parallelism internally, and we
+ * can't support nested Gather nodes at present. Finally, we
+ * don't have a good way of knowing whether the subplan involves
+ * any parallel-restricted operations. It would be nice to relax
+ * this restriction some day, but it's going to take a fair amount
+ * of work.
*/
return;
break;
case RTE_VALUES:
+
/*
* The data for a VALUES clause is stored in the plan tree itself,
* so scanning it in a worker is fine.
break;
case RTE_CTE:
+
/*
* CTE tuplestores aren't shared among parallel workers, so we
* force all CTE scans to happen in the leader. Also, populating
}
/*
- * If there's anything in baserestrictinfo that's parallel-restricted,
- * we give up on parallelizing access to this relation. We could consider
+ * If there's anything in baserestrictinfo that's parallel-restricted, we
+ * give up on parallelizing access to this relation. We could consider
* instead postponing application of the restricted quals until we're
* above all the parallelism in the plan tree, but it's not clear that
* this would be a win in very many cases, and it might be tricky to make
return;
/*
- * If the relation's outputs are not parallel-safe, we must give up.
- * In the common case where the relation only outputs Vars, this check is
+ * If the relation's outputs are not parallel-safe, we must give up. In
+ * the common case where the relation only outputs Vars, this check is
* very cheap; otherwise, we have to do more work.
*/
if (rel->reltarget_has_non_vars &&
int parallel_workers = 0;
/*
- * Decide on the numebr of workers to request for this append path. For
- * now, we just use the maximum value from among the members. It
+ * Decide on the number of workers to request for this append path.
+ * For now, we just use the maximum value from among the members. It
* might be useful to use a higher number if the Append node were
* smart enough to spread out the workers, but it currently isn't.
*/
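The comment above notes that, for now, an Append path simply requests the maximum parallel_workers value found among its member paths. A small sketch of that choice over a hypothetical subpath array (not the real Path structures):

#include <stdio.h>

typedef struct ToySubpath
{
	int			parallel_workers;	/* workers each member path wants */
} ToySubpath;

/* Pick the number of workers for an Append: the maximum over its members. */
static int
append_parallel_workers(const ToySubpath *subpaths, int nsubpaths)
{
	int			parallel_workers = 0;
	int			i;

	for (i = 0; i < nsubpaths; i++)
	{
		if (subpaths[i].parallel_workers > parallel_workers)
			parallel_workers = subpaths[i].parallel_workers;
	}
	return parallel_workers;
}

int
main(void)
{
	ToySubpath	members[3] = {{1}, {4}, {2}};

	printf("append workers = %d\n", append_parallel_workers(members, 3));
	return 0;
}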
* Run generate_gather_paths() for each just-processed joinrel. We
* could not do this earlier because both regular and partial paths
* can get added to a particular joinrel at multiple times within
- * join_search_one_level. After that, we're done creating paths
- * for the joinrel, so run set_cheapest().
+ * join_search_one_level. After that, we're done creating paths for
+ * the joinrel, so run set_cheapest().
*/
foreach(lc, root->join_rel_level[lev])
{
* We might not really need a Result node here. There are several ways
* that this can happen. For example, MergeAppend doesn't project, so we
* would have thought that we needed a projection to attach resjunk sort
- * columns to its output ... but create_merge_append_plan might have
- * added those same resjunk sort columns to both MergeAppend and its
- * children. Alternatively, apply_projection_to_path might have created
- * a projection path as the subpath of a Gather node even though the
- * subpath was projection-capable. So, if the subpath is capable of
- * projection or the desired tlist is the same expression-wise as the
- * subplan's, just jam it in there. We'll have charged for a Result that
- * doesn't actually appear in the plan, but that's better than having a
- * Result we don't need.
+ * columns to its output ... but create_merge_append_plan might have added
+ * those same resjunk sort columns to both MergeAppend and its children.
+ * Alternatively, apply_projection_to_path might have created a projection
+ * path as the subpath of a Gather node even though the subpath was
+ * projection-capable. So, if the subpath is capable of projection or the
+ * desired tlist is the same expression-wise as the subplan's, just jam it
+ * in there. We'll have charged for a Result that doesn't actually appear
+ * in the plan, but that's better than having a Result we don't need.
*/
if (is_projection_capable_path(best_path->subpath) ||
tlist_same_exprs(tlist, subplan->targetlist))
/*
* If a join between foreign relations was pushed down, remember it. The
* push-down safety of the join depends upon the server and user mapping
- * being same. That can change between planning and execution time, in which
- * case the plan should be invalidated.
+ * being the same. That can change between planning and execution time, in
+ * which case the plan should be invalidated.
*/
if (scan_relid == 0)
root->glob->hasForeignJoin = true;
/*
* Replace any outer-relation variables with nestloop params in the qual,
* fdw_exprs and fdw_recheck_quals expressions. We do this last so that
- * the FDW doesn't have to be involved. (Note that parts of fdw_exprs
- * or fdw_recheck_quals could have come from join clauses, so doing this
+ * the FDW doesn't have to be involved. (Note that parts of fdw_exprs or
+ * fdw_recheck_quals could have come from join clauses, so doing this
* beforehand on the scan_clauses wouldn't work.) We assume
* fdw_scan_tlist contains no such variables.
*/
* 0, but there can be no Var with relid 0 in the rel's targetlist or the
* restriction clauses, so we skip this in that case. Note that any such
* columns in base relations that were joined are assumed to be contained
- * in fdw_scan_tlist.) This is a bit of a kluge and might go away someday,
- * so we intentionally leave it out of the API presented to FDWs.
+ * in fdw_scan_tlist.) This is a bit of a kluge and might go away
+ * someday, so we intentionally leave it out of the API presented to FDWs.
*/
scan_plan->fsSystemCol = false;
if (scan_relid > 0)
plan->righttree = NULL;
node->num_workers = nworkers;
node->single_copy = single_copy;
- node->invisible = false;
+ node->invisible = false;
return node;
}
List *rollup_lists,
List *rollup_groupclauses);
static void set_grouped_rel_consider_parallel(PlannerInfo *root,
- RelOptInfo *grouped_rel,
- PathTarget *target);
+ RelOptInfo *grouped_rel,
+ PathTarget *target);
static Size estimate_hashagg_tablesize(Path *path, AggClauseCosts *agg_costs,
- double dNumGroups);
+ double dNumGroups);
static RelOptInfo *create_grouping_paths(PlannerInfo *root,
RelOptInfo *input_rel,
PathTarget *target,
static PathTarget *make_group_input_target(PlannerInfo *root,
PathTarget *final_target);
static PathTarget *make_partialgroup_input_target(PlannerInfo *root,
- PathTarget *final_target);
+ PathTarget *final_target);
static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
static PathTarget *make_window_input_target(PlannerInfo *root,
* findable from the PlannerInfo struct; anything else the FDW wants
* to know should be obtainable via "root".
*
- * Note: CustomScan providers, as well as FDWs that don't want to
- * use this hook, can use the create_upper_paths_hook; see below.
+ * Note: CustomScan providers, as well as FDWs that don't want to use
+ * this hook, can use the create_upper_paths_hook; see below.
*/
if (current_rel->fdwroutine &&
current_rel->fdwroutine->GetForeignUpperPaths)
/*
* All that's left to check now is to make sure all aggregate functions
- * support partial mode. If there's no aggregates then we can skip checking
- * that.
+ * support partial mode. If there are no aggregates then we can skip
+ * checking that.
*/
if (!parse->hasAggs)
grouped_rel->consider_parallel = true;
/*
* Determine whether it's possible to perform sort-based implementations
- * of grouping. (Note that if groupClause is empty, grouping_is_sortable()
- * is trivially true, and all the pathkeys_contained_in() tests will
- * succeed too, so that we'll consider every surviving input path.)
+ * of grouping. (Note that if groupClause is empty,
+ * grouping_is_sortable() is trivially true, and all the
+ * pathkeys_contained_in() tests will succeed too, so that we'll consider
+ * every surviving input path.)
*/
can_sort = grouping_is_sortable(parse->groupClause);
*/
if (grouped_rel->consider_parallel)
{
- Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
+ Path *cheapest_partial_path = linitial(input_rel->partial_pathlist);
/*
* Build target list for partial aggregate paths. We cannot reuse the
if (parse->hasAggs)
add_partial_path(grouped_rel, (Path *)
- create_agg_path(root,
- grouped_rel,
- path,
- partial_grouping_target,
- parse->groupClause ? AGG_SORTED : AGG_PLAIN,
- parse->groupClause,
- NIL,
- &agg_partial_costs,
- dNumPartialGroups,
- false,
- false,
- true));
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ partial_grouping_target,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ parse->groupClause,
+ NIL,
+ &agg_partial_costs,
+ dNumPartialGroups,
+ false,
+ false,
+ true));
else
add_partial_path(grouped_rel, (Path *)
- create_group_path(root,
- grouped_rel,
- path,
- partial_grouping_target,
- parse->groupClause,
- NIL,
- dNumPartialGroups));
+ create_group_path(root,
+ grouped_rel,
+ path,
+ partial_grouping_target,
+ parse->groupClause,
+ NIL,
+ dNumPartialGroups));
}
}
}
if (hashaggtablesize < work_mem * 1024L)
{
add_partial_path(grouped_rel, (Path *)
- create_agg_path(root,
- grouped_rel,
- cheapest_partial_path,
- partial_grouping_target,
- AGG_HASHED,
- parse->groupClause,
- NIL,
- &agg_partial_costs,
- dNumPartialGroups,
- false,
- false,
- true));
+ create_agg_path(root,
+ grouped_rel,
+ cheapest_partial_path,
+ partial_grouping_target,
+ AGG_HASHED,
+ parse->groupClause,
+ NIL,
+ &agg_partial_costs,
+ dNumPartialGroups,
+ false,
+ false,
+ true));
}
}
}
/*
* Now generate a complete GroupAgg Path atop of the cheapest partial
- * path. We need only bother with the cheapest path here, as the output
- * of Gather is never sorted.
+ * path. We need only bother with the cheapest path here, as the
+ * output of Gather is never sorted.
*/
if (grouped_rel->partial_pathlist)
{
- Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
- double total_groups = path->rows * path->parallel_workers;
+ Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
+ double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root,
grouped_rel,
&total_groups);
/*
- * Gather is always unsorted, so we'll need to sort, unless there's
- * no GROUP BY clause, in which case there will only be a single
- * group.
+ * Gather is always unsorted, so we'll need to sort, unless
+ * there's no GROUP BY clause, in which case there will only be a
+ * single group.
*/
if (parse->groupClause)
path = (Path *) create_sort_path(root,
if (parse->hasAggs)
add_path(grouped_rel, (Path *)
- create_agg_path(root,
- grouped_rel,
- path,
- target,
- parse->groupClause ? AGG_SORTED : AGG_PLAIN,
- parse->groupClause,
- (List *) parse->havingQual,
- &agg_final_costs,
- dNumGroups,
- true,
- true,
- true));
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ target,
+ parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+ parse->groupClause,
+ (List *) parse->havingQual,
+ &agg_final_costs,
+ dNumGroups,
+ true,
+ true,
+ true));
else
add_path(grouped_rel, (Path *)
- create_group_path(root,
- grouped_rel,
- path,
- target,
- parse->groupClause,
- (List *) parse->havingQual,
- dNumGroups));
+ create_group_path(root,
+ grouped_rel,
+ path,
+ target,
+ parse->groupClause,
+ (List *) parse->havingQual,
+ dNumGroups));
}
}
/*
* Provided that the estimated size of the hashtable does not exceed
* work_mem, we'll generate a HashAgg Path, although if we were unable
- * to sort above, then we'd better generate a Path, so that we at least
- * have one.
+ * to sort above, then we'd better generate a Path, so that we at
+ * least have one.
*/
if (hashaggtablesize < work_mem * 1024L ||
grouped_rel->pathlist == NIL)
{
/*
- * We just need an Agg over the cheapest-total input path, since input
- * order won't matter.
+ * We just need an Agg over the cheapest-total input path, since
+ * input order won't matter.
*/
add_path(grouped_rel, (Path *)
create_agg_path(root, grouped_rel,
/*
* Generate a HashAgg Path atop of the cheapest partial path. Once
- * again, we'll only do this if it looks as though the hash table won't
- * exceed work_mem.
+ * again, we'll only do this if it looks as though the hash table
+ * won't exceed work_mem.
*/
if (grouped_rel->partial_pathlist)
{
- Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
+ Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
hashaggtablesize = estimate_hashagg_tablesize(path,
&agg_final_costs,
if (hashaggtablesize < work_mem * 1024L)
{
- double total_groups = path->rows * path->parallel_workers;
+ double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root,
grouped_rel,
&total_groups);
add_path(grouped_rel, (Path *)
- create_agg_path(root,
- grouped_rel,
- path,
- target,
- AGG_HASHED,
- parse->groupClause,
- (List *) parse->havingQual,
- &agg_final_costs,
- dNumGroups,
- true,
- true,
- true));
+ create_agg_path(root,
+ grouped_rel,
+ path,
+ target,
+ AGG_HASHED,
+ parse->groupClause,
+ (List *) parse->havingQual,
+ &agg_final_costs,
+ dNumGroups,
+ true,
+ true,
+ true));
}
}
}
continue;
if (aggref->aggvariadic != tlistaggref->aggvariadic)
continue;
+
/*
* it would be harmless to compare aggcombine and aggpartial, but
* it's also unnecessary
} has_parallel_hazard_arg;
static bool aggregates_allow_partial_walker(Node *node,
- partial_agg_context *context);
+ partial_agg_context *context);
static bool contain_agg_clause_walker(Node *node, void *context);
static bool count_agg_clauses_walker(Node *node,
count_agg_clauses_context *context);
static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
static bool has_parallel_hazard_walker(Node *node,
- has_parallel_hazard_arg *context);
+ has_parallel_hazard_arg *context);
static bool parallel_too_dangerous(char proparallel,
- has_parallel_hazard_arg *context);
+ has_parallel_hazard_arg *context);
static bool typeid_is_temp(Oid typeid);
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool contain_leaked_vars_walker(Node *node, void *context);
if (aggref->aggdistinct || aggref->aggorder)
{
context->allowedtype = PAT_DISABLED;
- return true; /* abort search */
+ return true; /* abort search */
}
aggTuple = SearchSysCache1(AGGFNOID,
ObjectIdGetDatum(aggref->aggfnoid));
{
ReleaseSysCache(aggTuple);
context->allowedtype = PAT_DISABLED;
- return true; /* abort search */
+ return true; /* abort search */
}
/*
context->allowedtype = PAT_INTERNAL_ONLY;
ReleaseSysCache(aggTuple);
- return false; /* continue searching */
+ return false; /* continue searching */
}
return expression_tree_walker(node, aggregates_allow_partial_walker,
(void *) context);
bool
has_parallel_hazard(Node *node, bool allow_restricted)
{
- has_parallel_hazard_arg context;
+ has_parallel_hazard_arg context;
context.allow_restricted = allow_restricted;
return has_parallel_hazard_walker(node, &context);
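has_parallel_hazard() above is just a thin wrapper that fills in a context struct and starts a recursive walker, the usual expression-tree-walker convention in this code. The sketch below shows the same convention on a toy tree; ToyNode, ToyWalkerContext and the helper names are invented stand-ins, not the real Node machinery.

#include <stdbool.h>
#include <stdio.h>

typedef struct ToyNode
{
	bool		is_restricted;	/* stand-in for a parallel-restricted construct */
	int			nchildren;
	struct ToyNode *children[4];
} ToyNode;

typedef struct
{
	bool		allow_restricted;	/* mirrors has_parallel_hazard_arg */
} ToyWalkerContext;

/* Return true as soon as a hazard is found; false means "keep walking". */
static bool
hazard_walker(ToyNode *node, ToyWalkerContext *context)
{
	int			i;

	if (node == NULL)
		return false;
	if (node->is_restricted && !context->allow_restricted)
		return true;			/* abort the walk: hazard found */
	for (i = 0; i < node->nchildren; i++)
	{
		if (hazard_walker(node->children[i], context))
			return true;
	}
	return false;
}

/* Wrapper: set up the context, then start the recursion. */
static bool
has_hazard(ToyNode *root, bool allow_restricted)
{
	ToyWalkerContext context;

	context.allow_restricted = allow_restricted;
	return hazard_walker(root, &context);
}

int
main(void)
{
	ToyNode		leaf = {true, 0, {NULL}};
	ToyNode		root = {false, 1, {&leaf}};

	printf("hazard (strict)  = %d\n", has_hazard(&root, false));
	printf("hazard (relaxed) = %d\n", has_hazard(&root, true));
	return 0;
}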
* recurse through Query objects to as to locate parallel-unsafe
* constructs anywhere in the tree.
*
- * Later, we'll be called again for specific quals, possibly after
- * some planning has been done, we may encounter SubPlan, SubLink,
- * or AlternativeSubLink nodes. Currently, there's no need to recurse
- * through these; they can't be unsafe, since we've already cleared
- * the entire query of unsafe operations, and they're definitely
+ * Later, we'll be called again for specific quals, possibly after some
+ * planning has been done, and we may encounter SubPlan, SubLink, or
+ * AlternativeSubLink nodes. Currently, there's no need to recurse
+ * through these; they can't be unsafe, since we've already cleared the
+ * entire query of unsafe operations, and they're definitely
* parallel-restricted.
*/
if (IsA(node, Query))
{
- Query *query = (Query *) node;
+ Query *query = (Query *) node;
if (query->rowMarks != NULL)
return true;
has_parallel_hazard_walker,
context, 0);
}
- else if (IsA(node, SubPlan) || IsA(node, SubLink) ||
- IsA(node, AlternativeSubPlan) || IsA(node, Param))
+ else if (IsA(node, SubPlan) || IsA(node, SubLink) ||
+ IsA(node, AlternativeSubPlan) || IsA(node, Param))
{
/*
- * Since we don't have the ability to push subplans down to workers
- * at present, we treat subplan references as parallel-restricted.
+ * Since we don't have the ability to push subplans down to workers at
+ * present, we treat subplan references as parallel-restricted.
*/
if (!context->allow_restricted)
return true;
if (IsA(node, RestrictInfo))
{
RestrictInfo *rinfo = (RestrictInfo *) node;
+
return has_parallel_hazard_walker((Node *) rinfo->clause, context);
}
/*
* It is an error for a parallel worker to touch a temporary table in any
- * way, so we can't handle nodes whose type is the rowtype of such a table.
+ * way, so we can't handle nodes whose type is the rowtype of such a
+ * table.
*/
if (!context->allow_restricted)
{
foreach(opid, rcexpr->opnos)
{
- Oid opfuncid = get_opcode(lfirst_oid(opid));
+ Oid opfuncid = get_opcode(lfirst_oid(opid));
+
if (parallel_too_dangerous(func_parallel(opfuncid), context))
return true;
}
static bool
typeid_is_temp(Oid typeid)
{
- Oid relid = get_typ_typrelid(typeid);
+ Oid relid = get_typ_typrelid(typeid);
if (!OidIsValid(relid))
return false;
/*
* WHERE CURRENT OF doesn't contain function calls. Moreover, it
* is important that this can be pushed down into a
- * security_barrier view, since the planner must always generate
- * a TID scan when CURRENT OF is present -- c.f. cost_tidscan.
+ * security_barrier view, since the planner must always generate a
+ * TID scan when CURRENT OF is present -- c.f. cost_tidscan.
*/
return false;
AttrNumber natt;
Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */
Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */
- int nplain = 0; /* # plain attrs observed */
+ int nplain = 0; /* # plain attrs observed */
/*
* If inference specification element lacks collation/opclass, then no
rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */
- rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
+ rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->reltarget = create_empty_pathtarget();
rel->reltarget_has_non_vars = false;
rel->pathlist = NIL;
void
apply_partialaggref_adjustment(PathTarget *target)
{
- ListCell *lc;
+ ListCell *lc;
foreach(lc, target->exprs)
{
- Aggref *aggref = (Aggref *) lfirst(lc);
+ Aggref *aggref = (Aggref *) lfirst(lc);
if (IsA(aggref, Aggref))
{
errmsg("column %s.%s does not exist", relname, colname) :
errmsg("column \"%s\" does not exist", colname),
state->rfirst ? closestfirst ?
- errhint("Perhaps you meant to reference the column \"%s.%s\".",
- state->rfirst->eref->aliasname, closestfirst) :
+ errhint("Perhaps you meant to reference the column \"%s.%s\".",
+ state->rfirst->eref->aliasname, closestfirst) :
errhint("There is a column named \"%s\" in table \"%s\", but it cannot be referenced from this part of the query.",
colname, state->rfirst->eref->aliasname) : 0,
parser_errposition(pstate, location)));
bool skipValidation,
bool isAddConstraint);
static void transformCheckConstraints(CreateStmtContext *cxt,
- bool skipValidation);
+ bool skipValidation);
static void transformConstraintAttrs(CreateStmtContext *cxt,
List *constraintList);
static void transformColumnType(CreateStmtContext *cxt, ColumnDef *column);
if (like_found)
{
/*
- * To match INHERITS, the existence of any LIKE table with OIDs
- * causes the new table to have oids. For the same reason,
- * WITH/WITHOUT OIDs is also ignored with LIKE. We prepend
- * because the first oid option list entry is honored. Our
- * prepended WITHOUT OIDS clause will be overridden if an
- * inherited table has oids.
+ * To match INHERITS, the existence of any LIKE table with OIDs causes
+ * the new table to have oids. For the same reason, WITH/WITHOUT OIDs
+ * is also ignored with LIKE. We prepend because the first oid option
+ * list entry is honored. Our prepended WITHOUT OIDS clause will be
+ * overridden if an inherited table has oids.
*/
stmt->options = lcons(makeDefElem("oids",
- (Node *)makeInteger(cxt.hasoids)), stmt->options);
+ (Node *) makeInteger(cxt.hasoids)), stmt->options);
}
foreach(elements, stmt->tableElts)
if (nodeTag(element) == T_Constraint)
transformTableConstraint(&cxt, (Constraint *) element);
}
+
/*
* transformIndexConstraints wants cxt.alist to contain only index
* statements, so transfer anything we already have into save_alist.
/*
* If creating a new table, we can safely skip validation of check
- * constraints, and nonetheless mark them valid. (This will override
- * any user-supplied NOT VALID flag.)
+ * constraints, and nonetheless mark them valid. (This will override any
+ * user-supplied NOT VALID flag.)
*/
if (skipValidation)
{
*
* We use kill(0) for the fallback barrier as we assume that kernels on
* systems old enough to require fallback barrier support will include an
- * appropriate barrier while checking the existence of the postmaster
- * pid.
+ * appropriate barrier while checking the existence of the postmaster pid.
*/
(void) kill(PostmasterPid, 0);
}
* wi_links entry into free list or running list
* wi_dboid OID of the database this worker is supposed to work on
* wi_tableoid OID of the table currently being vacuumed, if any
- * wi_sharedrel flag indicating whether table is marked relisshared
+ * wi_sharedrel flag indicating whether table is marked relisshared
* wi_proc pointer to PGPROC of the running worker, NULL if not started
* wi_launchtime Time at which this worker was launched
* wi_cost_* Vacuum cost-based delay parameters current in this worker
/*
* There are some conditions that we need to check before trying to
- * start a worker. First, we need to make sure that there is a
- * worker slot available. Second, we need to make sure that no
- * other worker failed while starting up.
+ * start a worker. First, we need to make sure that there is a worker
+ * slot available. Second, we need to make sure that no other worker
+ * failed while starting up.
*/
current_time = GetCurrentTimestamp();
beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0';
beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
beentry->st_progress_command_target = InvalidOid;
+
/*
* we don't zero st_progress_param here to save cycles; nobody should
* examine it until st_progress_command has been set to something other
const int64 *val)
{
volatile PgBackendStatus *beentry = MyBEEntry;
- int i;
+ int i;
if (!beentry || !pgstat_track_activities || nparam == 0)
return;
RemovePgTempFiles();
/*
- * Forcibly remove the files signaling a standby promotion
- * request. Otherwise, the existence of those files triggers
- * a promotion too early, whether a user wants that or not.
+ * Forcibly remove the files signaling a standby promotion request.
+ * Otherwise, the existence of those files triggers a promotion too early,
+ * whether a user wants that or not.
*
- * This removal of files is usually unnecessary because they
- * can exist only during a few moments during a standby
- * promotion. However there is a race condition: if pg_ctl promote
- * is executed and creates the files during a promotion,
- * the files can stay around even after the server is brought up
- * to new master. Then, if new standby starts by using the backup
- * taken from that master, the files can exist at the server
+ * This removal of files is usually unnecessary because they can exist
+ * only for a few moments during a standby promotion. However, there is
+ * a race condition: if pg_ctl promote is executed and creates the files
+ * during a promotion, the files can stay around even after the server is
+ * brought up as the new master. Then, if a new standby starts by using
+ * the backup taken from that master, the files can exist at the server
* startup and should be removed in order to avoid an unexpected
* promotion.
*
- * Note that promotion signal files need to be removed before
- * the startup process is invoked. Because, after that, they can
- * be used by postmaster's SIGUSR1 signal handler.
+ * Note that promotion signal files need to be removed before the startup
+ * process is invoked. Because, after that, they can be used by
+ * postmaster's SIGUSR1 signal handler.
*/
RemovePromoteSignalFiles();
else if (!parse_bool(valptr, &am_walsender))
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- "replication",
- valptr),
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ "replication",
+ valptr),
errhint("Valid values are: \"false\", 0, \"true\", 1, \"database\".")));
}
else
if (pmState == PM_RECOVERY)
{
SignalSomeChildren(SIGTERM, BACKEND_TYPE_BGWORKER);
+
/*
* Only startup, bgwriter, walreceiver, possibly bgworkers,
* and/or checkpointer should be active in this state; we just
/*
* It's possible that this background worker started some OTHER
- * background worker and asked to be notified when that worker
- * started or stopped. If so, cancel any notifications destined
- * for the now-dead backend.
+ * background worker and asked to be notified when that worker started
+ * or stopped. If so, cancel any notifications destined for the
+ * now-dead backend.
*/
if (rw->rw_backend->bgworker_notify)
BackgroundWorkerStopNotifications(rw->rw_pid);
rw->rw_crashed_at = 0;
/*
- * Allocate and assign the Backend element. Note we
- * must do this before forking, so that we can handle out of
- * memory properly.
+ * Allocate and assign the Backend element. Note we must do this
+ * before forking, so that we can handle out of memory properly.
*/
if (!assign_backendlist_entry(rw))
return;
TimeLineID starttli;
XLogRecPtr endptr;
TimeLineID endtli;
- StringInfo labelfile;
- StringInfo tblspc_map_file = NULL;
+ StringInfo labelfile;
+ StringInfo tblspc_map_file = NULL;
int datadirpathlen;
List *tablespaces = NIL;
case XLOG_INVALIDATIONS:
{
xl_invalidations *invalidations =
- (xl_invalidations *) XLogRecGetData(r);
+ (xl_invalidations *) XLogRecGetData(r);
ReorderBufferImmediateInvalidation(
ctx->reorder, invalidations->nmsgs, invalidations->msgs);
static void
DecodeLogicalMsgOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
{
- SnapBuild *builder = ctx->snapshot_builder;
+ SnapBuild *builder = ctx->snapshot_builder;
XLogReaderState *r = buf->record;
- TransactionId xid = XLogRecGetXid(r);
- uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK;
- RepOriginId origin_id = XLogRecGetOrigin(r);
- Snapshot snapshot;
+ TransactionId xid = XLogRecGetXid(r);
+ uint8 info = XLogRecGetInfo(r) & ~XLR_INFO_MASK;
+ RepOriginId origin_id = XLogRecGetOrigin(r);
+ Snapshot snapshot;
xl_logical_message *message;
if (info != XLOG_LOGICAL_MESSAGE)
snapshot = SnapBuildGetOrBuildSnapshot(builder, xid);
ReorderBufferQueueMessage(ctx->reorder, xid, snapshot, buf->endptr,
message->transactional,
- message->message, /* first part of message is prefix */
+ message->message, /* first part of message is
+ * prefix */
message->message_size,
message->message + message->prefix_size);
}
xl_xact_parsed_commit *parsed, TransactionId xid)
{
XLogRecPtr origin_lsn = InvalidXLogRecPtr;
- TimestampTz commit_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ TimestampTz commit_time = parsed->xact_time;
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
int i;
if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
static void change_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
Relation relation, ReorderBufferChange *change);
static void message_cb_wrapper(ReorderBuffer *cache, ReorderBufferTXN *txn,
- XLogRecPtr message_lsn, bool transactional,
- const char *prefix, Size message_size, const char *message);
+ XLogRecPtr message_lsn, bool transactional,
+ const char *prefix, Size message_size, const char *message);
static void LoadOutputPlugin(OutputPluginCallbacks *callbacks, char *plugin);
LogLogicalMessage(const char *prefix, const char *message, size_t size,
bool transactional)
{
- xl_logical_message xlrec;
+ xl_logical_message xlrec;
/*
* Force xid to be allocated if we're emitting a transactional message.
uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
if (info != XLOG_LOGICAL_MESSAGE)
- elog(PANIC, "logicalmsg_redo: unknown op code %u", info);
+ elog(PANIC, "logicalmsg_redo: unknown op code %u", info);
/* This is only interesting for logical decoding, see decode.c. */
}
} ReplicationStateCtl;
/* external variables */
-RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */
+RepOriginId replorigin_session_origin = InvalidRepOriginId; /* assumed identity */
XLogRecPtr replorigin_session_origin_lsn = InvalidXLogRecPtr;
TimestampTz replorigin_session_origin_timestamp = 0;
}
else
{
- ReorderBufferTXN *txn = NULL;
- volatile Snapshot snapshot_now = snapshot;
+ ReorderBufferTXN *txn = NULL;
+ volatile Snapshot snapshot_now = snapshot;
if (xid != InvalidTransactionId)
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
BeginInternalSubTransaction("replay");
/*
- * Force invalidations to happen outside of a valid transaction - that
- * way entries will just be marked as invalid without accessing the
- * catalog. That's advantageous because we don't need to setup the
- * full state necessary for catalog access.
+ * Force invalidations to happen outside of a valid transaction - that way
+ * entries will just be marked as invalid without accessing the catalog.
+ * That's advantageous because we don't need to set up the full state
+ * necessary for catalog access.
*/
if (use_subtxn)
AbortCurrentTransaction();
change->data.msg.prefix = MemoryContextAlloc(rb->context,
prefix_size);
memcpy(change->data.msg.prefix, data, prefix_size);
- Assert(change->data.msg.prefix[prefix_size-1] == '\0');
+ Assert(change->data.msg.prefix[prefix_size - 1] == '\0');
data += prefix_size;
/* read the messsage */
memcpy(&change->data.msg.message_size, data, sizeof(Size));
data += sizeof(Size);
change->data.msg.message = MemoryContextAlloc(rb->context,
- change->data.msg.message_size);
+ change->data.msg.message_size);
memcpy(change->data.msg.message, data,
change->data.msg.message_size);
data += change->data.msg.message_size;
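The hunk above restores a logical-decoding message change from a flat buffer: a size field, then the prefix bytes, then another size field and the payload, advancing the cursor after each memcpy. A minimal sketch of that length-prefixed layout with plain C types; the pack/unpack helpers are illustrative, not the real ReorderBuffer serialization.

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Append one length-prefixed blob to the buffer, returning the new cursor. */
static char *
pack_blob(char *cursor, const char *blob, size_t len)
{
	memcpy(cursor, &len, sizeof(size_t));
	cursor += sizeof(size_t);
	memcpy(cursor, blob, len);
	return cursor + len;
}

/* Read one length-prefixed blob back; *len_out gets the stored length. */
static const char *
unpack_blob(const char **cursor, size_t *len_out)
{
	const char *blob;

	memcpy(len_out, *cursor, sizeof(size_t));
	*cursor += sizeof(size_t);
	blob = *cursor;
	*cursor += *len_out;
	return blob;
}

int
main(void)
{
	const char *prefix = "myplugin";
	const char *message = "hello, decoding";
	char		buf[256];
	char	   *w = buf;
	const char *r = buf;
	size_t		len;
	const char *out;

	w = pack_blob(w, prefix, strlen(prefix) + 1);	/* prefix keeps its '\0' */
	w = pack_blob(w, message, strlen(message));		/* payload need not */

	out = unpack_blob(&r, &len);
	printf("prefix  = %s (%zu bytes)\n", out, len);
	out = unpack_blob(&r, &len);
	printf("message = %.*s (%zu bytes)\n", (int) len, out, len);
	return 0;
}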
ReplicationSlotValidateName(name, ERROR);
/*
- * If some other backend ran this code concurrently with us, we'd likely both
- * allocate the same slot, and that would be bad. We'd also be at risk of
- * missing a name collision. Also, we don't want to try to create a new
- * slot while somebody's busy cleaning up an old one, because we might
- * both be monkeying with the same directory.
+ * If some other backend ran this code concurrently with us, we'd likely
+ * both allocate the same slot, and that would be bad. We'd also be at
+ * risk of missing a name collision. Also, we don't want to try to create
+ * a new slot while somebody's busy cleaning up an old one, because we
+ * might both be monkeying with the same directory.
*/
LWLockAcquire(ReplicationSlotAllocationLock, LW_EXCLUSIVE);
if (active_pid != 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("replication slot \"%s\" is active for PID %d",
- name, active_pid)));
+ errmsg("replication slot \"%s\" is active for PID %d",
+ name, active_pid)));
/* We made this slot active, so it's ours now. */
MyReplicationSlot = slot;
ReplicationSlotMarkDirty(void)
{
ReplicationSlot *slot = MyReplicationSlot;
+
Assert(MyReplicationSlot != NULL);
SpinLockAcquire(&slot->mutex);
pg_create_physical_replication_slot(PG_FUNCTION_ARGS)
{
Name name = PG_GETARG_NAME(0);
- bool immediately_reserve = PG_GETARG_BOOL(1);
+ bool immediately_reserve = PG_GETARG_BOOL(1);
Datum values[2];
bool nulls[2];
TupleDesc tupdesc;
static int SyncRepWakeQueue(bool all, int mode);
static bool SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr,
- XLogRecPtr *flushPtr,
- XLogRecPtr *applyPtr,
- bool *am_sync);
+ XLogRecPtr *flushPtr,
+ XLogRecPtr *applyPtr,
+ bool *am_sync);
static int SyncRepGetStandbyPriority(void);
#ifdef USE_ASSERT_CHECKING
/*
* If a wait for synchronous replication is pending, we can neither
* acknowledge the commit nor raise ERROR or FATAL. The latter would
- * lead the client to believe that the transaction aborted, which
- * is not true: it's already committed locally. The former is no good
+ * lead the client to believe that the transaction aborted, which is
+ * not true: it's already committed locally. The former is no good
* either: the client has requested synchronous replication, and is
* entitled to assume that an acknowledged commit is also replicated,
* which might not be true. So in this case we issue a WARNING (which
/*
* If this WALSender is serving a standby that is not on the list of
* potential sync standbys then we have nothing to do. If we are still
- * starting up, still running base backup or the current flush position
- * is still invalid, then leave quickly also.
+ * starting up, still running base backup or the current flush position is
+ * still invalid, then leave quickly also.
*/
if (MyWalSnd->sync_standby_priority == 0 ||
MyWalSnd->state < WALSNDSTATE_STREAMING ||
}
/*
- * We're a potential sync standby. Release waiters if there are
- * enough sync standbys and we are considered as sync.
+ * We're a potential sync standby. Release waiters if there are enough
+ * sync standbys and we are considered as sync.
*/
LWLockAcquire(SyncRepLock, LW_EXCLUSIVE);
/*
- * Check whether we are a sync standby or not, and calculate
- * the oldest positions among all sync standbys.
+ * Check whether we are a sync standby or not, and calculate the oldest
+ * positions among all sync standbys.
*/
got_oldest = SyncRepGetOldestSyncRecPtr(&writePtr, &flushPtr,
&applyPtr, &am_sync);
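SyncRepGetOldestSyncRecPtr(), called above, reduces the chosen sync standbys to the oldest (smallest) write, flush and apply positions, which bound how far waiters may be released. A small sketch of that element-wise minimum with plain 64-bit LSNs; unlike the real code, the toy uses UINT64_MAX rather than InvalidXLogRecPtr as its "not yet set" sentinel.

#include <stdint.h>
#include <stdio.h>

typedef struct ToyStandby
{
	uint64_t	write_lsn;
	uint64_t	flush_lsn;
	uint64_t	apply_lsn;
} ToyStandby;

#define INVALID_LSN UINT64_MAX	/* "not yet set": any real LSN is older */

/* Take the element-wise minimum of write/flush/apply over all sync standbys. */
static void
oldest_sync_positions(const ToyStandby *standbys, int n,
					  uint64_t *write, uint64_t *flush, uint64_t *apply)
{
	int			i;

	*write = *flush = *apply = INVALID_LSN;
	for (i = 0; i < n; i++)
	{
		if (standbys[i].write_lsn < *write)
			*write = standbys[i].write_lsn;
		if (standbys[i].flush_lsn < *flush)
			*flush = standbys[i].flush_lsn;
		if (standbys[i].apply_lsn < *apply)
			*apply = standbys[i].apply_lsn;
	}
}

int
main(void)
{
	ToyStandby	s[2] = {{100, 90, 80}, {120, 85, 95}};
	uint64_t	w, f, a;

	oldest_sync_positions(s, 2, &w, &f, &a);
	printf("oldest write=%llu flush=%llu apply=%llu\n",
		   (unsigned long long) w, (unsigned long long) f,
		   (unsigned long long) a);
	return 0;
}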
/*
- * If we are managing a sync standby, though we weren't
- * prior to this, then announce we are now a sync standby.
+ * If we are managing a sync standby, though we weren't prior to this,
+ * then announce we are now a sync standby.
*/
if (announce_next_takeover && am_sync)
{
SyncRepGetOldestSyncRecPtr(XLogRecPtr *writePtr, XLogRecPtr *flushPtr,
XLogRecPtr *applyPtr, bool *am_sync)
{
- List *sync_standbys;
- ListCell *cell;
+ List *sync_standbys;
+ ListCell *cell;
*writePtr = InvalidXLogRecPtr;
*flushPtr = InvalidXLogRecPtr;
}
/*
- * Scan through all sync standbys and calculate the oldest
- * Write, Flush and Apply positions.
+ * Scan through all sync standbys and calculate the oldest Write, Flush
+ * and Apply positions.
*/
- foreach (cell, sync_standbys)
+ foreach(cell, sync_standbys)
{
- WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
+ WalSnd *walsnd = &WalSndCtl->walsnds[lfirst_int(cell)];
XLogRecPtr write;
XLogRecPtr flush;
XLogRecPtr apply;
List *
SyncRepGetSyncStandbys(bool *am_sync)
{
- List *result = NIL;
- List *pending = NIL;
- int lowest_priority;
- int next_highest_priority;
- int this_priority;
- int priority;
- int i;
- bool am_in_pending = false;
- volatile WalSnd *walsnd; /* Use volatile pointer to prevent
- * code rearrangement */
+ List *result = NIL;
+ List *pending = NIL;
+ int lowest_priority;
+ int next_highest_priority;
+ int this_priority;
+ int priority;
+ int i;
+ bool am_in_pending = false;
+ volatile WalSnd *walsnd; /* Use volatile pointer to prevent code
+ * rearrangement */
/* Set default result */
if (am_sync != NULL)
next_highest_priority = lowest_priority + 1;
/*
- * Find the sync standbys which have the highest priority (i.e, 1).
- * Also store all the other potential sync standbys into the pending list,
- * in order to scan it later and find other sync standbys from it quickly.
+ * Find the sync standbys which have the highest priority (i.e., 1). Also
+ * store all the other potential sync standbys into the pending list, in
+ * order to scan it later and find other sync standbys from it quickly.
*/
for (i = 0; i < max_wal_senders; i++)
{
continue;
/*
- * If the priority is equal to 1, consider this standby as sync
- * and append it to the result. Otherwise append this standby
- * to the pending list to check if it's actually sync or not later.
+ * If the priority is equal to 1, consider this standby as sync and
+ * append it to the result. Otherwise append this standby to the
+ * pending list to check if it's actually sync or not later.
*/
if (this_priority == 1)
{
if (list_length(result) == SyncRepConfig->num_sync)
{
list_free(pending);
- return result; /* Exit if got enough sync standbys */
+ return result; /* Exit if got enough sync standbys */
}
}
else
/*
* Track the highest priority among the standbys in the pending
- * list, in order to use it as the starting priority for later scan
- * of the list. This is useful to find quickly the sync standbys
- * from the pending list later because we can skip unnecessary
- * scans for the unused priorities.
+ * list, in order to use it as the starting priority for a later
+ * scan of the list. This is useful for quickly finding the sync
+ * standbys in the pending list later because we can skip
+ * unnecessary scans for the unused priorities.
*/
if (this_priority < next_highest_priority)
next_highest_priority = this_priority;
priority = next_highest_priority;
while (priority <= lowest_priority)
{
- ListCell *cell;
- ListCell *prev = NULL;
- ListCell *next;
+ ListCell *cell;
+ ListCell *prev = NULL;
+ ListCell *next;
next_highest_priority = lowest_priority + 1;
/*
* We should always exit here after the scan of pending list
- * starts because we know that the list has enough elements
- * to reach SyncRepConfig->num_sync.
+ * starts because we know that the list has enough elements to
+ * reach SyncRepConfig->num_sync.
*/
if (list_length(result) == SyncRepConfig->num_sync)
{
}
/*
- * Remove the entry for this sync standby from the list
- * to prevent us from looking at the same entry again.
+ * Remove the entry for this sync standby from the list to
+ * prevent us from looking at the same entry again.
*/
pending = list_delete_cell(pending, cell, prev);
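The comments in SyncRepGetSyncStandbys() above describe the selection algorithm: take every priority-1 standby immediately, remember the rest, and then sweep the remaining priorities in increasing order until num_sync standbys have been collected. The sketch below captures the same idea over a plain array; for brevity it re-scans the array per priority instead of keeping the pending list the real code uses.

#include <stdio.h>

#define MAX_STANDBYS 8

/*
 * Pick up to num_sync standby indexes, preferring lower priority numbers
 * (1 is the highest priority, 0 means "not a sync candidate").
 */
static int
choose_sync_standbys(const int *priority, int nstandbys, int num_sync,
					 int *chosen)
{
	int			nchosen = 0;
	int			p;

	/* Sweep priorities in increasing order: 1 first, then 2, and so on. */
	for (p = 1; nchosen < num_sync; p++)
	{
		int			i;
		int			seen_higher = 0;

		for (i = 0; i < nstandbys && nchosen < num_sync; i++)
		{
			if (priority[i] == p)
				chosen[nchosen++] = i;
			else if (priority[i] > p)
				seen_higher = 1;
		}
		if (!seen_higher)
			break;				/* nothing left with a larger priority value */
	}
	return nchosen;
}

int
main(void)
{
	int			priority[MAX_STANDBYS] = {2, 1, 0, 3, 1};
	int			chosen[MAX_STANDBYS];
	int			n = choose_sync_standbys(priority, 5, 3, chosen);
	int			i;

	for (i = 0; i < n; i++)
		printf("sync standby: slot %d (priority %d)\n",
			   chosen[i], priority[chosen[i]]);
	return 0;
}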
*/
Assert(wait_fd != PGINVALID_SOCKET);
rc = WaitLatchOrSocket(&walrcv->latch,
- WL_POSTMASTER_DEATH | WL_SOCKET_READABLE |
+ WL_POSTMASTER_DEATH | WL_SOCKET_READABLE |
WL_TIMEOUT | WL_LATCH_SET,
wait_fd,
NAPTIME_PER_CYCLE);
/*
* The recovery process has asked us to send apply
* feedback now. Make sure the flag is really set to
- * false in shared memory before sending the reply,
- * so we don't miss a new request for a reply.
+ * false in shared memory before sending the reply, so
+ * we don't miss a new request for a reply.
*/
walrcv->force_reply = false;
pg_memory_barrier();
TimeLineID receive_start_tli;
XLogRecPtr received_lsn;
TimeLineID received_tli;
- TimestampTz last_send_time;
- TimestampTz last_receipt_time;
+ TimestampTz last_send_time;
+ TimestampTz last_receipt_time;
XLogRecPtr latest_end_lsn;
- TimestampTz latest_end_time;
+ TimestampTz latest_end_time;
char *slotname;
/* No WAL receiver, just return a tuple with NULL values */
if (!superuser())
{
/*
- * Only superusers can see details. Other users only get the pid
- * value to know whether it is a WAL receiver, but no details.
+ * Only superusers can see details. Other users only get the pid value
+ * to know whether it is a WAL receiver, but no details.
*/
MemSet(&nulls[1], true, PG_STAT_GET_WAL_RECEIVER_COLS - 1);
}
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(
- heap_form_tuple(tupdesc, values, nulls)));
+ heap_form_tuple(tupdesc, values, nulls)));
}
pq_beginmessage(&buf, 'D');
pq_sendint(&buf, 2, 2); /* # of columns */
len = strlen(histfname);
- pq_sendint(&buf, len, 4); /* col1 len */
+ pq_sendint(&buf, len, 4); /* col1 len */
pq_sendbytes(&buf, histfname, len);
fd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0666);
/* Initialize shared memory status, too */
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
SpinLockAcquire(&walsnd->mutex);
walsnd->sentPtr = sentPtr;
pq_sendint(&buf, 2, 2); /* number of columns */
len = strlen(tli_str);
- pq_sendint(&buf, len, 4); /* length */
+ pq_sendint(&buf, len, 4); /* length */
pq_sendbytes(&buf, tli_str, len);
len = strlen(startpos_str);
/* slot_name */
len = strlen(NameStr(MyReplicationSlot->data.name));
- pq_sendint(&buf, len, 4); /* col1 len */
+ pq_sendint(&buf, len, 4); /* col1 len */
pq_sendbytes(&buf, NameStr(MyReplicationSlot->data.name), len);
/* consistent wal location */
/* Also update the sent position status in shared memory */
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
SpinLockAcquire(&walsnd->mutex);
walsnd->sentPtr = MyReplicationSlot->data.restart_lsn;
* standby.
*/
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
SpinLockAcquire(&walsnd->mutex);
walsnd->write = writePtr;
*/
for (i = 0; i < max_wal_senders; i++)
{
- WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
SpinLockAcquire(&walsnd->mutex);
*/
if (am_cascading_walsender)
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
bool reload;
SpinLockAcquire(&walsnd->mutex);
/* Update shared memory status */
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
SpinLockAcquire(&walsnd->mutex);
walsnd->sentPtr = sentPtr;
/* Update shared memory status */
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
SpinLockAcquire(&walsnd->mutex);
walsnd->sentPtr = sentPtr;
for (i = 0; i < max_wal_senders; i++)
{
- WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
if (walsnd->pid == 0)
continue;
void
WalSndSetState(WalSndState state)
{
- WalSnd *walsnd = MyWalSnd;
+ WalSnd *walsnd = MyWalSnd;
Assert(am_walsender);
for (i = 0; i < max_wal_senders; i++)
{
- WalSnd *walsnd = &WalSndCtl->walsnds[i];
+ WalSnd *walsnd = &WalSndCtl->walsnds[i];
XLogRecPtr sentPtr;
XLogRecPtr write;
XLogRecPtr flush;
* any triggers, indexes, child tables, policies, or RLS enabled.
* (Note: these tests are too strict, because they will reject
* relations that once had such but don't anymore. But we don't
- * really care, because this whole business of converting relations
- * to views is just a kluge to allow dump/reload of views that
+ * really care, because this whole business of converting relations to
+ * views is just a kluge to allow dump/reload of views that
* participate in circular dependencies.)
*/
if (event_relation->rd_rel->relkind != RELKIND_VIEW &&
static List *sort_policies_by_name(List *policies);
-static int row_security_policy_cmp(const void *a, const void *b);
+static int row_security_policy_cmp(const void *a, const void *b);
static void add_security_quals(int rt_index,
- List *permissive_policies,
- List *restrictive_policies,
- List **securityQuals,
- bool *hasSubLinks);
+ List *permissive_policies,
+ List *restrictive_policies,
+ List **securityQuals,
+ bool *hasSubLinks);
static void add_with_check_options(Relation rel,
- int rt_index,
- WCOKind kind,
- List *permissive_policies,
- List *restrictive_policies,
- List **withCheckOptions,
- bool *hasSubLinks);
+ int rt_index,
+ WCOKind kind,
+ List *permissive_policies,
+ List *restrictive_policies,
+ List **withCheckOptions,
+ bool *hasSubLinks);
static bool check_role_for_policy(ArrayType *policy_roles, Oid user_id);
rel = heap_open(rte->relid, NoLock);
commandType = rt_index == root->resultRelation ?
- root->commandType : CMD_SELECT;
+ root->commandType : CMD_SELECT;
/*
* In some cases, we need to apply USING policies (which control the
* visibility of records) associated with multiple command types (see
* specific cases below).
*
- * When considering the order in which to apply these USING policies,
- * we prefer to apply higher privileged policies, those which allow the
- * user to lock records (UPDATE and DELETE), first, followed by policies
- * which don't (SELECT).
+ * When considering the order in which to apply these USING policies, we
+ * prefer to apply higher privileged policies, those which allow the user
+ * to lock records (UPDATE and DELETE), first, followed by policies which
+ * don't (SELECT).
*
* Note that the optimizer is free to push down and reorder quals which
* use leakproof functions.
*
* In all cases, if there are no policy clauses allowing access to rows in
- * the table for the specific type of operation, then a single always-false
- * clause (a default-deny policy) will be added (see add_security_quals).
+ * the table for the specific type of operation, then a single
+ * always-false clause (a default-deny policy) will be added (see
+ * add_security_quals).
*/
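The comments above spell out how row security combines policies: a row is visible only if at least one applicable permissive policy's qual passes and every restrictive policy's qual passes, and with no permissive policies at all the result is a default-deny. A self-contained sketch of that boolean combination, with the quals reduced to already-evaluated booleans (the rewriter of course builds expression trees, not booleans):

#include <stdbool.h>
#include <stdio.h>

/*
 * visible = (OR of permissive quals) AND (AND of restrictive quals),
 * with "no permissive policies" collapsing to a default-deny false.
 */
static bool
row_is_visible(const bool *permissive, int npermissive,
			   const bool *restrictive, int nrestrictive)
{
	bool		any_permissive = false;
	int			i;

	if (npermissive == 0)
		return false;			/* default deny: nothing grants access */

	for (i = 0; i < npermissive; i++)
		any_permissive |= permissive[i];
	if (!any_permissive)
		return false;

	for (i = 0; i < nrestrictive; i++)
	{
		if (!restrictive[i])
			return false;		/* every restrictive policy must pass */
	}
	return true;
}

int
main(void)
{
	bool		permissive[2] = {false, true};
	bool		restrictive[1] = {true};

	printf("visible = %d\n",
		   row_is_visible(permissive, 2, restrictive, 1));
	printf("no permissive policies => %d\n",
		   row_is_visible(NULL, 0, restrictive, 1));
	return 0;
}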
/*
* For a SELECT, if UPDATE privileges are required (eg: the user has
- * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals first.
+ * specified FOR [KEY] UPDATE/SHARE), then add the UPDATE USING quals
+ * first.
*
* This way, we filter out any records from the SELECT FOR SHARE/UPDATE
* which the user does not have access to via the UPDATE USING policies,
* a WHERE clause which involves columns from the relation), we collect up
* CMD_SELECT policies and add them via add_security_quals first.
*
- * This way, we filter out any records which are not visible through an ALL
- * or SELECT USING policy.
+ * This way, we filter out any records which are not visible through an
+ * ALL or SELECT USING policy.
*/
if ((commandType == CMD_UPDATE || commandType == CMD_DELETE) &&
rte->requiredPerms & ACL_SELECT)
hasSubLinks);
/*
- * Get and add ALL/SELECT policies, if SELECT rights are required
- * for this relation (eg: when RETURNING is used). These are added as
- * WCO policies rather than security quals to ensure that an error is
+ * Get and add ALL/SELECT policies, if SELECT rights are required for
+ * this relation (eg: when RETURNING is used). These are added as WCO
+ * policies rather than security quals to ensure that an error is
* raised if a policy is violated; otherwise, we might end up silently
* dropping rows to be added.
*/
&select_restrictive_policies);
add_with_check_options(rel, rt_index,
commandType == CMD_INSERT ?
- WCO_RLS_INSERT_CHECK : WCO_RLS_UPDATE_CHECK,
+ WCO_RLS_INSERT_CHECK : WCO_RLS_UPDATE_CHECK,
select_permissive_policies,
select_restrictive_policies,
withCheckOptions,
hasSubLinks);
/*
- * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK
- * WCOs to ensure they are considered when taking the UPDATE
- * path of an INSERT .. ON CONFLICT DO UPDATE, if SELECT
- * rights are required for this relation, also as WCO policies,
- * again, to avoid silently dropping data. See above.
+ * Get and add ALL/SELECT policies, as WCO_RLS_CONFLICT_CHECK WCOs
+ * to ensure they are considered when taking the UPDATE path of an
+ * INSERT .. ON CONFLICT DO UPDATE, if SELECT rights are required
+ * for this relation, also as WCO policies, again, to avoid
+ * silently dropping data. See above.
*/
if (rte->requiredPerms & ACL_SELECT)
{
List *conflict_select_restrictive_policies = NIL;
get_policies_for_relation(rel, CMD_SELECT, user_id,
- &conflict_select_permissive_policies,
+ &conflict_select_permissive_policies,
&conflict_select_restrictive_policies);
add_with_check_options(rel, rt_index,
WCO_RLS_CONFLICT_CHECK,
*/
foreach(item, relation->rd_rsdesc->policies)
{
- bool cmd_matches = false;
- RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
+ bool cmd_matches = false;
+ RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
/* Always add ALL policies, if they exist. */
if (policy->polcmd == '*')
}
/*
- * Add this policy to the list of permissive policies if it
- * applies to the specified role.
+ * Add this policy to the list of permissive policies if it applies to
+ * the specified role.
*/
if (cmd_matches && check_role_for_policy(policy->roles, user_id))
*permissive_policies = lappend(*permissive_policies, policy);
if (row_security_policy_hook_restrictive)
{
List *hook_policies =
- (*row_security_policy_hook_restrictive) (cmd, relation);
+ (*row_security_policy_hook_restrictive) (cmd, relation);
/*
* We sort restrictive policies by name so that any WCOs they generate
if (row_security_policy_hook_permissive)
{
List *hook_policies =
- (*row_security_policy_hook_permissive) (cmd, relation);
+ (*row_security_policy_hook_permissive) (cmd, relation);
foreach(item, hook_policies)
{
foreach(item, policies)
{
RowSecurityPolicy *policy = (RowSecurityPolicy *) lfirst(item);
+
pols[ii++] = *policy;
}
Expr *rowsec_expr;
/*
- * First collect up the permissive quals. If we do not find any permissive
- * policies then no rows are visible (this is handled below).
+ * First collect up the permissive quals. If we do not find any
+ * permissive policies then no rows are visible (this is handled below).
*/
foreach(item, permissive_policies)
{
/*
* We now know that permissive policies exist, so we can now add
* security quals based on the USING clauses from the restrictive
- * policies. Since these need to be "AND"d together, we can
- * just add them one at a time.
+ * policies. Since these need to be "AND"d together, we can just add
+ * them one at a time.
*/
foreach(item, restrictive_policies)
{
*securityQuals = list_append_unique(*securityQuals, rowsec_expr);
}
else
+
/*
* A permissive policy must exist for rows to be visible at all.
* Therefore, if there were no permissive policies found, return a
List *permissive_quals = NIL;
#define QUAL_FOR_WCO(policy) \
- ( kind != WCO_RLS_CONFLICT_CHECK && \
+ ( kind != WCO_RLS_CONFLICT_CHECK && \
(policy)->with_check_qual != NULL ? \
(policy)->with_check_qual : (policy)->qual )
}
/*
- * There must be at least one permissive qual found or no rows are
- * allowed to be added. This is the same as in add_security_quals.
+ * There must be at least one permissive qual found or no rows are allowed
+ * to be added. This is the same as in add_security_quals.
*
- * If there are no permissive_quals then we fall through and return a single
- * 'false' WCO, preventing all new rows.
+ * If there are no permissive_quals then we fall through and return a
+ * single 'false' WCO, preventing all new rows.
*/
if (permissive_quals != NIL)
{
/*
* It would be nice to include the I/O locks in the BufferDesc, but that
- * would increase the size of a BufferDesc to more than one cache line, and
- * benchmarking has shown that keeping every BufferDesc aligned on a cache
- * line boundary is important for performance. So, instead, the array of
- * I/O locks is allocated in a separate tranche. Because those locks are
- * not highly contentended, we lay out the array with minimal padding.
+ * would increase the size of a BufferDesc to more than one cache line,
+ * and benchmarking has shown that keeping every BufferDesc aligned on a
+ * cache line boundary is important for performance. So, instead, the
+ * array of I/O locks is allocated in a separate tranche. Because those
+ * locks are not highly contended, we lay out the array with minimal
+ * padding.
*/
size = add_size(size, mul_size(NBuffers, sizeof(LWLockMinimallyPadded)));
/* to allow aligning the above */
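The comment above explains the trade-off: each buffer descriptor is padded to its own cache line because that padding pays off under contention, while the rarely contended I/O locks are packed tightly in a separate tranche. A sketch of the padding idiom itself, using invented ToyBufferDesc/ToyLock types rather than the real BufferDesc/LWLock definitions:

#include <stdio.h>

#define CACHE_LINE_SIZE 64		/* assumed cache line width */

typedef struct ToyLock
{
	int			state;			/* stand-in for an LWLock */
} ToyLock;

typedef struct ToyBufferDesc
{
	int			buf_id;
	int			state;
	ToyLock		content_lock;
} ToyBufferDesc;

/*
 * Pad the hot, per-buffer descriptor to a full cache line so that two
 * backends hammering adjacent buffers never share a line ...
 */
typedef union ToyBufferDescPadded
{
	ToyBufferDesc desc;
	char		pad[CACHE_LINE_SIZE];
} ToyBufferDescPadded;

int
main(void)
{
	/* ... while the rarely contended I/O locks stay tightly packed. */
	ToyBufferDescPadded buffers[4];
	ToyLock		io_locks[4];

	printf("padded descriptor: %zu bytes each\n", sizeof(buffers[0]));
	printf("unpadded I/O lock: %zu bytes each\n", sizeof(io_locks[0]));
	return 0;
}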
TestForOldSnapshot_impl(Snapshot snapshot, Relation relation)
{
if (!IsCatalogRelation(relation)
- && !RelationIsAccessibleInLogicalDecoding(relation)
- && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
+ && !RelationIsAccessibleInLogicalDecoding(relation)
+ && (snapshot)->whenTaken < GetOldSnapshotThresholdTimestamp())
ereport(ERROR,
(errcode(ERRCODE_SNAPSHOT_TOO_OLD),
errmsg("snapshot too old")));
*/
void
UpdateFreeSpaceMap(Relation rel, BlockNumber startBlkNum,
- BlockNumber endBlkNum, Size freespace)
+ BlockNumber endBlkNum, Size freespace)
{
int new_cat = fsm_space_avail_to_cat(freespace);
FSMAddress addr;
uint16 slot;
- BlockNumber blockNum;
- BlockNumber lastBlkOnPage;
+ BlockNumber blockNum;
+ BlockNumber lastBlkOnPage;
blockNum = startBlkNum;
fsm_update_recursive(rel, addr, new_cat);
/*
- * Get the last block number on this FSM page. If that's greater
- * than or equal to our endBlkNum, we're done. Otherwise, advance
- * to the first block on the next page.
+ * Get the last block number on this FSM page. If that's greater than
+ * or equal to our endBlkNum, we're done. Otherwise, advance to the
+ * first block on the next page.
*/
lastBlkOnPage = fsm_get_lastblckno(rel, addr);
if (lastBlkOnPage >= endBlkNum)
int slot;
/*
- * Get the last slot number on the given address and convert that to
- * block number
+ * Get the last slot number on the given address and convert that to block
+ * number
*/
slot = SlotsPerFSMPage - 1;
return fsm_get_heap_blk(addr, slot);
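UpdateFreeSpaceMap() above walks a heap block range one FSM page at a time: handle the blocks covered by the current page, then, unless that page already reaches endBlkNum, jump to the first block of the next page. A tiny sketch of that chunked iteration with a made-up slots-per-page constant (the real FSM address and slot arithmetic is more involved):

#include <stdio.h>

#define SLOTS_PER_FSM_PAGE 4	/* toy value; the real figure depends on BLCKSZ */

/* Pretend update: record the same free-space category for one heap block. */
static void
set_block_category(unsigned blkno, int category)
{
	printf("block %u -> category %d\n", blkno, category);
}

/* Apply one category to every heap block in [start, end], page by page. */
static void
update_range(unsigned start, unsigned end, int category)
{
	unsigned	blkno = start;

	for (;;)
	{
		/* last heap block covered by the FSM page containing blkno */
		unsigned	last_on_page =
			(blkno / SLOTS_PER_FSM_PAGE) * SLOTS_PER_FSM_PAGE +
			(SLOTS_PER_FSM_PAGE - 1);
		unsigned	stop = last_on_page < end ? last_on_page : end;
		unsigned	b;

		for (b = blkno; b <= stop; b++)
			set_block_category(b, category);

		if (last_on_page >= end)
			break;				/* this page already covered endBlkNum */
		blkno = last_on_page + 1;	/* first block on the next FSM page */
	}
}

int
main(void)
{
	update_range(2, 9, 3);
	return 0;
}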
return;
/*
- * Get the parent page and our slot in the parent page, and
- * update the information in that.
+ * Get the parent page and our slot in the parent page, and update the
+ * information in that.
*/
parent = fsm_get_parent(addr, &parentslot);
fsm_set_and_search(rel, parent, parentslot, new_cat, 0);
}
/*
- * OK, the control segment looks basically valid, so we can use it to
- * get a list of segments that need to be removed.
+ * OK, the control segment looks basically valid, so we can use it to get
+ * a list of segments that need to be removed.
*/
nitems = old_control->nitems;
for (i = 0; i < nitems; ++i)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->delayChkpt = false; /* be sure this is cleared in abort */
+ pgxact->delayChkpt = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
/* Walk the list and clear all XIDs. */
while (nextidx != INVALID_PGPROCNO)
{
- PGPROC *proc = &allProcs[nextidx];
- PGXACT *pgxact = &allPgXact[nextidx];
+ PGPROC *proc = &allProcs[nextidx];
+ PGXACT *pgxact = &allPgXact[nextidx];
ProcArrayEndTransactionInternal(proc, pgxact, proc->procArrayGroupMemberXid);
*/
while (wakeidx != INVALID_PGPROCNO)
{
- PGPROC *proc = &allProcs[wakeidx];
+ PGPROC *proc = &allProcs[wakeidx];
wakeidx = pg_atomic_read_u32(&proc->procArrayGroupNext);
pg_atomic_write_u32(&proc->procArrayGroupNext, INVALID_PGPROCNO);
Assert(TransactionIdIsNormal(initializedUptoXID));
/*
- * we set latestObservedXid to the xid SUBTRANS has been initialized up to,
- * so we can extend it from that point onwards in
+ * we set latestObservedXid to the xid SUBTRANS has been initialized up
+ * to, so we can extend it from that point onwards in
* RecordKnownAssignedTransactionIds, and when we get consistent in
* ProcArrayApplyRecoveryInfo().
*/
/*
* We ignore an invalid pxmin because this means that backend has
* no snapshot currently. We hold a Share lock to avoid contention
- * with users taking snapshots. That is not a problem because
- * the current xmin is always at least one higher than the latest
+ * with users taking snapshots. That is not a problem because the
+ * current xmin is always at least one higher than the latest
* removed xid, so any new snapshot would never conflict with the
* test here.
*/
static bool
shm_mq_counterparty_gone(volatile shm_mq *mq, BackgroundWorkerHandle *handle)
{
- bool detached;
- pid_t pid;
+ bool detached;
+ pid_t pid;
/* Acquire the lock just long enough to check the pointer. */
SpinLockAcquire(&mq->mq_mutex);
* We're already behind, so clear a path as quickly as possible.
*/
VirtualTransactionId *backends;
+
backends = GetLockConflicts(&locktag, AccessExclusiveLock);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_LOCK);
uint32 partition = LockHashPartition(hashcode);
/*
- * It might seem unsafe to access proclock->groupLeader without a lock,
- * but it's not really. Either we are initializing a proclock on our
- * own behalf, in which case our group leader isn't changing because
- * the group leader for a process can only ever be changed by the
- * process itself; or else we are transferring a fast-path lock to the
- * main lock table, in which case that process can't change it's lock
- * group leader without first releasing all of its locks (and in
+ * It might seem unsafe to access proclock->groupLeader without a
+ * lock, but it's not really. Either we are initializing a proclock
+ * on our own behalf, in which case our group leader isn't changing
+ * because the group leader for a process can only ever be changed by
+ * the process itself; or else we are transferring a fast-path lock to
+ * the main lock table, in which case that process can't change its
+ * lock group leader without first releasing all of its locks (and in
* particular the one we are currently transferring).
*/
proclock->groupLeader = proc->lockGroupLeader != NULL ?
}
/*
- * Rats. Something conflicts. But it could still be my own lock, or
- * a lock held by another member of my locking group. First, figure out
- * how many conflicts remain after subtracting out any locks I hold
- * myself.
+ * Rats. Something conflicts. But it could still be my own lock, or a
+ * lock held by another member of my locking group. First, figure out how
+ * many conflicts remain after subtracting out any locks I hold myself.
*/
myLocks = proclock->holdMask;
for (i = 1; i <= numLockModes; i++)
/*
* Locks held in conflicting modes by members of our own lock group are
* not real conflicts; we can subtract those out and see if we still have
- * a conflict. This is O(N) in the number of processes holding or awaiting
- * locks on this object. We could improve that by making the shared memory
- * state more complex (and larger) but it doesn't seem worth it.
+ * a conflict. This is O(N) in the number of processes holding or
+ * awaiting locks on this object. We could improve that by making the
+ * shared memory state more complex (and larger) but it doesn't seem worth
+ * it.
*/
procLocks = &(lock->procLocks);
otherproclock = (PROCLOCK *)
proclock->groupLeader == otherproclock->groupLeader &&
(otherproclock->holdMask & conflictMask) != 0)
{
- int intersectMask = otherproclock->holdMask & conflictMask;
+ int intersectMask = otherproclock->holdMask & conflictMask;
for (i = 1; i <= numLockModes; i++)
{
*
* proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before
- * acquiring &proc->backendLock. In particular, it's certainly safe to
- * assume that if the target backend holds any fast-path locks, it
+ * acquiring &proc->backendLock. In particular, it's certainly safe
+ * to assume that if the target backend holds any fast-path locks, it
* must have performed a memory-fencing operation (in particular, an
* LWLock acquisition) since setting proc->databaseId. However, it's
* less clear that our backend is certain to have performed a memory
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%d: %s(%s): excl %u shared %u haswaiters %u waiters %u rOK %d",
- MyProcPid,
- where, MainLWLockNames[id],
- (state & LW_VAL_EXCLUSIVE) != 0,
- state & LW_SHARED_MASK,
- (state & LW_FLAG_HAS_WAITERS) != 0,
- pg_atomic_read_u32(&lock->nwaiters),
- (state & LW_FLAG_RELEASE_OK) != 0)));
+ MyProcPid,
+ where, MainLWLockNames[id],
+ (state & LW_VAL_EXCLUSIVE) != 0,
+ state & LW_SHARED_MASK,
+ (state & LW_FLAG_HAS_WAITERS) != 0,
+ pg_atomic_read_u32(&lock->nwaiters),
+ (state & LW_FLAG_RELEASE_OK) != 0)));
else
ereport(LOG,
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%d: %s(%s %d): excl %u shared %u haswaiters %u waiters %u rOK %d",
- MyProcPid,
- where, T_NAME(lock), id,
- (state & LW_VAL_EXCLUSIVE) != 0,
- state & LW_SHARED_MASK,
- (state & LW_FLAG_HAS_WAITERS) != 0,
- pg_atomic_read_u32(&lock->nwaiters),
- (state & LW_FLAG_RELEASE_OK) != 0)));
+ MyProcPid,
+ where, T_NAME(lock), id,
+ (state & LW_VAL_EXCLUSIVE) != 0,
+ state & LW_SHARED_MASK,
+ (state & LW_FLAG_HAS_WAITERS) != 0,
+ pg_atomic_read_u32(&lock->nwaiters),
+ (state & LW_FLAG_RELEASE_OK) != 0)));
}
}
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%s(%s): %s", where,
- MainLWLockNames[id], msg)));
+ MainLWLockNames[id], msg)));
else
ereport(LOG,
(errhidestmt(true),
errhidecontext(true),
errmsg_internal("%s(%s %d): %s", where,
- T_NAME(lock), id, msg)));
+ T_NAME(lock), id, msg)));
}
}
/*
* It is quite possible that user has registered tranche in one of the
- * backends (e.g. by allocating lwlocks in dynamic shared memory) but
- * not all of them, so we can't assume the tranche is registered here.
+ * backends (e.g. by allocating lwlocks in dynamic shared memory) but not
+ * all of them, so we can't assume the tranche is registered here.
*/
if (eventId >= LWLockTranchesAllocated ||
LWLockTrancheArray[eventId]->name == NULL)
void
InitProcess(void)
{
- PGPROC * volatile * procgloballist;
+ PGPROC *volatile * procgloballist;
/*
* ProcGlobal should be set up already (if we are a backend, we inherit
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
/*
- * Cross-check that the PGPROC is of the type we expect; if this were
- * not the case, it would get returned to the wrong list.
+ * Cross-check that the PGPROC is of the type we expect; if this were not
+ * the case, it would get returned to the wrong list.
*/
Assert(MyProc->procgloballist == procgloballist);
ProcKill(int code, Datum arg)
{
PGPROC *proc;
- PGPROC * volatile * procgloballist;
+ PGPROC *volatile * procgloballist;
Assert(MyProc != NULL);
*
* A compiled dictionary is stored in the IspellDict structure. Compilation of
* a dictionary is divided into the several steps:
- * - NIImportDictionary() - stores each word of a .dict file in the
- * temporary Spell field.
- * - NIImportAffixes() - stores affix rules of an .affix file in the
- * Affix field (not temporary) if an .affix file has the Ispell format.
- * -> NIImportOOAffixes() - stores affix rules if an .affix file has the
- * Hunspell format. The AffixData field is initialized if AF parameter
- * is defined.
- * - NISortDictionary() - builds a prefix tree (Trie) from the words list
- * and stores it in the Dictionary field. The words list is got from the
- * Spell field. The AffixData field is initialized if AF parameter is not
- * defined.
- * - NISortAffixes():
- * - builds a list of compond affixes from the affix list and stores it
- * in the CompoundAffix.
- * - builds prefix trees (Trie) from the affix list for prefixes and suffixes
- * and stores them in Suffix and Prefix fields.
- * The affix list is got from the Affix field.
+ * - NIImportDictionary() - stores each word of a .dict file in the
+ * temporary Spell field.
+ * - NIImportAffixes() - stores affix rules of an .affix file in the
+ * Affix field (not temporary) if an .affix file has the Ispell format.
+ * -> NIImportOOAffixes() - stores affix rules if an .affix file has the
+ * Hunspell format. The AffixData field is initialized if AF parameter
+ * is defined.
+ * - NISortDictionary() - builds a prefix tree (Trie) from the words list
+ * and stores it in the Dictionary field. The words list is taken from the
+ * Spell field. The AffixData field is initialized if AF parameter is not
+ * defined.
+ * - NISortAffixes():
+ * - builds a list of compound affixes from the affix list and stores it
+ * in the CompoundAffix.
+ * - builds prefix trees (Trie) from the affix list for prefixes and suffixes
+ * and stores them in Suffix and Prefix fields.
+ * The affix list is taken from the Affix field.
*
* Memory management
* -----------------
cmpspellaffix(const void *s1, const void *s2)
{
return (strcmp((*(SPELL *const *) s1)->p.flag,
- (*(SPELL *const *) s2)->p.flag));
+ (*(SPELL *const *) s2)->p.flag));
}
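Taken together, the compilation steps listed in the dictionary header comment above amount to the call sequence below. This is an illustrative sketch only: the file paths are hypothetical, and the NIStartBuild()/NIFinishBuild() bracketing reflects how the dispell template drives the build as I recall it, not anything shown in this diff.

/* Sketch: compiling an Ispell/Hunspell dictionary into an IspellDict. */
IspellDict *Conf = (IspellDict *) palloc0(sizeof(IspellDict));

NIStartBuild(Conf);
NIImportDictionary(Conf, "/path/to/sample.dict");	/* fills temporary Spell */
NIImportAffixes(Conf, "/path/to/sample.affix");		/* fills Affix; dispatches
													 * to NIImportOOAffixes()
													 * for Hunspell files */
NISortDictionary(Conf);		/* builds the word Trie in Dictionary */
NISortAffixes(Conf);		/* builds CompoundAffix plus Suffix/Prefix tries */
NIFinishBuild(Conf);		/* releases temporary build data */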
static int
cmpcmdflag(const void *f1, const void *f2)
{
- CompoundAffixFlag *fv1 = (CompoundAffixFlag *) f1,
- *fv2 = (CompoundAffixFlag *) f2;
+ CompoundAffixFlag *fv1 = (CompoundAffixFlag *) f1,
+ *fv2 = (CompoundAffixFlag *) f2;
Assert(fv1->flagMode == fv2->flagMode);
*
* Depending on the flagMode an affix string can have the following format:
* - FM_CHAR: ABCD
- * Here we have 4 flags: A, B, C and D
+ * Here we have 4 flags: A, B, C and D
* - FM_LONG: ABCDE*
- * Here we have 3 flags: AB, CD and E*
+ * Here we have 3 flags: AB, CD and E*
* - FM_NUM: 200,205,50
- * Here we have 3 flags: 200, 205 and 50
+ * Here we have 3 flags: 200, 205 and 50
*
* Conf: current dictionary.
* sflagset: the set of affix flags. Returns a reference to the start of the next
- * affix flag.
+ * affix flag.
* sflag: returns an affix flag from sflagset.
*/
static void
maxstep = (Conf->flagMode == FM_LONG) ? 2 : 1;
- while(**sflagset)
+ while (**sflagset)
{
switch (Conf->flagMode)
{
{
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid character in affix flag \"%s\"",
- *sflagset)));
+ errmsg("invalid character in affix flag \"%s\"",
+ *sflagset)));
}
*sflagset += pg_mblen(*sflagset);
if (Conf->flagMode == FM_LONG && maxstep > 0)
ereport(ERROR,
- (errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid affix flag \"%s\" with long flag value", sbuf)));
+ (errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("invalid affix flag \"%s\" with long flag value", sbuf)));
*sflag = '\0';
}
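The loop above steps through an affix-flag string according to the three formats described earlier. A simplified stand-in for that stepping logic, shown only to make the formats concrete (not the actual function):

/* Sketch: how many bytes the next affix flag occupies in each flag mode. */
static int
flag_step_sketch(int flagmode, const char *s)	/* FM_CHAR, FM_LONG or FM_NUM */
{
	switch (flagmode)
	{
		case FM_CHAR:
			return 1;				/* "ABCD"       -> A, B, C, D   */
		case FM_LONG:
			return 2;				/* "ABCDE*"     -> AB, CD, E*   */
		case FM_NUM:
			return strcspn(s, ",");	/* "200,205,50" -> 200, 205, 50 */
		default:
			return 1;
	}
}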
{
char *s,
*pstr;
+
/* Set of affix flags */
const char *flag;
* meter/GMD
*
* The affix rule with the flag S:
- * SFX S y ies [^aeiou]y
+ * SFX S y ies [^aeiou]y
* is not presented here.
*
* The affix rule with the flag M:
- * SFX M 0 's .
+ * SFX M 0 's .
* is presented here.
*
* Conf: current dictionary.
if (flag == 0)
{
/*
- * The word can be formed only with another word.
- * And in the flag parameter there is not a sign
- * that we search compound words.
+ * The word can be used only as part of a compound word, but
+ * the flag parameter gives no indication that we are
+ * searching for compound words.
*/
if (StopMiddle->compoundflag & FF_COMPOUNDONLY)
return 0;
* Conf: current dictionary.
* flag: affix flag ('\' in the below example).
* flagflags: set of flags from the flagval field for this affix rule. This set
- * is listed after '/' character in the added string (repl).
+ * is listed after '/' character in the added string (repl).
*
- * For example L flag in the hunspell_sample.affix:
- * SFX \ 0 Y/L [^Y]
+ * For example L flag in the hunspell_sample.affix:
+ * SFX \ 0 Y/L [^Y]
*
* mask: condition for search ('[^Y]' in the above example).
* find: stripping characters from beginning (at prefix) or end (at suffix)
- * of the word ('0' in the above example, 0 means that there is not
- * stripping character).
+ * of the word ('0' in the above example; 0 means that there is no
+ * stripping character).
* repl: adding string after stripping ('Y' in the above example).
* type: FF_SUFFIX or FF_PREFIX.
*/
static void
-NIAddAffix(IspellDict *Conf, const char* flag, char flagflags, const char *mask,
- const char *find, const char *repl, int type)
+NIAddAffix(IspellDict *Conf, const char *flag, char flagflags, const char *mask,
+ const char *find, const char *repl, int type)
{
AFFIX *Affix;
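To make the parameter description above concrete, the example rule SFX \ 0 Y/L [^Y] would translate into a call of roughly this shape (a hypothetical invocation: the flagflags value decoded from the 'L' alias is simplified to 0 here):

/* SFX \ 0 Y/L [^Y] : suffix rule, flag '\', strips nothing, appends "Y" */
NIAddAffix(Conf,
		   "\\",		/* flag: the affix flag of this rule */
		   0,			/* flagflags: compound flags from the '/L' part (simplified) */
		   "[^Y]",		/* mask: condition the word must match */
		   "",			/* find: '0' in the file means nothing is stripped */
		   "Y",			/* repl: string appended after stripping */
		   FF_SUFFIX);	/* type: this is a suffix rule */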
{
if (Conf->flagMode == FM_NUM)
{
- char *next;
- int i;
+ char *next;
+ int i;
i = strtol(s, &next, 10);
if (s == next || errno == ERANGE)
static void
addCompoundAffixFlagValue(IspellDict *Conf, char *s, uint32 val)
{
- CompoundAffixFlag *newValue;
- char sbuf[BUFSIZ];
- char *sflag;
- int clen;
+ CompoundAffixFlag *newValue;
+ char sbuf[BUFSIZ];
+ char *sflag;
+ int clen;
while (*s && t_isspace(s))
s += pg_mblen(s);
Conf->mCompoundAffixFlag *= 2;
Conf->CompoundAffixFlags = (CompoundAffixFlag *)
repalloc((void *) Conf->CompoundAffixFlags,
- Conf->mCompoundAffixFlag * sizeof(CompoundAffixFlag));
+ Conf->mCompoundAffixFlag * sizeof(CompoundAffixFlag));
}
else
{
static int
getCompoundAffixFlagValue(IspellDict *Conf, char *s)
{
- uint32 flag = 0;
+ uint32 flag = 0;
CompoundAffixFlag *found,
- key;
- char sflag[BUFSIZ];
- char *flagcur;
+ key;
+ char sflag[BUFSIZ];
+ char *flagcur;
if (Conf->nCompoundAffixFlag == 0)
return 0;
{
if (Conf->useFlagAliases && *s != '\0')
{
- int curaffix;
- char *end;
+ int curaffix;
+ char *end;
curaffix = strtol(s, &end, 10);
if (s == end || errno == ERANGE)
errmsg("invalid affix alias \"%s\"", s)));
if (curaffix > 0 && curaffix <= Conf->nAffixData)
+
/*
- * Do not subtract 1 from curaffix
- * because empty string was added in NIImportOOAffixes
+ * Do not subtract 1 from curaffix because empty string was added
+ * in NIImportOOAffixes
*/
return Conf->AffixData[curaffix];
else
Conf->flagMode = FM_NUM;
else if (STRNCMP(s, "default") != 0)
ereport(ERROR,
- (errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("Ispell dictionary supports only default, "
- "long and num flag value")));
+ (errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("Ispell dictionary supports only default, "
+ "long and num flag value")));
}
}
naffix = atoi(sflag);
if (naffix == 0)
ereport(ERROR,
- (errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("invalid number of flag vector aliases")));
+ (errcode(ERRCODE_CONFIG_FILE_ERROR),
+ errmsg("invalid number of flag vector aliases")));
/* Also reserve place for empty flag set */
naffix++;
static uint32
makeCompoundFlags(IspellDict *Conf, int affix)
{
- char *str = Conf->AffixData[affix];
+ char *str = Conf->AffixData[affix];
+
return (getCompoundAffixFlagValue(Conf, str) & FF_COMPOUNDFLAGMASK);
}
/* compress affixes */
/*
- * If we use flag aliases then we need to use Conf->AffixData filled
- * in the NIImportOOAffixes().
+ * If we use flag aliases then we need to use Conf->AffixData filled in
+ * the NIImportOOAffixes().
*/
if (Conf->useFlagAliases)
{
for (i = 0; i < Conf->nspell; i++)
{
- char *end;
+ char *end;
if (*Conf->Spell[i]->p.flag != '\0')
{
curaffix++;
Assert(curaffix < naffix);
Conf->AffixData[curaffix] = cpstrdup(Conf,
- Conf->Spell[i]->p.flag);
+ Conf->Spell[i]->p.flag);
}
Conf->Spell[i]->p.d.affix = curaffix;
if (CheckAffix(newword, swrdlen, prefix->aff[j], flag, pnewword, &baselen))
{
/* prefix success */
- char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
- VoidString : prefix->aff[j]->flag;
+ char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
+ VoidString : prefix->aff[j]->flag;
if (FindWord(Conf, pnewword, ff, flag))
cur += addToResult(forms, cur, pnewword);
typedef struct MorphOpaque
{
- Oid cfg_id;
- int qoperator; /* query operator */
+ Oid cfg_id;
+ int qoperator; /* query operator */
} MorphOpaque;
static void
pushval_morph(Datum opaque, TSQueryParserState state, char *strval, int lenval, int16 weight, bool prefix)
{
- int32 count = 0;
- ParsedText prs;
- uint32 variant,
- pos = 0,
- cntvar = 0,
- cntpos = 0,
- cnt = 0;
- MorphOpaque *data = (MorphOpaque *) DatumGetPointer(opaque);
+ int32 count = 0;
+ ParsedText prs;
+ uint32 variant,
+ pos = 0,
+ cntvar = 0,
+ cntpos = 0,
+ cnt = 0;
+ MorphOpaque *data = (MorphOpaque *) DatumGetPointer(opaque);
prs.lenwords = 4;
prs.curwords = 0;
while (count < prs.curwords)
{
/*
- * Were any stop words removed? If so, fill empty positions
- * with placeholders linked by an appropriate operator.
+ * Were any stop words removed? If so, fill empty positions with
+ * placeholders linked by an appropriate operator.
*/
if (pos > 0 && pos + 1 < prs.words[count].pos.pos)
{
prs.words[count].word,
prs.words[count].len,
weight,
- ((prs.words[count].flags & TSL_PREFIX) || prefix));
+ ((prs.words[count].flags & TSL_PREFIX) || prefix));
pfree(prs.words[count].word);
if (cnt)
pushOperator(state, OP_AND, 0);
Datum
to_tsquery_byid(PG_FUNCTION_ARGS)
{
- text *in = PG_GETARG_TEXT_P(1);
- TSQuery query;
- MorphOpaque data;
+ text *in = PG_GETARG_TEXT_P(1);
+ TSQuery query;
+ MorphOpaque data;
data.cfg_id = PG_GETARG_OID(0);
data.qoperator = OP_AND;
Datum
plainto_tsquery_byid(PG_FUNCTION_ARGS)
{
- text *in = PG_GETARG_TEXT_P(1);
- TSQuery query;
- MorphOpaque data;
+ text *in = PG_GETARG_TEXT_P(1);
+ TSQuery query;
+ MorphOpaque data;
data.cfg_id = PG_GETARG_OID(0);
data.qoperator = OP_AND;
Datum
phraseto_tsquery_byid(PG_FUNCTION_ARGS)
{
- text *in = PG_GETARG_TEXT_P(1);
- TSQuery query;
- MorphOpaque data;
+ text *in = PG_GETARG_TEXT_P(1);
+ TSQuery query;
+ MorphOpaque data;
data.cfg_id = PG_GETARG_OID(0);
data.qoperator = OP_PHRASE;
checkcondition_HL(void *opaque, QueryOperand *val, ExecPhraseData *data)
{
int i;
- hlCheck *checkval = (hlCheck *) opaque;
+ hlCheck *checkval = (hlCheck *) opaque;
for (i = 0; i < checkval->len; i++)
{
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role \"%s\" is reserved",
- role->rolename),
+ role->rolename),
errdetail("%s", detail_msg)));
else
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role \"%s\" is reserved",
- role->rolename)));
+ role->rolename)));
}
}
Size
datumEstimateSpace(Datum value, bool isnull, bool typByVal, int typLen)
{
- Size sz = sizeof(int);
+ Size sz = sizeof(int);
if (!isnull)
{
else if (VARATT_IS_EXTERNAL_EXPANDED(value))
{
ExpandedObjectHeader *eoh = DatumGetEOHP(value);
+
sz += EOH_get_flat_size(eoh);
}
else
char **start_address)
{
ExpandedObjectHeader *eoh = NULL;
- int header;
+ int header;
/* Write header word. */
if (isnull)
Datum
datumRestore(char **start_address, bool *isnull)
{
- int header;
- void *d;
+ int header;
+ void *d;
/* Read header word. */
memcpy(&header, *start_address, sizeof(int));
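For orientation, the header word read above encodes three cases, and the remainder of datumRestore() (not included in this diff) branches on it roughly as follows. The sentinel values (-2 for a null, -1 for a by-value datum) are my reading of the matching serializer and should be treated as a sketch:

/* Sketch: decode the header word written by datumSerialize(). */
*start_address += sizeof(int);

if (header == -2)
{
	*isnull = true;				/* null datum: nothing else was written */
	return (Datum) 0;
}

*isnull = false;

if (header == -1)
{
	Datum		val;			/* by-value datum: the Datum itself follows */

	memcpy(&val, *start_address, sizeof(Datum));
	*start_address += sizeof(Datum);
	return val;
}

/* by-reference datum: header is the byte length of the flattened copy */
d = palloc(header);
memcpy(d, *start_address, header);
*start_address += header;
return PointerGetDatum(d);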
{
Numeric x;
Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(10)));
+ Int32GetDatum(10)));
Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(-Num.multi)));
+ Int32GetDatum(-Num.multi)));
x = DatumGetNumeric(DirectFunctionCall2(numeric_power,
NumericGetDatum(a),
{
double low;
double high;
-} Range;
+} Range;
typedef struct
{
Range left;
Range right;
-} RangeBox;
+} RangeBox;
typedef struct
{
RangeBox range_box_x;
RangeBox range_box_y;
-} RectBox;
+} RectBox;
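The three structures above encode a 2-D box as a point in 4-D space: one range per axis, and a rectangle of ranges while descending the tree. A rough sketch of the mapping and of the quadrant numbering used below (simplified; the real code compares with the fuzzy FPgt/FPlt macros):

/* Sketch: map a BOX to a RangeBox and derive its 4-bit quadrant number. */
static RangeBox *
box_to_rangebox_sketch(BOX *box)
{
	RangeBox   *range_box = (RangeBox *) palloc(sizeof(RangeBox));

	range_box->left.low = box->low.x;	/* x axis becomes the "left" range */
	range_box->left.high = box->high.x;
	range_box->right.low = box->low.y;	/* y axis becomes the "right" range */
	range_box->right.high = box->high.y;

	return range_box;
}

static uint8
quadrant_sketch(BOX *centroid, BOX *inBox)
{
	uint8		quadrant = 0;

	if (inBox->low.x > centroid->low.x)
		quadrant |= 0x8;
	if (inBox->high.x > centroid->high.x)
		quadrant |= 0x4;
	if (inBox->low.y > centroid->low.y)
		quadrant |= 0x2;
	if (inBox->high.y > centroid->high.y)
		quadrant |= 0x1;

	return quadrant;			/* 0..15, one bit per 4-D coordinate */
}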
/*
* Calculate the quadrant
static RectBox *
initRectBox(void)
{
- RectBox *rect_box = (RectBox *) palloc(sizeof(RectBox));
+ RectBox *rect_box = (RectBox *) palloc(sizeof(RectBox));
double infinity = get_float8_infinity();
rect_box->range_box_x.left.low = -infinity;
static RectBox *
nextRectBox(RectBox *rect_box, RangeBox *centroid, uint8 quadrant)
{
- RectBox *next_rect_box = (RectBox *) palloc(sizeof(RectBox));
+ RectBox *next_rect_box = (RectBox *) palloc(sizeof(RectBox));
memcpy(next_rect_box, rect_box, sizeof(RectBox));
overlap2D(RangeBox *range_box, Range *query)
{
return FPge(range_box->right.high, query->low) &&
- FPle(range_box->left.low, query->high);
+ FPle(range_box->left.low, query->high);
}
/* Can any rectangle from rect_box overlap with this argument? */
overlap4D(RectBox *rect_box, RangeBox *query)
{
return overlap2D(&rect_box->range_box_x, &query->left) &&
- overlap2D(&rect_box->range_box_y, &query->right);
+ overlap2D(&rect_box->range_box_y, &query->right);
}
/* Can any range from range_box contain this argument? */
contain2D(RangeBox *range_box, Range *query)
{
return FPge(range_box->right.high, query->high) &&
- FPle(range_box->left.low, query->low);
+ FPle(range_box->left.low, query->low);
}
/* Can any rectangle from rect_box contain this argument? */
static bool
-contain4D(RectBox *rect_box, RangeBox * query)
+contain4D(RectBox *rect_box, RangeBox *query)
{
return contain2D(&rect_box->range_box_x, &query->left) &&
- contain2D(&rect_box->range_box_y, &query->right);
+ contain2D(&rect_box->range_box_y, &query->right);
}
/* Can any range from range_box be contained by this argument? */
contained2D(RangeBox *range_box, Range *query)
{
return FPle(range_box->left.low, query->high) &&
- FPge(range_box->left.high, query->low) &&
- FPle(range_box->right.low, query->high) &&
- FPge(range_box->right.high, query->low);
+ FPge(range_box->left.high, query->low) &&
+ FPle(range_box->right.low, query->high) &&
+ FPge(range_box->right.high, query->low);
}
/* Can any rectangle from rect_box be contained by this argument? */
contained4D(RectBox *rect_box, RangeBox *query)
{
return contained2D(&rect_box->range_box_x, &query->left) &&
- contained2D(&rect_box->range_box_y, &query->right);
+ contained2D(&rect_box->range_box_y, &query->right);
}
/* Can any range from range_box to be lower than this argument? */
lower2D(RangeBox *range_box, Range *query)
{
return FPlt(range_box->left.low, query->low) &&
- FPlt(range_box->right.low, query->low);
+ FPlt(range_box->right.low, query->low);
}
/* Can any range from range_box to be higher than this argument? */
higher2D(RangeBox *range_box, Range *query)
{
return FPgt(range_box->left.high, query->high) &&
- FPgt(range_box->right.high, query->high);
+ FPgt(range_box->right.high, query->high);
}
/* Can any rectangle from rect_box be left of this argument? */
Datum
spg_box_quad_picksplit(PG_FUNCTION_ARGS)
{
- spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0);
- spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1);
+ spgPickSplitIn *in = (spgPickSplitIn *) PG_GETARG_POINTER(0);
+ spgPickSplitOut *out = (spgPickSplitOut *) PG_GETARG_POINTER(1);
BOX *centroid;
int median,
i;
/* Calculate median of all 4D coordinates */
for (i = 0; i < in->nTuples; i++)
{
- BOX *box = DatumGetBoxP(in->datums[i]);
+ BOX *box = DatumGetBoxP(in->datums[i]);
lowXs[i] = box->low.x;
highXs[i] = box->high.x;
out->leafTupleDatums = palloc(sizeof(Datum) * in->nTuples);
/*
- * Assign ranges to corresponding nodes according to quadrants
- * relative to the "centroid" range
+ * Assign ranges to corresponding nodes according to quadrants relative to
+ * the "centroid" range
*/
for (i = 0; i < in->nTuples; i++)
{
- BOX *box = DatumGetBoxP(in->datums[i]);
- uint8 quadrant = getQuadrant(centroid, box);
+ BOX *box = DatumGetBoxP(in->datums[i]);
+ uint8 quadrant = getQuadrant(centroid, box);
out->leafTupleDatums[i] = BoxPGetDatum(box);
out->mapTuplesToNodes[i] = quadrant;
{
spgInnerConsistentIn *in = (spgInnerConsistentIn *) PG_GETARG_POINTER(0);
spgInnerConsistentOut *out = (spgInnerConsistentOut *) PG_GETARG_POINTER(1);
- int i;
- MemoryContext old_ctx;
- RectBox *rect_box;
- uint8 quadrant;
- RangeBox *centroid,
- **queries;
+ int i;
+ MemoryContext old_ctx;
+ RectBox *rect_box;
+ uint8 quadrant;
+ RangeBox *centroid,
+ **queries;
if (in->allTheSame)
{
}
/*
- * We are saving the traversal value or initialize it an unbounded
- * one, if we have just begun to walk the tree.
+ * We either reuse the saved traversal value or initialize it to an
+ * unbounded one if we have just begun to walk the tree.
*/
if (in->traversalValue)
rect_box = in->traversalValue;
rect_box = initRectBox();
/*
- * We are casting the prefix and queries to RangeBoxes for ease of
- * the following operations.
+ * We are casting the prefix and queries to RangeBoxes for ease of the
+ * following operations.
*/
centroid = getRangeBox(DatumGetBoxP(in->prefixDatum));
queries = (RangeBox **) palloc(in->nkeys * sizeof(RangeBox *));
out->traversalValues = (void **) palloc(sizeof(void *) * in->nNodes);
/*
- * We switch memory context, because we want to allocate memory for
- * new traversal values (next_rect_box) and pass these pieces of
- * memory to further call of this function.
+ * We switch memory context, because we want to allocate memory for new
+ * traversal values (next_rect_box) and pass these pieces of memory to
+ * further calls of this function.
*/
old_ctx = MemoryContextSwitchTo(in->traversalMemoryContext);
for (quadrant = 0; quadrant < in->nNodes; quadrant++)
{
- RectBox *next_rect_box = nextRectBox(rect_box, centroid, quadrant);
+ RectBox *next_rect_box = nextRectBox(rect_box, centroid, quadrant);
bool flag = true;
for (i = 0; i < in->nkeys; i++)
else
{
/*
- * If this node is not selected, we don't need to keep
- * the next traversal value in the memory context.
+ * If this node is not selected, we don't need to keep the next
+ * traversal value in the memory context.
*/
pfree(next_rect_box);
}
case jbvBool:
if (aScalar->val.boolean == bScalar->val.boolean)
return 0;
- else if (aScalar->val.boolean >bScalar->val.boolean)
+ else if (aScalar->val.boolean > bScalar->val.boolean)
return 1;
else
return -1;
/* Handle negative subscript */
if (element < 0)
{
- uint32 nelements = JB_ROOT_COUNT(jb);
+ uint32 nelements = JB_ROOT_COUNT(jb);
if (-element > nelements)
PG_RETURN_NULL();
/* Handle negative subscript */
if (element < 0)
{
- uint32 nelements = JB_ROOT_COUNT(jb);
+ uint32 nelements = JB_ROOT_COUNT(jb);
if (-element > nelements)
PG_RETURN_NULL();
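Both hunks above apply the same convention: a negative subscript counts from the end of the array. A minimal sketch of the normalization that follows the visible check (the else branch is elided by the diff context, so this is a reconstruction):

/* Sketch: turn a negative subscript into a 0-based element index. */
if (element < 0)
{
	uint32		nelements = JB_ROOT_COUNT(jb);

	if (-element > nelements)
		PG_RETURN_NULL();		/* farther back than the array is long */
	else
		element += nelements;	/* e.g. -1 addresses element nelements - 1 */
}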
_state->path_indexes[lex_level] != INT_MIN)
{
/* Negative subscript -- convert to positive-wise subscript */
- int nelements = json_count_array_elements(_state->lex);
+ int nelements = json_count_array_elements(_state->lex);
if (-_state->path_indexes[lex_level] <= nelements)
_state->path_indexes[lex_level] += nelements;
{
/*
* Special case: we should match the entire array. We only need this
- * at the outermost level because at nested levels the match will
- * have been started by the outer field or array element callback.
+ * at the outermost level because at nested levels the match will have
+ * been started by the outer field or array element callback.
*/
_state->result_start = _state->lex->token_start;
}
*it2;
/*
- * If one of the jsonb is empty, just return the other if it's not
- * scalar and both are of the same kind. If it's a scalar or they are
- * of different kinds we need to perform the concatenation even if one is
+ * If one of the jsonb is empty, just return the other if it's not scalar
+ * and both are of the same kind. If it's a scalar or they are of
+ * different kinds we need to perform the concatenation even if one is
* empty.
*/
if (JB_ROOT_IS_OBJECT(jb1) == JB_ROOT_IS_OBJECT(jb2))
it = JsonbIteratorInit(&in->root);
r = JsonbIteratorNext(&it, &v, false);
- Assert (r == WJB_BEGIN_ARRAY);
+ Assert(r == WJB_BEGIN_ARRAY);
n = v.val.array.nElems;
if (idx < 0)
if (level == path_len - 1)
{
/*
- * called from jsonb_insert(), it forbids redefining
- * an existsing value
+ * called from jsonb_insert(), it forbids redefining an
+ * existing value
*/
if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_INSERT_AFTER))
ereport(ERROR,
errhint("Try using the function jsonb_set "
"to replace key value.")));
- r = JsonbIteratorNext(it, &v, true); /* skip value */
+ r = JsonbIteratorNext(it, &v, true); /* skip value */
if (!(op_type & JB_PATH_DELETE))
{
(void) pushJsonbValue(st, WJB_KEY, &k);
/*
* We should keep current value only in case of
- * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER
- * because otherwise it should be deleted or replaced
+ * JB_PATH_INSERT_BEFORE or JB_PATH_INSERT_AFTER because
+ * otherwise it should be deleted or replaced
*/
if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_INSERT_BEFORE))
(void) pushJsonbValue(st, r, &v);
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("string is not a valid identifier: \"%s\"",
text_to_cstring(qualname)),
- errdetail("No valid identifier before \".\".")));
+ errdetail("No valid identifier before \".\".")));
else if (after_dot)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("string is not a valid identifier: \"%s\"",
text_to_cstring(qualname)),
- errdetail("No valid identifier after \".\".")));
+ errdetail("No valid identifier after \".\".")));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
Datum
numeric_combine(PG_FUNCTION_ARGS)
{
- NumericAggState *state1;
- NumericAggState *state2;
- MemoryContext agg_context;
- MemoryContext old_context;
+ NumericAggState *state1;
+ NumericAggState *state2;
+ MemoryContext agg_context;
+ MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
state1->NaNcount += state2->NaNcount;
/*
- * These are currently only needed for moving aggregates, but let's
- * do the right thing anyway...
+ * These are currently only needed for moving aggregates, but let's do
+ * the right thing anyway...
*/
if (state2->maxScale > state1->maxScale)
{
Datum
numeric_avg_combine(PG_FUNCTION_ARGS)
{
- NumericAggState *state1;
- NumericAggState *state2;
- MemoryContext agg_context;
- MemoryContext old_context;
+ NumericAggState *state1;
+ NumericAggState *state2;
+ MemoryContext agg_context;
+ MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
state1->NaNcount += state2->NaNcount;
/*
- * These are currently only needed for moving aggregates, but let's
- * do the right thing anyway...
+ * These are currently only needed for moving aggregates, but let's do
+ * the right thing anyway...
*/
if (state2->maxScale > state1->maxScale)
{
Datum
numeric_avg_serialize(PG_FUNCTION_ARGS)
{
- NumericAggState *state;
- StringInfoData buf;
- Datum temp;
- bytea *sumX;
- bytea *result;
+ NumericAggState *state;
+ StringInfoData buf;
+ Datum temp;
+ bytea *sumX;
+ bytea *result;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
pq_sendbytes(&buf, VARDATA(sumX), VARSIZE(sumX) - VARHDRSZ);
/* maxScale */
- pq_sendint(&buf, state->maxScale, 4);
+ pq_sendint(&buf, state->maxScale, 4);
/* maxScaleCount */
pq_sendint64(&buf, state->maxScaleCount);
/*
* numeric_avg_deserialize
- * Deserialize bytea into NumericAggState for numeric aggregates that
+ * Deserialize bytea into NumericAggState for numeric aggregates that
* don't require sumX2. Deserializes bytea into NumericAggState using the
* standard pq API.
*
Datum
numeric_avg_deserialize(PG_FUNCTION_ARGS)
{
- bytea *sstate = PG_GETARG_BYTEA_P(0);
- NumericAggState *result;
- Datum temp;
- StringInfoData buf;
+ bytea *sstate = PG_GETARG_BYTEA_P(0);
+ NumericAggState *result;
+ Datum temp;
+ StringInfoData buf;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
Datum
numeric_serialize(PG_FUNCTION_ARGS)
{
- NumericAggState *state;
- StringInfoData buf;
- Datum temp;
- bytea *sumX;
- bytea *sumX2;
- bytea *result;
+ NumericAggState *state;
+ StringInfoData buf;
+ Datum temp;
+ bytea *sumX;
+ bytea *sumX2;
+ bytea *result;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
pq_sendbytes(&buf, VARDATA(sumX2), VARSIZE(sumX2) - VARHDRSZ);
/* maxScale */
- pq_sendint(&buf, state->maxScale, 4);
+ pq_sendint(&buf, state->maxScale, 4);
/* maxScaleCount */
pq_sendint64(&buf, state->maxScaleCount);
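All of the *_serialize functions touched here follow one pattern, of which the snippets above show only fragments: convert the accumulated sums to the numeric wire format, then append the plain counters with the pq_send* routines. A compressed sketch of that flow (field order follows the visible snippets; the exact set of fields varies per aggregate, so treat this as illustrative):

/* Sketch: serialize an aggregate transition state into a bytea. */
pq_begintypsend(&buf);

pq_sendint64(&buf, state->N);	/* number of values accumulated */

/* sumX in numeric wire format */
temp = DirectFunctionCall1(numeric_send,
						   NumericGetDatum(make_result(&state->sumX)));
sumX = DatumGetByteaP(temp);
pq_sendbytes(&buf, VARDATA(sumX), VARSIZE(sumX) - VARHDRSZ);

/* plain counters */
pq_sendint(&buf, state->maxScale, 4);
pq_sendint64(&buf, state->maxScaleCount);
pq_sendint64(&buf, state->NaNcount);

result = pq_endtypsend(&buf);
PG_RETURN_BYTEA_P(result);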
Datum
numeric_deserialize(PG_FUNCTION_ARGS)
{
- bytea *sstate = PG_GETARG_BYTEA_P(0);
- NumericAggState *result;
- Datum temp;
- StringInfoData buf;
+ bytea *sstate = PG_GETARG_BYTEA_P(0);
+ NumericAggState *result;
+ Datum temp;
+ StringInfoData buf;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
{
PolyNumAggState *state1;
PolyNumAggState *state2;
- MemoryContext agg_context;
- MemoryContext old_context;
+ MemoryContext agg_context;
+ MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
Datum
numeric_poly_serialize(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
- StringInfoData buf;
- bytea *sumX;
- bytea *sumX2;
- bytea *result;
+ PolyNumAggState *state;
+ StringInfoData buf;
+ bytea *sumX;
+ bytea *sumX2;
+ bytea *result;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
free_var(&num);
#else
temp = DirectFunctionCall1(numeric_send,
- NumericGetDatum(make_result(&state->sumX)));
+ NumericGetDatum(make_result(&state->sumX)));
sumX = DatumGetByteaP(temp);
temp = DirectFunctionCall1(numeric_send,
- NumericGetDatum(make_result(&state->sumX2)));
+ NumericGetDatum(make_result(&state->sumX2)));
sumX2 = DatumGetByteaP(temp);
#endif
}
Datum
numeric_poly_deserialize(PG_FUNCTION_ARGS)
{
- bytea *sstate = PG_GETARG_BYTEA_P(0);
- PolyNumAggState *result;
- Datum sumX;
- Datum sumX2;
- StringInfoData buf;
+ bytea *sstate = PG_GETARG_BYTEA_P(0);
+ PolyNumAggState *result;
+ Datum sumX;
+ Datum sumX2;
+ StringInfoData buf;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
/* sumX2 */
sumX2 = DirectFunctionCall3(numeric_recv,
- PointerGetDatum(&buf),
- InvalidOid,
- -1);
+ PointerGetDatum(&buf),
+ InvalidOid,
+ -1);
#ifdef HAVE_INT128
{
- NumericVar num;
+ NumericVar num;
init_var(&num);
set_var_from_num(DatumGetNumeric(sumX), &num);
Datum
int8_avg_combine(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state1;
- PolyNumAggState *state2;
- MemoryContext agg_context;
- MemoryContext old_context;
+ PolyNumAggState *state1;
+ PolyNumAggState *state2;
+ MemoryContext agg_context;
+ MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
Datum
int8_avg_serialize(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
- StringInfoData buf;
- bytea *sumX;
- bytea *result;
+ PolyNumAggState *state;
+ StringInfoData buf;
+ bytea *sumX;
+ bytea *result;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
sumX = DatumGetByteaP(temp);
#else
temp = DirectFunctionCall1(numeric_send,
- NumericGetDatum(make_result(&state->sumX)));
+ NumericGetDatum(make_result(&state->sumX)));
sumX = DatumGetByteaP(temp);
#endif
}
Datum
int8_avg_deserialize(PG_FUNCTION_ARGS)
{
- bytea *sstate = PG_GETARG_BYTEA_P(0);
- PolyNumAggState *result;
- StringInfoData buf;
- Datum temp;
+ bytea *sstate = PG_GETARG_BYTEA_P(0);
+ PolyNumAggState *result;
+ StringInfoData buf;
+ Datum temp;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
#ifdef HAVE_INT128
{
- NumericVar num;
+ NumericVar num;
init_var(&num);
set_var_from_num(DatumGetNumeric(temp), &num);
#include "utils/inet.h"
#include "utils/timestamp.h"
-#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var))))
+#define UINT32_ACCESS_ONCE(var) ((uint32)(*((volatile uint32 *)&(var))))
/* bogus ... these externs should be in a header file */
extern Datum pg_stat_get_numscans(PG_FUNCTION_ARGS);
int num_backends = pgstat_fetch_stat_numbackends();
int curr_backend;
char *cmd = text_to_cstring(PG_GETARG_TEXT_PP(0));
- ProgressCommandType cmdtype;
+ ProgressCommandType cmdtype;
TupleDesc tupdesc;
Tuplestorestate *tupstore;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
/* 1-based index */
for (curr_backend = 1; curr_backend <= num_backends; curr_backend++)
{
- LocalPgBackendStatus *local_beentry;
- PgBackendStatus *beentry;
+ LocalPgBackendStatus *local_beentry;
+ PgBackendStatus *beentry;
Datum values[PG_STAT_GET_PROGRESS_COLS];
bool nulls[PG_STAT_GET_PROGRESS_COLS];
int i;
if (has_privs_of_role(GetUserId(), beentry->st_userid))
{
values[2] = ObjectIdGetDatum(beentry->st_progress_command_target);
- for(i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
- values[i+3] = Int64GetDatum(beentry->st_progress_param[i]);
+ for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
+ values[i + 3] = Int64GetDatum(beentry->st_progress_param[i]);
}
else
{
nulls[2] = true;
for (i = 0; i < PGSTAT_NUM_PROGRESS_PARAM; i++)
- nulls[i+3] = true;
+ nulls[i + 3] = true;
}
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
proc = BackendPidGetProc(beentry->st_procpid);
if (proc != NULL)
{
- uint32 raw_wait_event;
+ uint32 raw_wait_event;
raw_wait_event = UINT32_ACCESS_ONCE(proc->wait_event_info);
wait_event_type = pgstat_get_wait_event_type(raw_wait_event);
/* Save previous prefix if needed */
if (needPrevious)
{
- Datum previousCentroid;
+ Datum previousCentroid;
- /* We know, that in->prefixDatum in this place is varlena,
+ /*
+ * We know that in->prefixDatum here is a varlena
* because it is a range
*/
previousCentroid = datumCopy(in->prefixDatum, false, -1);
- out->traversalValues[out->nNodes] = (void *)previousCentroid;
+ out->traversalValues[out->nNodes] = (void *) previousCentroid;
}
out->nodeNumbers[out->nNodes] = i - 1;
out->nNodes++;
int j;
/*
- * if any val requiring a weight is used or caller
- * needs position information then set recheck flag
+ * if any val requiring a weight is used or caller needs position
+ * information then set recheck flag
*/
if (val->weight != 0 || data != NULL)
*gcv->need_recheck = true;
return !result;
case OP_PHRASE:
+
/*
- * GIN doesn't contain any information about positions,
- * treat OP_PHRASE as OP_AND with recheck requirement
+ * GIN doesn't contain any information about positions, treat
+ * OP_PHRASE as OP_AND with recheck requirement
*/
*gcv->need_recheck = true;
/* FALL THRU */
#include "utils/pg_crc.h"
/* FTS operator priorities, see ts_type.h */
-const int tsearch_op_priority[OP_COUNT] =
+const int tsearch_op_priority[OP_COUNT] =
{
- 3, /* OP_NOT */
- 2, /* OP_AND */
- 1, /* OP_OR */
- 4 /* OP_PHRASE */
+ 3, /* OP_NOT */
+ 2, /* OP_AND */
+ 1, /* OP_OR */
+ 4 /* OP_PHRASE */
};
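The priority table above drives parenthesization when a tsquery is printed back out: a child operator of lower priority than its parent needs parentheses, and the non-commutative phrase operator needs them even at equal priority (the infix() hunks further down show the real test). A hypothetical helper, just to spell the rule out:

/* Sketch: does a child expression need parentheses under this parent? */
static bool
needs_parens_sketch(int8 parent_op, int8 child_op)
{
	int			parentPriority = OP_PRIORITY(parent_op);
	int			childPriority = OP_PRIORITY(child_op);

	if (childPriority < parentPriority)
		return true;
	/* phrase is not commutative, so equal priority still needs parens */
	if (parent_op == OP_PHRASE && childPriority == parentPriority)
		return true;
	return false;
}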
struct TSQueryParserStateData
PHRASE_CLOSE,
PHRASE_ERR,
PHRASE_FINISH
- } state = PHRASE_OPEN;
+ } state = PHRASE_OPEN;
- char *ptr = buf;
- char *endptr;
- long l = 1;
+ char *ptr = buf;
+ char *endptr;
+ long l = 1;
while (*ptr)
{
- switch(state)
+ switch (state)
{
case PHRASE_OPEN:
Assert(t_iseq(ptr, '<'));
}
}
- err:
+err:
*distance = -1;
return buf;
}
PushFunction pushval,
Datum opaque)
{
- int8 operator = 0;
- ts_tokentype type;
- int lenval = 0;
- char *strval = NULL;
+ int8 operator = 0;
+ ts_tokentype type;
+ int lenval = 0;
+ char *strval = NULL;
struct
{
- int8 op;
- int16 distance;
- } opstack[STACKDEPTH];
- int lenstack = 0;
- int16 weight = 0;
- bool prefix;
+ int8 op;
+ int16 distance;
+ } opstack[STACKDEPTH];
+ int lenstack = 0;
+ int16 weight = 0;
+ bool prefix;
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
}
else if (ptr[*pos].type == QI_VALSTOP)
{
- *needcleanup = true; /* we'll have to remove stop words */
+ *needcleanup = true; /* we'll have to remove stop words */
(*pos)++;
}
else
if (ptr[*pos].qoperator.oper == OP_NOT)
{
- ptr[*pos].qoperator.left = 1; /* fixed offset */
+ ptr[*pos].qoperator.left = 1; /* fixed offset */
(*pos)++;
/* process the only argument */
}
else
{
- QueryOperator *curitem = &ptr[*pos].qoperator;
- int tmp = *pos; /* save current position */
+ QueryOperator *curitem = &ptr[*pos].qoperator;
+ int tmp = *pos; /* save current position */
Assert(curitem->oper == OP_AND ||
curitem->oper == OP_OR ||
curitem->oper == OP_PHRASE);
if (curitem->oper == OP_PHRASE)
- *needcleanup = true; /* push OP_PHRASE down later */
+ *needcleanup = true; /* push OP_PHRASE down later */
(*pos)++;
i = 0;
foreach(cell, state.polstr)
{
- QueryItem *item = (QueryItem *) lfirst(cell);
+ QueryItem *item = (QueryItem *) lfirst(cell);
switch (item->type)
{
findoprnd(ptr, query->size, &needcleanup);
/*
- * QI_VALSTOP nodes should be cleaned and
- * and OP_PHRASE should be pushed down
+ * QI_VALSTOP nodes should be cleaned and OP_PHRASE should be pushed
+ * down
*/
if (needcleanup)
return cleanup_fakeval_and_phrase(query);
}
else if (in->curpol->qoperator.oper == OP_NOT)
{
- int priority = PRINT_PRIORITY(in->curpol);
+ int priority = PRINT_PRIORITY(in->curpol);
if (priority < parentPriority)
{
in->curpol++;
if (priority < parentPriority ||
(op == OP_PHRASE &&
- (priority == parentPriority || /* phrases are not commutative! */
- parentPriority == OP_PRIORITY(OP_AND))))
+ (priority == parentPriority || /* phrases are not
+ * commutative! */
+ parentPriority == OP_PRIORITY(OP_AND))))
{
needParenthesis = true;
RESIZEBUF(in, 2);
infix(in, priority);
/* print operator & right operand */
- RESIZEBUF(in, 3 + (2 + 10 /* distance */) + (nrm.cur - nrm.buf));
+ RESIZEBUF(in, 3 + (2 + 10 /* distance */ ) + (nrm.cur - nrm.buf));
switch (op)
{
case OP_OR:
nrm.cur = nrm.buf = (char *) palloc(sizeof(char) * nrm.buflen);
*(nrm.cur) = '\0';
nrm.op = GETOPERAND(query);
- infix(&nrm, -1 /* lowest priority */);
+ infix(&nrm, -1 /* lowest priority */ );
PG_FREE_IF_COPY(query, 0);
PG_RETURN_CSTRING(nrm.buf);
Datum
tsqueryrecv(PG_FUNCTION_ARGS)
{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- TSQuery query;
- int i,
- len;
- QueryItem *item;
- int datalen;
- char *ptr;
- uint32 size;
- const char **operands;
- bool needcleanup;
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ TSQuery query;
+ int i,
+ len;
+ QueryItem *item;
+ int datalen;
+ char *ptr;
+ uint32 size;
+ const char **operands;
+ bool needcleanup;
size = pq_getmsgint(buf, sizeof(uint32));
if (size > (MaxAllocSize / sizeof(QueryItem)))
static NODE *
clean_fakeval_intree(NODE *node, char *result, int *adddistance)
{
- char lresult = V_UNKNOWN,
- rresult = V_UNKNOWN;
+ char lresult = V_UNKNOWN,
+ rresult = V_UNKNOWN;
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
}
else
{
- NODE *res = node;
- int ndistance, ldistance = 0, rdistance = 0;
+ NODE *res = node;
+ int ndistance,
+ ldistance = 0,
+ rdistance = 0;
ndistance = (node->valnode->qoperator.oper == OP_PHRASE) ?
- node->valnode->qoperator.distance :
- 0;
+ node->valnode->qoperator.distance :
+ 0;
- node->left = clean_fakeval_intree(node->left,
- &lresult,
- ndistance ? &ldistance : NULL);
+ node->left = clean_fakeval_intree(node->left,
+ &lresult,
+ ndistance ? &ldistance : NULL);
node->right = clean_fakeval_intree(node->right,
&rresult,
ndistance ? &rdistance : NULL);
/*
- * ndistance, ldistance and rdistance are greater than zero
- * if their corresponding nodes are OP_PHRASE
+ * ndistance, ldistance and rdistance are greater than zero if their
+ * corresponding nodes are OP_PHRASE
*/
if (lresult == V_STOP && rresult == V_STOP)
else if (lresult == V_STOP)
{
res = node->right;
+
/*
- * propagate distance from current node to the
- * right upper subtree.
+ * propagate distance from current node to the right upper
+ * subtree.
*/
if (adddistance && ndistance)
*adddistance = rdistance;
else if (rresult == V_STOP)
{
res = node->left;
+
/*
* propagate distance from current node to the upper tree.
*/
static NODE *
copyNODE(NODE *node)
{
- NODE *cnode = palloc(sizeof(NODE));
+ NODE *cnode = palloc(sizeof(NODE));
/* since this function recurses, it could be driven to stack overflow. */
check_stack_depth();
static NODE *
makeNODE(int8 op, NODE *left, NODE *right)
{
- NODE *node = palloc(sizeof(NODE));
+ NODE *node = palloc(sizeof(NODE));
/* zeroing allocation to prevent difference in unused bytes */
node->valnode = palloc0(sizeof(QueryItem));
* <-> (<n>) operation since it's needed solely for the phrase operator.
*
* Rules:
- * a <-> (b | c) => (a <-> b) | (a <-> c)
- * (a | b) <-> c => (a <-> c) | (b <-> c)
- * a <-> !b => a & !(a <-> b)
- * !a <-> b => b & !(a <-> b)
+ * a <-> (b | c) => (a <-> b) | (a <-> c)
+ * (a | b) <-> c => (a <-> c) | (b <-> c)
+ * a <-> !b => a & !(a <-> b)
+ * !a <-> b => b & !(a <-> b)
*
* Warnings for readers:
* a <-> b != b <-> a
*
- * a <n> (b <n> c) != (a <n> b) <n> c since the phrase lengths are:
+ * a <n> (b <n> c) != (a <n> b) <n> c since the phrase lengths are:
* n 2n-1
*/
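The second rewrite rule above, (a | b) <-> c => (a <-> c) | (b <-> c), corresponds to a tree transformation of the following shape; it mirrors the makeNODE()/copyNODE() calls used for the other cases further down, with the original phrase distance reattached to both new phrase nodes (a simplified sketch, not the exact code):

/* Sketch: distribute a phrase operator over an OR in its left child. */
if (node->left->valnode->qoperator.oper == OP_OR)
{
	int16		distance = node->valnode->qoperator.distance;

	node = makeNODE(OP_OR,
					makeNODE(OP_PHRASE,
							 node->left->left,
							 node->right),
					makeNODE(OP_PHRASE,
							 node->left->right,
							 copyNODE(node->right)));

	/* both new phrase nodes keep the original <n> distance */
	node->left->valnode->qoperator.distance = distance;
	node->right->valnode->qoperator.distance = distance;
}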
static NODE *
{
/* eliminate NOT sequence */
while (node->valnode->type == QI_OPR &&
- node->valnode->qoperator.oper == node->right->valnode->qoperator.oper)
+ node->valnode->qoperator.oper == node->right->valnode->qoperator.oper)
{
node = node->right->right;
}
}
else if (node->valnode->qoperator.oper == OP_PHRASE)
{
- int16 distance;
- NODE *X;
+ int16 distance;
+ NODE *X;
node->left = normalize_phrase_tree(node->left);
node->right = normalize_phrase_tree(node->right);
if (NODE_PRIORITY(node) <= NODE_PRIORITY(node->right) &&
NODE_PRIORITY(node) <= NODE_PRIORITY(node->left))
- return node;
+ return node;
/*
- * We can't swap left-right and works only with left child
- * because of a <-> b != b <-> a
+ * We can't swap the left and right operands and must work only with
+ * the left child, because a <-> b != b <-> a
*/
distance = node->valnode->qoperator.distance;
/* no-op */
break;
default:
- elog(ERROR,"Wrong type of tsquery node: %d",
- node->right->valnode->qoperator.oper);
+ elog(ERROR, "Wrong type of tsquery node: %d",
+ node->right->valnode->qoperator.oper);
}
}
* if the node is still OP_PHRASE, check the left subtree,
* otherwise the whole node will be transformed later.
*/
- switch(node->left->valnode->qoperator.oper)
+ switch (node->left->valnode->qoperator.oper)
{
case OP_AND:
- /* (a & b) <-> c => (a <-> c) & (b <-> c) */
+ /* (a & b) <-> c => (a <-> c) & (b <-> c) */
node = makeNODE(OP_AND,
makeNODE(OP_PHRASE,
node->left->left,
/* no-op */
break;
default:
- elog(ERROR,"Wrong type of tsquery node: %d",
- node->left->valnode->qoperator.oper);
+ elog(ERROR, "Wrong type of tsquery node: %d",
+ node->left->valnode->qoperator.oper);
}
}
/* continue transformation */
node = normalize_phrase_tree(node);
}
- else /* AND or OR */
+ else /* AND or OR */
{
node->left = normalize_phrase_tree(node->left);
node->right = normalize_phrase_tree(node->right);
static int32
calcstrlen(NODE *node)
{
- int32 size = 0;
+ int32 size = 0;
if (node->valnode->type == QI_VAL)
{
tsquery_phrase(PG_FUNCTION_ARGS)
{
PG_RETURN_POINTER(DirectFunctionCall3(
- tsquery_phrase_distance,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1),
- Int32GetDatum(1)));
+ tsquery_phrase_distance,
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1),
+ Int32GetDatum(1)));
}
Datum
/* XXX: What about NOT? */
res = (item->type == QI_OPR && (item->qoperator.oper == OP_AND ||
item->qoperator.oper == OP_PHRASE)) ?
- calc_rank_and(w, t, q) :
- calc_rank_or(w, t, q);
+ calc_rank_and(w, t, q) :
+ calc_rank_or(w, t, q);
if (res < 0)
res = 1e-20f;
typedef struct
{
- union {
- struct { /* compiled doc representation */
+ union
+ {
+ struct
+ { /* compiled doc representation */
QueryItem **items;
int16 nitem;
- } query;
- struct { /* struct is used for preparing doc representation */
+ } query;
+ struct
+ { /* struct is used for preparing doc
+ * representation */
QueryItem *item;
WordEntry *entry;
- } map;
- } data;
- WordEntryPos pos;
+ } map;
+ } data;
+ WordEntryPos pos;
} DocRepresentation;
static int
#define MAXQROPOS MAXENTRYPOS
typedef struct
{
- bool operandexists;
- bool reverseinsert; /* indicates insert order,
- true means descending order */
- uint32 npos;
- WordEntryPos pos[MAXQROPOS];
+ bool operandexists;
+ bool reverseinsert; /* indicates insert order, true means
+ * descending order */
+ uint32 npos;
+ WordEntryPos pos[MAXQROPOS];
} QueryRepresentationOperand;
typedef struct
{
- TSQuery query;
+ TSQuery query;
QueryRepresentationOperand *operandData;
} QueryRepresentation;
static bool
checkcondition_QueryOperand(void *checkval, QueryOperand *val, ExecPhraseData *data)
{
- QueryRepresentation *qr = (QueryRepresentation *) checkval;
- QueryRepresentationOperand *opData = QR_GET_OPERAND_DATA(qr, val);
+ QueryRepresentation *qr = (QueryRepresentation *) checkval;
+ QueryRepresentationOperand *opData = QR_GET_OPERAND_DATA(qr, val);
if (!opData->operandexists)
return false;
static void
resetQueryRepresentation(QueryRepresentation *qr, bool reverseinsert)
{
- int i;
+ int i;
- for(i = 0; i < qr->query->size; i++)
+ for (i = 0; i < qr->query->size; i++)
{
qr->operandData[i].operandexists = false;
qr->operandData[i].reverseinsert = reverseinsert;
static void
fillQueryRepresentationData(QueryRepresentation *qr, DocRepresentation *entry)
{
- int i;
- int lastPos;
+ int i;
+ int lastPos;
QueryRepresentationOperand *opData;
for (i = 0; i < entry->data.query.nitem; i++)
}
lastPos = opData->reverseinsert ?
- (MAXQROPOS - opData->npos) :
- (opData->npos - 1);
+ (MAXQROPOS - opData->npos) :
+ (opData->npos - 1);
if (WEP_GETPOS(opData->pos[lastPos]) != WEP_GETPOS(entry->pos))
{
lastPos = opData->reverseinsert ?
- (MAXQROPOS - 1 - opData->npos) :
- (opData->npos);
+ (MAXQROPOS - 1 - opData->npos) :
+ (opData->npos);
opData->pos[lastPos] = entry->pos;
opData->npos++;
static bool
Cover(DocRepresentation *doc, int len, QueryRepresentation *qr, CoverExt *ext)
{
- DocRepresentation *ptr;
- int lastpos = ext->pos;
- bool found = false;
+ DocRepresentation *ptr;
+ int lastpos = ext->pos;
+ bool found = false;
/*
* since this function recurses, it could be driven to stack overflow.
WordEntry *entry,
*firstentry;
WordEntryPos *post;
- int32 dimt, /* number of 'post' items */
+ int32 dimt, /* number of 'post' items */
j,
i,
nitem;
doc = (DocRepresentation *) palloc(sizeof(DocRepresentation) * len);
/*
- * Iterate through query to make DocRepresentaion for words and it's entries
- * satisfied by query
+ * Iterate through the query to make a DocRepresentation for the words
+ * and their entries satisfied by the query
*/
for (i = 0; i < qr->query->size; i++)
{
if (cur > 0)
{
- DocRepresentation *rptr = doc + 1,
- *wptr = doc,
- storage;
+ DocRepresentation *rptr = doc + 1,
+ *wptr = doc,
+ storage;
/*
* Sort representation in ascending order by pos and entry
while (rptr - doc < cur)
{
- if (rptr->pos == (rptr-1)->pos &&
- rptr->data.map.entry == (rptr-1)->data.map.entry)
+ if (rptr->pos == (rptr - 1)->pos &&
+ rptr->data.map.entry == (rptr - 1)->data.map.entry)
{
storage.data.query.items[storage.data.query.nitem] = rptr->data.map.item;
storage.data.query.nitem++;
qr.query = query;
qr.operandData = (QueryRepresentationOperand *)
- palloc0(sizeof(QueryRepresentationOperand) * query->size);
+ palloc0(sizeof(QueryRepresentationOperand) * query->size);
doc = get_docrep(txt, &qr, &doclen);
if (!doc)
#define STATHDRSIZE (offsetof(TSVectorStat, data))
static Datum tsvector_update_trigger(PG_FUNCTION_ARGS, bool config_column);
-static int tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len);
+static int tsvector_bsearch(const TSVector tsv, char *lexeme, int lexeme_len);
/*
* Order: haspos, len, word, for all positions (pos, weight)
switch (char_weight)
{
- case 'A': case 'a':
+ case 'A':
+ case 'a':
weight = 3;
break;
- case 'B': case 'b':
+ case 'B':
+ case 'b':
weight = 2;
break;
- case 'C': case 'c':
+ case 'C':
+ case 'c':
weight = 1;
break;
- case 'D': case 'd':
+ case 'D':
+ case 'd':
weight = 0;
break;
default:
&dlexemes, &nulls, &nlexemes);
/*
- * Assuming that lexemes array is significantly shorter than tsvector
- * we can iterate through lexemes performing binary search
- * of each lexeme from lexemes in tsvector.
+ * Assuming that the lexemes array is significantly shorter than the
+ * tsvector, we can iterate through lexemes, performing a binary search
+ * of each lexeme from lexemes in the tsvector.
*/
for (i = 0; i < nlexemes; i++)
{
- char *lex;
- int lex_len,
- lex_pos;
+ char *lex;
+ int lex_len,
+ lex_pos;
if (nulls[i])
ereport(ERROR,
if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0)
{
WordEntryPos *p = POSDATAPTR(tsout, entry + lex_pos);
+
while (j--)
{
WEP_SETWEIGHT(*p, weight);
while (StopLow < StopHigh)
{
- StopMiddle = (StopLow + StopHigh)/2;
+ StopMiddle = (StopLow + StopHigh) / 2;
cmp = tsCompareString(lexeme, lexeme_len,
- STRPTR(tsv) + arrin[StopMiddle].pos,
- arrin[StopMiddle].len,
- false);
+ STRPTR(tsv) + arrin[StopMiddle].pos,
+ arrin[StopMiddle].len,
+ false);
if (cmp < 0)
StopHigh = StopMiddle;
else if (cmp > 0)
StopLow = StopMiddle + 1;
- else /* found it */
+ else /* found it */
return StopMiddle;
}
*arrout;
char *data = STRPTR(tsv),
*dataout;
- int i, j, k,
+ int i,
+ j,
+ k,
curoff;
/*
* Here we overestimate tsout size, since we don't know the exact size
- * occupied by positions and weights. We will set exact size later
- * after a pass through TSVector.
+ * occupied by positions and weights. We will set exact size later after a
+ * pass through TSVector.
*/
tsout = (TSVector) palloc0(VARSIZE(tsv));
arrout = ARRPTR(tsout);
{
/*
* Here we should check whether current i is present in
- * indices_to_delete or not. Since indices_to_delete is already
- * sorted we can advance it index only when we have match.
+ * indices_to_delete or not. Since indices_to_delete is already sorted,
+ * we can advance its index only when we have a match.
*/
- if (k < indices_count && i == indices_to_delete[k]){
+ if (k < indices_count && i == indices_to_delete[k])
+ {
k++;
continue;
}
curoff += arrin[i].len;
if (arrin[i].haspos)
{
- int len = POSDATALEN(tsv, arrin+i) * sizeof(WordEntryPos) +
- sizeof(uint16);
+ int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos) +
+ sizeof(uint16);
+
curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff,
STRPTR(tsv) + SHORTALIGN(arrin[i].pos + arrin[i].len),
}
/*
- * After the pass through TSVector k should equals exactly to indices_count.
- * If it isn't then the caller provided us with indices outside of
- * [0, tsv->size) range and estimation of tsout's size is wrong.
+ * After the pass through TSVector, k should equal indices_count exactly.
+ * If it doesn't, then the caller provided us with indices outside of the
+ * [0, tsv->size) range and the estimation of tsout's size is wrong.
*/
Assert(k == indices_count);
TSVector tsin = PG_GETARG_TSVECTOR(0),
tsout;
ArrayType *lexemes = PG_GETARG_ARRAYTYPE_P(1);
- int i, nlex,
+ int i,
+ nlex,
skip_count,
*skip_indices;
Datum *dlexemes;
&dlexemes, &nulls, &nlex);
/*
- * In typical use case array of lexemes to delete is relatively small.
- * So here we optimizing things for that scenario: iterate through lexarr
+ * In the typical use case the array of lexemes to delete is relatively
+ * small, so here we optimize for that scenario: iterate through lexarr
* performing binary search of each lexeme from lexarr in tsvector.
*/
skip_indices = palloc0(nlex * sizeof(int));
for (i = skip_count = 0; i < nlex; i++)
{
- char *lex;
- int lex_len,
- lex_pos;
+ char *lex;
+ int lex_len,
+ lex_pos;
if (nulls[i])
ereport(ERROR,
/*
* Expand tsvector as table with following columns:
- * lexeme: lexeme text
- * positions: integer array of lexeme positions
- * weights: char array of weights corresponding to positions
+ * lexeme: lexeme text
+ * positions: integer array of lexeme positions
+ * weights: char array of weights corresponding to positions
*/
Datum
tsvector_unnest(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- TSVector tsin;
+ FuncCallContext *funcctx;
+ TSVector tsin;
if (SRF_IS_FIRSTCALL())
{
Datum values[3];
values[0] = PointerGetDatum(
- cstring_to_text_with_len(data + arrin[i].pos, arrin[i].len)
- );
+ cstring_to_text_with_len(data + arrin[i].pos, arrin[i].len)
+ );
if (arrin[i].haspos)
{
/*
* Internally tsvector stores position and weight in the same
- * uint16 (2 bits for weight, 14 for position). Here we extract that
- * in two separate arrays.
+ * uint16 (2 bits for weight, 14 for position). Here we extract
+ * that in two separate arrays.
*/
posv = _POSVECPTR(tsin, arrin + i);
positions = palloc(posv->npos * sizeof(Datum));
- weights = palloc(posv->npos * sizeof(Datum));
+ weights = palloc(posv->npos * sizeof(Datum));
for (j = 0; j < posv->npos; j++)
{
positions[j] = Int16GetDatum(WEP_GETPOS(posv->pos[j]));
weight = 'D' - WEP_GETWEIGHT(posv->pos[j]);
weights[j] = PointerGetDatum(
- cstring_to_text_with_len(&weight, 1)
- );
+ cstring_to_text_with_len(&weight, 1)
+ );
}
values[1] = PointerGetDatum(
- construct_array(positions, posv->npos, INT2OID, 2, true, 's'));
+ construct_array(positions, posv->npos, INT2OID, 2, true, 's'));
values[2] = PointerGetDatum(
- construct_array(weights, posv->npos, TEXTOID, -1, false, 'i'));
+ construct_array(weights, posv->npos, TEXTOID, -1, false, 'i'));
}
else
{
Datum
tsvector_to_array(PG_FUNCTION_ARGS)
{
- TSVector tsin = PG_GETARG_TSVECTOR(0);
- WordEntry *arrin = ARRPTR(tsin);
- Datum *elements;
- int i;
- ArrayType *array;
+ TSVector tsin = PG_GETARG_TSVECTOR(0);
+ WordEntry *arrin = ARRPTR(tsin);
+ Datum *elements;
+ int i;
+ ArrayType *array;
elements = palloc(tsin->size * sizeof(Datum));
for (i = 0; i < tsin->size; i++)
{
elements[i] = PointerGetDatum(
- cstring_to_text_with_len(STRPTR(tsin) + arrin[i].pos, arrin[i].len)
- );
+ cstring_to_text_with_len(STRPTR(tsin) + arrin[i].pos, arrin[i].len)
+ );
}
array = construct_array(elements, tsin->size, TEXTOID, -1, false, 'i');
for (i = 0; i < nitems; i++)
{
- char *lex = VARDATA(dlexemes[i]);
- int lex_len = VARSIZE_ANY_EXHDR(dlexemes[i]);
+ char *lex = VARDATA(dlexemes[i]);
+ int lex_len = VARSIZE_ANY_EXHDR(dlexemes[i]);
memcpy(cur, lex, lex_len);
arrout[i].haspos = 0;
Datum *dweights;
bool *nulls;
int nweights;
- int i, j;
+ int i,
+ j;
int cur_pos = 0;
char mask = 0;
for (i = 0; i < nweights; i++)
{
- char char_weight;
+ char char_weight;
if (nulls[i])
ereport(ERROR,
char_weight = DatumGetChar(dweights[i]);
switch (char_weight)
{
- case 'A': case 'a':
+ case 'A':
+ case 'a':
mask = mask | 8;
break;
- case 'B': case 'b':
+ case 'B':
+ case 'b':
mask = mask | 4;
break;
- case 'C': case 'c':
+ case 'C':
+ case 'c':
mask = mask | 2;
break;
- case 'D': case 'd':
+ case 'D':
+ case 'd':
mask = mask | 1;
break;
default:
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized weight: \"%c\"", char_weight)));
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("unrecognized weight: \"%c\"", char_weight)));
}
}
for (i = j = 0; i < tsin->size; i++)
{
WordEntryPosVector *posvin,
- *posvout;
- int npos = 0;
- int k;
+ *posvout;
+ int npos = 0;
+ int k;
if (!arrin[i].haspos)
continue;
- posvin = _POSVECPTR(tsin, arrin + i);
+ posvin = _POSVECPTR(tsin, arrin + i);
posvout = (WordEntryPosVector *)
- (dataout + SHORTALIGN(cur_pos + arrin[i].len));
+ (dataout + SHORTALIGN(cur_pos + arrin[i].len));
for (k = 0; k < posvin->npos; k++)
{
memcpy(dataout + cur_pos, datain + arrin[i].pos, arrin[i].len);
posvout->npos = npos;
cur_pos += SHORTALIGN(arrin[i].len);
- cur_pos += POSDATALEN(tsout, arrout+j) * sizeof(WordEntryPos) +
- sizeof(uint16);
+ cur_pos += POSDATALEN(tsout, arrout + j) * sizeof(WordEntryPos) +
+ sizeof(uint16);
j++;
}
checkclass_str(CHKVAL *chkval, WordEntry *entry, QueryOperand *val,
ExecPhraseData *data)
{
- bool result = false;
+ bool result = false;
if (entry->haspos && (val->weight || data))
{
- WordEntryPosVector *posvec;
+ WordEntryPosVector *posvec;
/*
* We can't use the _POSVECPTR macro here because the pointer to the
if (val->weight && data)
{
- WordEntryPos *posvec_iter = posvec->pos;
- WordEntryPos *dptr;
+ WordEntryPos *posvec_iter = posvec->pos;
+ WordEntryPos *dptr;
/*
* Filter position information by weights
}
else if (val->weight)
{
- WordEntryPos *posvec_iter = posvec->pos;
+ WordEntryPos *posvec_iter = posvec->pos;
/* Is there a position with a matching weight? */
while (posvec_iter < posvec->pos + posvec->npos)
if (val->weight & (1 << WEP_GETWEIGHT(*posvec_iter)))
{
result = true;
- break; /* no need to go further */
+ break; /* no need to go further */
}
posvec_iter++;
}
}
- else /* data != NULL */
+ else /* data != NULL */
{
data->npos = posvec->npos;
- data->pos = posvec->pos;
+ data->pos = posvec->pos;
data->allocated = false;
result = true;
}
uniqueLongPos(WordEntryPos *pos, int npos)
{
WordEntryPos *pos_iter,
- *result;
+ *result;
if (npos <= 1)
return npos;
if ((!res || data) && val->prefix)
{
- WordEntryPos *allpos = NULL;
- int npos = 0,
- totalpos = 0;
+ WordEntryPos *allpos = NULL;
+ int npos = 0,
+ totalpos = 0;
+
/*
* there was a failed exact search, so we should scan further to find
* a prefix match. We also need to do so if caller needs position info
}
else
{
- ExecPhraseData Ldata = {0, false, NULL},
- Rdata = {0, false, NULL};
- WordEntryPos *Lpos,
- *Rpos,
- *pos_iter = NULL;
+ ExecPhraseData Ldata = {0, false, NULL},
+ Rdata = {0, false, NULL};
+ WordEntryPos *Lpos,
+ *Rpos,
+ *pos_iter = NULL;
Assert(curitem->qoperator.oper == OP_PHRASE);
return false;
/*
- * if at least one of the operands has no position
- * information, fallback to AND operation.
+ * if at least one of the operands has no position information,
+ * fallback to AND operation.
*/
if (Ldata.npos == 0 || Rdata.npos == 0)
return true;
/*
- * Result of the operation is a list of the
- * corresponding positions of RIGHT operand.
+ * Result of the operation is a list of the corresponding positions of
+ * RIGHT operand.
*/
if (data)
{
if (!Rdata.allocated)
+
/*
- * OP_PHRASE is based on the OP_AND, so the number of resulting
- * positions could not be greater than the total amount of operands.
+ * OP_PHRASE is based on the OP_AND, so the number of
+ * resulting positions cannot be greater than the total
+ * number of operands.
*/
data->pos = palloc(sizeof(WordEntryPos) * Min(Ldata.npos, Rdata.npos));
else
*pos_iter = WEP_GETPOS(*Rpos);
pos_iter++;
- break; /* We need to build a unique result
- * array, so go to the next Rpos */
+ break; /* We need to build a unique result
+ * array, so go to the next Rpos */
}
else
{
else
{
/*
- * Go to the next Rpos, because Lpos
- * is ahead of the current Rpos
+ * Go to the next Rpos, because Lpos is ahead of the
+ * current Rpos
*/
break;
}
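The OP_PHRASE branch above intersects the left and right operands' position lists, keeping the right-hand positions that sit at the required distance from a left-hand position, which is why the result is allocated with Min(Ldata.npos, Rdata.npos) entries. A simplified standalone sketch of such a merge over two sorted position arrays; the distance handling is illustrative and not the exact tsquery logic:

#include <stdio.h>

/*
 * Collect right-hand positions r for which some left-hand position l
 * satisfies l + dist == r.  Both inputs are assumed sorted ascending.
 * Returns the number of positions written to out (at most nright).
 */
static int phrase_merge(const int *lpos, int nleft,
                        const int *rpos, int nright,
                        int dist, int *out)
{
    int li = 0, ri = 0, n = 0;

    while (li < nleft && ri < nright)
    {
        int need = lpos[li] + dist;     /* where the right lexeme must appear */

        if (rpos[ri] == need)
        {
            out[n++] = rpos[ri];        /* match: keep the right-hand position */
            li++;
            ri++;
        }
        else if (rpos[ri] < need)
            ri++;                       /* right position is too early */
        else
            li++;                       /* left position is too early */
    }
    return n;
}

int main(void)
{
    int l[] = {2, 7, 10};
    int r[] = {3, 8, 20};
    int out[3];
    int n = phrase_merge(l, 3, r, 3, 1, out);   /* "l r" with distance 1 */

    for (int i = 0; i < n; i++)
        printf("%d ", out[i]);          /* prints: 3 8 */
    printf("\n");
    return 0;
}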
*/
bool
TS_execute(QueryItem *curitem, void *checkval, bool calcnot,
- bool (*chkcond) (void *checkval, QueryOperand *val, ExecPhraseData *data))
+ bool (*chkcond) (void *checkval, QueryOperand *val, ExecPhraseData *data))
{
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
if (curitem->type == QI_VAL)
return chkcond(checkval, (QueryOperand *) curitem,
- NULL /* we don't need position info */);
+ NULL /* we don't need position info */ );
switch (curitem->qoperator.oper)
{
return false;
case OP_PHRASE:
+
/*
* Treat OP_PHRASE as OP_AND here
*/
if (SPI_tuptable == NULL ||
SPI_tuptable->tupdesc->natts != 1 ||
!IsBinaryCoercible(SPI_gettypeid(SPI_tuptable->tupdesc, 1),
- TSVECTOROID))
+ TSVECTOROID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("ts_stat query must return one tsvector column")));
errmsg("tsvector column \"%s\" does not exist",
trigger->tgargs[0])));
if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, tsvector_attr_num),
- TSVECTOROID))
+ TSVECTOROID))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("column \"%s\" is not of tsvector type",
errmsg("configuration column \"%s\" does not exist",
trigger->tgargs[1])));
if (!IsBinaryCoercible(SPI_gettypeid(rel->rd_att, config_attr_num),
- REGCONFIGOID))
+ REGCONFIGOID))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("column \"%s\" is not of regconfig type",
static int uuid_internal_cmp(const pg_uuid_t *arg1, const pg_uuid_t *arg2);
static int uuid_fast_cmp(Datum x, Datum y, SortSupport ssup);
static int uuid_cmp_abbrev(Datum x, Datum y, SortSupport ssup);
-static bool uuid_abbrev_abort(int memtupcount, SortSupport ssup);
-static Datum uuid_abbrev_convert(Datum original, SortSupport ssup);
+static bool uuid_abbrev_abort(int memtupcount, SortSupport ssup);
+static Datum uuid_abbrev_convert(Datum original, SortSupport ssup);
Datum
uuid_in(PG_FUNCTION_ARGS)
Datum
uuid_sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = uuid_fast_cmp;
ssup->ssup_extra = NULL;
if (ssup->abbreviate)
{
- uuid_sortsupport_state *uss;
- MemoryContext oldcontext;
+ uuid_sortsupport_state *uss;
+ MemoryContext oldcontext;
oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt);
static bool
uuid_abbrev_abort(int memtupcount, SortSupport ssup)
{
- uuid_sortsupport_state *uss = ssup->ssup_extra;
- double abbr_card;
+ uuid_sortsupport_state *uss = ssup->ssup_extra;
+ double abbr_card;
if (memtupcount < 10000 || uss->input_count < 10000 || !uss->estimating)
return false;
/*
* Target minimum cardinality is 1 per ~2k of non-null inputs. 0.5 row
* fudge factor allows us to abort earlier on genuinely pathological data
- * where we've had exactly one abbreviated value in the first 2k (non-null)
- * rows.
+ * where we've had exactly one abbreviated value in the first 2k
+ * (non-null) rows.
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
if (trace_sort)
elog(LOG,
"uuid_abbrev: aborting abbreviation at cardinality %f"
- " below threshold %f after " INT64_FORMAT " values (%d rows)",
+ " below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
#endif
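The heuristic above aborts abbreviation once the estimated cardinality of abbreviated keys drops below roughly one distinct value per 2,000 non-null inputs, plus a 0.5 fudge factor: with 10,000 inputs the threshold is 10000 / 2000.0 + 0.5 = 5.5, so an estimated cardinality of 5 or less aborts. A minimal standalone sketch of just that check, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* Abort abbreviation when estimated cardinality is below ~1 per 2k non-null inputs. */
static bool
should_abort_abbrev(double abbr_card, int input_count)
{
    if (input_count < 10000)
        return false;                   /* too early to judge */
    return abbr_card < input_count / 2000.0 + 0.5;
}

int main(void)
{
    printf("%d\n", should_abort_abbrev(5.0, 10000));    /* 1: 5.0 < 5.5 */
    printf("%d\n", should_abort_abbrev(6.0, 10000));    /* 0: 6.0 >= 5.5 */
    return 0;
}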
static Datum
uuid_abbrev_convert(Datum original, SortSupport ssup)
{
- uuid_sortsupport_state *uss = ssup->ssup_extra;
- pg_uuid_t *authoritative = DatumGetUUIDP(original);
- Datum res;
+ uuid_sortsupport_state *uss = ssup->ssup_extra;
+ pg_uuid_t *authoritative = DatumGetUUIDP(original);
+ Datum res;
memcpy(&res, authoritative->data, sizeof(Datum));
uss->input_count += 1;
* Byteswap on little-endian machines.
*
* This is needed so that uuid_cmp_abbrev() (an unsigned integer 3-way
- * comparator) works correctly on all platforms. If we didn't do this, the
- * comparator would have to call memcmp() with a pair of pointers to the
- * first byte of each abbreviated key, which is slower.
+ * comparator) works correctly on all platforms. If we didn't do this,
+ * the comparator would have to call memcmp() with a pair of pointers to
+ * the first byte of each abbreviated key, which is slower.
*/
res = DatumBigEndianToNative(res);
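DatumBigEndianToNative() above byteswaps the abbreviated key on little-endian machines so that comparing the Datum as an unsigned integer orders the same way as memcmp() on the original key bytes. A standalone sketch of that idea for an 8-byte prefix, using a hand-rolled big-endian load instead of PostgreSQL's macro:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Load the first 8 bytes of a key as a big-endian unsigned integer. */
static uint64_t
prefix_as_uint64(const unsigned char *key)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++)
        v = (v << 8) | key[i];          /* most significant byte first */
    return v;
}

int main(void)
{
    unsigned char a[16] = {0x12, 0x34};     /* remaining bytes are zero */
    unsigned char b[16] = {0x12, 0x35};

    uint64_t ka = prefix_as_uint64(a);
    uint64_t kb = prefix_as_uint64(b);

    /* Integer comparison of the prefixes agrees with memcmp() of the raw bytes. */
    printf("int cmp: %d, memcmp: %d\n",
           (ka < kb) ? -1 : (ka > kb) ? 1 : 0,
           (memcmp(a, b, 8) < 0) ? -1 : (memcmp(a, b, 8) > 0) ? 1 : 0);
    return 0;
}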
/* should not get here, because of previous check */
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized format() type specifier \"%c\"",
- *cp),
- errhint("For a single \"%%\" use \"%%%%\".")));
+ errmsg("unrecognized format() type specifier \"%c\"",
+ *cp),
+ errhint("For a single \"%%\" use \"%%%%\".")));
break;
}
}
char
get_rel_persistence(Oid relid)
{
- HeapTuple tp;
- Form_pg_class reltup;
- char result;
+ HeapTuple tp;
+ Form_pg_class reltup;
+ char result;
tp = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tp))
static void PlanCacheFuncCallback(Datum arg, int cacheid, uint32 hashvalue);
static void PlanCacheSysCallback(Datum arg, int cacheid, uint32 hashvalue);
static void PlanCacheUserMappingCallback(Datum arg, int cacheid,
- uint32 hashvalue);
+ uint32 hashvalue);
/*
plansource->is_valid = false;
/*
- * If we have a join pushed down to the foreign server and the current user
- * is different from the one for which the plan was created, invalidate the
- * generic plan since user mapping for the new user might make the join
- * unsafe to push down, or change which user mapping is used.
+ * If we have a join pushed down to the foreign server and the current
+ * user is different from the one for which the plan was created,
+ * invalidate the generic plan since user mapping for the new user might
+ * make the join unsafe to push down, or change which user mapping is
+ * used.
*/
if (plansource->is_valid &&
plansource->gplan &&
bool spi_pushed;
MemoryContext plan_context;
MemoryContext oldcxt = CurrentMemoryContext;
- ListCell *lc;
+ ListCell *lc;
/*
* Normally the querytree should be valid already, but if it's not,
plan->has_foreign_join = false;
foreach(lc, plist)
{
- PlannedStmt *plan_stmt = (PlannedStmt *) lfirst(lc);
+ PlannedStmt *plan_stmt = (PlannedStmt *) lfirst(lc);
if (IsA(plan_stmt, PlannedStmt))
plan->has_foreign_join =
/*
* PlanCacheUserMappingCallback
- * Syscache inval callback function for user mapping cache invalidation.
+ * Syscache inval callback function for user mapping cache invalidation.
*
- * Invalidates plans which have pushed down foreign joins.
+ * Invalidates plans which have pushed down foreign joins.
*/
static void
PlanCacheUserMappingCallback(Datum arg, int cacheid, uint32 hashvalue)
/*
* If the plan has pushed down foreign joins, those join may become
- * unsafe to push down because of user mapping changes. Invalidate only
- * the generic plan, since changes to user mapping do not invalidate the
- * parse tree.
+ * unsafe to push down because of user mapping changes. Invalidate
+ * only the generic plan, since changes to user mapping do not
+ * invalidate the parse tree.
*/
if (plansource->gplan && plansource->gplan->has_foreign_join)
plansource->gplan->is_valid = false;
bool
RelationHasUnloggedIndex(Relation rel)
{
- List *indexoidlist;
- ListCell *indexoidscan;
- bool result = false;
+ List *indexoidlist;
+ ListCell *indexoidscan;
+ bool result = false;
indexoidlist = RelationGetIndexList(rel);
* both log_line_prefix and csv logs.
*/
-static struct timeval saved_timeval;
-static bool saved_timeval_set = false;
+static struct timeval saved_timeval;
+static bool saved_timeval_set = false;
#define FORMATTED_TS_LEN 128
static char formatted_start_time[FORMATTED_TS_LEN];
* mechanisms.
*
* The log hook has access to both the translated and original English
- * error message text, which is passed through to allow it to be used
- * as a message identifier. Note that the original text is not available
- * for detail, detail_log, hint and context text elements.
+ * error message text, which is passed through to allow it to be used as a
+ * message identifier. Note that the original text is not available for
+ * detail, detail_log, hint and context text elements.
*/
if (edata->output_to_server && emit_log_hook)
(*emit_log_hook) (edata);
break;
case 'n':
{
- char strfbuf[128];
+ char strfbuf[128];
if (!saved_timeval_set)
{
}
sprintf(strfbuf, "%ld.%03d", saved_timeval.tv_sec,
- (int)(saved_timeval.tv_usec / 1000));
+ (int) (saved_timeval.tv_usec / 1000));
if (padding != 0)
appendStringInfo(buf, "%*s", padding, strfbuf);
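The '%n' case above formats the saved timeval as Unix epoch seconds with millisecond precision. A standalone sketch of the same formatting against gettimeofday(), assuming a POSIX environment:

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
    struct timeval tv;
    char strfbuf[128];

    gettimeofday(&tv, NULL);

    /* epoch seconds, then milliseconds derived from the microsecond field */
    sprintf(strfbuf, "%ld.%03d", (long) tv.tv_sec, (int) (tv.tv_usec / 1000));
    printf("%s\n", strfbuf);
    return 0;
}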
slock_t mutex; /* spinlock */
long nentries; /* number of entries */
HASHELEMENT *freeList; /* list of free elements */
-} FreeListData;
+} FreeListData;
/*
* Header structure for a hash table --- contains all changeable info
{
HeapTuple roleTup;
Form_pg_authid rform;
- char *rname;
+ char *rname;
/*
* Don't do scans if we're bootstrapping, none of the system catalogs
* FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
+
/*
* load_hba() and load_ident() want to work within the PostmasterContext,
* so create that if it doesn't exist (which it won't). We'll delete it
{
/*
* If this is a background worker not bound to any particular
- * database, we're done now. Everything that follows only makes
- * sense if we are bound to a specific database. We do need to
- * close the transaction we started before returning.
+ * database, we're done now. Everything that follows only makes sense
+ * if we are bound to a specific database. We do need to close the
+ * transaction we started before returning.
*/
if (!bootstrap)
CommitTransactionCommand();
{
{"parallel_setup_cost", PGC_USERSET, QUERY_TUNING_COST,
gettext_noop("Sets the planner's estimate of the cost of "
- "starting up worker processes for parallel query."),
+ "starting up worker processes for parallel query."),
NULL
},
¶llel_setup_cost,
* don't re-read the config file during backend start.
*
* In EXEC_BACKEND builds, this works differently: we load all
- * non-default settings from the CONFIG_EXEC_PARAMS file during
- * backend start. In that case we must accept PGC_SIGHUP
- * settings, so as to have the same value as if we'd forked
- * from the postmaster. This can also happen when using
- * RestoreGUCState() within a background worker that needs to
- * have the same settings as the user backend that started it.
- * is_reload will be true when either situation applies.
+ * non-default settings from the CONFIG_EXEC_PARAMS file
+ * during backend start. In that case we must accept
+ * PGC_SIGHUP settings, so as to have the same value as if
+ * we'd forked from the postmaster. This can also happen when
+ * using RestoreGUCState() within a background worker that
+ * needs to have the same settings as the user backend that
+ * started it. is_reload will be true when either situation
+ * applies.
*/
if (IsUnderPostmaster && !is_reload)
return -1;
Datum
pg_config(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- Tuplestorestate *tupstore;
- HeapTuple tuple;
- TupleDesc tupdesc;
- AttInMetadata *attinmeta;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- ConfigData *configdata;
- size_t configdata_len;
- char *values[2];
- int i = 0;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ Tuplestorestate *tupstore;
+ HeapTuple tuple;
+ TupleDesc tupdesc;
+ AttInMetadata *attinmeta;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ ConfigData *configdata;
+ size_t configdata_len;
+ char *values[2];
+ int i = 0;
/* check to see if caller supports us returning a tuplestore */
if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize))
/*
* SFRM_Materialize mode expects us to return a NULL Datum. The actual
- * tuples are in our tuplestore and passed back through
- * rsinfo->setResult. rsinfo->setDesc is set to the tuple description
- * that we actually used to build our tuples with, so the caller can
- * verify we did what it was expecting.
+ * tuples are in our tuplestore and passed back through rsinfo->setResult.
+ * rsinfo->setDesc is set to the tuple description that we actually used
+ * to build our tuples with, so the caller can verify we did what it was
+ * expecting.
*/
rsinfo->setDesc = tupdesc;
MemoryContextSwitchTo(oldcontext);
Datum
pg_control_system(PG_FUNCTION_ARGS)
{
- Datum values[4];
- bool nulls[4];
- TupleDesc tupdesc;
- HeapTuple htup;
- ControlFileData *ControlFile;
+ Datum values[4];
+ bool nulls[4];
+ TupleDesc tupdesc;
+ HeapTuple htup;
+ ControlFileData *ControlFile;
/*
* Construct a tuple descriptor for the result row. This must match this
Datum
pg_control_checkpoint(PG_FUNCTION_ARGS)
{
- Datum values[19];
- bool nulls[19];
- TupleDesc tupdesc;
- HeapTuple htup;
- ControlFileData *ControlFile;
- XLogSegNo segno;
- char xlogfilename[MAXFNAMELEN];
+ Datum values[19];
+ bool nulls[19];
+ TupleDesc tupdesc;
+ HeapTuple htup;
+ ControlFileData *ControlFile;
+ XLogSegNo segno;
+ char xlogfilename[MAXFNAMELEN];
/*
* Construct a tuple descriptor for the result row. This must match this
nulls[6] = false;
values[7] = CStringGetTextDatum(psprintf("%u:%u",
- ControlFile->checkPointCopy.nextXidEpoch,
- ControlFile->checkPointCopy.nextXid));
+ ControlFile->checkPointCopy.nextXidEpoch,
+ ControlFile->checkPointCopy.nextXid));
nulls[7] = false;
values[8] = ObjectIdGetDatum(ControlFile->checkPointCopy.nextOid);
Datum
pg_control_recovery(PG_FUNCTION_ARGS)
{
- Datum values[5];
- bool nulls[5];
- TupleDesc tupdesc;
- HeapTuple htup;
- ControlFileData *ControlFile;
+ Datum values[5];
+ bool nulls[5];
+ TupleDesc tupdesc;
+ HeapTuple htup;
+ ControlFileData *ControlFile;
/*
* Construct a tuple descriptor for the result row. This must match this
Datum
pg_control_init(PG_FUNCTION_ARGS)
{
- Datum values[13];
- bool nulls[13];
- TupleDesc tupdesc;
- HeapTuple htup;
- ControlFileData *ControlFile;
+ Datum values[13];
+ bool nulls[13];
+ TupleDesc tupdesc;
+ HeapTuple htup;
+ ControlFileData *ControlFile;
/*
* Construct a tuple descriptor for the result row. This must match this
/* We can remember up to MAX_RESOWNER_LOCKS references to local locks. */
int nlocks; /* number of owned locks */
LOCALLOCK *locks[MAX_RESOWNER_LOCKS]; /* list of owned locks */
-} ResourceOwnerData;
+} ResourceOwnerData;
/*****************************************************************************
int maxTapes; /* number of tapes (Knuth's T) */
int tapeRange; /* maxTapes-1 (Knuth's P) */
MemoryContext sortcontext; /* memory context holding most sort data */
- MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
+ MemoryContext tuplecontext; /* sub-context of sortcontext for tuple data */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
/*
/*
* Memory for tuples is sometimes allocated in batch, rather than
- * incrementally. This implies that incremental memory accounting has been
- * abandoned. Currently, this only happens for the final on-the-fly merge
- * step. Large batch allocations can store tuples (e.g. IndexTuples)
- * without palloc() fragmentation and other overhead.
+ * incrementally. This implies that incremental memory accounting has
+ * been abandoned. Currently, this only happens for the final on-the-fly
+ * merge step. Large batch allocations can store tuples (e.g.
+ * IndexTuples) without palloc() fragmentation and other overhead.
*/
bool batchUsed;
/*
* While building initial runs, this is the current output run number
- * (starting at RUN_FIRST). Afterwards, it is the number of initial
- * runs we made.
+ * (starting at RUN_FIRST). Afterwards, it is the number of initial runs
+ * we made.
*/
int currentRun;
* just a few large allocations.
*
* Aside from the general benefits of performing fewer individual retail
- * palloc() calls, this also helps make merging more cache efficient, since
- * each tape's tuples must naturally be accessed sequentially (in sorted
- * order).
+ * palloc() calls, this also helps make merging more cache efficient,
+ * since each tape's tuples must naturally be accessed sequentially (in
+ * sorted order).
*/
int64 spacePerTape; /* Space (memory) for tuples (not slots) */
char **mergetuples; /* Each tape's memory allocation */
static void batchmemtuples(Tuplesortstate *state);
static void mergebatch(Tuplesortstate *state, int64 spacePerTape);
static void mergebatchone(Tuplesortstate *state, int srcTape,
- SortTuple *stup, bool *should_free);
+ SortTuple *stup, bool *should_free);
static void mergebatchfreetape(Tuplesortstate *state, int srcTape,
- SortTuple *rtup, bool *should_free);
+ SortTuple *rtup, bool *should_free);
static void *mergebatchalloc(Tuplesortstate *state, int tapenum, Size tuplen);
static void mergepreread(Tuplesortstate *state);
static void mergeprereadone(Tuplesortstate *state, int srcTape);
* Caller tuple (e.g. IndexTuple) memory context.
*
* A dedicated child context used exclusively for caller passed tuples
- * eases memory management. Resetting at key points reduces fragmentation.
- * Note that the memtuples array of SortTuples is allocated in the parent
- * context, not this context, because there is no need to free memtuples
- * early.
+ * eases memory management. Resetting at key points reduces
+ * fragmentation. Note that the memtuples array of SortTuples is allocated
+ * in the parent context, not this context, because there is no need to
+ * free memtuples early.
*/
tuplecontext = AllocSetContextCreate(sortcontext,
"Caller tuples",
* a pass-by-value datatype could have an abbreviated form that is cheaper
* to compare. In a tuple sort, we could support that, because we can
* always extract the original datum from the tuple if needed. Here, we
- * can't, because a datum sort only stores a single copy of the datum;
- * the "tuple" field of each sortTuple is NULL.
+ * can't, because a datum sort only stores a single copy of the datum; the
+ * "tuple" field of each sortTuple is NULL.
*/
state->sortKeys->abbreviate = !typbyval;
* ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys
- * (TSS_BUILDRUNS state prevents control reaching here in any
- * case).
+ * (TSS_BUILDRUNS state prevents control reaching here in any case).
*/
for (i = 0; i < state->memtupcount; i++)
{
if (isNull || !state->tuples)
{
/*
- * Set datum1 to zeroed representation for NULLs (to be consistent, and
- * to support cheap inequality tests for NULL abbreviated keys).
+ * Set datum1 to zeroed representation for NULLs (to be consistent,
+ * and to support cheap inequality tests for NULL abbreviated keys).
*/
stup.datum1 = !isNull ? val : (Datum) 0;
stup.isnull1 = isNull;
*
* Alter datum1 representation in already-copied tuples, so as to
* ensure a consistent representation (current tuple was just
- * handled). It does not matter if some dumped tuples are
- * already sorted on tape, since serialized tuples lack
- * abbreviated keys (TSS_BUILDRUNS state prevents control
- * reaching here in any case).
+ * handled). It does not matter if some dumped tuples are already
+ * sorted on tape, since serialized tuples lack abbreviated keys
+ * (TSS_BUILDRUNS state prevents control reaching here in any
+ * case).
*/
for (i = 0; i < state->memtupcount; i++)
{
SortTuple *newtup;
/*
- * Returned tuple is still counted in our memory space most
- * of the time. See mergebatchone() for discussion of why
- * caller may occasionally be required to free returned
- * tuple, and how preread memory is managed with regard to
- * edge cases more generally.
+ * Returned tuple is still counted in our memory space most of
+ * the time. See mergebatchone() for discussion of why caller
+ * may occasionally be required to free returned tuple, and
+ * how preread memory is managed with regard to edge cases
+ * more generally.
*/
*stup = state->memtuples[0];
tuplesort_heap_siftup(state, false);
/*
* memtupsize might be noticeably higher than memtupcount here in atypical
* cases. It seems slightly preferable to not allow recent outliers to
- * impact this determination. Note that caller's trace_sort output reports
- * memtupcount instead.
+ * impact this determination. Note that caller's trace_sort output
+ * reports memtupcount instead.
*/
if (state->memtupsize <= replacement_sort_tuples)
return true;
state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
/*
- * Give replacement selection a try based on user setting. There will
- * be a switch to a simple hybrid sort-merge strategy after the first
- * run (iff we could not output one long run).
+ * Give replacement selection a try based on user setting. There will be
+ * a switch to a simple hybrid sort-merge strategy after the first run
+ * (iff we could not output one long run).
*/
state->replaceActive = useselection(state);
elog(LOG, "replacement selection will sort %d first run tuples",
state->memtupcount);
#endif
- state->memtupcount = 0; /* make the heap empty */
+ state->memtupcount = 0; /* make the heap empty */
for (j = 0; j < ntuples; j++)
{
int usedSlots;
/*
- * Report how effective batchmemtuples() was in balancing
- * the number of slots against the need for memory for the
+ * Report how effective batchmemtuples() was in balancing the
+ * number of slots against the need for memory for the
* underlying tuples (e.g. IndexTuples). The big preread of
* all tapes when switching to FINALMERGE state should be
* fairly representative of memory utilization during the
static void
batchmemtuples(Tuplesortstate *state)
{
- int64 refund;
- int64 availMemLessRefund;
- int memtupsize = state->memtupsize;
+ int64 refund;
+ int64 availMemLessRefund;
+ int memtupsize = state->memtupsize;
/* For simplicity, assume no memtuples are actually currently counted */
Assert(state->memtupcount == 0);
/*
* To establish balanced memory use after refunding palloc overhead,
* temporarily have our accounting indicate that we've allocated all
- * memory we're allowed to less that refund, and call grow_memtuples()
- * to have it increase the number of slots.
+ * memory we're allowed to less that refund, and call grow_memtuples() to
+ * have it increase the number of slots.
*/
state->growmemtuples = true;
USEMEM(state, availMemLessRefund);
#ifdef TRACE_SORT
if (trace_sort)
{
- Size OldKb = (memtupsize * sizeof(SortTuple) + 1023) / 1024;
- Size NewKb = (state->memtupsize * sizeof(SortTuple) + 1023) / 1024;
+ Size OldKb = (memtupsize * sizeof(SortTuple) + 1023) / 1024;
+ Size NewKb = (state->memtupsize * sizeof(SortTuple) + 1023) / 1024;
elog(LOG, "grew memtuples %1.2fx from %d (%zu KB) to %d (%zu KB) for final merge",
(double) NewKb / (double) OldKb,
static void
mergebatch(Tuplesortstate *state, int64 spacePerTape)
{
- int srcTape;
+ int srcTape;
Assert(state->activeTapes > 0);
Assert(state->tuples);
*/
if (!state->mergeoverflow[srcTape])
{
- Size tupLen;
+ Size tupLen;
/*
* Mark tuple buffer range for reuse, but be careful to move final,
- * tail tuple to start of space for next run so that it's available
- * to caller when stup is returned, and remains available at least
- * until the next tuple is requested.
+ * tail tuple to start of space for next run so that it's available to
+ * caller when stup is returned, and remains available at least until
+ * the next tuple is requested.
*/
tupLen = state->mergecurrent[srcTape] - state->mergetail[srcTape];
state->mergecurrent[srcTape] = state->mergetuples[srcTape];
state->mergetuples[tapenum] + state->spacePerTape)
{
/*
- * Usual case -- caller is returned pointer into its tape's buffer, and
- * an offset from that point is recorded as where tape has consumed up
- * to for current round of preloading.
+ * Usual case -- caller is returned pointer into its tape's buffer,
+ * and an offset from that point is recorded as where tape has
+ * consumed up to for current round of preloading.
*/
ret = state->mergetail[tapenum] = state->mergecurrent[tapenum];
state->mergecurrent[tapenum] += reserve_tuplen;
if (state->replaceActive)
{
/*
- * Still holding out for a case favorable to replacement selection.
- * Still incrementally spilling using heap.
+ * Still holding out for a case favorable to replacement
+ * selection. Still incrementally spilling using heap.
*
* Dump the heap's frontmost entry, and sift up to remove it from
* the heap.
else
{
/*
- * Once committed to quicksorting runs, never incrementally
- * spill
+ * Once committed to quicksorting runs, never incrementally spill
*/
dumpbatch(state, alltuples);
break;
}
/*
- * If top run number has changed, we've finished the current run
- * (this can only be the first run), and will no longer spill
- * incrementally.
+ * If top run number has changed, we've finished the current run (this
+ * can only be the first run), and will no longer spill incrementally.
*/
if (state->memtupcount == 0 ||
state->memtuples[0].tupindex == HEAP_RUN_NEXT)
state->currentRun, state->destTape,
pg_rusage_show(&state->ru_start));
#endif
+
/*
* Done if heap is empty, which is possible when there is only one
* long run.
* remaining tuples are loaded into memory, just before input was
* exhausted.
*
- * In general, short final runs are quite possible. Rather than
- * allowing a special case where there was a superfluous
- * selectnewtape() call (i.e. a call with no subsequent run actually
- * written to destTape), we prefer to write out a 0 tuple run.
+ * In general, short final runs are quite possible. Rather than allowing
+ * a special case where there was a superfluous selectnewtape() call (i.e.
+ * a call with no subsequent run actually written to destTape), we prefer
+ * to write out a 0 tuple run.
*
* mergepreread()/mergeprereadone() are prepared for 0 tuple runs, and
* will reliably mark the tape inactive for the merge when called from
* beginmerge(). This case is therefore similar to the case where
* mergeonerun() finds a dummy run for the tape, and so doesn't need to
- * merge a run from the tape (or conceptually "merges" the dummy run,
- * if you prefer). According to Knuth, Algorithm D "isn't strictly
- * optimal" in its method of distribution and dummy run assignment;
- * this edge case seems very unlikely to make that appreciably worse.
+ * merge a run from the tape (or conceptually "merges" the dummy run, if
+ * you prefer). According to Knuth, Algorithm D "isn't strictly optimal"
+ * in its method of distribution and dummy run assignment; this edge case
+ * seems very unlikely to make that appreciably worse.
*/
Assert(state->status == TSS_BUILDRUNS);
#endif
/*
- * Sort all tuples accumulated within the allowed amount of memory for this
- * run using quicksort
+ * Sort all tuples accumulated within the allowed amount of memory for
+ * this run using quicksort
*/
tuplesort_sort_memtuples(state);
if (state->batchUsed)
{
/*
- * No USEMEM() call, because during final on-the-fly merge
- * accounting is based on tape-private state. ("Overflow"
- * allocations are detected as an indication that a new round
- * or preloading is required. Preloading marks existing
- * contents of tape's batch buffer for reuse.)
+ * No USEMEM() call, because during final on-the-fly merge accounting
+ * is based on tape-private state. ("Overflow" allocations are
+ * detected as an indication that a new round of preloading is
+ * required. Preloading marks existing contents of tape's batch buffer
+ * for reuse.)
*/
return mergebatchalloc(state, tapenum, tuplen);
}
* ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys
- * (TSS_BUILDRUNS state prevents control reaching here in any
- * case).
+ * (TSS_BUILDRUNS state prevents control reaching here in any case).
*/
for (i = 0; i < state->memtupcount; i++)
{
* ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys
- * (TSS_BUILDRUNS state prevents control reaching here in any
- * case).
+ * (TSS_BUILDRUNS state prevents control reaching here in any case).
*/
for (i = 0; i < state->memtupcount; i++)
{
* ensure a consistent representation (current tuple was just
* handled). It does not matter if some dumped tuples are already
* sorted on tape, since serialized tuples lack abbreviated keys
- * (TSS_BUILDRUNS state prevents control reaching here in any
- * case).
+ * (TSS_BUILDRUNS state prevents control reaching here in any case).
*/
for (i = 0; i < state->memtupcount; i++)
{
* Variables for old snapshot handling are shared among processes and are
* only allowed to move forward.
*/
- slock_t mutex_current; /* protect current_timestamp */
+ slock_t mutex_current; /* protect current_timestamp */
int64 current_timestamp; /* latest snapshot timestamp */
- slock_t mutex_latest_xmin; /* protect latest_xmin
- * and next_map_update
- */
- TransactionId latest_xmin; /* latest snapshot xmin */
- int64 next_map_update; /* latest snapshot valid up to */
- slock_t mutex_threshold; /* protect threshold fields */
+ slock_t mutex_latest_xmin; /* protect latest_xmin and
+ * next_map_update */
+ TransactionId latest_xmin; /* latest snapshot xmin */
+ int64 next_map_update; /* latest snapshot valid up to */
+ slock_t mutex_threshold; /* protect threshold fields */
int64 threshold_timestamp; /* earlier snapshot is old */
- TransactionId threshold_xid; /* earlier xid may be gone */
+ TransactionId threshold_xid; /* earlier xid may be gone */
/*
* Keep one xid per minute for old snapshot error handling.
*
* Persistence is not needed.
*/
- int head_offset; /* subscript of oldest tracked time */
- int64 head_timestamp; /* time corresponding to head xid */
- int count_used; /* how many slots are in use */
+ int head_offset; /* subscript of oldest tracked time */
+ int64 head_timestamp; /* time corresponding to head xid */
+ int count_used; /* how many slots are in use */
TransactionId xid_by_minute[FLEXIBLE_ARRAY_MEMBER];
-} OldSnapshotControlData;
+} OldSnapshotControlData;
static volatile OldSnapshotControlData *oldSnapshotControl;
/*
* Don't allow modification of the active snapshot during parallel
- * operation. We share the snapshot to worker backends at the beginning of
- * parallel operation, so any change to the snapshot can lead to
+ * operation. We share the snapshot to worker backends at the beginning
+ * of parallel operation, so any change to the snapshot can lead to
* inconsistencies. We have other defenses against
* CommandCounterIncrement, but there are a few places that call this
* directly, so we put an additional guard here.
}
ts = AlignTimestampToMinuteBoundary(ts)
- - (old_snapshot_threshold * USECS_PER_MINUTE);
+ - (old_snapshot_threshold * USECS_PER_MINUTE);
/* Check for fast exit without LW locking. */
SpinLockAcquire(&oldSnapshotControl->mutex_threshold);
if (oldSnapshotControl->count_used > 0
&& ts >= oldSnapshotControl->head_timestamp)
{
- int offset;
+ int offset;
offset = ((ts - oldSnapshotControl->head_timestamp)
/ USECS_PER_MINUTE);
if (offset > oldSnapshotControl->count_used - 1)
offset = oldSnapshotControl->count_used - 1;
offset = (oldSnapshotControl->head_offset + offset)
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
xlimit = oldSnapshotControl->xid_by_minute[offset];
if (NormalTransactionIdFollows(xlimit, recentXmin))
ts = AlignTimestampToMinuteBoundary(whenTaken);
/*
- * Keep track of the latest xmin seen by any process. Update mapping
- * with a new value when we have crossed a bucket boundary.
+ * Keep track of the latest xmin seen by any process. Update mapping with
+ * a new value when we have crossed a bucket boundary.
*/
SpinLockAcquire(&oldSnapshotControl->mutex_latest_xmin);
latest_xmin = oldSnapshotControl->latest_xmin;
if (whenTaken < 0)
{
elog(DEBUG1,
- "MaintainOldSnapshotTimeMapping called with negative whenTaken = %ld",
+ "MaintainOldSnapshotTimeMapping called with negative whenTaken = %ld",
(long) whenTaken);
return;
}
* USECS_PER_MINUTE)))
{
/* existing mapping; advance xid if possible */
- int bucket = (oldSnapshotControl->head_offset
- + ((ts - oldSnapshotControl->head_timestamp)
- / USECS_PER_MINUTE))
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ int bucket = (oldSnapshotControl->head_offset
+ + ((ts - oldSnapshotControl->head_timestamp)
+ / USECS_PER_MINUTE))
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin))
oldSnapshotControl->xid_by_minute[bucket] = xmin;
else
{
/* We need a new bucket, but it might not be the very next one. */
- int advance = ((ts - oldSnapshotControl->head_timestamp)
- / USECS_PER_MINUTE);
+ int advance = ((ts - oldSnapshotControl->head_timestamp)
+ / USECS_PER_MINUTE);
oldSnapshotControl->head_timestamp = ts;
else
{
/* Store the new value in one or more buckets. */
- int i;
+ int i;
for (i = 0; i < advance; i++)
{
if (oldSnapshotControl->count_used == OLD_SNAPSHOT_TIME_MAP_ENTRIES)
{
/* Map full and new value replaces old head. */
- int old_head = oldSnapshotControl->head_offset;
+ int old_head = oldSnapshotControl->head_offset;
if (old_head == (OLD_SNAPSHOT_TIME_MAP_ENTRIES - 1))
oldSnapshotControl->head_offset = 0;
else
{
/* Extend map to unused entry. */
- int new_tail = (oldSnapshotControl->head_offset
- + oldSnapshotControl->count_used)
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ int new_tail = (oldSnapshotControl->head_offset
+ + oldSnapshotControl->count_used)
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
oldSnapshotControl->count_used++;
oldSnapshotControl->xid_by_minute[new_tail] = xmin;
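The map above keeps one xid per minute in a circular array: the bucket for a minute-aligned timestamp is the number of minutes since head_timestamp, offset by head_offset and taken modulo the number of entries; when the map is full, the head advances and the oldest entry is overwritten. A simplified standalone sketch of that index arithmetic, with an illustrative (much smaller) entry count:

#include <stdio.h>
#include <stdint.h>

#define MAP_ENTRIES 10                          /* illustrative; the real map is larger */
#define USECS_PER_MINUTE ((int64_t) 60 * 1000 * 1000)

typedef struct
{
    int      head_offset;                       /* subscript of the oldest tracked minute */
    int64_t  head_timestamp;                    /* timestamp of that minute */
    int      count_used;                        /* buckets currently in use */
    uint32_t xid_by_minute[MAP_ENTRIES];
} TimeMap;

/* Return the bucket for a minute-aligned timestamp already covered by the map. */
static int
bucket_for(const TimeMap *map, int64_t ts)
{
    int64_t minutes = (ts - map->head_timestamp) / USECS_PER_MINUTE;

    return (int) ((map->head_offset + minutes) % MAP_ENTRIES);
}

int main(void)
{
    TimeMap map = {.head_offset = 7, .head_timestamp = 0, .count_used = MAP_ENTRIES};

    /* 5 minutes after the head wraps from subscript 7 around to 2. */
    printf("bucket = %d\n", bucket_for(&map, 5 * USECS_PER_MINUTE));
    return 0;
}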
{
/*
* Truncation is essentially harmless, because we skip names of
- * length other than XLOG_FNAME_LEN. (In principle, one
- * could use a 1000-character additional_ext and get trouble.)
+ * length other than XLOG_FNAME_LEN. (In principle, one could use
+ * a 1000-character additional_ext and get trouble.)
*/
strlcpy(walfile, xlde->d_name, MAXPGPATH);
TrimExtension(walfile, additional_ext);
int r;
#else
DWORD status;
+
/*
* get a pointer sized version of bgchild to avoid warnings about
* casting to a different size on WIN64.
/*
* Obtain a connection to server. This is not really necessary but it
- * helps to get more precise error messages about authentication,
- * required GUC parameters and such.
+ * helps to get more precise error messages about authentication, required
+ * GUC parameters and such.
*/
conn = GetConnection();
if (!conn)
if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
{
/*
- * Most Windows targets make DWORD a 32-bit unsigned long, but
- * in case it doesn't cast DWORD before printing.
+ * Most Windows targets make DWORD a 32-bit unsigned long, but in case
+ * it doesn't, cast DWORD before printing.
*/
write_stderr(_("%s: could not open process token: error code %lu\n"),
progname, (unsigned long) GetLastError());
* and is what the current privileges are).
*
* We always perform this delta on all ACLs and expect that by the time
- * these are run the initial privileges will be in place, even in a
- * binary upgrade situation (see below).
+ * these are run the initial privileges will be in place, even in a binary
+ * upgrade situation (see below).
*/
printfPQExpBuffer(acl_subquery, "(SELECT pg_catalog.array_agg(acl) FROM "
"(SELECT pg_catalog.unnest(coalesce(%s,pg_catalog.acldefault(%s,%s))) AS acl "
"EXCEPT "
- "SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s)))) as foo)",
+ "SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s)))) as foo)",
acl_column,
obj_kind,
acl_owner,
acl_owner);
printfPQExpBuffer(racl_subquery, "(SELECT pg_catalog.array_agg(acl) FROM "
- "(SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s))) AS acl "
+ "(SELECT pg_catalog.unnest(coalesce(pip.initprivs,pg_catalog.acldefault(%s,%s))) AS acl "
"EXCEPT "
- "SELECT pg_catalog.unnest(coalesce(%s,pg_catalog.acldefault(%s,%s)))) as foo)",
+ "SELECT pg_catalog.unnest(coalesce(%s,pg_catalog.acldefault(%s,%s)))) as foo)",
obj_kind,
acl_owner,
acl_column,
"(SELECT pg_catalog.array_agg(acl) FROM "
"(SELECT pg_catalog.unnest(pip.initprivs) AS acl "
"EXCEPT "
- "SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s))) as foo) END",
+ "SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s))) as foo) END",
obj_kind,
acl_owner);
printfPQExpBuffer(init_racl_subquery,
"CASE WHEN privtype = 'e' THEN "
"(SELECT pg_catalog.array_agg(acl) FROM "
- "(SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s)) AS acl "
+ "(SELECT pg_catalog.unnest(pg_catalog.acldefault(%s,%s)) AS acl "
"EXCEPT "
- "SELECT pg_catalog.unnest(pip.initprivs)) as foo) END",
+ "SELECT pg_catalog.unnest(pip.initprivs)) as foo) END",
obj_kind,
acl_owner);
}
dobj->dump = DUMP_COMPONENT_NONE;
else
dobj->dump = ext->dobj.dump_contains & (DUMP_COMPONENT_ACL |
- DUMP_COMPONENT_SECLABEL | DUMP_COMPONENT_POLICY);
+ DUMP_COMPONENT_SECLABEL | DUMP_COMPONENT_POLICY);
}
selectDumpableExtension(ExtensionInfo *extinfo, DumpOptions *dopt)
{
/*
- * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users
- * to change permissions on those objects, if they wish to, and have
- * those changes preserved.
+ * Use DUMP_COMPONENT_ACL for from-initdb extensions, to allow users to
+ * change permissions on those objects, if they wish to, and have those
+ * changes preserved.
*/
if (dopt->binary_upgrade && extinfo->dobj.catId.oid < (Oid) FirstNormalObjectId)
extinfo->dobj.dump = extinfo->dobj.dump_contains = DUMP_COMPONENT_ACL;
else
extinfo->dobj.dump = extinfo->dobj.dump_contains =
- dopt->include_everything ? DUMP_COMPONENT_ALL :
- DUMP_COMPONENT_NONE;
+ dopt->include_everything ? DUMP_COMPONENT_ALL :
+ DUMP_COMPONENT_NONE;
}
/*
dopt->binary_upgrade);
buildACLQueries(attacl_subquery, attracl_subquery, attinitacl_subquery,
- attinitracl_subquery, "at.attacl", "c.relowner", "'c'",
+ attinitracl_subquery, "at.attacl", "c.relowner", "'c'",
dopt->binary_upgrade);
appendPQExpBuffer(query,
/*
* If the table-level and all column-level ACLs for this table are
- * unchanged, then we don't need to worry about including the ACLs
- * for this table. If any column-level ACLs have been changed, the
+ * unchanged, then we don't need to worry about including the ACLs for
+ * this table. If any column-level ACLs have been changed, the
* 'changed_acl' column from the query will indicate that.
*
* This can result in a significant performance improvement in cases
* NOTE: it'd be kinda nice to lock other relations too, not only
* plain tables, but the backend doesn't presently allow that.
*
- * We only need to lock the table for certain components; see pg_dump.h
+ * We only need to lock the table for certain components; see
+ * pg_dump.h
*/
if (tblinfo[i].dobj.dump && tblinfo[i].relkind == RELKIND_RELATION &&
- (tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK))
+ (tblinfo[i].dobj.dump & DUMP_COMPONENTS_REQUIRING_LOCK))
{
resetPQExpBuffer(query);
appendPQExpBuffer(query,
"FROM pg_foreign_data_wrapper f "
"LEFT JOIN pg_init_privs pip ON "
"(f.oid = pip.objoid "
- "AND pip.classoid = 'pg_foreign_data_wrapper'::regclass "
+ "AND pip.classoid = 'pg_foreign_data_wrapper'::regclass "
"AND pip.objsubid = 0) ",
username_subquery,
acl_subquery->data,
if (fout->remoteVersion >= 90600)
{
appendPQExpBuffer(query, "SELECT aggtransfn, "
- "aggfinalfn, aggtranstype::pg_catalog.regtype, "
- "aggcombinefn, aggserialfn, aggdeserialfn, aggmtransfn, "
- "aggminvtransfn, aggmfinalfn, aggmtranstype::pg_catalog.regtype, "
- "aggfinalextra, aggmfinalextra, "
- "aggsortop::pg_catalog.regoperator, "
- "aggserialtype::pg_catalog.regtype, "
- "(aggkind = 'h') AS hypothetical, "
- "aggtransspace, agginitval, "
- "aggmtransspace, aggminitval, "
- "true AS convertok, "
- "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
- "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs, "
- "p.proparallel "
- "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
- "WHERE a.aggfnoid = p.oid "
- "AND p.oid = '%u'::pg_catalog.oid",
- agginfo->aggfn.dobj.catId.oid);
+ "aggfinalfn, aggtranstype::pg_catalog.regtype, "
+ "aggcombinefn, aggserialfn, aggdeserialfn, aggmtransfn, "
+ "aggminvtransfn, aggmfinalfn, aggmtranstype::pg_catalog.regtype, "
+ "aggfinalextra, aggmfinalextra, "
+ "aggsortop::pg_catalog.regoperator, "
+ "aggserialtype::pg_catalog.regtype, "
+ "(aggkind = 'h') AS hypothetical, "
+ "aggtransspace, agginitval, "
+ "aggmtransspace, aggminitval, "
+ "true AS convertok, "
+ "pg_catalog.pg_get_function_arguments(p.oid) AS funcargs, "
+ "pg_catalog.pg_get_function_identity_arguments(p.oid) AS funciargs, "
+ "p.proparallel "
+ "FROM pg_catalog.pg_aggregate a, pg_catalog.pg_proc p "
+ "WHERE a.aggfnoid = p.oid "
+ "AND p.oid = '%u'::pg_catalog.oid",
+ agginfo->aggfn.dobj.catId.oid);
}
else if (fout->remoteVersion >= 90400)
{
appendPQExpBuffer(query, "SELECT aggtransfn, "
"aggfinalfn, aggtranstype::pg_catalog.regtype, "
"'-' AS aggcombinefn, '-' AS aggserialfn, "
- "'-' AS aggdeserialfn, aggmtransfn, aggminvtransfn, "
+ "'-' AS aggdeserialfn, aggmtransfn, aggminvtransfn, "
"aggmfinalfn, aggmtranstype::pg_catalog.regtype, "
"aggfinalextra, aggmfinalextra, "
"aggsortop::pg_catalog.regoperator, "
if (strcmp(aggcombinefn, "-") != 0)
{
- appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn);
+ appendPQExpBuffer(details, ",\n COMBINEFUNC = %s", aggcombinefn);
}
/*
*/
if (strcmp(aggserialfn, "-") != 0)
{
- appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn);
- appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn);
- appendPQExpBuffer(details, ",\n SERIALTYPE = %s", aggserialtype);
+ appendPQExpBuffer(details, ",\n SERIALFUNC = %s", aggserialfn);
+ appendPQExpBuffer(details, ",\n DESERIALFUNC = %s", aggdeserialfn);
+ appendPQExpBuffer(details, ",\n SERIALTYPE = %s", aggserialtype);
}
if (strcmp(aggmtransfn, "-") != 0)
dumpSecLabel(fout, labelq->data,
agginfo->aggfn.dobj.namespace->dobj.name,
agginfo->aggfn.rolname,
- agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
+ agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
/*
* Since there is no GRANT ON AGGREGATE syntax, we have to make the ACL
"JOIN pg_catalog.pg_class c ON (at.attrelid = c.oid) "
"LEFT JOIN pg_catalog.pg_init_privs pip ON "
"(at.attrelid = pip.objoid "
- "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass "
+ "AND pip.classoid = 'pg_catalog.pg_class'::pg_catalog.regclass "
"AND at.attnum = pip.objsubid) "
"WHERE at.attrelid = '%u'::pg_catalog.oid AND "
"NOT at.attisdropped "
"SELECT attname, attacl, NULL as rattacl, "
"NULL AS initattacl, NULL AS initrattacl "
"FROM pg_catalog.pg_attribute "
- "WHERE attrelid = '%u'::pg_catalog.oid AND NOT attisdropped "
+ "WHERE attrelid = '%u'::pg_catalog.oid AND NOT attisdropped "
"AND attacl IS NOT NULL "
"ORDER BY attnum",
tbinfo->dobj.catId.oid);
if (typeInfo->shellType)
{
addObjectDependency(funcobj, typeInfo->shellType->dobj.dumpId);
+
/*
- * Mark shell type (always including the definition, as we need
- * the shell type defined to identify the function fully) as to be
- * dumped if any such function is
+ * Mark shell type (always including the definition, as we need the
+ * shell type defined to identify the function fully) as to be dumped
+ * if any such function is
*/
if (funcobj->dump)
typeInfo->shellType->dobj.dump = funcobj->dump |
- DUMP_COMPONENT_DEFINITION;
+ DUMP_COMPONENT_DEFINITION;
}
}
auth_oid = atooid(PQgetvalue(res, i, i_oid));
rolename = PQgetvalue(res, i, i_rolname);
- if (strncmp(rolename,"pg_",3) == 0)
+ if (strncmp(rolename, "pg_", 3) == 0)
{
fprintf(stderr, _("%s: role name starting with \"pg_\" skipped (%s)\n"),
progname, rolename);
"LEFT JOIN pg_authid ur on ur.oid = a.roleid "
"LEFT JOIN pg_authid um on um.oid = a.member "
"LEFT JOIN pg_authid ug on ug.oid = a.grantor "
- "WHERE NOT (ur.rolname ~ '^pg_' AND um.rolname ~ '^pg_')"
+ "WHERE NOT (ur.rolname ~ '^pg_' AND um.rolname ~ '^pg_')"
"ORDER BY 1,2,3");
if (PQntuples(res) > 0)
return;
/*
- * Pretend that pg_xlog is a directory, even if it's really a symlink.
- * We don't want to mess with the symlink itself, nor complain if it's a
+ * Pretend that pg_xlog is a directory, even if it's really a symlink. We
+ * don't want to mess with the symlink itself, nor complain if it's a
* symlink in source but not in target or vice versa.
*/
if (strcmp(path, "pg_xlog") == 0 && type == FILE_TYPE_SYMLINK)
if (PQgetisnull(res, 0, 2))
{
pg_log(PG_DEBUG,
- "received null value for chunk for file \"%s\", file has been deleted\n",
+ "received null value for chunk for file \"%s\", file has been deleted\n",
filename);
pg_free(filename);
PQclear(res);
/*
* Since incomplete segments are copied into next timelines, switch to
- * the timeline holding the required segment. Assuming this scan can be
- * done both forward and backward, consider also switching timeline
+ * the timeline holding the required segment. Assuming this scan can
+ * be done both forward and backward, consider also switching timeline
* accordingly.
*/
while (private->tliIndex < targetNentries - 1 &&
- targetHistory[private->tliIndex].end < targetSegEnd)
+ targetHistory[private->tliIndex].end < targetSegEnd)
private->tliIndex++;
while (private->tliIndex > 0 &&
- targetHistory[private->tliIndex].begin >= targetSegEnd)
+ targetHistory[private->tliIndex].begin >= targetSegEnd)
private->tliIndex--;
XLogFileName(xlogfname, targetHistory[private->tliIndex].tli, xlogreadsegno);
/* Target history */
TimeLineHistoryEntry *targetHistory;
-int targetNentries;
+int targetNentries;
static void
usage(const char *progname)
targetHistory[lastcommontliIndex].tli);
/*
- * Check for the possibility that the target is in fact a direct ancestor
- * of the source. In that case, there is no divergent history in the
- * target that needs rewinding.
+ * Check for the possibility that the target is in fact a direct
+ * ancestor of the source. In that case, there is no divergent history
+ * in the target that needs rewinding.
*/
if (ControlFile_target.checkPoint >= divergerec)
{
/*
* If the histories diverged exactly at the end of the shutdown
- * checkpoint record on the target, there are no WAL records in the
- * target that don't belong in the source's history, and no rewind is
- * needed.
+ * checkpoint record on the target, there are no WAL records in
+ * the target that don't belong in the source's history, and no
+ * rewind is needed.
*/
if (chkptendrec == divergerec)
rewind_needed = false;
static TimeLineHistoryEntry *
getTimelineHistory(ControlFileData *controlFile, int *nentries)
{
- TimeLineHistoryEntry *history;
- TimeLineID tli;
+ TimeLineHistoryEntry *history;
+ TimeLineID tli;
tli = controlFile->checkPointCopy.ThisTimeLineID;
/*
- * Timeline 1 does not have a history file, so there is no need to check and
- * fake an entry with infinite start and end positions.
+ * Timeline 1 does not have a history file, so there is no need to check
+ * and fake an entry with infinite start and end positions.
*/
if (tli == 1)
{
if (debug)
{
- int i;
+ int i;
if (controlFile == &ControlFile_source)
pg_log(PG_DEBUG, "Source timeline history:\n");
{
TimeLineHistoryEntry *sourceHistory;
int sourceNentries;
- int i, n;
+ int i,
+ n;
/* Retrieve timelines for both source and target */
sourceHistory = getTimelineHistory(&ControlFile_source, &sourceNentries);
static void
syncTargetDirectory(const char *argv0)
{
- int ret;
+ int ret;
#define MAXCMDLEN (2 * MAXPGPATH)
- char exec_path[MAXPGPATH];
- char cmd[MAXCMDLEN];
+ char exec_path[MAXPGPATH];
+ char cmd[MAXCMDLEN];
/* locate initdb binary */
if ((ret = find_other_exec(argv0, "initdb",
"initdb (PostgreSQL) " PG_VERSION "\n",
exec_path)) < 0)
{
- char full_path[MAXPGPATH];
+ char full_path[MAXPGPATH];
if (find_my_exec(argv0, full_path) < 0)
strlcpy(full_path, progname, sizeof(full_path));
/* Target history */
extern TimeLineHistoryEntry *targetHistory;
-extern int targetNentries;
+extern int targetNentries;
/* in parsexlog.c */
extern void extractPageMap(const char *datadir, XLogRecPtr startpoint,
deletion_script_file_name);
else
pg_log(PG_REPORT,
- "Could not create a script to delete the old cluster's data files\n"
- "because user-defined tablespaces or the new cluster's data directory\n"
- "exist in the old cluster directory. The old cluster's contents must\n"
- "be deleted manually.\n");
+ "Could not create a script to delete the old cluster's data files\n"
+ "because user-defined tablespaces or the new cluster's data directory\n"
+ "exist in the old cluster directory. The old cluster's contents must\n"
+ "be deleted manually.\n");
}
{
FILE *script = NULL;
int tblnum;
- char old_cluster_pgdata[MAXPGPATH], new_cluster_pgdata[MAXPGPATH];
+ char old_cluster_pgdata[MAXPGPATH],
+ new_cluster_pgdata[MAXPGPATH];
*deletion_script_file_name = psprintf("%sdelete_old_cluster.%s",
SCRIPT_PREFIX, SCRIPT_EXT);
if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata))
{
pg_log(PG_WARNING,
- "\nWARNING: new data directory should not be inside the old data directory, e.g. %s\n", old_cluster_pgdata);
+ "\nWARNING: new data directory should not be inside the old data directory, e.g. %s\n", old_cluster_pgdata);
/* Unlink file in case it is left over from a previous run. */
unlink(*deletion_script_file_name);
/*
* Delimiter changed from '/' to ':' in 9.6. We don't test for
* the catalog version of the change because the catalog version
- * is pulled from pg_controldata too, and it isn't worth adding
- * an order dependency for this --- we just check the string.
+ * is pulled from pg_controldata too, and it isn't worth adding an
+ * order dependency for this --- we just check the string.
*/
if (strchr(p, '/') != NULL)
p = strchr(p, '/');
copyFile(const char *src, const char *dst)
{
#ifndef WIN32
- if (copy_file(src, dst) == -1)
+ if (copy_file(src, dst) == -1)
#else
- if (CopyFile(src, dst, true) == 0)
+ if (CopyFile(src, dst, true) == 0)
#endif
- return getErrorText();
- else
- return NULL;
+ return getErrorText();
+ else
+ return NULL;
}
/*
* We must update databases where datallowconn = false, e.g.
* template0, because autovacuum increments their datfrozenxids,
- * relfrozenxids, and relminmxid even if autovacuum is turned off,
- * and even though all the data rows are already frozen. To enable
- * this, we temporarily change datallowconn.
+ * relfrozenxids, and relminmxid even if autovacuum is turned off, and
+ * even though all the data rows are already frozen. To enable this,
+ * we temporarily change datallowconn.
*/
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(conn_template1,
else
report_status(PG_FATAL,
"cannot stat() tablespace directory \"%s\": %s\n",
- os_info.old_tablespaces[tblnum], getErrorText());
+ os_info.old_tablespaces[tblnum], getErrorText());
}
if (!S_ISDIR(statBuf.st_mode))
report_status(PG_FATAL,
setIntValue(&var->num_value, strtoint64(var->value));
var->is_numeric = true;
}
- else /* type should be double */
+ else /* type should be double */
{
- double dv;
- char xs;
+ double dv;
+ char xs;
if (sscanf(var->value, "%lf%c", &dv, &xs) != 1)
{
}
else
{
- double dval = pval->u.dval;
+ double dval = pval->u.dval;
+
Assert(pval->type == PGBT_DOUBLE);
if (dval < PG_INT64_MIN || PG_INT64_MAX < dval)
{
PgBenchFunction func, PgBenchExprLink *args, PgBenchValue *retval)
{
/* evaluate all function arguments */
- int nargs = 0;
- PgBenchValue vargs[MAX_FARGS];
+ int nargs = 0;
+ PgBenchValue vargs[MAX_FARGS];
PgBenchExprLink *l = args;
for (nargs = 0; nargs < MAX_FARGS && l != NULL; nargs++, l = l->next)
/* then evaluate function */
switch (func)
{
- /* overloaded operators */
+ /* overloaded operators */
case PGBENCH_ADD:
case PGBENCH_SUB:
case PGBENCH_MUL:
case PGBENCH_DIV:
case PGBENCH_MOD:
{
- PgBenchValue *lval = &vargs[0],
- *rval = &vargs[1];
+ PgBenchValue *lval = &vargs[0],
+ *rval = &vargs[1];
+
Assert(nargs == 2);
/* overloaded type management, double if some double */
if ((lval->type == PGBT_DOUBLE ||
rval->type == PGBT_DOUBLE) && func != PGBENCH_MOD)
{
- double ld, rd;
+ double ld,
+ rd;
if (!coerceToDouble(lval, &ld) ||
!coerceToDouble(rval, &rd))
Assert(0);
}
}
- else /* we have integer operands, or % */
+ else /* we have integer operands, or % */
{
- int64 li, ri;
+ int64 li,
+ ri;
if (!coerceToInt(lval, &li) ||
!coerceToInt(rval, &ri))
return false;
}
else
- setIntValue(retval, - li);
+ setIntValue(retval, -li);
}
else
setIntValue(retval, 0);
/* else divisor is not -1 */
if (func == PGBENCH_DIV)
setIntValue(retval, li / ri);
- else /* func == PGBENCH_MOD */
+ else /* func == PGBENCH_MOD */
setIntValue(retval, li % ri);
return true;
}
}
- /* no arguments */
+ /* no arguments */
case PGBENCH_PI:
setDoubleValue(retval, M_PI);
return true;
- /* 1 overloaded argument */
+ /* 1 overloaded argument */
case PGBENCH_ABS:
{
PgBenchValue *varg = &vargs[0];
+
Assert(nargs == 1);
if (varg->type == PGBT_INT)
{
- int64 i = varg->u.ival;
+ int64 i = varg->u.ival;
+
setIntValue(retval, i < 0 ? -i : i);
}
else
{
- double d = varg->u.dval;
+ double d = varg->u.dval;
+
Assert(varg->type == PGBT_DOUBLE);
- setDoubleValue(retval, d < 0.0 ? -d: d);
+ setDoubleValue(retval, d < 0.0 ? -d : d);
}
return true;
case PGBENCH_DEBUG:
{
PgBenchValue *varg = &vargs[0];
+
Assert(nargs == 1);
- fprintf(stderr, "debug(script=%d,command=%d): ",
- st->use_file, st->state+1);
+ fprintf(stderr, "debug(script=%d,command=%d): ",
+ st->use_file, st->state + 1);
if (varg->type == PGBT_INT)
- fprintf(stderr, "int "INT64_FORMAT"\n", varg->u.ival);
+ fprintf(stderr, "int " INT64_FORMAT "\n", varg->u.ival);
else
{
Assert(varg->type == PGBT_DOUBLE);
return true;
}
- /* 1 double argument */
+ /* 1 double argument */
case PGBENCH_DOUBLE:
case PGBENCH_SQRT:
{
- double dval;
+ double dval;
+
Assert(nargs == 1);
if (!coerceToDouble(&vargs[0], &dval))
return true;
}
- /* 1 int argument */
+ /* 1 int argument */
case PGBENCH_INT:
{
- int64 ival;
+ int64 ival;
+
Assert(nargs == 1);
if (!coerceToInt(&vargs[0], &ival))
return true;
}
- /* variable number of arguments */
+ /* variable number of arguments */
case PGBENCH_LEAST:
case PGBENCH_GREATEST:
{
return true;
}
- /* random functions */
+ /* random functions */
case PGBENCH_RANDOM:
case PGBENCH_RANDOM_EXPONENTIAL:
case PGBENCH_RANDOM_GAUSSIAN:
- {
- int64 imin, imax;
- Assert(nargs >= 2);
-
- if (!coerceToInt(&vargs[0], &imin) ||
- !coerceToInt(&vargs[1], &imax))
- return false;
-
- /* check random range */
- if (imin > imax)
{
- fprintf(stderr, "empty range given to random\n");
- return false;
- }
- else if (imax - imin < 0 || (imax - imin) + 1 < 0)
- {
- /* prevent int overflows in random functions */
- fprintf(stderr, "random range is too large\n");
- return false;
- }
+ int64 imin,
+ imax;
- if (func == PGBENCH_RANDOM)
- {
- Assert(nargs == 2);
- setIntValue(retval, getrand(thread, imin, imax));
- }
- else /* gaussian & exponential */
- {
- double param;
- Assert(nargs == 3);
+ Assert(nargs >= 2);
- if (!coerceToDouble(&vargs[2], &param))
+ if (!coerceToInt(&vargs[0], &imin) ||
+ !coerceToInt(&vargs[1], &imax))
return false;
- if (func == PGBENCH_RANDOM_GAUSSIAN)
+ /* check random range */
+ if (imin > imax)
{
- if (param < MIN_GAUSSIAN_PARAM)
- {
- fprintf(stderr,
- "gaussian parameter must be at least %f "
- "(not %f)\n", MIN_GAUSSIAN_PARAM, param);
- return false;
- }
+ fprintf(stderr, "empty range given to random\n");
+ return false;
+ }
+ else if (imax - imin < 0 || (imax - imin) + 1 < 0)
+ {
+ /* prevent int overflows in random functions */
+ fprintf(stderr, "random range is too large\n");
+ return false;
+ }
- setIntValue(retval,
- getGaussianRand(thread, imin, imax, param));
+ if (func == PGBENCH_RANDOM)
+ {
+ Assert(nargs == 2);
+ setIntValue(retval, getrand(thread, imin, imax));
}
- else /* exponential */
+ else /* gaussian & exponential */
{
- if (param <= 0.0)
- {
- fprintf(stderr,
- "exponential parameter must be greater than zero"
- " (got %f)\n", param);
+ double param;
+
+ Assert(nargs == 3);
+
+ if (!coerceToDouble(&vargs[2], &param))
return false;
+
+ if (func == PGBENCH_RANDOM_GAUSSIAN)
+ {
+ if (param < MIN_GAUSSIAN_PARAM)
+ {
+ fprintf(stderr,
+ "gaussian parameter must be at least %f "
+ "(not %f)\n", MIN_GAUSSIAN_PARAM, param);
+ return false;
+ }
+
+ setIntValue(retval,
+ getGaussianRand(thread, imin, imax, param));
}
+ else /* exponential */
+ {
+ if (param <= 0.0)
+ {
+ fprintf(stderr,
+ "exponential parameter must be greater than zero"
+ " (got %f)\n", param);
+ return false;
+ }
- setIntValue(retval,
- getExponentialRand(thread, imin, imax, param));
+ setIntValue(retval,
+ getExponentialRand(thread, imin, imax, param));
+ }
}
- }
- return true;
- }
+ return true;
+ }
default:
/* cannot get here */
if (pg_strcasecmp(argv[0], "set") == 0)
{
PgBenchExpr *expr = commands[st->state]->expr;
- PgBenchValue result;
+ PgBenchValue result;
if (!evaluateExpr(thread, st, expr, &result))
{
doLog(TState *thread, CState *st, instr_time *now,
StatsData *agg, bool skipped, double latency, double lag)
{
- FILE *logfile = thread->logfile;
+ FILE *logfile = thread->logfile;
Assert(use_log);
if (var->is_numeric)
{
if (!putVariableNumber(&state[i], "startup",
- var->name, &var->num_value))
+ var->name, &var->num_value))
exit(1);
}
else
thread->random_state[0] = random();
thread->random_state[1] = random();
thread->random_state[2] = random();
- thread->logfile = NULL; /* filled in later */
+ thread->logfile = NULL; /* filled in later */
thread->latency_late = 0;
initStats(&thread->stats, 0.0);
/* compute when to stop */
if (duration > 0)
end_time = INSTR_TIME_GET_MICROSEC(thread->start_time) +
- (int64) 1000000 * duration;
+ (int64) 1000000 *duration;
/* the first thread (i = 0) is executed by main thread */
if (i > 0)
/* compute when to stop */
if (duration > 0)
end_time = INSTR_TIME_GET_MICROSEC(threads[0].start_time) +
- (int64) 1000000 * duration;
+ (int64) 1000000 *duration;
threads[0].thread = INVALID_THREAD;
#endif /* ENABLE_THREAD_SAFETY */
PgBenchValueType type;
union
{
- int64 ival;
- double dval;
+ int64 ival;
+ double dval;
/* add other types here */
- } u;
+ } u;
} PgBenchValue;
/* Types of expression nodes */
PgBenchExprType etype;
union
{
- PgBenchValue constant;
+ PgBenchValue constant;
struct
{
char *varname;
static bool printCrosstab(const PGresult *results,
- int num_columns, pivot_field *piv_columns, int field_for_columns,
+ int num_columns, pivot_field *piv_columns, int field_for_columns,
int num_rows, pivot_field *piv_rows, int field_for_rows,
int field_for_data);
static void avlInit(avl_tree *tree);
*/
static bool
printCrosstab(const PGresult *results,
- int num_columns, pivot_field *piv_columns, int field_for_columns,
+ int num_columns, pivot_field *piv_columns, int field_for_columns,
int num_rows, pivot_field *piv_rows, int field_for_rows,
int field_for_data)
{
if (cont.cells[idx] != NULL)
{
psql_error("\\crosstabview: query result contains multiple data values for row \"%s\", column \"%s\"\n",
- piv_rows[row_number].name ? piv_rows[row_number].name :
- popt.nullPrint ? popt.nullPrint : "(null)",
- piv_columns[col_number].name ? piv_columns[col_number].name :
- popt.nullPrint ? popt.nullPrint : "(null)");
+ piv_rows[row_number].name ? piv_rows[row_number].name :
+ popt.nullPrint ? popt.nullPrint : "(null)",
+ piv_columns[col_number].name ? piv_columns[col_number].name :
+ popt.nullPrint ? popt.nullPrint : "(null)");
goto error;
}
if (cmp != 0)
{
avlInsertNode(tree,
- cmp > 0 ? &current->children[1] : &current->children[0],
+ cmp > 0 ? &current->children[1] : &current->children[0],
field);
avlAdjustBalance(tree, node);
}
fprintf(output, _(" -f, --file=FILENAME execute commands from file, then exit\n"));
fprintf(output, _(" -l, --list list available databases, then exit\n"));
fprintf(output, _(" -v, --set=, --variable=NAME=VALUE\n"
- " set psql variable NAME to VALUE\n"
- " (e.g., -v ON_ERROR_STOP=1)\n"));
+ " set psql variable NAME to VALUE\n"
+ " (e.g., -v ON_ERROR_STOP=1)\n"));
fprintf(output, _(" -V, --version output version information, then exit\n"));
fprintf(output, _(" -X, --no-psqlrc do not read startup file (~/.psqlrc)\n"));
fprintf(output, _(" -1 (\"one\"), --single-transaction\n"
case 'p':
if (pset.db)
{
- int pid = PQbackendPID(pset.db);
+ int pid = PQbackendPID(pset.db);
+
if (pid)
snprintf(buf, sizeof(buf), "%d", pid);
}
/* ALTER SERVER <name> */
else if (Matches3("ALTER", "SERVER", MatchAny))
COMPLETE_WITH_LIST4("VERSION", "OPTIONS", "OWNER TO", "RENAME TO");
- /* ALTER SERVER <name> VERSION <version>*/
+ /* ALTER SERVER <name> VERSION <version> */
else if (Matches5("ALTER", "SERVER", MatchAny, "VERSION", MatchAny))
COMPLETE_WITH_CONST("OPTIONS");
/* ALTER SYSTEM SET, RESET, RESET ALL */
/* First off we complete CREATE UNIQUE with "INDEX" */
else if (TailMatches2("CREATE", "UNIQUE"))
COMPLETE_WITH_CONST("INDEX");
- /* If we have CREATE|UNIQUE INDEX, then add "ON", "CONCURRENTLY",
- and existing indexes */
+
+ /*
+ * If we have CREATE|UNIQUE INDEX, then add "ON", "CONCURRENTLY", and
+ * existing indexes
+ */
else if (TailMatches2("CREATE|UNIQUE", "INDEX"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
" UNION SELECT 'ON'"
else if (TailMatches3("INDEX|CONCURRENTLY", MatchAny, "ON") ||
TailMatches2("INDEX|CONCURRENTLY", "ON"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
- /* Complete CREATE|UNIQUE INDEX CONCURRENTLY with "ON" and existing indexes */
+
+ /*
+ * Complete CREATE|UNIQUE INDEX CONCURRENTLY with "ON" and existing
+ * indexes
+ */
else if (TailMatches3("CREATE|UNIQUE", "INDEX", "CONCURRENTLY"))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes,
" UNION SELECT 'ON'");
static int list_index,
byte_length;
static PGresult *result = NULL;
+
/*
* If this is the first time for this completion, we fetch a list of our
* "things" from the backend.
list_index = 0;
byte_length = strlen(text);
- /* Count length as number of characters (not bytes), for passing to substring */
+ /*
+ * Count length as number of characters (not bytes), for passing to
+ * substring
+ */
while (*pstr)
{
char_length++;
ConfigData *
get_configdata(const char *my_exec_path, size_t *configdata_len)
{
- ConfigData *configdata;
- char path[MAXPGPATH];
- char *lastsep;
- int i = 0;
+ ConfigData *configdata;
+ char path[MAXPGPATH];
+ char *lastsep;
+ int i = 0;
/* Adjust this to match the number of items filled below */
*configdata_len = 23;
ControlFileData *
get_controlfile(char *DataDir, const char *progname)
{
- ControlFileData *ControlFile;
- int fd;
- char ControlFilePath[MAXPGPATH];
- pg_crc32c crc;
+ ControlFileData *ControlFile;
+ int fd;
+ char ControlFilePath[MAXPGPATH];
+ pg_crc32c crc;
ControlFile = palloc(sizeof(ControlFileData));
snprintf(ControlFilePath, MAXPGPATH, "%s/global/pg_control", DataDir);
#ifndef FRONTEND
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- ControlFilePath)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ ControlFilePath)));
#else
{
fprintf(stderr, _("%s: could not open file \"%s\" for reading: %s\n"),
#ifndef FRONTEND
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read file \"%s\": %m", ControlFilePath)));
+ errmsg("could not read file \"%s\": %m", ControlFilePath)));
#else
{
fprintf(stderr, _("%s: could not read file \"%s\": %s\n"),
/* Check the CRC. */
INIT_CRC32C(crc);
COMP_CRC32C(crc,
- (char *) ControlFile,
- offsetof(ControlFileData, crc));
+ (char *) ControlFile,
+ offsetof(ControlFileData, crc));
FIN_CRC32C(crc);
if (!EQ_CRC32C(crc, ControlFile->crc))
extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
- Snapshot snapshot);
+ Snapshot snapshot);
/*
* prototypes for functions in nbtutils.c
{
TimeLineID tli;
XLogRecPtr begin; /* inclusive */
- XLogRecPtr end; /* exclusive, InvalidXLogRecPtr means
- * infinity */
+ XLogRecPtr end; /* exclusive, InvalidXLogRecPtr means infinity */
} TimeLineHistoryEntry;
extern List *readTimeLineHistory(TimeLineID targetTLI);
/* Flags for bit map */
#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN 0x02
-#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid visibilitymap flags bits */
+#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid
+ * visibilitymap flags bits */
/* Macros for visibilitymap test */
#define VM_ALL_VISIBLE(r, b, v) \
Buffer *vmbuf);
extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf);
extern void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
- XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
- uint8 flags);
+ XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
+ uint8 flags);
extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
extern void visibilitymap_truncate(Relation rel, BlockNumber nheapblocks);
* Starting/stopping a base backup
*/
extern XLogRecPtr do_pg_start_backup(const char *backupidstr, bool fast,
- TimeLineID *starttli_p, StringInfo labelfile, DIR *tblspcdir,
- List **tablespaces, StringInfo tblspcmapfile, bool infotbssize,
+ TimeLineID *starttli_p, StringInfo labelfile, DIR *tblspcdir,
+ List **tablespaces, StringInfo tblspcmapfile, bool infotbssize,
bool needtblspcmapfile);
extern XLogRecPtr do_pg_stop_backup(char *labelfile, bool waitforarchive,
TimeLineID *stoptli_p);
#define MAXFNAMELEN 64
/* Length of XLog file name */
-#define XLOG_FNAME_LEN 24
+#define XLOG_FNAME_LEN 24
#define XLogFileName(fname, tli, logSegNo) \
snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, \
LOCKMODE lockmode, bool missing_ok);
extern ObjectAddress get_object_address_rv(ObjectType objtype, RangeVar *rel,
- List *objname, List *objargs, Relation *relp,
- LOCKMODE lockmode, bool missing_ok);
+ List *objname, List *objargs, Relation *relp,
+ LOCKMODE lockmode, bool missing_ok);
extern void check_object_ownership(Oid roleid,
ObjectType objtype, ObjectAddress address,
DATA(insert ( 2100 n 0 int8_avg_accum numeric_poly_avg int8_avg_combine int8_avg_serialize int8_avg_deserialize int8_avg_accum int8_avg_accum_inv numeric_poly_avg f f 0 2281 17 48 2281 48 _null_ _null_ ));
DATA(insert ( 2101 n 0 int4_avg_accum int8_avg int4_avg_combine - - int4_avg_accum int4_avg_accum_inv int8_avg f f 0 1016 0 0 1016 0 "{0,0}" "{0,0}" ));
DATA(insert ( 2102 n 0 int2_avg_accum int8_avg int4_avg_combine - - int2_avg_accum int2_avg_accum_inv int8_avg f f 0 1016 0 0 1016 0 "{0,0}" "{0,0}" ));
-DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 17 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2103 n 0 numeric_avg_accum numeric_avg numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_avg f f 0 2281 17 128 2281 128 _null_ _null_ ));
DATA(insert ( 2104 n 0 float4_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2105 n 0 float8_accum float8_avg float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2106 n 0 interval_accum interval_avg interval_combine - - interval_accum interval_accum_inv interval_avg f f 0 1187 0 0 1187 0 "{0 second,0 second}" "{0 second,0 second}" ));
DATA(insert ( 2111 n 0 float8pl - float8pl - - - - - f f 0 701 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2112 n 0 cash_pl - cash_pl - - cash_pl cash_mi - f f 0 790 0 0 790 0 _null_ _null_ ));
DATA(insert ( 2113 n 0 interval_pl - interval_pl - - interval_pl interval_mi - f f 0 1186 0 0 1186 0 _null_ _null_ ));
-DATA(insert ( 2114 n 0 numeric_avg_accum numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 2281 17 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2114 n 0 numeric_avg_accum numeric_sum numeric_avg_combine numeric_avg_serialize numeric_avg_deserialize numeric_avg_accum numeric_accum_inv numeric_sum f f 0 2281 17 128 2281 128 _null_ _null_ ));
/* max */
DATA(insert ( 2115 n 0 int8larger - int8larger - - - - - f f 413 20 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2140 n 0 timetz_smaller - timetz_smaller - - - - - f f 1552 1266 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2141 n 0 cashsmaller - cashsmaller - - - - - f f 902 790 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2142 n 0 timestamp_smaller - timestamp_smaller - - - - - f f 2062 1114 0 0 0 0 _null_ _null_ ));
-DATA(insert ( 2143 n 0 timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 0 0 0 0 _null_ _null_ ));
+DATA(insert ( 2143 n 0 timestamptz_smaller - timestamptz_smaller - - - - - f f 1322 1184 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2144 n 0 interval_smaller - interval_smaller - - - - - f f 1332 1186 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2145 n 0 text_smaller - text_smaller - - - - - f f 664 25 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2146 n 0 numeric_smaller - numeric_smaller - - - - - f f 1754 1700 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2153 n 0 numeric_accum numeric_var_samp numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_var_samp f f 0 2281 17 128 2281 128 _null_ _null_ ));
/* stddev_pop */
-DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 17 128 2281 128 _null_ _null_ ));
-DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ ));
-DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2724 n 0 int8_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize int8_accum int8_accum_inv numeric_stddev_pop f f 0 2281 17 128 2281 128 _null_ _null_ ));
+DATA(insert ( 2725 n 0 int4_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int4_accum int4_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ ));
+DATA(insert ( 2726 n 0 int2_accum numeric_poly_stddev_pop numeric_poly_combine numeric_poly_serialize numeric_poly_deserialize int2_accum int2_accum_inv numeric_poly_stddev_pop f f 0 2281 17 48 2281 48 _null_ _null_ ));
DATA(insert ( 2727 n 0 float4_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2728 n 0 float8_accum float8_stddev_pop float8_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0}" _null_ ));
DATA(insert ( 2729 n 0 numeric_accum numeric_stddev_pop numeric_combine numeric_serialize numeric_deserialize numeric_accum numeric_accum_inv numeric_stddev_pop f f 0 2281 17 128 2281 128 _null_ _null_ ));
/* SQL2003 binary regression aggregates */
DATA(insert ( 2818 n 0 int8inc_float8_float8 - int8pl - - - - - f f 0 20 0 0 0 0 "0" _null_ ));
-DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_syy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_slope float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2827 n 0 float8_regr_accum float8_covar_pop float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2828 n 0 float8_regr_accum float8_covar_samp float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
-DATA(insert ( 2829 n 0 float8_regr_accum float8_corr float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2819 n 0 float8_regr_accum float8_regr_sxx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2820 n 0 float8_regr_accum float8_regr_syy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2821 n 0 float8_regr_accum float8_regr_sxy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2822 n 0 float8_regr_accum float8_regr_avgx float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2823 n 0 float8_regr_accum float8_regr_avgy float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2824 n 0 float8_regr_accum float8_regr_r2 float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2825 n 0 float8_regr_accum float8_regr_slope float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2826 n 0 float8_regr_accum float8_regr_intercept float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2827 n 0 float8_regr_accum float8_covar_pop float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2828 n 0 float8_regr_accum float8_covar_samp float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
+DATA(insert ( 2829 n 0 float8_regr_accum float8_corr float8_regr_combine - - - - - f f 0 1022 0 0 0 0 "{0,0,0,0,0,0}" _null_ ));
/* boolean-and and boolean-or */
DATA(insert ( 2517 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 0 2281 16 _null_ _null_ ));
DATA(insert ( 2519 n 0 booland_statefunc - booland_statefunc - - bool_accum bool_accum_inv bool_alltrue f f 58 16 0 0 2281 16 _null_ _null_ ));
/* bitwise integer */
-DATA(insert ( 2236 n 0 int2and - int2and - - - - - f f 0 21 0 0 0 0 _null_ _null_ ));
+DATA(insert ( 2236 n 0 int2and - int2and - - - - - f f 0 21 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2237 n 0 int2or - int2or - - - - - f f 0 21 0 0 0 0 _null_ _null_ ));
-DATA(insert ( 2238 n 0 int4and - int4and - - - - - f f 0 23 0 0 0 0 _null_ _null_ ));
+DATA(insert ( 2238 n 0 int4and - int4and - - - - - f f 0 23 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2239 n 0 int4or - int4or - - - - - f f 0 23 0 0 0 0 _null_ _null_ ));
-DATA(insert ( 2240 n 0 int8and - int8and - - - - - f f 0 20 0 0 0 0 _null_ _null_ ));
+DATA(insert ( 2240 n 0 int8and - int8and - - - - - f f 0 20 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2241 n 0 int8or - int8or - - - - - f f 0 20 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2242 n 0 bitand - bitand - - - - - f f 0 1560 0 0 0 0 _null_ _null_ ));
DATA(insert ( 2243 n 0 bitor - bitor - - - - - f f 0 1560 0 0 0 0 _null_ _null_ ));
/* array */
DATA(insert ( 2335 n 0 array_agg_transfn array_agg_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ ));
-DATA(insert ( 4053 n 0 array_agg_array_transfn array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ ));
+DATA(insert ( 4053 n 0 array_agg_array_transfn array_agg_array_finalfn - - - - - - t f 0 2281 0 0 0 0 _null_ _null_ ));
/* text */
DATA(insert ( 3538 n 0 string_agg_transfn string_agg_finalfn - - - - - - f f 0 2281 0 0 0 0 _null_ _null_ ));
bool relhastriggers; /* has (or has had) any TRIGGERs */
bool relhassubclass; /* has (or has had) derived classes */
bool relrowsecurity; /* row security is enabled or not */
- bool relforcerowsecurity; /* row security forced for owners or not */
+ bool relforcerowsecurity; /* row security forced for owners or
+ * not */
bool relispopulated; /* matview currently holds query results */
char relreplident; /* see REPLICA_IDENTITY_xxx constants */
TransactionId relfrozenxid; /* all Xids < this are frozen in this rel */
/*
* Oldest XID still running. This is only needed to initialize hot standby
* mode from an online checkpoint, so we only bother calculating this for
- * online checkpoints and only when wal_level is replica. Otherwise
- * it's set to InvalidTransactionId.
+ * online checkpoints and only when wal_level is replica. Otherwise it's
+ * set to InvalidTransactionId.
*/
TransactionId oldestActiveXid;
} CheckPoint;
DATA(insert ( 405 range_ops PGNSP PGUID 3903 3831 t 0 ));
DATA(insert ( 783 range_ops PGNSP PGUID 3919 3831 t 0 ));
DATA(insert ( 4000 range_ops PGNSP PGUID 3474 3831 t 0 ));
-DATA(insert ( 4000 box_ops PGNSP PGUID 5000 603 t 0 ));
+DATA(insert ( 4000 box_ops PGNSP PGUID 5000 603 t 0 ));
DATA(insert ( 4000 quad_point_ops PGNSP PGUID 4015 600 t 0 ));
DATA(insert ( 4000 kd_point_ops PGNSP PGUID 4016 600 f 0 ));
DATA(insert ( 4000 text_ops PGNSP PGUID 4017 25 t 0 ));
DATA(insert OID = 375 ( "||" PGNSP PGUID b f f 2277 2277 2277 0 0 array_cat - - ));
DESCR("concatenate");
-DATA(insert OID = 352 ( "=" PGNSP PGUID b f t 28 28 16 352 3315 xideq eqsel eqjoinsel ));
+DATA(insert OID = 352 ( "=" PGNSP PGUID b f t 28 28 16 352 3315 xideq eqsel eqjoinsel ));
DESCR("equal");
DATA(insert OID = 353 ( "=" PGNSP PGUID b f f 28 23 16 0 3316 xideqint4 eqsel eqjoinsel ));
DESCR("equal");
DATA(insert OID = 3681 ( "||" PGNSP PGUID b f f 3615 3615 3615 0 0 tsquery_or - - ));
DESCR("OR-concatenate");
/* <-> operation calls tsquery_phrase, but function is polymorphic. So, point to OID of the tsquery_phrase */
-DATA(insert OID = 5005 ( "<->" PGNSP PGUID b f f 3615 3615 3615 0 0 5003 - - ));
+DATA(insert OID = 5005 ( "<->" PGNSP PGUID b f f 3615 3615 3615 0 0 5003 - - ));
DESCR("phrase-concatenate");
DATA(insert OID = 3682 ( "!!" PGNSP PGUID l f f 0 3615 3615 0 0 tsquery_not - - ));
DESCR("NOT tsquery");
DESCR("restriction selectivity of < and related operators on scalar datatypes");
DATA(insert OID = 104 ( scalargtsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ scalargtsel _null_ _null_ _null_ ));
DESCR("restriction selectivity of > and related operators on scalar datatypes");
-DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ eqjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ eqjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of = and related operators");
-DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ neqjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ neqjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of <> and related operators");
-DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalarltjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalarltjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of < and related operators on scalar datatypes");
-DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalargtjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ scalargtjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of > and related operators on scalar datatypes");
DATA(insert OID = 109 ( unknownin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2275" _null_ _null_ _null_ _null_ _null_ unknownin _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "705" _null_ _null_ _null_ _null_ _null_ unknownout _null_ _null_ _null_ ));
+DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "705" _null_ _null_ _null_ _null_ _null_ unknownout _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ numeric_fac _null_ _null_ _null_ ));
DATA(insert OID = 115 ( box_above_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_above_eq _null_ _null_ _null_ ));
DATA(insert OID = 116 ( box_below_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_below_eq _null_ _null_ _null_ ));
-DATA(insert OID = 117 ( point_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_ point_in _null_ _null_ _null_ ));
+DATA(insert OID = 117 ( point_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2275" _null_ _null_ _null_ _null_ _null_ point_in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 118 ( point_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "600" _null_ _null_ _null_ _null_ _null_ point_out _null_ _null_ _null_ ));
+DATA(insert OID = 118 ( point_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "600" _null_ _null_ _null_ _null_ _null_ point_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 119 ( lseg_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2275" _null_ _null_ _null_ _null_ _null_ lseg_in _null_ _null_ _null_ ));
+DATA(insert OID = 119 ( lseg_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2275" _null_ _null_ _null_ _null_ _null_ lseg_in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 120 ( lseg_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "601" _null_ _null_ _null_ _null_ _null_ lseg_out _null_ _null_ _null_ ));
+DATA(insert OID = 120 ( lseg_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "601" _null_ _null_ _null_ _null_ _null_ lseg_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 121 ( path_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2275" _null_ _null_ _null_ _null_ _null_ path_in _null_ _null_ _null_ ));
+DATA(insert OID = 121 ( path_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2275" _null_ _null_ _null_ _null_ _null_ path_in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 122 ( path_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "602" _null_ _null_ _null_ _null_ _null_ path_out _null_ _null_ _null_ ));
+DATA(insert OID = 122 ( path_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "602" _null_ _null_ _null_ _null_ _null_ path_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 123 ( box_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2275" _null_ _null_ _null_ _null_ _null_ box_in _null_ _null_ _null_ ));
+DATA(insert OID = 123 ( box_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2275" _null_ _null_ _null_ _null_ _null_ box_in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 124 ( box_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "603" _null_ _null_ _null_ _null_ _null_ box_out _null_ _null_ _null_ ));
+DATA(insert OID = 124 ( box_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "603" _null_ _null_ _null_ _null_ _null_ box_out _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 125 ( box_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_overlap _null_ _null_ _null_ ));
DATA(insert OID = 126 ( box_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "603 603" _null_ _null_ _null_ _null_ _null_ box_ge _null_ _null_ _null_ ));
DATA(insert OID = 138 ( box_center PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "603" _null_ _null_ _null_ _null_ _null_ box_center _null_ _null_ _null_ ));
DATA(insert OID = 139 ( areasel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ areasel _null_ _null_ _null_ ));
DESCR("restriction selectivity for area-comparison operators");
-DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ areajoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ areajoinsel _null_ _null_ _null_ ));
DESCR("join selectivity for area-comparison operators");
DATA(insert OID = 141 ( int4mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "23 23" _null_ _null_ _null_ _null_ _null_ int4mul _null_ _null_ _null_ ));
DATA(insert OID = 144 ( int4ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "23 23" _null_ _null_ _null_ _null_ _null_ int4ne _null_ _null_ _null_ ));
/* OIDS 200 - 299 */
-DATA(insert OID = 200 ( float4in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2275" _null_ _null_ _null_ _null_ _null_ float4in _null_ _null_ _null_ ));
+DATA(insert OID = 200 ( float4in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2275" _null_ _null_ _null_ _null_ _null_ float4in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 201 ( float4out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "700" _null_ _null_ _null_ _null_ _null_ float4out _null_ _null_ _null_ ));
+DATA(insert OID = 201 ( float4out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "700" _null_ _null_ _null_ _null_ _null_ float4out _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 202 ( float4mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4mul _null_ _null_ _null_ ));
DATA(insert OID = 203 ( float4div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 700 "700 700" _null_ _null_ _null_ _null_ _null_ float4div _null_ _null_ _null_ ));
DATA(insert OID = 212 ( int4um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "23" _null_ _null_ _null_ _null_ _null_ int4um _null_ _null_ _null_ ));
DATA(insert OID = 213 ( int2um PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 21 "21" _null_ _null_ _null_ _null_ _null_ int2um _null_ _null_ _null_ ));
-DATA(insert OID = 214 ( float8in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2275" _null_ _null_ _null_ _null_ _null_ float8in _null_ _null_ _null_ ));
+DATA(insert OID = 214 ( float8in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2275" _null_ _null_ _null_ _null_ _null_ float8in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 215 ( float8out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "701" _null_ _null_ _null_ _null_ _null_ float8out _null_ _null_ _null_ ));
+DATA(insert OID = 215 ( float8out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "701" _null_ _null_ _null_ _null_ _null_ float8out _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 216 ( float8mul PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8mul _null_ _null_ _null_ ));
DATA(insert OID = 217 ( float8div PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ float8div _null_ _null_ _null_ ));
DESCR("convert float4 to int2");
DATA(insert OID = 239 ( line_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "628 628" _null_ _null_ _null_ _null_ _null_ line_distance _null_ _null_ _null_ ));
-DATA(insert OID = 240 ( abstimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 702 "2275" _null_ _null_ _null_ _null_ _null_ abstimein _null_ _null_ _null_ ));
+DATA(insert OID = 240 ( abstimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 702 "2275" _null_ _null_ _null_ _null_ _null_ abstimein _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 241 ( abstimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "702" _null_ _null_ _null_ _null_ _null_ abstimeout _null_ _null_ _null_ ));
+DATA(insert OID = 241 ( abstimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "702" _null_ _null_ _null_ _null_ _null_ abstimeout _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 242 ( reltimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 703 "2275" _null_ _null_ _null_ _null_ _null_ reltimein _null_ _null_ _null_ ));
+DATA(insert OID = 242 ( reltimein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 703 "2275" _null_ _null_ _null_ _null_ _null_ reltimein _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 243 ( reltimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "703" _null_ _null_ _null_ _null_ _null_ reltimeout _null_ _null_ _null_ ));
+DATA(insert OID = 243 ( reltimeout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "703" _null_ _null_ _null_ _null_ _null_ reltimeout _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 244 ( timepl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 702 "702 703" _null_ _null_ _null_ _null_ _null_ timepl _null_ _null_ _null_ ));
DATA(insert OID = 245 ( timemi PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 702 "702 703" _null_ _null_ _null_ _null_ _null_ timemi _null_ _null_ _null_ ));
-DATA(insert OID = 246 ( tintervalin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 704 "2275" _null_ _null_ _null_ _null_ _null_ tintervalin _null_ _null_ _null_ ));
+DATA(insert OID = 246 ( tintervalin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 704 "2275" _null_ _null_ _null_ _null_ _null_ tintervalin _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 247 ( tintervalout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "704" _null_ _null_ _null_ _null_ _null_ tintervalout _null_ _null_ _null_ ));
+DATA(insert OID = 247 ( tintervalout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "704" _null_ _null_ _null_ _null_ _null_ tintervalout _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 248 ( intinterval PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "702 704" _null_ _null_ _null_ _null_ _null_ intinterval _null_ _null_ _null_ ));
DATA(insert OID = 249 ( tintervalrel PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "704" _null_ _null_ _null_ _null_ _null_ tintervalrel _null_ _null_ _null_ ));
DESCR("tinterval to reltime");
-DATA(insert OID = 250 ( timenow PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 702 "" _null_ _null_ _null_ _null_ _null_ timenow _null_ _null_ _null_ ));
+DATA(insert OID = 250 ( timenow PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 702 "" _null_ _null_ _null_ _null_ _null_ timenow _null_ _null_ _null_ ));
DESCR("current date and time (abstime)");
DATA(insert OID = 251 ( abstimeeq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimeeq _null_ _null_ _null_ ));
DATA(insert OID = 252 ( abstimene PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "702 702" _null_ _null_ _null_ _null_ _null_ abstimene _null_ _null_ _null_ ));
DATA(insert OID = 344 ( poly_right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_right _null_ _null_ _null_ ));
DATA(insert OID = 345 ( poly_contained PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_contained _null_ _null_ _null_ ));
DATA(insert OID = 346 ( poly_overlap PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "604 604" _null_ _null_ _null_ _null_ _null_ poly_overlap _null_ _null_ _null_ ));
-DATA(insert OID = 347 ( poly_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2275" _null_ _null_ _null_ _null_ _null_ poly_in _null_ _null_ _null_ ));
+DATA(insert OID = 347 ( poly_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2275" _null_ _null_ _null_ _null_ _null_ poly_in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 348 ( poly_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "604" _null_ _null_ _null_ _null_ _null_ poly_out _null_ _null_ _null_ ));
+DATA(insert OID = 348 ( poly_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "604" _null_ _null_ _null_ _null_ _null_ poly_out _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 350 ( btint2cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "21 21" _null_ _null_ _null_ _null_ _null_ btint2cmp _null_ _null_ _null_ ));
DATA(insert OID = 3818 ( arraycontjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ arraycontjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity for array-containment operators");
-DATA(insert OID = 760 ( smgrin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 210 "2275" _null_ _null_ _null_ _null_ _null_ smgrin _null_ _null_ _null_ ));
+DATA(insert OID = 760 ( smgrin PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 210 "2275" _null_ _null_ _null_ _null_ _null_ smgrin _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 761 ( smgrout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "210" _null_ _null_ _null_ _null_ _null_ smgrout _null_ _null_ _null_ ));
+DATA(insert OID = 761 ( smgrout PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "210" _null_ _null_ _null_ _null_ _null_ smgrout _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 762 ( smgreq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "210 210" _null_ _null_ _null_ _null_ _null_ smgreq _null_ _null_ _null_ ));
DESCR("storage manager");
DATA(insert OID = 764 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 26 "25" _null_ _null_ _null_ _null_ _null_ lo_import _null_ _null_ _null_ ));
DESCR("large object import");
-DATA(insert OID = 767 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 26 "25 26" _null_ _null_ _null_ _null_ _null_ lo_import_with_oid _null_ _null_ _null_ ));
+DATA(insert OID = 767 ( lo_import PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 26 "25 26" _null_ _null_ _null_ _null_ _null_ lo_import_with_oid _null_ _null_ _null_ ));
DESCR("large object import");
DATA(insert OID = 765 ( lo_export PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 23 "26 25" _null_ _null_ _null_ _null_ _null_ lo_export _null_ _null_ _null_ ));
DESCR("large object export");
DATA(insert OID = 861 ( current_database PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_database _null_ _null_ _null_ ));
DESCR("name of the current database");
-DATA(insert OID = 817 ( current_query PGNSP PGUID 12 1 0 0 0 f f f f f f v r 0 0 25 "" _null_ _null_ _null_ _null_ _null_ current_query _null_ _null_ _null_ ));
+DATA(insert OID = 817 ( current_query PGNSP PGUID 12 1 0 0 0 f f f f f f v r 0 0 25 "" _null_ _null_ _null_ _null_ _null_ current_query _null_ _null_ _null_ ));
DESCR("get the currently executing query");
DATA(insert OID = 862 ( int4_mul_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "23 790" _null_ _null_ _null_ _null_ _null_ int4_mul_cash _null_ _null_ _null_ ));
DATA(insert OID = 866 ( cash_mul_int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 21" _null_ _null_ _null_ _null_ _null_ cash_mul_int2 _null_ _null_ _null_ ));
DATA(insert OID = 867 ( cash_div_int2 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 790 "790 21" _null_ _null_ _null_ _null_ _null_ cash_div_int2 _null_ _null_ _null_ ));
-DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "2275" _null_ _null_ _null_ _null_ _null_ cash_in _null_ _null_ _null_ ));
+DATA(insert OID = 886 ( cash_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "2275" _null_ _null_ _null_ _null_ _null_ cash_in _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_ cash_out _null_ _null_ _null_ ));
+DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "790" _null_ _null_ _null_ _null_ _null_ cash_out _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_eq _null_ _null_ _null_ ));
DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "790 790" _null_ _null_ _null_ _null_ _null_ cash_ne _null_ _null_ _null_ ));
DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "790" _null_ _null_ _null_ _null_ _null_ cash_words _null_ _null_ _null_ ));
DESCR("output money amount as words");
DATA(insert OID = 3822 ( cash_div_cash PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "790 790" _null_ _null_ _null_ _null_ _null_ cash_div_cash _null_ _null_ _null_ ));
-DATA(insert OID = 3823 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_ cash_numeric _null_ _null_ _null_ ));
+DATA(insert OID = 3823 ( numeric PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1700 "790" _null_ _null_ _null_ _null_ _null_ cash_numeric _null_ _null_ _null_ ));
DESCR("convert money to numeric");
-DATA(insert OID = 3824 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_ numeric_cash _null_ _null_ _null_ ));
+DATA(insert OID = 3824 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "1700" _null_ _null_ _null_ _null_ _null_ numeric_cash _null_ _null_ _null_ ));
DESCR("convert numeric to money");
DATA(insert OID = 3811 ( money PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 790 "23" _null_ _null_ _null_ _null_ _null_ int4_cash _null_ _null_ _null_ ));
DESCR("convert int4 to money");
DESCR("I/O");
DATA(insert OID = 1144 ( time_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1083" _null_ _null_ _null_ _null_ _null_ time_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2909 ( timetypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2910 ( timetypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1145 ( time_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_eq _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 1151 ( timestamptz_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptztypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2907 ( timestamptztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptztypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptztypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2908 ( timestamptztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptztypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1152 ( timestamptz_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_eq _null_ _null_ _null_ ));
DATA(insert OID = 1153 ( timestamptz_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_ne _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 1161 ( interval_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1186" _null_ _null_ _null_ _null_ _null_ interval_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ intervaltypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2903 ( intervaltypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ intervaltypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ intervaltypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2904 ( intervaltypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ intervaltypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1162 ( interval_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_eq _null_ _null_ _null_ ));
DATA(insert OID = 1163 ( interval_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1186 1186" _null_ _null_ _null_ _null_ _null_ interval_ne _null_ _null_ _null_ ));
DESCR("convert timestamp with time zone to date");
DATA(insert OID = 1179 ( date PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1082 "702" _null_ _null_ _null_ _null_ _null_ abstime_date _null_ _null_ _null_ ));
DESCR("convert abstime to date");
-DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_abstime _null_ _null_ _null_ ));
+DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "1184" _null_ _null_ _null_ _null_ _null_ timestamptz_abstime _null_ _null_ _null_ ));
DESCR("convert timestamp with time zone to abstime");
DATA(insert OID = 1181 ( age PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 23 "28" _null_ _null_ _null_ _null_ _null_ xid_age _null_ _null_ _null_ ));
DESCR("age of a transaction ID, in transactions before current transaction");
DATA(insert OID = 1300 ( positionsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ positionsel _null_ _null_ _null_ ));
DESCR("restriction selectivity for position-comparison operators");
-DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ positionjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ positionjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity for position-comparison operators");
DATA(insert OID = 1302 ( contsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ contsel _null_ _null_ _null_ ));
DESCR("restriction selectivity for containment comparison operators");
-DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ contjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ contjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity for containment comparison operators");
DATA(insert OID = 1304 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1184 1184 1184 1184" _null_ _null_ _null_ _null_ _null_ overlaps_timestamp _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 1313 ( timestamp_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "1114" _null_ _null_ _null_ _null_ _null_ timestamp_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2905 ( timestamptypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timestamptypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2906 ( timestamptypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timestamptypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1314 ( timestamptz_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1184 1184" _null_ _null_ _null_ _null_ _null_ timestamp_cmp _null_ _null_ _null_ ));
DESCR("less-equal-greater");
DESCR("I/O");
DATA(insert OID = 1351 ( timetz_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1266" _null_ _null_ _null_ _null_ _null_ timetz_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetztypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2911 ( timetztypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ timetztypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetztypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2912 ( timetztypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ timetztypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1352 ( timetz_eq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_eq _null_ _null_ _null_ ));
DATA(insert OID = 1353 ( timetz_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1266 1266" _null_ _null_ _null_ _null_ _null_ timetz_ne _null_ _null_ _null_ ));
DATA(insert OID = 1364 ( time PGNSP PGUID 14 1 0 0 0 f f f f t f s s 1 0 1083 "702" _null_ _null_ _null_ _null_ _null_ "select cast(cast($1 as timestamp without time zone) as pg_catalog.time)" _null_ _null_ _null_ ));
DESCR("convert abstime to time");
-DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ ));
+DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1042" _null_ _null_ _null_ _null_ _null_ bpcharlen _null_ _null_ _null_ ));
DESCR("character length");
DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "25" _null_ _null_ _null_ _null_ _null_ textlen _null_ _null_ _null_ ));
DESCR("character length");
DATA(insert OID = 1402 ( current_schema PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 19 "" _null_ _null_ _null_ _null_ _null_ current_schema _null_ _null_ _null_ ));
DESCR("current schema name");
-DATA(insert OID = 1403 ( current_schemas PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1003 "16" _null_ _null_ _null_ _null_ _null_ current_schemas _null_ _null_ _null_ ));
+DATA(insert OID = 1403 ( current_schemas PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1003 "16" _null_ _null_ _null_ _null_ _null_ current_schemas _null_ _null_ _null_ ));
DESCR("current schema search list");
DATA(insert OID = 1404 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 25 "25 25 23 23" _null_ _null_ _null_ _null_ _null_ textoverlay _null_ _null_ _null_ ));
DESCR("substitute portion of string");
-DATA(insert OID = 1405 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ textoverlay_no_len _null_ _null_ _null_ ));
+DATA(insert OID = 1405 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ textoverlay_no_len _null_ _null_ _null_ ));
DESCR("substitute portion of string");
DATA(insert OID = 1406 ( isvertical PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "600 600" _null_ _null_ _null_ _null_ _null_ point_vert _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1560" _null_ _null_ _null_ _null_ _null_ bit_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ bittypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2919 ( bittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ bittypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ bittypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2920 ( bittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ bittypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1569 ( like PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ textlike _null_ _null_ _null_ ));
/* SEQUENCE functions */
-DATA(insert OID = 1574 ( nextval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ nextval_oid _null_ _null_ _null_ ));
+DATA(insert OID = 1574 ( nextval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ nextval_oid _null_ _null_ _null_ ));
DESCR("sequence next value");
-DATA(insert OID = 1575 ( currval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ currval_oid _null_ _null_ _null_ ));
+DATA(insert OID = 1575 ( currval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ currval_oid _null_ _null_ _null_ ));
DESCR("sequence current value");
DATA(insert OID = 1576 ( setval PGNSP PGUID 12 1 0 0 0 f f f f t f v u 2 0 20 "2205 20" _null_ _null_ _null_ _null_ _null_ setval_oid _null_ _null_ _null_ ));
DESCR("set sequence value");
DESCR("I/O");
DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1562" _null_ _null_ _null_ _null_ _null_ varbit_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ varbittypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2902 ( varbittypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ varbittypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ varbittypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2921 ( varbittypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ varbittypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 1581 ( biteq PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "1560 1560" _null_ _null_ _null_ _null_ _null_ biteq _null_ _null_ _null_ ));
DESCR("find all match groups for regexp");
DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 25 "25 25 23" _null_ _null_ _null_ _null_ _null_ split_text _null_ _null_ _null_ ));
DESCR("split string by field_sep and return field_num");
-DATA(insert OID = 2765 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table_no_flags _null_ _null_ _null_ ));
+DATA(insert OID = 2765 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 2 0 25 "25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table_no_flags _null_ _null_ _null_ ));
DESCR("split string by pattern");
DATA(insert OID = 2766 ( regexp_split_to_table PGNSP PGUID 12 1 1000 0 0 f f f f t t i s 3 0 25 "25 25 25" _null_ _null_ _null_ _null_ _null_ regexp_split_to_table _null_ _null_ _null_ ));
DESCR("split string by pattern");
DATA(insert OID = 2289 ( pg_options_to_table PGNSP PGUID 12 1 3 0 0 f f f f t t s s 1 0 2249 "1009" "{1009,25,25}" "{i,o,o}" "{options_array,option_name,option_value}" _null_ _null_ pg_options_to_table _null_ _null_ _null_ ));
DESCR("convert generic options array to name/value table");
-DATA(insert OID = 1619 ( pg_typeof PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 2206 "2276" _null_ _null_ _null_ _null_ _null_ pg_typeof _null_ _null_ _null_ ));
+DATA(insert OID = 1619 ( pg_typeof PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 2206 "2276" _null_ _null_ _null_ _null_ _null_ pg_typeof _null_ _null_ _null_ ));
DESCR("type of the argument");
-DATA(insert OID = 3162 ( pg_collation_for PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "2276" _null_ _null_ _null_ _null_ _null_ pg_collation_for _null_ _null_ _null_ ));
+DATA(insert OID = 3162 ( pg_collation_for PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "2276" _null_ _null_ _null_ _null_ _null_ pg_collation_for _null_ _null_ _null_ ));
DESCR("collation of the argument; implementation of the COLLATION FOR expression");
DATA(insert OID = 3842 ( pg_relation_is_updatable PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 23 "2205 16" _null_ _null_ _null_ _null_ _null_ pg_relation_is_updatable _null_ _null_ _null_ ));
DATA(insert OID = 1699 ( substring PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1560 "1560 23" _null_ _null_ _null_ _null_ _null_ bitsubstr_no_len _null_ _null_ _null_ ));
DESCR("extract portion of bitstring");
-DATA(insert OID = 3030 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 1560 "1560 1560 23 23" _null_ _null_ _null_ _null_ _null_ bitoverlay _null_ _null_ _null_ ));
+DATA(insert OID = 3030 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 1560 "1560 1560 23 23" _null_ _null_ _null_ _null_ _null_ bitoverlay _null_ _null_ _null_ ));
DESCR("substitute portion of bitstring");
DATA(insert OID = 3031 ( overlay PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1560 "1560 1560 23" _null_ _null_ _null_ _null_ _null_ bitoverlay_no_len _null_ _null_ _null_ ));
DESCR("substitute portion of bitstring");
DATA(insert OID = 835 ( macaddr_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_ne _null_ _null_ _null_ ));
DATA(insert OID = 836 ( macaddr_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_cmp _null_ _null_ _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 3144 ( macaddr_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "829" _null_ _null_ _null_ _null_ _null_ macaddr_not _null_ _null_ _null_ ));
-DATA(insert OID = 3145 ( macaddr_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_and _null_ _null_ _null_ ));
-DATA(insert OID = 3146 ( macaddr_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_or _null_ _null_ _null_ ));
+DATA(insert OID = 3144 ( macaddr_not PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "829" _null_ _null_ _null_ _null_ _null_ macaddr_not _null_ _null_ _null_ ));
+DATA(insert OID = 3145 ( macaddr_and PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_and _null_ _null_ _null_ ));
+DATA(insert OID = 3146 ( macaddr_or PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 829 "829 829" _null_ _null_ _null_ _null_ _null_ macaddr_or _null_ _null_ _null_ ));
/* for inet type support */
DATA(insert OID = 910 ( inet_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2275" _null_ _null_ _null_ _null_ _null_ inet_in _null_ _null_ _null_ ));
DATA(insert OID = 923 ( network_gt PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_gt _null_ _null_ _null_ ));
DATA(insert OID = 924 ( network_ge PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_ge _null_ _null_ _null_ ));
DATA(insert OID = 925 ( network_ne PGNSP PGUID 12 1 0 0 0 f f f t t f i s 2 0 16 "869 869" _null_ _null_ _null_ _null_ _null_ network_ne _null_ _null_ _null_ ));
-DATA(insert OID = 3562 ( network_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_larger _null_ _null_ _null_ ));
+DATA(insert OID = 3562 ( network_larger PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_larger _null_ _null_ _null_ ));
DESCR("larger of two");
-DATA(insert OID = 3563 ( network_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_smaller _null_ _null_ _null_ ));
+DATA(insert OID = 3563 ( network_smaller PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ network_smaller _null_ _null_ _null_ ));
DESCR("smaller of two");
DATA(insert OID = 926 ( network_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "869 869" _null_ _null_ _null_ _null_ _null_ network_cmp _null_ _null_ _null_ ));
DESCR("less-equal-greater");
DESCR("show all parts of inet/cidr value");
DATA(insert OID = 1362 ( hostmask PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ network_hostmask _null_ _null_ _null_ ));
DESCR("hostmask of address");
-DATA(insert OID = 1715 ( cidr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "869" _null_ _null_ _null_ _null_ _null_ inet_to_cidr _null_ _null_ _null_ ));
+DATA(insert OID = 1715 ( cidr PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "869" _null_ _null_ _null_ _null_ _null_ inet_to_cidr _null_ _null_ _null_ ));
DESCR("convert inet to cidr");
DATA(insert OID = 2196 ( inet_client_addr PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 869 "" _null_ _null_ _null_ _null_ _null_ inet_client_addr _null_ _null_ _null_ ));
DESCR("inet address of the client");
-DATA(insert OID = 2197 ( inet_client_port PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_client_port _null_ _null_ _null_ ));
+DATA(insert OID = 2197 ( inet_client_port PGNSP PGUID 12 1 0 0 0 f f f f f f s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_client_port _null_ _null_ _null_ ));
DESCR("client's port number for this connection");
DATA(insert OID = 2198 ( inet_server_addr PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 869 "" _null_ _null_ _null_ _null_ _null_ inet_server_addr _null_ _null_ _null_ ));
DESCR("inet address of the server");
-DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_server_port _null_ _null_ _null_ ));
+DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ inet_server_port _null_ _null_ _null_ ));
DESCR("server's port number for this connection");
-DATA(insert OID = 2627 ( inetnot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ inetnot _null_ _null_ _null_ ));
-DATA(insert OID = 2628 ( inetand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetand _null_ _null_ _null_ ));
-DATA(insert OID = 2629 ( inetor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetor _null_ _null_ _null_ ));
+DATA(insert OID = 2627 ( inetnot PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "869" _null_ _null_ _null_ _null_ _null_ inetnot _null_ _null_ _null_ ));
+DATA(insert OID = 2628 ( inetand PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetand _null_ _null_ _null_ ));
+DATA(insert OID = 2629 ( inetor PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 869" _null_ _null_ _null_ _null_ _null_ inetor _null_ _null_ _null_ ));
DATA(insert OID = 2630 ( inetpl PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 20" _null_ _null_ _null_ _null_ _null_ inetpl _null_ _null_ _null_ ));
DATA(insert OID = 2631 ( int8pl_inet PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 869 "20 869" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
DATA(insert OID = 2632 ( inetmi_int8 PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 869 "869 20" _null_ _null_ _null_ _null_ _null_ inetmi_int8 _null_ _null_ _null_ ));
/* Selectivity estimation for inet and cidr */
DATA(insert OID = 3560 ( networksel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ networksel _null_ _null_ _null_ ));
DESCR("restriction selectivity for network operators");
-DATA(insert OID = 3561 ( networkjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ networkjoinsel _null_ _null_ _null_ ));
+DATA(insert OID = 3561 ( networkjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ networkjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity for network operators");
DATA(insert OID = 1690 ( time_mi_time PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1186 "1083 1083" _null_ _null_ _null_ _null_ _null_ time_mi_time _null_ _null_ _null_ ));
/* OID's 1700 - 1799 NUMERIC data type */
-DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1700 "2275 26 23" _null_ _null_ _null_ _null_ _null_ numeric_in _null_ _null_ _null_ ));
+DATA(insert OID = 1701 ( numeric_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 1700 "2275 26 23" _null_ _null_ _null_ _null_ _null_ numeric_in _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 1702 ( numeric_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "1700" _null_ _null_ _null_ _null_ _null_ numeric_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ numerictypmodin _null_ _null_ _null_ ));
+DATA(insert OID = 2917 ( numerictypmodin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 23 "1263" _null_ _null_ _null_ _null_ _null_ numerictypmodin _null_ _null_ _null_ ));
DESCR("I/O typmod");
-DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ numerictypmodout _null_ _null_ _null_ ));
+DATA(insert OID = 2918 ( numerictypmodout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "23" _null_ _null_ _null_ _null_ _null_ numerictypmodout _null_ _null_ _null_ ));
DESCR("I/O typmod");
DATA(insert OID = 3157 ( numeric_transform PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ numeric_transform _null_ _null_ _null_ ));
DESCR("transform a numeric length coercion");
DATA(insert OID = 1799 ( oidout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "26" _null_ _null_ _null_ _null_ _null_ oidout _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3058 ( concat PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 25 "2276" "{2276}" "{v}" _null_ _null_ _null_ text_concat _null_ _null_ _null_ ));
+DATA(insert OID = 3058 ( concat PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 25 "2276" "{2276}" "{v}" _null_ _null_ _null_ text_concat _null_ _null_ _null_ ));
DESCR("concatenate values");
-DATA(insert OID = 3059 ( concat_ws PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_concat_ws _null_ _null_ _null_ ));
+DATA(insert OID = 3059 ( concat_ws PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_concat_ws _null_ _null_ _null_ ));
DESCR("concatenate values with separators");
DATA(insert OID = 3060 ( left PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_left _null_ _null_ _null_ ));
DESCR("extract the first n characters");
DATA(insert OID = 3061 ( right PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 25 "25 23" _null_ _null_ _null_ _null_ _null_ text_right _null_ _null_ _null_ ));
DESCR("extract the last n characters");
-DATA(insert OID = 3062 ( reverse PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_reverse _null_ _null_ _null_ ));
+DATA(insert OID = 3062 ( reverse PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_reverse _null_ _null_ _null_ ));
DESCR("reverse text");
-DATA(insert OID = 3539 ( format PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_format _null_ _null_ _null_ ));
+DATA(insert OID = 3539 ( format PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 2 0 25 "25 2276" "{25,2276}" "{i,v}" _null_ _null_ _null_ text_format _null_ _null_ _null_ ));
DESCR("format text message");
-DATA(insert OID = 3540 ( format PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_format_nv _null_ _null_ _null_ ));
+DATA(insert OID = 3540 ( format PGNSP PGUID 12 1 0 0 0 f f f f f f s s 1 0 25 "25" _null_ _null_ _null_ _null_ _null_ text_format_nv _null_ _null_ _null_ ));
DESCR("format text message");
DATA(insert OID = 1810 ( bit_length PGNSP PGUID 14 1 0 0 0 f f f f t f i s 1 0 23 "17" _null_ _null_ _null_ _null_ _null_ "select pg_catalog.octet_length($1) * 8" _null_ _null_ _null_ ));
DESCR("length in bits");
/* Selectivity estimators for LIKE and related operators */
-DATA(insert OID = 1814 ( iclikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ iclikesel _null_ _null_ _null_ ));
+DATA(insert OID = 1814 ( iclikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ iclikesel _null_ _null_ _null_ ));
DESCR("restriction selectivity of ILIKE");
-DATA(insert OID = 1815 ( icnlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icnlikesel _null_ _null_ _null_ ));
+DATA(insert OID = 1815 ( icnlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icnlikesel _null_ _null_ _null_ ));
DESCR("restriction selectivity of NOT ILIKE");
DATA(insert OID = 1816 ( iclikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ iclikejoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of ILIKE");
DATA(insert OID = 1817 ( icnlikejoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ icnlikejoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of NOT ILIKE");
-DATA(insert OID = 1818 ( regexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexeqsel _null_ _null_ _null_ ));
+DATA(insert OID = 1818 ( regexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexeqsel _null_ _null_ _null_ ));
DESCR("restriction selectivity of regex match");
-DATA(insert OID = 1819 ( likesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ likesel _null_ _null_ _null_ ));
+DATA(insert OID = 1819 ( likesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ likesel _null_ _null_ _null_ ));
DESCR("restriction selectivity of LIKE");
-DATA(insert OID = 1820 ( icregexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexeqsel _null_ _null_ _null_ ));
+DATA(insert OID = 1820 ( icregexeqsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexeqsel _null_ _null_ _null_ ));
DESCR("restriction selectivity of case-insensitive regex match");
-DATA(insert OID = 1821 ( regexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexnesel _null_ _null_ _null_ ));
+DATA(insert OID = 1821 ( regexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ regexnesel _null_ _null_ _null_ ));
DESCR("restriction selectivity of regex non-match");
-DATA(insert OID = 1822 ( nlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ nlikesel _null_ _null_ _null_ ));
+DATA(insert OID = 1822 ( nlikesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ nlikesel _null_ _null_ _null_ ));
DESCR("restriction selectivity of NOT LIKE");
-DATA(insert OID = 1823 ( icregexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexnesel _null_ _null_ _null_ ));
+DATA(insert OID = 1823 ( icregexnesel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 4 0 701 "2281 26 2281 23" _null_ _null_ _null_ _null_ _null_ icregexnesel _null_ _null_ _null_ ));
DESCR("restriction selectivity of case-insensitive regex non-match");
DATA(insert OID = 1824 ( regexeqjoinsel PGNSP PGUID 12 1 0 0 0 f f f f t f s s 5 0 701 "2281 26 2281 21 2281" _null_ _null_ _null_ _null_ _null_ regexeqjoinsel _null_ _null_ _null_ ));
DESCR("join selectivity of regex match");
DESCR("aggregate final function");
DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 3341 ( numeric_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ ));
+DATA(insert OID = 3341 ( numeric_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_combine _null_ _null_ _null_ ));
DESCR("aggregate combine function");
DATA(insert OID = 2858 ( numeric_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_avg_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 3337 ( numeric_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ ));
+DATA(insert OID = 3337 ( numeric_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_combine _null_ _null_ _null_ ));
DESCR("aggregate combine function");
DATA(insert OID = 2740 ( numeric_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_avg_serialize _null_ _null_ _null_ ));
DESCR("aggregate serial function");
-DATA(insert OID = 2741 ( numeric_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ ));
+DATA(insert OID = 2741 ( numeric_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_avg_deserialize _null_ _null_ _null_ ));
DESCR("aggregate deserial function");
DATA(insert OID = 3335 ( numeric_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_serialize _null_ _null_ _null_ ));
DESCR("aggregate serial function");
-DATA(insert OID = 3336 ( numeric_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ ));
+DATA(insert OID = 3336 ( numeric_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_deserialize _null_ _null_ _null_ ));
DESCR("aggregate deserial function");
DATA(insert OID = 3548 ( numeric_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 1700" _null_ _null_ _null_ _null_ _null_ numeric_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
DESCR("aggregate transition function");
DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 3338 ( numeric_poly_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ ));
+DATA(insert OID = 3338 ( numeric_poly_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_combine _null_ _null_ _null_ ));
DESCR("aggregate combine function");
-DATA(insert OID = 3339 ( numeric_poly_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ ));
+DATA(insert OID = 3339 ( numeric_poly_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ numeric_poly_serialize _null_ _null_ _null_ ));
DESCR("aggregate serial function");
-DATA(insert OID = 3340 ( numeric_poly_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ ));
+DATA(insert OID = 3340 ( numeric_poly_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ numeric_poly_deserialize _null_ _null_ _null_ ));
DESCR("aggregate deserial function");
DATA(insert OID = 2746 ( int8_avg_accum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
DESCR("aggregate transition function");
DATA(insert OID = 3387 ( int8_avg_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 20" _null_ _null_ _null_ _null_ _null_ int8_avg_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 2785 ( int8_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ ));
+DATA(insert OID = 2785 ( int8_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int8_avg_combine _null_ _null_ _null_ ));
DESCR("aggregate combine function");
-DATA(insert OID = 2786 ( int8_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ ));
+DATA(insert OID = 2786 ( int8_avg_serialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "2281" _null_ _null_ _null_ _null_ _null_ int8_avg_serialize _null_ _null_ _null_ ));
DESCR("aggregate serial function");
-DATA(insert OID = 2787 ( int8_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ ));
+DATA(insert OID = 2787 ( int8_avg_deserialize PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "17" _null_ _null_ _null_ _null_ _null_ int8_avg_deserialize _null_ _null_ _null_ ));
DESCR("aggregate deserial function");
-DATA(insert OID = 3324 ( int4_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ ));
+DATA(insert OID = 3324 ( int4_avg_combine PGNSP PGUID 12 1 0 0 0 f f f f f f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ int4_avg_combine _null_ _null_ _null_ ));
DESCR("aggregate combine function");
DATA(insert OID = 3178 ( numeric_sum PGNSP PGUID 12 1 0 0 0 f f f f f f i s 1 0 1700 "2281" _null_ _null_ _null_ _null_ _null_ numeric_sum _null_ _null_ _null_ ));
DESCR("aggregate final function");
DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum _null_ _null_ _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 3325 ( interval_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ ));
+DATA(insert OID = 3325 ( interval_combine PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1187" _null_ _null_ _null_ _null_ _null_ interval_combine _null_ _null_ _null_ ));
DESCR("aggregate combine function");
DATA(insert OID = 3549 ( interval_accum_inv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1187 "1187 1186" _null_ _null_ _null_ _null_ _null_ interval_accum_inv _null_ _null_ _null_ ));
DESCR("aggregate transition function");
DATA(insert OID = 3023 ( has_column_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 21 25" _null_ _null_ _null_ _null_ _null_ has_column_privilege_id_attnum _null_ _null_ _null_ ));
DESCR("current user privilege on column by rel oid, col attnum");
-DATA(insert OID = 3024 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_name _null_ _null_ _null_ ));
+DATA(insert OID = 3024 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_name _null_ _null_ _null_ ));
DESCR("user privilege on any column by username, rel name");
-DATA(insert OID = 3025 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_id _null_ _null_ _null_ ));
+DATA(insert OID = 3025 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name_id _null_ _null_ _null_ ));
DESCR("user privilege on any column by username, rel oid");
-DATA(insert OID = 3026 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_name _null_ _null_ _null_ ));
+DATA(insert OID = 3026 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_name _null_ _null_ _null_ ));
DESCR("user privilege on any column by user oid, rel name");
-DATA(insert OID = 3027 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_id _null_ _null_ _null_ ));
+DATA(insert OID = 3027 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_id_id _null_ _null_ _null_ ));
DESCR("user privilege on any column by user oid, rel oid");
DATA(insert OID = 3028 ( has_any_column_privilege PGNSP PGUID 12 10 0 0 0 f f f f t f s s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ has_any_column_privilege_name _null_ _null_ _null_ ));
DESCR("current user privilege on any column by rel name");
DESCR("statistics: last manual vacuum time for a table");
DATA(insert OID = 2782 ( pg_stat_get_last_autovacuum_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autovacuum_time _null_ _null_ _null_ ));
DESCR("statistics: last auto vacuum time for a table");
-DATA(insert OID = 2783 ( pg_stat_get_last_analyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_analyze_time _null_ _null_ _null_ ));
+DATA(insert OID = 2783 ( pg_stat_get_last_analyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_analyze_time _null_ _null_ _null_ ));
DESCR("statistics: last manual analyze time for a table");
-DATA(insert OID = 2784 ( pg_stat_get_last_autoanalyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autoanalyze_time _null_ _null_ _null_ ));
+DATA(insert OID = 2784 ( pg_stat_get_last_autoanalyze_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 1184 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_last_autoanalyze_time _null_ _null_ _null_ ));
DESCR("statistics: last auto analyze time for a table");
DATA(insert OID = 3054 ( pg_stat_get_vacuum_count PGNSP PGUID 12 1 0 0 0 f f f f t f s r 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_get_vacuum_count _null_ _null_ _null_ ));
DESCR("statistics: number of manual vacuums for a table");
DESCR("statistics: currently active backend IDs");
DATA(insert OID = 2022 ( pg_stat_get_activity PGNSP PGUID 12 1 100 0 0 f f f f f t s r 1 0 2249 "23" "{23,26,23,26,25,25,25,25,25,1184,1184,1184,1184,869,25,23,28,28,16,25,25,23,16,25}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,pid,usesysid,application_name,state,query,wait_event_type,wait_event,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port,backend_xid,backend_xmin,ssl,sslversion,sslcipher,sslbits,sslcompression,sslclientdn}" _null_ _null_ pg_stat_get_activity _null_ _null_ _null_ ));
DESCR("statistics: information about currently active backends");
-DATA(insert OID = 3318 ( pg_stat_get_progress_info PGNSP PGUID 12 1 100 0 0 f f f f t t s r 1 0 2249 "25" "{25,23,26,26,20,20,20,20,20,20,20,20,20,20}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}" _null_ _null_ pg_stat_get_progress_info _null_ _null_ _null_ ));
+DATA(insert OID = 3318 ( pg_stat_get_progress_info PGNSP PGUID 12 1 100 0 0 f f f f t t s r 1 0 2249 "25" "{25,23,26,26,20,20,20,20,20,20,20,20,20,20}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}" _null_ _null_ pg_stat_get_progress_info _null_ _null_ _null_ ));
DESCR("statistics: information about progress of backends running maintenance command");
DATA(insert OID = 3099 ( pg_stat_get_wal_senders PGNSP PGUID 12 1 10 0 0 f f f f f t s r 0 0 2249 "" "{23,25,3220,3220,3220,3220,23,25}" "{o,o,o,o,o,o,o,o}" "{pid,state,sent_location,write_location,flush_location,replay_location,sync_priority,sync_state}" _null_ _null_ pg_stat_get_wal_senders _null_ _null_ _null_ ));
DESCR("statistics: information about currently active replication");
DESCR("statistics: number of buffers written by the bgwriter for cleaning dirty buffers");
DATA(insert OID = 2773 ( pg_stat_get_bgwriter_maxwritten_clean PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 20 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_maxwritten_clean _null_ _null_ _null_ ));
DESCR("statistics: number of times the bgwriter stopped processing when it had written too many buffers while cleaning");
-DATA(insert OID = 3075 ( pg_stat_get_bgwriter_stat_reset_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_stat_reset_time _null_ _null_ _null_ ));
+DATA(insert OID = 3075 ( pg_stat_get_bgwriter_stat_reset_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 1184 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_stat_reset_time _null_ _null_ _null_ ));
DESCR("statistics: last reset for the bgwriter");
DATA(insert OID = 3160 ( pg_stat_get_checkpoint_write_time PGNSP PGUID 12 1 0 0 0 f f f f t f s r 0 0 701 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_checkpoint_write_time _null_ _null_ _null_ ));
DESCR("statistics: checkpoint time spent writing buffers to disk, in msec");
DESCR("statistics: discard current transaction's statistics snapshot");
DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 1 0 0 0 f f f f f f v s 0 0 2278 "" _null_ _null_ _null_ _null_ _null_ pg_stat_reset _null_ _null_ _null_ ));
DESCR("statistics: reset collected statistics for current database");
-DATA(insert OID = 3775 ( pg_stat_reset_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_shared _null_ _null_ _null_ ));
+DATA(insert OID = 3775 ( pg_stat_reset_shared PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "25" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_shared _null_ _null_ _null_ ));
DESCR("statistics: reset collected statistics shared across the cluster");
-DATA(insert OID = 3776 ( pg_stat_reset_single_table_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_table_counters _null_ _null_ _null_ ));
+DATA(insert OID = 3776 ( pg_stat_reset_single_table_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_table_counters _null_ _null_ _null_ ));
DESCR("statistics: reset collected statistics for a single table or index in the current database");
-DATA(insert OID = 3777 ( pg_stat_reset_single_function_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_function_counters _null_ _null_ _null_ ));
+DATA(insert OID = 3777 ( pg_stat_reset_single_function_counters PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 2278 "26" _null_ _null_ _null_ _null_ _null_ pg_stat_reset_single_function_counters _null_ _null_ _null_ ));
DESCR("statistics: reset collected statistics for a single function in the current database");
DATA(insert OID = 3163 ( pg_trigger_depth PGNSP PGUID 12 1 0 0 0 f f f f t f s s 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_trigger_depth _null_ _null_ _null_ ));
DESCR("convert timestamp with time zone to time");
DATA(insert OID = 2020 ( date_trunc PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 1114 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_trunc _null_ _null_ _null_ ));
DESCR("truncate timestamp to specified units");
-DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_part _null_ _null_ _null_ ));
+DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 701 "25 1114" _null_ _null_ _null_ _null_ _null_ timestamp_part _null_ _null_ _null_ ));
DESCR("extract field from timestamp");
DATA(insert OID = 2023 ( timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 1114 "702" _null_ _null_ _null_ _null_ _null_ abstime_timestamp _null_ _null_ _null_ ));
DESCR("convert abstime to timestamp");
DESCR("hash");
DATA(insert OID = 2041 ( overlaps PGNSP PGUID 12 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1114" _null_ _null_ _null_ _null_ _null_ overlaps_timestamp _null_ _null_ _null_ ));
DESCR("intervals overlap?");
-DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ ));
+DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ ));
DESCR("intervals overlap?");
-DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ ));
+DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1114 1114 1186" _null_ _null_ _null_ _null_ _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" _null_ _null_ _null_ ));
DESCR("intervals overlap?");
-DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1114" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ ));
+DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 1 0 0 0 f f f f f f i s 4 0 16 "1114 1186 1114 1114" _null_ _null_ _null_ _null_ _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" _null_ _null_ _null_ ));
DESCR("intervals overlap?");
DATA(insert OID = 2045 ( timestamp_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 23 "1114 1114" _null_ _null_ _null_ _null_ _null_ timestamp_cmp _null_ _null_ _null_ ));
DESCR("less-equal-greater");
DESCR("current xlog write location");
DATA(insert OID = 2852 ( pg_current_xlog_insert_location PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_xlog_insert_location _null_ _null_ _null_ ));
DESCR("current xlog insert location");
-DATA(insert OID = 3330 ( pg_current_xlog_flush_location PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_xlog_flush_location _null_ _null_ _null_ ));
+DATA(insert OID = 3330 ( pg_current_xlog_flush_location PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 3220 "" _null_ _null_ _null_ _null_ _null_ pg_current_xlog_flush_location _null_ _null_ _null_ ));
DESCR("current xlog flush location");
DATA(insert OID = 2850 ( pg_xlogfile_name_offset PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2249 "3220" "{3220,25,23}" "{i,o,o}" "{wal_location,file_name,file_offset}" _null_ _null_ pg_xlogfile_name_offset _null_ _null_ _null_ ));
DESCR("xlog filename and byte offset, given an xlog location");
/* Aggregates (moved here from pg_aggregate for 7.3) */
-DATA(insert OID = 2100 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2100 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as numeric of all bigint values");
-DATA(insert OID = 2101 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2101 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as numeric of all integer values");
-DATA(insert OID = 2102 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2102 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as numeric of all smallint values");
DATA(insert OID = 2103 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as numeric of all numeric values");
-DATA(insert OID = 2104 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2104 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as float8 of all float4 values");
-DATA(insert OID = 2105 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2105 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as float8 of all float8 values");
DATA(insert OID = 2106 ( avg PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("the average (arithmetic mean) as interval of all interval values");
-DATA(insert OID = 2107 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2107 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as numeric across all bigint input values");
DATA(insert OID = 2108 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as bigint across all integer input values");
DATA(insert OID = 2109 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as bigint across all smallint input values");
-DATA(insert OID = 2110 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2110 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as float4 across all float4 input values");
-DATA(insert OID = 2111 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2111 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as float8 across all float8 input values");
-DATA(insert OID = 2112 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2112 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as money across all money input values");
DATA(insert OID = 2113 ( sum PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1186 "1186" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum as interval across all interval input values");
DESCR("maximum value of all smallint input values");
DATA(insert OID = 2118 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all oid input values");
-DATA(insert OID = 2119 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2119 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all float4 input values");
-DATA(insert OID = 2120 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2120 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all float8 input values");
-DATA(insert OID = 2121 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2121 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all abstime input values");
DATA(insert OID = 2122 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1082 "1082" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all date input values");
DESCR("maximum value of all time input values");
DATA(insert OID = 2124 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1266 "1266" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all time with time zone input values");
-DATA(insert OID = 2125 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2125 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all money input values");
DATA(insert OID = 2126 ( max PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1114 "1114" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("maximum value of all timestamp input values");
DESCR("minimum value of all smallint input values");
DATA(insert OID = 2134 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all oid input values");
-DATA(insert OID = 2135 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2135 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 700 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all float4 input values");
-DATA(insert OID = 2136 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2136 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all float8 input values");
-DATA(insert OID = 2137 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2137 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 702 "702" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all abstime input values");
DATA(insert OID = 2138 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1082 "1082" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all date input values");
DESCR("minimum value of all time input values");
DATA(insert OID = 2140 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1266 "1266" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all time with time zone input values");
-DATA(insert OID = 2141 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2141 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 790 "790" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all money input values");
DATA(insert OID = 2142 ( min PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1114 "1114" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("minimum value of all timestamp input values");
DESCR("minimum value of all inet input values");
/* count has two forms: count(any) and count(*) */
-DATA(insert OID = 2147 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2147 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 20 "2276" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("number of input rows for which the input expression is not null");
-DATA(insert OID = 2803 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2803 ( count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 0 0 20 "" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("number of input rows");
-DATA(insert OID = 2718 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2718 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population variance of bigint input values (square of the population standard deviation)");
-DATA(insert OID = 2719 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2719 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population variance of integer input values (square of the population standard deviation)");
-DATA(insert OID = 2720 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2720 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population variance of smallint input values (square of the population standard deviation)");
-DATA(insert OID = 2721 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2721 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population variance of float4 input values (square of the population standard deviation)");
-DATA(insert OID = 2722 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2722 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population variance of float8 input values (square of the population standard deviation)");
DATA(insert OID = 2723 ( var_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population variance of numeric input values (square of the population standard deviation)");
-DATA(insert OID = 2641 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2641 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample variance of bigint input values (square of the sample standard deviation)");
-DATA(insert OID = 2642 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2642 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample variance of integer input values (square of the sample standard deviation)");
-DATA(insert OID = 2643 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2643 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample variance of smallint input values (square of the sample standard deviation)");
-DATA(insert OID = 2644 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2644 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample variance of float4 input values (square of the sample standard deviation)");
-DATA(insert OID = 2645 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2645 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample variance of float8 input values (square of the sample standard deviation)");
DATA(insert OID = 2646 ( var_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample variance of numeric input values (square of the sample standard deviation)");
-DATA(insert OID = 2148 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2148 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for var_samp");
-DATA(insert OID = 2149 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2149 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for var_samp");
-DATA(insert OID = 2150 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2150 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for var_samp");
-DATA(insert OID = 2151 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2151 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for var_samp");
-DATA(insert OID = 2152 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2152 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for var_samp");
DATA(insert OID = 2153 ( variance PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for var_samp");
-DATA(insert OID = 2724 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2724 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population standard deviation of bigint input values");
-DATA(insert OID = 2725 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2725 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population standard deviation of integer input values");
-DATA(insert OID = 2726 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2726 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population standard deviation of smallint input values");
-DATA(insert OID = 2727 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2727 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population standard deviation of float4 input values");
-DATA(insert OID = 2728 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2728 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population standard deviation of float8 input values");
DATA(insert OID = 2729 ( stddev_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population standard deviation of numeric input values");
-DATA(insert OID = 2712 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2712 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample standard deviation of bigint input values");
-DATA(insert OID = 2713 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2713 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample standard deviation of integer input values");
-DATA(insert OID = 2714 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2714 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample standard deviation of smallint input values");
-DATA(insert OID = 2715 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2715 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample standard deviation of float4 input values");
-DATA(insert OID = 2716 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2716 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample standard deviation of float8 input values");
DATA(insert OID = 2717 ( stddev_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample standard deviation of numeric input values");
-DATA(insert OID = 2154 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2154 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "20" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for stddev_samp");
-DATA(insert OID = 2155 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2155 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "23" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for stddev_samp");
-DATA(insert OID = 2156 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2156 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "21" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for stddev_samp");
-DATA(insert OID = 2157 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2157 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for stddev_samp");
-DATA(insert OID = 2158 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2158 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 701 "701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for stddev_samp");
DATA(insert OID = 2159 ( stddev PGNSP PGUID 12 1 0 0 0 t f f f f f i s 1 0 1700 "1700" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("historical alias for stddev_samp");
DATA(insert OID = 2818 ( regr_count PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 20 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("number of input rows in which both expressions are not null");
-DATA(insert OID = 2819 ( regr_sxx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2819 ( regr_sxx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum of squares of the independent variable (sum(X^2) - sum(X)^2/N)");
-DATA(insert OID = 2820 ( regr_syy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2820 ( regr_syy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum of squares of the dependent variable (sum(Y^2) - sum(Y)^2/N)");
-DATA(insert OID = 2821 ( regr_sxy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2821 ( regr_sxy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sum of products of independent times dependent variable (sum(X*Y) - sum(X) * sum(Y)/N)");
-DATA(insert OID = 2822 ( regr_avgx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2822 ( regr_avgx PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("average of the independent variable (sum(X)/N)");
-DATA(insert OID = 2823 ( regr_avgy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2823 ( regr_avgy PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("average of the dependent variable (sum(Y)/N)");
-DATA(insert OID = 2824 ( regr_r2 PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2824 ( regr_r2 PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("square of the correlation coefficient");
-DATA(insert OID = 2825 ( regr_slope PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2825 ( regr_slope PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("slope of the least-squares-fit linear equation determined by the (X, Y) pairs");
-DATA(insert OID = 2826 ( regr_intercept PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2826 ( regr_intercept PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("y-intercept of the least-squares-fit linear equation determined by the (X, Y) pairs");
-DATA(insert OID = 2827 ( covar_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2827 ( covar_pop PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("population covariance");
-DATA(insert OID = 2828 ( covar_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2828 ( covar_samp PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("sample covariance");
-DATA(insert OID = 2829 ( corr PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 2829 ( corr PGNSP PGUID 12 1 0 0 0 t f f f f f i s 2 0 701 "701 701" _null_ _null_ _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("correlation coefficient");
DATA(insert OID = 2160 ( text_pattern_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "25 25" _null_ _null_ _null_ _null_ _null_ text_pattern_lt _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3493 ( to_regtype PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2206 "25" _null_ _null_ _null_ _null_ _null_ to_regtype _null_ _null_ _null_ ));
DESCR("convert type name to regtype");
-DATA(insert OID = 1079 ( regclass PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "25" _null_ _null_ _null_ _null_ _null_ text_regclass _null_ _null_ _null_ ));
+DATA(insert OID = 1079 ( regclass PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2205 "25" _null_ _null_ _null_ _null_ _null_ text_regclass _null_ _null_ _null_ ));
DESCR("convert text to regclass");
DATA(insert OID = 4098 ( regrolein PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 4096 "2275" _null_ _null_ _null_ _null_ _null_ regrolein _null_ _null_ _null_ ));
DATA(insert OID = 3143 ( has_type_privilege PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ has_type_privilege_id _null_ _null_ _null_ ));
DESCR("current user privilege on type by type oid");
-DATA(insert OID = 2705 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_name _null_ _null_ _null_ ));
+DATA(insert OID = 2705 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_name _null_ _null_ _null_ ));
DESCR("user privilege on role by username, role name");
-DATA(insert OID = 2706 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_id _null_ _null_ _null_ ));
+DATA(insert OID = 2706 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "19 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name_id _null_ _null_ _null_ ));
DESCR("user privilege on role by username, role oid");
-DATA(insert OID = 2707 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_name _null_ _null_ _null_ ));
+DATA(insert OID = 2707 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_name _null_ _null_ _null_ ));
DESCR("user privilege on role by user oid, role name");
-DATA(insert OID = 2708 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_id _null_ _null_ _null_ ));
+DATA(insert OID = 2708 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 16 "26 26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id_id _null_ _null_ _null_ ));
DESCR("user privilege on role by user oid, role oid");
DATA(insert OID = 2709 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "19 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_name _null_ _null_ _null_ ));
DESCR("current user privilege on role by role name");
DATA(insert OID = 2710 ( pg_has_role PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 16 "26 25" _null_ _null_ _null_ _null_ _null_ pg_has_role_id _null_ _null_ _null_ ));
DESCR("current user privilege on role by role oid");
-DATA(insert OID = 1269 ( pg_column_size PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "2276" _null_ _null_ _null_ _null_ _null_ pg_column_size _null_ _null_ _null_ ));
+DATA(insert OID = 1269 ( pg_column_size PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 23 "2276" _null_ _null_ _null_ _null_ _null_ pg_column_size _null_ _null_ _null_ ));
DESCR("bytes required to store the value, perhaps with compression");
DATA(insert OID = 2322 ( pg_tablespace_size PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_size_oid _null_ _null_ _null_ ));
DESCR("total disk space usage for the specified tablespace");
DATA(insert OID = 2316 ( postgresql_fdw_validator PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "1009 26" _null_ _null_ _null_ _null_ _null_ postgresql_fdw_validator _null_ _null_ _null_));
DESCR("(internal)");
-DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "2275 26 23" _null_ _null_ _null_ _null_ _null_ record_in _null_ _null_ _null_ ));
+DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 2249 "2275 26 23" _null_ _null_ _null_ _null_ _null_ record_in _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2291 ( record_out PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 2275 "2249" _null_ _null_ _null_ _null_ _null_ record_out _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2415 ( textsend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "25" _null_ _null_ _null_ _null_ _null_ textsend _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2416 ( unknownrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2281" _null_ _null_ _null_ _null_ _null_ unknownrecv _null_ _null_ _null_ ));
+DATA(insert OID = 2416 ( unknownrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 705 "2281" _null_ _null_ _null_ _null_ _null_ unknownrecv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2417 ( unknownsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "705" _null_ _null_ _null_ _null_ _null_ unknownsend _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2423 ( namesend PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "19" _null_ _null_ _null_ _null_ _null_ namesend _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2424 ( float4recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2281" _null_ _null_ _null_ _null_ _null_ float4recv _null_ _null_ _null_ ));
+DATA(insert OID = 2424 ( float4recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 700 "2281" _null_ _null_ _null_ _null_ _null_ float4recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2425 ( float4send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "700" _null_ _null_ _null_ _null_ _null_ float4send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2426 ( float8recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2281" _null_ _null_ _null_ _null_ _null_ float8recv _null_ _null_ _null_ ));
+DATA(insert OID = 2426 ( float8recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 701 "2281" _null_ _null_ _null_ _null_ _null_ float8recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2427 ( float8send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "701" _null_ _null_ _null_ _null_ _null_ float8send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2428 ( point_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2281" _null_ _null_ _null_ _null_ _null_ point_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2428 ( point_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 600 "2281" _null_ _null_ _null_ _null_ _null_ point_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2429 ( point_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "600" _null_ _null_ _null_ _null_ _null_ point_send _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2461 ( numeric_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1700" _null_ _null_ _null_ _null_ _null_ numeric_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "2281" _null_ _null_ _null_ _null_ _null_ abstimerecv _null_ _null_ _null_ ));
+DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 702 "2281" _null_ _null_ _null_ _null_ _null_ abstimerecv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2463 ( abstimesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "702" _null_ _null_ _null_ _null_ _null_ abstimesend _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2464 ( reltimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "2281" _null_ _null_ _null_ _null_ _null_ reltimerecv _null_ _null_ _null_ ));
+DATA(insert OID = 2464 ( reltimerecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 703 "2281" _null_ _null_ _null_ _null_ _null_ reltimerecv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2465 ( reltimesend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "703" _null_ _null_ _null_ _null_ _null_ reltimesend _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2466 ( tintervalrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 704 "2281" _null_ _null_ _null_ _null_ _null_ tintervalrecv _null_ _null_ _null_ ));
+DATA(insert OID = 2466 ( tintervalrecv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 704 "2281" _null_ _null_ _null_ _null_ _null_ tintervalrecv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2467 ( tintervalsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "704" _null_ _null_ _null_ _null_ _null_ tintervalsend _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2479 ( interval_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "1186" _null_ _null_ _null_ _null_ _null_ interval_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2281" _null_ _null_ _null_ _null_ _null_ lseg_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 601 "2281" _null_ _null_ _null_ _null_ _null_ lseg_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2481 ( lseg_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "601" _null_ _null_ _null_ _null_ _null_ lseg_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2482 ( path_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2281" _null_ _null_ _null_ _null_ _null_ path_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2482 ( path_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 602 "2281" _null_ _null_ _null_ _null_ _null_ path_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2483 ( path_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "602" _null_ _null_ _null_ _null_ _null_ path_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2484 ( box_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2281" _null_ _null_ _null_ _null_ _null_ box_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2484 ( box_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 603 "2281" _null_ _null_ _null_ _null_ _null_ box_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2485 ( box_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "603" _null_ _null_ _null_ _null_ _null_ box_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2486 ( poly_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2281" _null_ _null_ _null_ _null_ _null_ poly_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2486 ( poly_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 604 "2281" _null_ _null_ _null_ _null_ _null_ poly_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2487 ( poly_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "604" _null_ _null_ _null_ _null_ _null_ poly_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2488 ( line_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 628 "2281" _null_ _null_ _null_ _null_ _null_ line_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2488 ( line_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 628 "2281" _null_ _null_ _null_ _null_ _null_ line_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2489 ( line_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "628" _null_ _null_ _null_ _null_ _null_ line_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2490 ( circle_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "2281" _null_ _null_ _null_ _null_ _null_ circle_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2490 ( circle_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 718 "2281" _null_ _null_ _null_ _null_ _null_ circle_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2491 ( circle_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "718" _null_ _null_ _null_ _null_ _null_ circle_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2492 ( cash_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 790 "2281" _null_ _null_ _null_ _null_ _null_ cash_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2492 ( cash_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 790 "2281" _null_ _null_ _null_ _null_ _null_ cash_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2493 ( cash_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "790" _null_ _null_ _null_ _null_ _null_ cash_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2494 ( macaddr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "2281" _null_ _null_ _null_ _null_ _null_ macaddr_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2494 ( macaddr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 829 "2281" _null_ _null_ _null_ _null_ _null_ macaddr_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2495 ( macaddr_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "829" _null_ _null_ _null_ _null_ _null_ macaddr_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2496 ( inet_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2281" _null_ _null_ _null_ _null_ _null_ inet_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2496 ( inet_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 869 "2281" _null_ _null_ _null_ _null_ _null_ inet_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2497 ( inet_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "869" _null_ _null_ _null_ _null_ _null_ inet_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 2498 ( cidr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "2281" _null_ _null_ _null_ _null_ _null_ cidr_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2498 ( cidr_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 650 "2281" _null_ _null_ _null_ _null_ _null_ cidr_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2499 ( cidr_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "650" _null_ _null_ _null_ _null_ _null_ cidr_send _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
/* System-view support functions with pretty-print option */
-DATA(insert OID = 2504 ( pg_get_ruledef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_ruledef_ext _null_ _null_ _null_ ));
+DATA(insert OID = 2504 ( pg_get_ruledef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_ruledef_ext _null_ _null_ _null_ ));
DESCR("source text of a rule with pretty-print option");
-DATA(insert OID = 2505 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "25 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_name_ext _null_ _null_ _null_ ));
+DATA(insert OID = 2505 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "25 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_name_ext _null_ _null_ _null_ ));
DESCR("select statement of a view with pretty-print option");
-DATA(insert OID = 2506 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_ext _null_ _null_ _null_ ));
+DATA(insert OID = 2506 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_ext _null_ _null_ _null_ ));
DESCR("select statement of a view with pretty-print option");
-DATA(insert OID = 3159 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_wrap _null_ _null_ _null_ ));
+DATA(insert OID = 3159 ( pg_get_viewdef PGNSP PGUID 12 1 0 0 0 f f f f t f s r 2 0 25 "26 23" _null_ _null_ _null_ _null_ _null_ pg_get_viewdef_wrap _null_ _null_ _null_ ));
DESCR("select statement of a view with pretty-printing and specified line wrapping");
DATA(insert OID = 2507 ( pg_get_indexdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "26 23 16" _null_ _null_ _null_ _null_ _null_ pg_get_indexdef_ext _null_ _null_ _null_ ));
DESCR("index description (full create statement or single expression) with pretty-print option");
-DATA(insert OID = 2508 ( pg_get_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_constraintdef_ext _null_ _null_ _null_ ));
+DATA(insert OID = 2508 ( pg_get_constraintdef PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 25 "26 16" _null_ _null_ _null_ _null_ _null_ pg_get_constraintdef_ext _null_ _null_ _null_ ));
DESCR("constraint description with pretty-print option");
DATA(insert OID = 2509 ( pg_get_expr PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 25 "194 26 16" _null_ _null_ _null_ _null_ _null_ pg_get_expr_ext _null_ _null_ _null_ ));
DESCR("deparse an encoded expression with pretty-print option");
DESCR("bitwise-or bit aggregate");
/* formerly-missing interval + datetime operators */
-DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
-DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1266 "1186 1266" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
-DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1114" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
-DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 1184 "1186 1184" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
+DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
+DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1266 "1186 1266" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
+DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1114 "1186 1114" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
+DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 1 0 0 0 f f f f t f s s 2 0 1184 "1186 1184" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
DATA(insert OID = 2550 ( integer_pl_date PGNSP PGUID 14 1 0 0 0 f f f f t f i s 2 0 1082 "23 1082" _null_ _null_ _null_ _null_ _null_ "select $2 + $1" _null_ _null_ _null_ ));
DATA(insert OID = 2556 ( pg_tablespace_databases PGNSP PGUID 12 1 1000 0 0 f f f f t t s s 1 0 26 "26" _null_ _null_ _null_ _null_ _null_ pg_tablespace_databases _null_ _null_ _null_ ));
DESCR("GiST support");
DATA(insert OID = 3281 ( gist_box_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_box_fetch _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2581 ( gist_box_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_penalty _null_ _null_ _null_ ));
+DATA(insert OID = 2581 ( gist_box_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_penalty _null_ _null_ _null_ ));
DESCR("GiST support");
DATA(insert OID = 2582 ( gist_box_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ _null_ gist_box_picksplit _null_ _null_ _null_ ));
DESCR("GiST support");
DESCR("GiST support");
DATA(insert OID = 2586 ( gist_poly_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_poly_compress _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 2591 ( gist_circle_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 2591 ( gist_circle_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_consistent _null_ _null_ _null_ ));
DESCR("GiST support");
DATA(insert OID = 2592 ( gist_circle_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2281 "2281" _null_ _null_ _null_ _null_ _null_ gist_circle_compress _null_ _null_ _null_ ));
DESCR("GiST support");
DESCR("GiST support");
DATA(insert OID = 2179 ( gist_point_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_consistent _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 3064 ( gist_point_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_distance _null_ _null_ _null_ ));
+DATA(insert OID = 3064 ( gist_point_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 600 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_point_distance _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 3280 ( gist_circle_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_distance _null_ _null_ _null_ ));
+DATA(insert OID = 3280 ( gist_circle_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 718 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_circle_distance _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 3288 ( gist_poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 604 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_poly_distance _null_ _null_ _null_ ));
+DATA(insert OID = 3288 ( gist_poly_distance PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 701 "2281 604 21 26 2281" _null_ _null_ _null_ _null_ _null_ gist_poly_distance _null_ _null_ _null_ ));
DESCR("GiST support");
/* GIN array support */
DESCR("GIN array support");
DATA(insert OID = 3920 ( ginarraytriconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 2277 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ ginarraytriconsistent _null_ _null_ _null_ ));
DESCR("GIN array support");
-DATA(insert OID = 3076 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2277 2281" _null_ _null_ _null_ _null_ _null_ ginarrayextract_2args _null_ _null_ _null_ ));
+DATA(insert OID = 3076 ( ginarrayextract PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2281 "2277 2281" _null_ _null_ _null_ _null_ _null_ ginarrayextract_2args _null_ _null_ _null_ ));
DESCR("GIN array support (obsolete)");
/* overlap/contains/contained */
DESCR("perform a non-validating parse of a character string to produce an XML value");
DATA(insert OID = 2897 ( xmlvalidate PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "142 25" _null_ _null_ _null_ _null_ _null_ xmlvalidate _null_ _null_ _null_ ));
DESCR("validate an XML value");
-DATA(insert OID = 2898 ( xml_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "2281" _null_ _null_ _null_ _null_ _null_ xml_recv _null_ _null_ _null_ ));
+DATA(insert OID = 2898 ( xml_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 142 "2281" _null_ _null_ _null_ _null_ _null_ xml_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 2899 ( xml_send PGNSP PGUID 12 1 0 0 0 f f f f t f s s 1 0 17 "142" _null_ _null_ _null_ _null_ _null_ xml_send _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 322 ( json_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "114" _null_ _null_ _null_ _null_ _null_ json_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ ));
+DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 114 "2281" _null_ _null_ _null_ _null_ _null_ json_recv _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 324 ( json_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "114" _null_ _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 3611 ( tsvectorout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorout _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3638 ( tsvectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorsend _null_ _null_ _null_ ));
+DATA(insert OID = 3638 ( tsvectorsend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3614" _null_ _null_ _null_ _null_ _null_ tsvectorsend _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3612 ( tsqueryin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3615 "2275" _null_ _null_ _null_ _null_ _null_ tsqueryin _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 3613 ( tsqueryout PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3615" _null_ _null_ _null_ _null_ _null_ tsqueryout _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3640 ( tsquerysend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3615" _null_ _null_ _null_ _null_ _null_ tsquerysend _null_ _null_ _null_ ));
+DATA(insert OID = 3640 ( tsquerysend PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3615" _null_ _null_ _null_ _null_ _null_ tsquerysend _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3646 ( gtsvectorin PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3642 "2275" _null_ _null_ _null_ _null_ _null_ gtsvectorin _null_ _null_ _null_ ));
DESCR("I/O");
DESCR("strip position information");
DATA(insert OID = 3624 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 18" _null_ _null_ _null_ _null_ _null_ tsvector_setweight _null_ _null_ _null_ ));
DESCR("set given weight for whole tsvector");
-DATA(insert OID = 3320 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3614 "3614 18 1009" _null_ _null_ _null_ _null_ _null_ tsvector_setweight_by_filter _null_ _null_ _null_ ));
+DATA(insert OID = 3320 ( setweight PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 3614 "3614 18 1009" _null_ _null_ _null_ _null_ _null_ tsvector_setweight_by_filter _null_ _null_ _null_ ));
DESCR("set given weight for given lexemes");
-DATA(insert OID = 3625 ( tsvector_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_concat _null_ _null_ _null_ ));
-DATA(insert OID = 3321 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 25" _null_ _null_ _null_ _null_ _null_ tsvector_delete_str _null_ _null_ _null_ ));
+DATA(insert OID = 3625 ( tsvector_concat PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 3614" _null_ _null_ _null_ _null_ _null_ tsvector_concat _null_ _null_ _null_ ));
+DATA(insert OID = 3321 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 25" _null_ _null_ _null_ _null_ _null_ tsvector_delete_str _null_ _null_ _null_ ));
DESCR("delete lexeme");
-DATA(insert OID = 3323 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1009" _null_ _null_ _null_ _null_ _null_ tsvector_delete_arr _null_ _null_ _null_ ));
+DATA(insert OID = 3323 ( ts_delete PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1009" _null_ _null_ _null_ _null_ _null_ tsvector_delete_arr _null_ _null_ _null_ ));
DESCR("delete given lexemes");
DATA(insert OID = 3322 ( unnest PGNSP PGUID 12 1 10 0 0 f f f f t t i s 1 0 2249 "3614" "{3614,25,1005,1009}" "{i,o,o,o}" "{tsvector,lexeme,positions,weights}" _null_ _null_ tsvector_unnest _null_ _null_ _null_ ));
DESCR("expand tsvector to set of rows");
-DATA(insert OID = 3326 ( tsvector_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1009 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_to_array _null_ _null_ _null_ ));
+DATA(insert OID = 3326 ( tsvector_to_array PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 1009 "3614" _null_ _null_ _null_ _null_ _null_ tsvector_to_array _null_ _null_ _null_ ));
DESCR("convert tsvector to array of lexemes");
-DATA(insert OID = 3327 ( array_to_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "1009" _null_ _null_ _null_ _null_ _null_ array_to_tsvector _null_ _null_ _null_ ));
+DATA(insert OID = 3327 ( array_to_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3614 "1009" _null_ _null_ _null_ _null_ _null_ array_to_tsvector _null_ _null_ _null_ ));
DESCR("build tsvector from array of lexemes");
-DATA(insert OID = 3319 ( ts_filter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1002" _null_ _null_ _null_ _null_ _null_ tsvector_filter _null_ _null_ _null_ ));
+DATA(insert OID = 3319 ( ts_filter PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 3614 "3614 1002" _null_ _null_ _null_ _null_ _null_ tsvector_filter _null_ _null_ _null_ ));
DESCR("delete lexemes that do not have one of the given weights");
DATA(insert OID = 3634 ( ts_match_vq PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "3614 3615" _null_ _null_ _null_ _null_ _null_ ts_match_vq _null_ _null_ _null_ ));
DATA(insert OID = 3790 ( gtsvector_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 5 0 16 "2281 3642 23 26 2281" _null_ _null_ _null_ _null_ _null_ gtsvector_consistent_oldsig _null_ _null_ _null_ ));
DESCR("GiST tsvector support (obsolete)");
-DATA(insert OID = 3656 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3614 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsvector _null_ _null_ _null_ ));
+DATA(insert OID = 3656 ( gin_extract_tsvector PGNSP PGUID 12 1 0 0 0 f f f f t f i s 3 0 2281 "3614 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsvector _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
DATA(insert OID = 3657 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3614 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
-DATA(insert OID = 3658 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3614 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 3658 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3614 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
DATA(insert OID = 3921 ( gin_tsquery_triconsistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 18 "2281 21 3614 23 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_triconsistent _null_ _null_ _null_ ));
DESCR("GIN tsvector support");
DESCR("GIN tsvector support (obsolete)");
DATA(insert OID = 3791 ( gin_extract_tsquery PGNSP PGUID 12 1 0 0 0 f f f f t f i s 7 0 2281 "3615 2281 21 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_extract_tsquery_oldsig _null_ _null_ _null_ ));
DESCR("GIN tsvector support (obsolete)");
-DATA(insert OID = 3792 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_oldsig _null_ _null_ _null_ ));
+DATA(insert OID = 3792 ( gin_tsquery_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 8 0 16 "2281 21 3615 23 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ gin_tsquery_consistent_oldsig _null_ _null_ _null_ ));
DESCR("GIN tsvector support (obsolete)");
DATA(insert OID = 3789 ( gin_clean_pending_list PGNSP PGUID 12 1 0 0 0 f f f f t f v s 1 0 20 "2205" _null_ _null_ _null_ _null_ _null_ gin_clean_pending_list _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3804 ( jsonb_out PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2275 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_out _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3803 ( jsonb_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_send _null_ _null_ _null_ ));
+DATA(insert OID = 3803 ( jsonb_send PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 17 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_send _null_ _null_ _null_ ));
DESCR("I/O");
DATA(insert OID = 3263 ( jsonb_object PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 3802 "1009" _null_ _null_ _null_ _null_ _null_ jsonb_object _null_ _null_ _null_ ));
DESCR("aggregate inputs into jsonb object");
DATA(insert OID = 3271 ( jsonb_build_array PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_array _null_ _null_ _null_ ));
DESCR("build a jsonb array from any inputs");
-DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ ));
+DATA(insert OID = 3272 ( jsonb_build_array PGNSP PGUID 12 1 0 0 0 f f f f f f s s 0 0 3802 "" _null_ _null_ _null_ _null_ _null_ jsonb_build_array_noargs _null_ _null_ _null_ ));
DESCR("build an empty jsonb array");
DATA(insert OID = 3273 ( jsonb_build_object PGNSP PGUID 12 1 0 2276 0 f f f f f f s s 1 0 3802 "2276" "{2276}" "{v}" _null_ _null_ _null_ jsonb_build_object _null_ _null_ _null_ ));
DESCR("build a jsonb object from pairwise key/value inputs");
DESCR("Set part of a jsonb");
DATA(insert OID = 3306 ( jsonb_pretty PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 25 "3802" _null_ _null_ _null_ _null_ _null_ jsonb_pretty _null_ _null_ _null_ ));
DESCR("Indented text from jsonb");
-DATA(insert OID = 3579 ( jsonb_insert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_insert _null_ _null_ _null_ ));
+DATA(insert OID = 3579 ( jsonb_insert PGNSP PGUID 12 1 0 0 0 f f f f t f i s 4 0 3802 "3802 1009 3802 16" _null_ _null_ _null_ _null_ _null_ jsonb_insert _null_ _null_ _null_ ));
DESCR("Insert value into a jsonb");
/* txid */
DATA(insert OID = 2939 ( txid_snapshot_in PGNSP PGUID 12 1 0 0 0 f f f f t f i s 1 0 2970 "2275" _null_ _null_ _null_ _null_ _null_ txid_snapshot_in _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree over box");
DATA(insert OID = 5014 ( spg_box_quad_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_picksplit _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree over box");
-DATA(insert OID = 5015 ( spg_box_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_inner_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 5015 ( spg_box_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_inner_consistent _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree over box");
DATA(insert OID = 5016 ( spg_box_quad_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i s 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ _null_ spg_box_quad_leaf_consistent _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree over box");
DESCR("aggregate final function");
/* hypothetical-set aggregates (and their support functions) */
-DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 3986 ( rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("rank of hypothetical row");
-DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ ));
+DATA(insert OID = 3987 ( rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_rank_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
DATA(insert OID = 3988 ( percent_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 701 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("fractional rank of hypothetical row");
DESCR("cumulative distribution of hypothetical row");
DATA(insert OID = 3991 ( cume_dist_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 701 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_cume_dist_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
-DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
+DATA(insert OID = 3992 ( dense_rank PGNSP PGUID 12 1 0 2276 0 t f f f f f i s 1 0 20 "2276" "{2276}" "{v}" _null_ _null_ _null_ aggregate_dummy _null_ _null_ _null_ ));
DESCR("rank of hypothetical row without gaps");
-DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ ));
+DATA(insert OID = 3993 ( dense_rank_final PGNSP PGUID 12 1 0 2276 0 f f f f f f i s 2 0 20 "2281 2276" "{2281,2276}" "{i,v}" _null_ _null_ _null_ hypothetical_dense_rank_final _null_ _null_ _null_ ));
DESCR("aggregate final function");
/* pg_upgrade support */
*/
#define PROPARALLEL_SAFE 's' /* can run in worker or master */
#define PROPARALLEL_RESTRICTED 'r' /* can run in parallel master only */
-#define PROPARALLEL_UNSAFE 'u' /* banned while in parallel mode */
+#define PROPARALLEL_UNSAFE 'u' /* banned while in parallel mode */
/*
* Symbolic values for proargmodes column. Note that these must agree with
DATA(insert OID = 3115 ( fdw_handler PGNSP PGUID 4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
#define FDW_HANDLEROID 3115
DATA(insert OID = 325 ( index_am_handler PGNSP PGUID 4 t p P f t \054 0 0 0 index_am_handler_in index_am_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
-#define INDEX_AM_HANDLEROID 325
+#define INDEX_AM_HANDLEROID 325
DATA(insert OID = 3310 ( tsm_handler PGNSP PGUID 4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
#define TSM_HANDLEROID 3310
DATA(insert OID = 3831 ( anyrange PGNSP PGUID -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
} ConfigData;
extern ConfigData *get_configdata(const char *my_exec_path,
- size_t *configdata_len);
+ size_t *configdata_len);
#endif /* COMMON_CONFIG_INFO_H */
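The hunk above only reindents the get_configdata() prototype, but it does show the public surface of common/config_info.h. A minimal, hedged sketch of how a frontend caller such as pg_config might walk the returned array follows; the name/setting field names of ConfigData are an assumption, since the struct body is not part of this hunk.

#include "postgres_fe.h"		/* frontend usage, as in pg_config */
#include "common/config_info.h"

/* Sketch only: print every build-time configuration item. */
static void
print_configdata(const char *my_exec_path)
{
	size_t		configdata_len;
	ConfigData *configdata = get_configdata(my_exec_path, &configdata_len);
	size_t		i;

	for (i = 0; i < configdata_len; i++)
		printf("%s = %s\n",
			   configdata[i].name,		/* assumed field name */
			   configdata[i].setting);	/* assumed field name */
}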
typedef struct ParallelExecutorInfo
{
- PlanState *planstate;
+ PlanState *planstate;
ParallelContext *pcxt;
BufferUsage *buffer_usage;
SharedExecutorInstrumentation *instrumentation;
shm_mq_handle **tqueue;
- bool finished;
-} ParallelExecutorInfo;
+ bool finished;
+} ParallelExecutorInfo;
extern ParallelExecutorInfo *ExecInitParallelPlan(PlanState *planstate,
EState *estate, int nworkers);
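Since this hunk exposes both the ParallelExecutorInfo fields and the ExecInitParallelPlan() prototype, a rough usage sketch may help; it assumes LaunchParallelWorkers() from access/parallel.h (not shown in this diff) and omits all error handling and tuple reading.

#include "postgres.h"
#include "access/parallel.h"
#include "executor/execParallel.h"

/* Sketch: set up and launch a parallel plan for nworkers workers. */
static ParallelExecutorInfo *
start_parallel_plan(PlanState *planstate, EState *estate, int nworkers)
{
	ParallelExecutorInfo *pei;

	/* builds the DSM segment, instrumentation space and tuple queues */
	pei = ExecInitParallelPlan(planstate, estate, nworkers);

	/* start the background workers attached to the parallel context */
	LaunchParallelWorkers(pei->pcxt);

	/*
	 * Tuples would then be read from the per-worker queues in pei->tqueue;
	 * pei->buffer_usage and pei->instrumentation collect per-worker stats.
	 */
	return pei;
}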
* Parallel execution support
*/
extern void ExecCustomScanEstimate(CustomScanState *node,
- ParallelContext *pcxt);
+ ParallelContext *pcxt);
extern void ExecCustomScanInitializeDSM(CustomScanState *node,
- ParallelContext *pcxt);
+ ParallelContext *pcxt);
extern void ExecCustomScanInitializeWorker(CustomScanState *node,
- shm_toc *toc);
+ shm_toc *toc);
#endif /* NODECUSTOM_H */
extern void ExecReScanForeignScan(ForeignScanState *node);
extern void ExecForeignScanEstimate(ForeignScanState *node,
- ParallelContext *pcxt);
+ ParallelContext *pcxt);
extern void ExecForeignScanInitializeDSM(ForeignScanState *node,
- ParallelContext *pcxt);
+ ParallelContext *pcxt);
extern void ExecForeignScanInitializeWorker(ForeignScanState *node,
- shm_toc *toc);
+ shm_toc *toc);
#endif /* NODEFOREIGNSCAN_H */
JoinPathExtraData *extra);
typedef void (*GetForeignUpperPaths_function) (PlannerInfo *root,
- RelOptInfo *scan_join_rel);
+ RelOptInfo *scan_join_rel);
typedef void (*AddForeignUpdateTargets_function) (Query *parsetree,
RangeTblEntry *target_rte,
typedef int (*IsForeignRelUpdatable_function) (Relation rel);
typedef bool (*PlanDirectModify_function) (PlannerInfo *root,
- ModifyTable *plan,
- Index resultRelation,
- int subplan_index);
+ ModifyTable *plan,
+ Index resultRelation,
+ int subplan_index);
typedef void (*BeginDirectModify_function) (ForeignScanState *node,
- int eflags);
+ int eflags);
typedef TupleTableSlot *(*IterateDirectModify_function) (ForeignScanState *node);
Oid serverOid);
typedef Size (*EstimateDSMForeignScan_function) (ForeignScanState *node,
- ParallelContext *pcxt);
+ ParallelContext *pcxt);
typedef void (*InitializeDSMForeignScan_function) (ForeignScanState *node,
- ParallelContext *pcxt,
- void *coordinate);
+ ParallelContext *pcxt,
+ void *coordinate);
typedef void (*InitializeWorkerForeignScan_function) (ForeignScanState *node,
- shm_toc *toc,
- void *coordinate);
+ shm_toc *toc,
+ void *coordinate);
typedef bool (*IsForeignScanParallelSafe_function) (PlannerInfo *root,
RelOptInfo *rel,
RangeTblEntry *rte);
extern ForeignServer *GetForeignServer(Oid serverid);
extern ForeignServer *GetForeignServerByName(const char *name, bool missing_ok);
extern UserMapping *GetUserMapping(Oid userid, Oid serverid);
-extern Oid GetUserMappingId(Oid userid, Oid serverid, bool missing_ok);
+extern Oid GetUserMappingId(Oid userid, Oid serverid, bool missing_ok);
extern UserMapping *GetUserMappingById(Oid umid);
extern ForeignDataWrapper *GetForeignDataWrapper(Oid fdwid);
extern ForeignDataWrapper *GetForeignDataWrapperByName(const char *name,
* top-level plan */
Instrumentation *instrument; /* Optional runtime stats for this node */
- WorkerInstrumentation *worker_instrument; /* per-worker instrumentation */
+ WorkerInstrumentation *worker_instrument; /* per-worker instrumentation */
/*
* Common structural data for all Plan types. These links to subsidiary
typedef struct ForeignScanState
{
ScanState ss; /* its first field is NodeTag */
- List *fdw_recheck_quals; /* original quals not in ss.ps.qual */
+ List *fdw_recheck_quals; /* original quals not in ss.ps.qual */
Size pscan_len; /* size of parallel coordination information */
/* use struct pointer to avoid including fdwapi.h here */
struct FdwRoutine *fdwroutine;
AggStatePerTrans pertrans; /* per-Trans state information */
ExprContext **aggcontexts; /* econtexts for long-lived data (per GS) */
ExprContext *tmpcontext; /* econtext for input expressions */
- AggStatePerTrans curpertrans; /* currently active trans state */
+ AggStatePerTrans curpertrans; /* currently active trans state */
bool input_done; /* indicates end of input */
bool agg_done; /* indicates completion of Agg scan */
bool combineStates; /* input tuples contain transition states */
/*-------------------------------------------------------------------------
*
* extensible.h
- * Definitions for extensible nodes and custom scans
+ * Definitions for extensible nodes and custom scans
*
*
* Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
{
const char *extnodename;
Size node_size;
- void (*nodeCopy)(struct ExtensibleNode *newnode,
- const struct ExtensibleNode *oldnode);
- bool (*nodeEqual)(const struct ExtensibleNode *a,
- const struct ExtensibleNode *b);
- void (*nodeOut)(struct StringInfoData *str,
- const struct ExtensibleNode *node);
- void (*nodeRead)(struct ExtensibleNode *node);
+ void (*nodeCopy) (struct ExtensibleNode *newnode,
+ const struct ExtensibleNode *oldnode);
+ bool (*nodeEqual) (const struct ExtensibleNode *a,
+ const struct ExtensibleNode *b);
+ void (*nodeOut) (struct StringInfoData *str,
+ const struct ExtensibleNode *node);
+ void (*nodeRead) (struct ExtensibleNode *node);
} ExtensibleNodeMethods;
extern void RegisterExtensibleNodeMethods(const ExtensibleNodeMethods *method);
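The ExtensibleNodeMethods hunk above is mostly a whitespace change to the callback pointers, but it spells out the full registration interface. Below is a hedged sketch of a module registering a trivial extensible node; the requirement that the custom struct embed ExtensibleNode as its first member, and the use of _PG_init() as the registration point, are conventions assumed here rather than shown in the diff.

#include "postgres.h"
#include "fmgr.h"
#include "lib/stringinfo.h"
#include "nodes/extensible.h"

PG_MODULE_MAGIC;

/* Illustrative node carrying a single integer. */
typedef struct MyExtNode
{
	ExtensibleNode xnode;		/* assumed required first member */
	int			magic;
} MyExtNode;

static void
my_ext_copy(struct ExtensibleNode *newnode, const struct ExtensibleNode *oldnode)
{
	((MyExtNode *) newnode)->magic = ((const MyExtNode *) oldnode)->magic;
}

static bool
my_ext_equal(const struct ExtensibleNode *a, const struct ExtensibleNode *b)
{
	return ((const MyExtNode *) a)->magic == ((const MyExtNode *) b)->magic;
}

static void
my_ext_out(struct StringInfoData *str, const struct ExtensibleNode *node)
{
	appendStringInfo(str, " :magic %d", ((const MyExtNode *) node)->magic);
}

static void
my_ext_read(struct ExtensibleNode *node)
{
	/* a real implementation would parse the token stream written by nodeOut */
}

static const ExtensibleNodeMethods my_ext_methods = {
	"MyExtNode",				/* extnodename */
	sizeof(MyExtNode),			/* node_size */
	my_ext_copy,
	my_ext_equal,
	my_ext_out,
	my_ext_read
};

void
_PG_init(void)
{
	RegisterExtensibleNodeMethods(&my_ext_methods);
}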
/* Convert Path to a Plan */
struct Plan *(*PlanCustomPath) (PlannerInfo *root,
- RelOptInfo *rel,
- struct CustomPath *best_path,
- List *tlist,
- List *clauses,
- List *custom_plans);
-} CustomPathMethods;
+ RelOptInfo *rel,
+ struct CustomPath *best_path,
+ List *tlist,
+ List *clauses,
+ List *custom_plans);
+} CustomPathMethods;
/*
* Custom scan. Here again, there's not much to do: we need to be able to
/* Required executor methods */
void (*BeginCustomScan) (CustomScanState *node,
- EState *estate,
- int eflags);
+ EState *estate,
+ int eflags);
TupleTableSlot *(*ExecCustomScan) (CustomScanState *node);
void (*EndCustomScan) (CustomScanState *node);
void (*ReScanCustomScan) (CustomScanState *node);
/* Optional methods: needed if parallel execution is supported */
Size (*EstimateDSMCustomScan) (CustomScanState *node,
- ParallelContext *pcxt);
+ ParallelContext *pcxt);
void (*InitializeDSMCustomScan) (CustomScanState *node,
- ParallelContext *pcxt,
- void *coordinate);
+ ParallelContext *pcxt,
+ void *coordinate);
void (*InitializeWorkerCustomScan) (CustomScanState *node,
- shm_toc *toc,
- void *coordinate);
+ shm_toc *toc,
+ void *coordinate);
/* Optional: print additional information in EXPLAIN */
void (*ExplainCustomScan) (CustomScanState *node,
- List *ancestors,
- ExplainState *es);
+ List *ancestors,
+ ExplainState *es);
} CustomExecMethods;
extern void RegisterCustomScanMethods(const CustomScanMethods *methods);
extern const CustomScanMethods *GetCustomScanMethods(const char *CustomName,
- bool missing_ok);
+ bool missing_ok);
-#endif /* EXTENSIBLE_H */
+#endif /* EXTENSIBLE_H */
extern void outBitmapset(struct StringInfoData *str,
const struct Bitmapset *bms);
extern void outDatum(struct StringInfoData *str, uintptr_t value,
- int typlen, bool typbyval);
+ int typlen, bool typbyval);
/*
* nodes/{readfuncs.c,read.c}
ParserSetupHook parserSetup; /* parser setup hook */
void *parserSetupArg;
int numParams; /* number of ParamExternDatas following */
- struct Bitmapset *paramMask; /* if non-NULL, can ignore omitted params */
+ struct Bitmapset *paramMask; /* if non-NULL, can ignore omitted params */
ParamExternData params[FLEXIBLE_ARRAY_MEMBER];
} ParamListInfoData;
bool hasRowSecurity; /* row security applied? */
- bool parallelModeNeeded; /* parallel mode required to execute? */
- bool hasForeignJoin; /* Plan has a pushed down foreign join */
+ bool parallelModeNeeded; /* parallel mode required to execute? */
+ bool hasForeignJoin; /* Plan has a pushed down foreign join */
} PlannedStmt;
/* macro for fetching the Plan associated with a SubPlan node */
List *fdw_exprs; /* expressions that FDW may evaluate */
List *fdw_private; /* private data for FDW */
List *fdw_scan_tlist; /* optional tlist describing scan tuple */
- List *fdw_recheck_quals; /* original quals not in scan.plan.qual */
+ List *fdw_recheck_quals; /* original quals not in
+ * scan.plan.qual */
Bitmapset *fs_relids; /* RTIs generated by this scan */
bool fsSystemCol; /* true if any "system column" is needed */
} ForeignScan;
/* default result targetlist for Paths scanning this relation */
struct PathTarget *reltarget; /* list of Vars/Exprs, cost, width */
- bool reltarget_has_non_vars; /* true if any expression in
+ bool reltarget_has_non_vars; /* true if any expression in
* PathTarget is a non-Var */
/* materialization information */
bool parallel_aware; /* engage parallel-aware logic? */
bool parallel_safe; /* OK to use as part of parallel plan? */
- int parallel_workers; /* desired # of workers; 0 = not parallel */
+ int parallel_workers; /* desired # of workers; 0 = not
+ * parallel */
/* estimated size/costs for path (see costsize.c for more info) */
double rows; /* estimated number of result tuples */
* which context it is allowed in. We require three states here as there are
* two different contexts in which partial aggregation is safe. For aggregates
* which have an 'stype' of INTERNAL, it is okay to pass a pointer to the
- * aggregate state within a single process, since the datum is just a
+ * aggregate state within a single process, since the datum is just a
* pointer. In cases where the aggregate state must be passed between
- * different processes, for example during parallel aggregation, passing
- * pointers directly is not going to work.
+ * different processes, for example during parallel aggregation, passing
+ * pointers directly is not going to work.
*/
typedef enum
{
- PAT_ANY = 0, /* Any type of partial aggregation is okay. */
- PAT_INTERNAL_ONLY, /* Some aggregates support only internal mode. */
- PAT_DISABLED /* Some aggregates don't support partial mode at all */
+ PAT_ANY = 0, /* Any type of partial aggregation is okay. */
+ PAT_INTERNAL_ONLY, /* Some aggregates support only internal mode. */
+ PAT_DISABLED /* Some aggregates don't support partial mode
+ * at all */
} PartialAggType;
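A hedged sketch of how a caller might act on these three values; the helper name and arguments are invented for illustration and are not part of the patch:

/*
 * Hypothetical helper: may this aggregate be split into partial and final
 * steps, given how far the partial state has to travel?
 */
static bool
can_split_agg(PartialAggType allowed, bool state_crosses_processes)
{
	if (allowed == PAT_DISABLED)
		return false;			/* no partial mode at all */
	if (allowed == PAT_INTERNAL_ONLY && state_crosses_processes)
		return false;			/* an INTERNAL state datum is just a pointer,
								 * valid only within one process */
	return true;				/* PAT_ANY, or the state never leaves the
								 * process */
}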
extern Expr *make_opclause(Oid opno, Oid opresulttype, bool opretset,
int numArguments);
extern void build_aggregate_transfn_expr(Oid *agg_input_types,
- int agg_num_inputs,
- int agg_num_direct_inputs,
- bool agg_variadic,
- Oid agg_state_type,
- Oid agg_input_collation,
- Oid transfn_oid,
- Oid invtransfn_oid,
- Expr **transfnexpr,
- Expr **invtransfnexpr);
+ int agg_num_inputs,
+ int agg_num_direct_inputs,
+ bool agg_variadic,
+ Oid agg_state_type,
+ Oid agg_input_collation,
+ Oid transfn_oid,
+ Oid invtransfn_oid,
+ Expr **transfnexpr,
+ Expr **invtransfnexpr);
extern void build_aggregate_combinefn_expr(Oid agg_state_type,
- Oid agg_input_collation,
- Oid combinefn_oid,
- Expr **combinefnexpr);
+ Oid agg_input_collation,
+ Oid combinefn_oid,
+ Expr **combinefnexpr);
extern void build_aggregate_serialfn_expr(Oid agg_state_type,
- Oid agg_serial_type,
- Oid agg_input_collation,
- Oid serialfn_oid,
- Expr **serialfnexpr);
+ Oid agg_serial_type,
+ Oid agg_input_collation,
+ Oid serialfn_oid,
+ Expr **serialfnexpr);
extern void build_aggregate_finalfn_expr(Oid *agg_input_types,
- int num_finalfn_inputs,
- Oid agg_state_type,
- Oid agg_result_type,
- Oid agg_input_collation,
- Oid finalfn_oid,
- Expr **finalfnexpr);
+ int num_finalfn_inputs,
+ Oid agg_state_type,
+ Oid agg_result_type,
+ Oid agg_input_collation,
+ Oid finalfn_oid,
+ Expr **finalfnexpr);
#endif /* PARSE_AGG_H */
bool warn);
extern char *downcase_identifier(const char *ident, int len,
- bool warn, bool truncate);
+ bool warn, bool truncate);
extern void truncate_identifier(char *ident, int len, bool warn);
{
PROGRESS_COMMAND_INVALID,
PROGRESS_COMMAND_VACUUM
-} ProgressCommandType;
+} ProgressCommandType;
#define PGSTAT_NUM_PROGRESS_PARAM 10
*/
typedef struct xl_logical_message
{
- Oid dbId; /* database Oid emitted from */
- bool transactional; /* is message transactional? */
- Size prefix_size; /* length of prefix */
- Size message_size; /* size of the message */
- char message[FLEXIBLE_ARRAY_MEMBER]; /* message including the null
+ Oid dbId; /* database Oid emitted from */
+ bool transactional; /* is message transactional? */
+ Size prefix_size; /* length of prefix */
+ Size message_size; /* size of the message */
+ char message[FLEXIBLE_ARRAY_MEMBER]; /* message including the null
* terminated prefix of length
* prefix_size */
} xl_logical_message;
#define SizeOfLogicalMessage (offsetof(xl_logical_message, message))
extern XLogRecPtr LogLogicalMessage(const char *prefix, const char *message,
- size_t size, bool transactional);
+ size_t size, bool transactional);
/* RMGR API*/
#define XLOG_LOGICAL_MESSAGE 0x00
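LogLogicalMessage() above is the C-level entry point for these generic WAL messages (SQL users reach it through pg_logical_emit_message()). A minimal hedged sketch of emitting one from an extension; the prefix and helper name are made up:

#include "postgres.h"
#include "replication/message.h"

/* hypothetical helper: write one transactional logical message into WAL */
static XLogRecPtr
emit_audit_message(const char *payload)
{
	/* include the terminating NUL so decoding plugins get a C string back */
	return LogLogicalMessage("my_audit",	/* prefix, used by plugins to filter */
							 payload,
							 strlen(payload) + 1,
							 true);		/* transactional: delivered at commit */
}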
*/
typedef void (*LogicalDecodeMessageCB) (
struct LogicalDecodingContext *,
- ReorderBufferTXN *txn,
- XLogRecPtr message_lsn,
- bool transactional,
- const char *prefix,
- Size message_size,
- const char *message);
+ ReorderBufferTXN *txn,
+ XLogRecPtr message_lsn,
+ bool transactional,
+ const char *prefix,
+ Size message_size,
+ const char *message);
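An output plugin receives these messages by filling the message_cb member of its OutputPluginCallbacks with a function of exactly this shape; the name and body below are illustrative only:

/* hypothetical callback matching LogicalDecodeMessageCB above */
static void
my_message_cb(struct LogicalDecodingContext *ctx,
			  ReorderBufferTXN *txn,
			  XLogRecPtr message_lsn,
			  bool transactional,
			  const char *prefix,
			  Size message_size,
			  const char *message)
{
	/* e.g. skip messages whose prefix we do not recognize */
	if (strcmp(prefix, "my_audit") != 0)
		return;

	/* otherwise forward prefix and payload via the plugin's output stream */
}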
/*
* Filter changes by origin.
HeapTupleData tuple;
/* pre-allocated size of tuple buffer, different from tuple size */
- Size alloc_tuple_size;
+ Size alloc_tuple_size;
/* actual tuple data follows */
} ReorderBufferTupleBuf;
/* Message with arbitrary data. */
struct
{
- char *prefix;
- Size message_size;
- char *message;
- } msg;
+ char *prefix;
+ Size message_size;
+ char *message;
+ } msg;
/* New snapshot, set when action == *_INTERNAL_SNAPSHOT */
Snapshot snapshot;
/* message callback signature */
typedef void (*ReorderBufferMessageCB) (
- ReorderBuffer *rb,
- ReorderBufferTXN *txn,
- XLogRecPtr message_lsn,
- bool transactional,
- const char *prefix, Size sz,
- const char *message);
+ ReorderBuffer *rb,
+ ReorderBufferTXN *txn,
+ XLogRecPtr message_lsn,
+ bool transactional,
+ const char *prefix, Size sz,
+ const char *message);
struct ReorderBuffer
{
void ReorderBufferReturnChange(ReorderBuffer *, ReorderBufferChange *);
void ReorderBufferQueueChange(ReorderBuffer *, TransactionId, XLogRecPtr lsn, ReorderBufferChange *);
-void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn,
- bool transactional, const char *prefix,
- Size message_size, const char *message);
+void ReorderBufferQueueMessage(ReorderBuffer *, TransactionId, Snapshot snapshot, XLogRecPtr lsn,
+ bool transactional, const char *prefix,
+ Size message_size, const char *message);
void ReorderBufferCommit(ReorderBuffer *, TransactionId,
XLogRecPtr commit_lsn, XLogRecPtr end_lsn,
TimestampTz commit_time, RepOriginId origin_id, XLogRecPtr origin_lsn);
void ReorderBufferAddInvalidations(ReorderBuffer *, TransactionId, XLogRecPtr lsn,
Size nmsgs, SharedInvalidationMessage *msgs);
void ReorderBufferImmediateInvalidation(ReorderBuffer *, uint32 ninvalidations,
- SharedInvalidationMessage *invalidations);
+ SharedInvalidationMessage *invalidations);
void ReorderBufferProcessXid(ReorderBuffer *, TransactionId xid, XLogRecPtr lsn);
void ReorderBufferXidSetCatalogChanges(ReorderBuffer *, TransactionId xid, XLogRecPtr lsn);
bool ReorderBufferXidHasCatalogChanges(ReorderBuffer *, TransactionId xid);
extern SnapBuildState SnapBuildCurrentState(SnapBuild *snapstate);
extern Snapshot SnapBuildGetOrBuildSnapshot(SnapBuild *builder,
- TransactionId xid);
+ TransactionId xid);
extern bool SnapBuildXactNeedsSkip(SnapBuild *snapstate, XLogRecPtr ptr);
extern void FileWriteback(File file, off_t offset, off_t nbytes);
extern char *FilePathName(File file);
extern int FileGetRawDesc(File file);
-extern int FileGetRawFlags(File file);
+extern int FileGetRawFlags(File file);
extern int FileGetRawMode(File file);
/* Operations that allow use of regular stdio --- USE WITH CAUTION */
extern void FreeSpaceMapTruncateRel(Relation rel, BlockNumber nblocks);
extern void FreeSpaceMapVacuum(Relation rel);
extern void UpdateFreeSpaceMap(Relation rel,
- BlockNumber startBlkNum,
- BlockNumber endBlkNum,
- Size freespace);
+ BlockNumber startBlkNum,
+ BlockNumber endBlkNum,
+ Size freespace);
#endif /* FREESPACE_H_ */
Oid relOid;
} xl_standby_lock;
-#endif /* LOCKDEF_H_ */
+#endif /* LOCKDEF_H_ */
{
/* proc->links MUST BE FIRST IN STRUCT (see ProcSleep,ProcWakeup,etc) */
SHM_QUEUE links; /* list link if process is in a list */
- PGPROC **procgloballist; /* procglobal list that owns this PGPROC */
+ PGPROC **procgloballist; /* procglobal list that owns this PGPROC */
PGSemaphoreData sem; /* ONE semaphore to sleep on */
int waitStatus; /* STATUS_WAITING, STATUS_OK or STATUS_ERROR */
/* Support for group XID clearing. */
/* true, if member of ProcArray group waiting for XID clear */
- bool procArrayGroupMember;
+ bool procArrayGroupMember;
/* next ProcArray group member waiting for XID clear */
- pg_atomic_uint32 procArrayGroupNext;
+ pg_atomic_uint32 procArrayGroupNext;
+
/*
* latest transaction id among the transaction's main XID and
* subtransactions
*/
- TransactionId procArrayGroupMemberXid;
+ TransactionId procArrayGroupMemberXid;
- uint32 wait_event_info; /* proc's wait information */
+ uint32 wait_event_info; /* proc's wait information */
/* Per-backend LWLock. Protects fields below (but not group fields). */
LWLock backendLock;
* leader to get the LWLock protecting these fields.
*/
PGPROC *lockGroupLeader; /* lock group leader, if I'm a member */
- dlist_head lockGroupMembers; /* list of members, if I'm a leader */
- dlist_node lockGroupLink; /* my member link, if I'm a member */
+ dlist_head lockGroupMembers; /* list of members, if I'm a leader */
+ dlist_node lockGroupLink; /* my member link, if I'm a member */
};
/* NOTE: "typedef struct PGPROC PGPROC" appears in storage/lock.h. */
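The lockGroup* fields belong to 9.6's group locking: parallel workers attach to their leader's group so the lock manager can treat the whole group as a single locking entity. A hedged sketch of the membership test that idea implies (this is not the actual lock-manager code):

/*
 * Hypothetical helper: do two processes belong to the same lock group?
 * A process outside any group has lockGroupLeader == NULL and stands alone.
 */
static bool
same_lock_group(PGPROC *a, PGPROC *b)
{
	PGPROC	   *leader_a = (a->lockGroupLeader != NULL) ? a->lockGroupLeader : a;
	PGPROC	   *leader_b = (b->lockGroupLeader != NULL) ? b->lockGroupLeader : b;

	return leader_a == leader_b;
}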
extern XLogRecPtr LogStandbySnapshot(void);
extern void LogStandbyInvalidations(int nmsgs, SharedInvalidationMessage *msgs,
- bool relcacheInitFileInval);
+ bool relcacheInitFileInval);
#endif /* STANDBY_H */
extern void standby_desc(StringInfo buf, XLogReaderState *record);
extern const char *standby_identify(uint8 info);
extern void standby_desc_invalidations(StringInfo buf,
- int nmsgs, SharedInvalidationMessage *msgs,
- Oid dbId, Oid tsId,
- bool relcacheInitFileInval);
+ int nmsgs, SharedInvalidationMessage *msgs,
+ Oid dbId, Oid tsId,
+ bool relcacheInitFileInval);
/*
* XLOG message types
{
uint32 val:8,
isword:1,
- /* Stores compound flags listed below */
+ /* Stores compound flags listed below */
compoundflag:4,
- /* Reference to an entry of the AffixData field */
+ /* Reference to an entry of the AffixData field */
affix:19;
struct SPNode *node;
} SPNodeData;
union
{
/*
- * flag is filled in by NIImportDictionary(). After NISortDictionary(),
- * d is used instead of flag.
+ * flag is filled in by NIImportDictionary(). After
+ * NISortDictionary(), d is used instead of flag.
*/
char *flag;
/* d is used in mkSPNode() */
typedef struct aff_struct
{
char *flag;
- /* FF_SUFFIX or FF_PREFIX */
+ /* FF_SUFFIX or FF_PREFIX */
uint32 type:1,
flagflags:7,
issimple:1,
*/
typedef enum
{
- FM_CHAR, /* one character (like ispell) */
- FM_LONG, /* two characters */
- FM_NUM /* number, >= 0 and < 65536 */
+ FM_CHAR, /* one character (like ispell) */
+ FM_LONG, /* two characters */
+ FM_NUM /* number, >= 0 and < 65536 */
} FlagMode;
/*
*/
/* Array of Hunspell options in affix file */
- CompoundAffixFlag *CompoundAffixFlags;
+ CompoundAffixFlag *CompoundAffixFlags;
/* number of entries in CompoundAffixFlags array */
- int nCompoundAffixFlag;
+ int nCompoundAffixFlag;
/* allocated length of CompoundAffixFlags array */
- int mCompoundAffixFlag;
+ int mCompoundAffixFlag;
/*
* Remaining fields are only used during dictionary construction; they are
*/
typedef struct
{
- uint32 selected: 1,
- in: 1,
- replace: 1,
- repeated: 1,
- skip: 1,
- unused: 3,
- type: 8,
- len: 16;
- WordEntryPos pos;
- char *word;
- QueryOperand *item;
+ uint32 selected:1,
+ in:1,
+ replace:1,
+ repeated:1,
+ skip:1,
+ unused:3,
+ type:8,
+ len:16;
+ WordEntryPos pos;
+ char *word;
+ QueryOperand *item;
} HeadlineWordEntry;
typedef struct
HeadlineWordEntry *words;
int32 lenwords;
int32 curwords;
- int32 vectorpos; /* positions a-la tsvector */
+ int32 vectorpos; /* positions a-la tsvector */
char *startsel;
char *stopsel;
char *fragdelim;
#define MAXSTRLEN ( (1<<11) - 1)
#define MAXSTRPOS ( (1<<20) - 1)
-extern int compareWordEntryPos(const void *a, const void *b);
+extern int compareWordEntryPos(const void *a, const void *b);
/*
* Equivalent to
extern const int tsearch_op_priority[OP_COUNT];
-#define NOT_PHRASE_P 5 /*
- * OP_PHRASE negation operations must have greater
- * priority in order to force infix() to surround
- * the whole OP_PHRASE expression with parentheses.
- */
+#define NOT_PHRASE_P 5 /* OP_PHRASE negation operations must have
+ * greater priority in order to force infix()
+ * to surround the whole OP_PHRASE expression
+ * with parentheses. */
-#define TOP_PRIORITY 6 /* highest priority for val nodes */
+#define TOP_PRIORITY 6 /* highest priority for val nodes */
/* get operation priority by its code*/
-#define OP_PRIORITY(x) ( tsearch_op_priority[(x) - 1] )
+#define OP_PRIORITY(x) ( tsearch_op_priority[(x) - 1] )
/* get QueryOperator priority */
#define QO_PRIORITY(x) OP_PRIORITY(((QueryOperator *) (x))->oper)
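These priorities drive parenthesization when a tsquery is printed: broadly, a sub-expression is wrapped in parentheses when its priority is lower than its parent's, which is why negation of a phrase is bumped to NOT_PHRASE_P above. A hedged restatement of that rule (the helper is invented; the real logic is in infix() in tsquery.c):

/* hypothetical restatement of the parenthesization rule */
static bool
child_needs_parens(int parent_priority, int child_priority)
{
	/* looser-binding (lower-priority) children must be parenthesized */
	return child_priority < parent_priority;
}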
/* special case: get QueryOperator priority for correct printing !(a <-> b>) */
*/
typedef struct ExecPhraseData
{
- int npos;
- bool allocated;
- WordEntryPos *pos;
+ int npos;
+ bool allocated;
+ WordEntryPos *pos;
} ExecPhraseData;
extern bool TS_execute(QueryItem *curitem, void *checkval, bool calcnot,
extern Oid get_role_oid(const char *rolename, bool missing_ok);
extern Oid get_role_oid_or_public(const char *rolename);
extern Oid get_rolespec_oid(const Node *node, bool missing_ok);
-extern void check_rolespec_name(const Node *node, const char *detail_msg);
+extern void check_rolespec_name(const Node *node, const char *detail_msg);
extern HeapTuple get_rolespec_tuple(const Node *node);
extern char *get_rolespec_name(const Node *node);
* PG_END_TRY();
*
* (The braces are not actually necessary, but are recommended so that
- * pgindent will indent the construct nicely.) The error recovery code
+ * pgindent will indent the construct nicely.) The error recovery code
* can optionally do PG_RE_THROW() to propagate the same error outwards.
*
* Note: while the system will correctly propagate any new ereport(ERROR)
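For reference, the construct this comment describes, written out in its usual form (this is the long-standing elog.h pattern, not something added by the patch):

PG_TRY();
{
	/* code that might ereport(ERROR) */
}
PG_CATCH();
{
	/* recover or clean up local state here... */

	/* ...then optionally propagate the same error outwards */
	PG_RE_THROW();
}
PG_END_TRY();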
extern Datum gist_point_fetch(PG_FUNCTION_ARGS);
/* utils/adt/geo_spgist.c */
-Datum spg_box_quad_config(PG_FUNCTION_ARGS);
-Datum spg_box_quad_choose(PG_FUNCTION_ARGS);
-Datum spg_box_quad_picksplit(PG_FUNCTION_ARGS);
-Datum spg_box_quad_inner_consistent(PG_FUNCTION_ARGS);
-Datum spg_box_quad_leaf_consistent(PG_FUNCTION_ARGS);
+Datum spg_box_quad_config(PG_FUNCTION_ARGS);
+Datum spg_box_quad_choose(PG_FUNCTION_ARGS);
+Datum spg_box_quad_picksplit(PG_FUNCTION_ARGS);
+Datum spg_box_quad_inner_consistent(PG_FUNCTION_ARGS);
+Datum spg_box_quad_leaf_consistent(PG_FUNCTION_ARGS);
/* geo_selfuncs.c */
extern Datum areasel(PG_FUNCTION_ARGS);
* number of elements in passed array lex context. It should be called from an
* array_start action.
*/
-extern int json_count_array_elements(JsonLexContext *lex);
+extern int json_count_array_elements(JsonLexContext *lex);
/*
* constructors for JsonLexContext, with or without strval element.
union
{
Numeric numeric;
- bool boolean;
+ bool boolean;
struct
{
int len;
* changes from this value */
int generation; /* parent's generation number for this plan */
int refcount; /* count of live references to this struct */
- bool has_foreign_join; /* plan has pushed down a foreign join */
+ bool has_foreign_join; /* plan has pushed down a foreign join */
MemoryContext context; /* context containing this CachedPlan */
} CachedPlan;
AutoVacOpts autovacuum; /* autovacuum-related options */
bool user_catalog_table; /* use as an additional catalog
* relation */
- int parallel_workers; /* max number of parallel workers */
+ int parallel_workers; /* max number of parallel workers */
} StdRdOptions;
#define HEAP_MIN_FILLFACTOR 10
extern void DeleteAllExportedSnapshotFiles(void);
extern bool ThereAreNoPriorRegisteredSnapshots(void);
extern TransactionId TransactionIdLimitedForOldSnapshots(TransactionId recentXmin,
- Relation relation);
+ Relation relation);
extern void MaintainOldSnapshotTimeMapping(int64 whenTaken, TransactionId xmin);
extern char *ExportSnapshot(Snapshot snapshot);
int
intoasc(interval * i, char *str)
{
- char *tmp;
+ char *tmp;
errno = 0;
tmp = PGTYPESinterval_to_asc(i);
ECPG_informix_reset_sqlca(void)
{
struct sqlca_t *sqlca = ECPGget_sqlca();
+
if (sqlca == NULL)
return;
{
struct ECPGtype_information_cache *next;
int oid;
- enum ARRAY_TYPE isarray;
+ enum ARRAY_TYPE isarray;
};
/* structure to store one statement */
for (results = assignments; results != NULL; results = results->next)
{
const struct variable *v = find_variable(results->variable);
- char *str_zero = mm_strdup("0");
+ char *str_zero = mm_strdup("0");
switch (results->value)
{
case ECPGd_length:
case ECPGd_type:
{
- char *str_zero = mm_strdup("0");
+ char *str_zero = mm_strdup("0");
+
fprintf(yyout, "%s,", get_dtype(results->value));
ECPGdump_a_type(yyout, v->name, v->type, v->brace_level, NULL, NULL, -1, NULL, NULL, str_zero, NULL, NULL);
free(str_zero);
int i = 0;
int len = strlen(str);
- if (quoted && str[0] == '"' && str[len - 1] == '"') /* do not escape quotes
- * at beginning and end
- * if quoted string */
+ if (quoted && str[0] == '"' && str[len - 1] == '"') /* do not escape quotes
+ * at beginning and end
+ * if quoted string */
{
i = 1;
len--;
unsigned long ecode;
rloop:
+
/*
* Prepare to call SSL_get_error() by clearing thread's OpenSSL error
* queue. In general, the current thread's error queue must be empty
- * before the TLS/SSL I/O operation is attempted, or SSL_get_error()
- * will not work reliably. Since the possibility exists that other
- * OpenSSL clients running in the same thread but not under our control
- * will fail to call ERR_get_error() themselves (after their own I/O
- * operations), pro-actively clear the per-thread error queue now.
+ * before the TLS/SSL I/O operation is attempted, or SSL_get_error() will
+ * not work reliably. Since the possibility exists that other OpenSSL
+ * clients running in the same thread but not under our control will fail
+ * to call ERR_get_error() themselves (after their own I/O operations),
+ * pro-actively clear the per-thread error queue now.
*/
SOCK_ERRNO_SET(0);
ERR_clear_error();
/*
* Other clients of OpenSSL may fail to call ERR_get_error(), but we
- * always do, so as to not cause problems for OpenSSL clients that
- * don't call ERR_clear_error() defensively. Be sure that this
- * happens by calling now. SSL_get_error() relies on the OpenSSL
- * per-thread error queue being intact, so this is the earliest
- * possible point ERR_get_error() may be called.
+ * always do, so as to not cause problems for OpenSSL clients that don't
+ * call ERR_clear_error() defensively. Be sure that this happens by
+ * calling now. SSL_get_error() relies on the OpenSSL per-thread error
+ * queue being intact, so this is the earliest possible point
+ * ERR_get_error() may be called.
*/
ecode = (err != SSL_ERROR_NONE || n < 0) ? ERR_get_error() : 0;
switch (err)
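Condensed, the discipline those two comments describe is: empty this thread's OpenSSL error queue immediately before the I/O call, and drain it immediately afterwards, before any later call can disturb it. A hedged sketch using only standard OpenSSL functions; ssl, buf and buflen are assumed to exist:

#include <openssl/ssl.h>
#include <openssl/err.h>

int			n;
int			err;
unsigned long ecode;

ERR_clear_error();					/* queue must be empty before the I/O */
n = SSL_read(ssl, buf, (int) buflen);	/* the TLS I/O operation */
err = SSL_get_error(ssl, n);		/* depends on the queue being intact */
/* drain the queue now, before any other OpenSSL call can clobber it */
ecode = (err != SSL_ERROR_NONE || n < 0) ? ERR_get_error() : 0;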
param->paramkind = PARAM_EXTERN;
param->paramid = dno + 1;
plpgsql_exec_get_datum_type_info(estate,
- datum,
- &param->paramtype,
- &param->paramtypmod,
- &param->paramcollid);
+ datum,
+ &param->paramtype,
+ &param->paramtypmod,
+ &param->paramcollid);
param->location = location;
return (Node *) param;
while (true)
{
- uint64 i;
+ uint64 i;
SPI_cursor_fetch(portal, true, 50);
if (SPI_processed == 0)
*/
Oid
plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate,
- PLpgSQL_datum *datum)
+ PLpgSQL_datum *datum)
{
Oid typeid;
*/
void
plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate,
- PLpgSQL_datum *datum,
- Oid *typeid, int32 *typmod, Oid *collation)
+ PLpgSQL_datum *datum,
+ Oid *typeid, int32 *typmod, Oid *collation)
{
switch (datum->dtype)
{
*/
if (expr->plan == NULL)
exec_prepare_plan(estate, expr, parallelOK ?
- CURSOR_OPT_PARALLEL_OK : 0);
+ CURSOR_OPT_PARALLEL_OK : 0);
/*
* If a portal was requested, put the query into the portal
extern void plpgsql_subxact_cb(SubXactEvent event, SubTransactionId mySubid,
SubTransactionId parentSubid, void *arg);
extern Oid plpgsql_exec_get_datum_type(PLpgSQL_execstate *estate,
- PLpgSQL_datum *datum);
+ PLpgSQL_datum *datum);
extern void plpgsql_exec_get_datum_type_info(PLpgSQL_execstate *estate,
- PLpgSQL_datum *datum,
- Oid *typeid, int32 *typmod, Oid *collation);
+ PLpgSQL_datum *datum,
+ Oid *typeid, int32 *typmod, Oid *collation);
/* ----------
* Functions for namespace handling in pl_funcs.c
static void PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
char **xmsg, char **tbmsg, int *tb_depth);
static void PLy_get_spi_error_data(PyObject *exc, int *sqlerrcode, char **detail,
- char **hint, char **query, int *position,
- char **schema_name, char **table_name, char **column_name,
- char **datatype_name, char **constraint_name);
+ char **hint, char **query, int *position,
+ char **schema_name, char **table_name, char **column_name,
+ char **datatype_name, char **constraint_name);
static void PLy_get_error_data(PyObject *exc, int *sqlerrcode, char **detail,
- char **hint, char **schema_name, char **table_name, char **column_name,
- char **datatype_name, char **constraint_name);
+ char **hint, char **schema_name, char **table_name, char **column_name,
+ char **datatype_name, char **constraint_name);
static char *get_source_line(const char *src, int lineno);
static void get_string_attr(PyObject *obj, char *attrname, char **str);
column_name) : 0,
(datatype_name) ? err_generic_string(PG_DIAG_DATATYPE_NAME,
datatype_name) : 0,
- (constraint_name) ? err_generic_string(PG_DIAG_CONSTRAINT_NAME,
- constraint_name) : 0));
+ (constraint_name) ? err_generic_string(PG_DIAG_CONSTRAINT_NAME,
+ constraint_name) : 0));
}
PG_CATCH();
{
goto failure;
if (!set_string_attr(error, "sqlstate",
- unpack_sql_state(edata->sqlerrcode)))
+ unpack_sql_state(edata->sqlerrcode)))
goto failure;
if (!set_string_attr(error, "detail", edata->detail))
static void
get_string_attr(PyObject *obj, char *attrname, char **str)
{
- PyObject *val;
+ PyObject *val;
val = PyObject_GetAttrString(obj, attrname);
if (val != NULL && val != Py_None)
static bool
set_string_attr(PyObject *obj, char *attrname, char *str)
{
- int result;
- PyObject *val;
+ int result;
+ PyObject *val;
if (str != NULL)
{
/*
* logging methods
*/
- {"debug", (PyCFunction) PLy_debug, METH_VARARGS|METH_KEYWORDS, NULL},
- {"log", (PyCFunction) PLy_log, METH_VARARGS|METH_KEYWORDS, NULL},
- {"info", (PyCFunction) PLy_info, METH_VARARGS|METH_KEYWORDS, NULL},
- {"notice", (PyCFunction) PLy_notice, METH_VARARGS|METH_KEYWORDS, NULL},
- {"warning", (PyCFunction) PLy_warning, METH_VARARGS|METH_KEYWORDS, NULL},
- {"error", (PyCFunction) PLy_error, METH_VARARGS|METH_KEYWORDS, NULL},
- {"fatal", (PyCFunction) PLy_fatal, METH_VARARGS|METH_KEYWORDS, NULL},
+ {"debug", (PyCFunction) PLy_debug, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"log", (PyCFunction) PLy_log, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"info", (PyCFunction) PLy_info, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"notice", (PyCFunction) PLy_notice, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"warning", (PyCFunction) PLy_warning, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"error", (PyCFunction) PLy_error, METH_VARARGS | METH_KEYWORDS, NULL},
+ {"fatal", (PyCFunction) PLy_fatal, METH_VARARGS | METH_KEYWORDS, NULL},
/*
* create a stored plan
* don't confuse these with PLy_elog
*/
static PyObject *PLy_output(volatile int level, PyObject *self,
- PyObject *args, PyObject *kw);
+ PyObject *args, PyObject *kw);
static PyObject *
PLy_debug(PyObject *self, PyObject *args, PyObject *kw)
{
if (obj)
{
- PyObject *so = PyObject_Str(obj);
+ PyObject *so = PyObject_Str(obj);
if (so != NULL)
{
- char *str;
+ char *str;
str = pstrdup(PyString_AsString(so));
Py_DECREF(so);
static PyObject *
PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw)
{
- int sqlstate = 0;
- char *volatile sqlstatestr = NULL;
- char *volatile message = NULL;
- char *volatile detail = NULL;
- char *volatile hint = NULL;
- char *volatile column = NULL;
- char *volatile constraint = NULL;
- char *volatile datatype = NULL;
- char *volatile table = NULL;
- char *volatile schema = NULL;
+ int sqlstate = 0;
+ char *volatile sqlstatestr = NULL;
+ char *volatile message = NULL;
+ char *volatile detail = NULL;
+ char *volatile hint = NULL;
+ char *volatile column = NULL;
+ char *volatile constraint = NULL;
+ char *volatile datatype = NULL;
+ char *volatile table = NULL;
+ char *volatile schema = NULL;
volatile MemoryContext oldcontext;
- PyObject *key, *value;
- PyObject *volatile so;
- Py_ssize_t pos = 0;
+ PyObject *key,
+ *value;
+ PyObject *volatile so;
+ Py_ssize_t pos = 0;
if (PyTuple_Size(args) == 1)
{
{
while (PyDict_Next(kw, &pos, &key, &value))
{
- char *keyword = PyString_AsString(key);
+ char *keyword = PyString_AsString(key);
if (strcmp(keyword, "message") == 0)
{
datatype = object_to_string(value);
else if (strcmp(keyword, "constraint") == 0)
constraint = object_to_string(value);
- else
- PLy_elog(ERROR, "'%s' is an invalid keyword argument for this function",
- keyword);
+ else
+ PLy_elog(ERROR, "'%s' is an invalid keyword argument for this function",
+ keyword);
}
}
PLy_elog(ERROR, "invalid SQLSTATE code");
sqlstate = MAKE_SQLSTATE(sqlstatestr[0],
- sqlstatestr[1],
- sqlstatestr[2],
- sqlstatestr[3],
- sqlstatestr[4]);
+ sqlstatestr[1],
+ sqlstatestr[2],
+ sqlstatestr[3],
+ sqlstatestr[4]);
}
oldcontext = CurrentMemoryContext;
}
PG_CATCH();
{
- ErrorData *edata;
+ ErrorData *edata;
MemoryContextSwitchTo(oldcontext);
edata = CopyErrorData();
/* Look up the correct exception */
entry = hash_search(PLy_spi_exceptions, &(edata->sqlerrcode),
HASH_FIND, NULL);
- /* This could be a custom error code, if that's the case fallback to
+
+ /*
+ * This could be a custom error code, if that's the case fallback to
* SPIError
*/
exc = entry ? entry->exc : PLy_exc_spi_error;
if (!spierror)
goto failure;
- spidata= Py_BuildValue("(izzzizzzzz)", edata->sqlerrcode, edata->detail, edata->hint,
+ spidata = Py_BuildValue("(izzzizzzzz)", edata->sqlerrcode, edata->detail, edata->hint,
edata->internalquery, edata->internalpos,
- edata->schema_name, edata->table_name, edata->column_name,
+ edata->schema_name, edata->table_name, edata->column_name,
edata->datatype_name, edata->constraint_name);
if (!spidata)
goto failure;
MultiByteToWideChar(CP_ACP, 0, ctype, -1, wctype, LOCALE_NAME_MAX_LENGTH);
if (GetLocaleInfoEx(wctype,
- LOCALE_IDEFAULTANSICODEPAGE | LOCALE_RETURN_NUMBER,
- (LPWSTR) &cp, sizeof(cp) / sizeof(WCHAR)) > 0)
+ LOCALE_IDEFAULTANSICODEPAGE | LOCALE_RETURN_NUMBER,
+ (LPWSTR) &cp, sizeof(cp) / sizeof(WCHAR)) > 0)
{
r = malloc(16); /* excess */
if (r != NULL)
else
#endif
{
-
/*
* Locale format on Win32 is <Language>_<Country>.<CodePage> . For
* example, English_United States.1252.
/*----------------------------------------------------------------------
* test_ddl_deparse.c
- * Support functions for the test_ddl_deparse module
+ * Support functions for the test_ddl_deparse module
*
* Copyright (c) 2014-2016, PostgreSQL Global Development Group
*