527 files changed, 20693 insertions, 18426 deletions
diff --git a/contrib/btree_gist/btree_bit.c b/contrib/btree_gist/btree_bit.c index 138418a30e0..0b220ae2bb7 100644 --- a/contrib/btree_gist/btree_bit.c +++ b/contrib/btree_gist/btree_bit.c @@ -14,92 +14,99 @@ PG_FUNCTION_INFO_V1(gbt_bit_consistent); PG_FUNCTION_INFO_V1(gbt_bit_penalty); PG_FUNCTION_INFO_V1(gbt_bit_same); -Datum gbt_bit_compress(PG_FUNCTION_ARGS); -Datum gbt_bit_union(PG_FUNCTION_ARGS); -Datum gbt_bit_picksplit(PG_FUNCTION_ARGS); -Datum gbt_bit_consistent(PG_FUNCTION_ARGS); -Datum gbt_bit_penalty(PG_FUNCTION_ARGS); -Datum gbt_bit_same(PG_FUNCTION_ARGS); +Datum gbt_bit_compress(PG_FUNCTION_ARGS); +Datum gbt_bit_union(PG_FUNCTION_ARGS); +Datum gbt_bit_picksplit(PG_FUNCTION_ARGS); +Datum gbt_bit_consistent(PG_FUNCTION_ARGS); +Datum gbt_bit_penalty(PG_FUNCTION_ARGS); +Datum gbt_bit_same(PG_FUNCTION_ARGS); /* define for comparison */ -static bool gbt_bitgt (const void *a, const void *b) +static bool +gbt_bitgt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( bitgt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(bitgt, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_bitge (const void *a, const void *b) +static bool +gbt_bitge(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( bitge ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(bitge, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_biteq (const void *a, const void *b) +static bool +gbt_biteq(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( biteq ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(biteq, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_bitle (const void *a, const void *b) +static bool +gbt_bitle(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( bitle ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(bitle, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_bitlt (const void *a, const void *b) +static bool +gbt_bitlt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( bitlt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(bitlt, PointerGetDatum(a), PointerGetDatum(b)))); } -static int32 gbt_bitcmp ( const bytea * a , const bytea * b ) +static int32 +gbt_bitcmp(const bytea *a, const bytea *b) { - return - ( DatumGetInt32(DirectFunctionCall2(byteacmp,PointerGetDatum(a),PointerGetDatum(b) ) ) ); + return + (DatumGetInt32(DirectFunctionCall2(byteacmp, PointerGetDatum(a), PointerGetDatum(b)))); } - + static bytea * -gbt_bit_xfrm ( bytea * leaf ) +gbt_bit_xfrm(bytea *leaf) { - bytea * out = leaf; - int s = VARBITBYTES(leaf) + VARHDRSZ; + bytea *out = leaf; + int s = VARBITBYTES(leaf) + VARHDRSZ; - out = palloc ( s ); - VARATT_SIZEP(out) = s; - memcpy ( (void*)VARDATA(out), (void*)VARBITS(leaf), VARBITBYTES(leaf) ); - return out; + out = palloc(s); + VARATT_SIZEP(out) = s; + memcpy((void *) VARDATA(out), (void *) VARBITS(leaf), VARBITBYTES(leaf)); + return out; } -static GBT_VARKEY * gbt_bit_l2n ( GBT_VARKEY * leaf ) +static GBT_VARKEY * +gbt_bit_l2n(GBT_VARKEY * leaf) { - GBT_VARKEY *out = leaf ; - GBT_VARKEY_R r = gbt_var_key_readable ( leaf ); - bytea *o ; + GBT_VARKEY *out = leaf; + GBT_VARKEY_R r = gbt_var_key_readable(leaf); + bytea *o; - o = gbt_bit_xfrm (r.lower); - r.upper = r.lower = o; - out = gbt_var_key_copy( 
&r, TRUE ); - pfree(o); + o = gbt_bit_xfrm(r.lower); + r.upper = r.lower = o; + out = gbt_var_key_copy(&r, TRUE); + pfree(o); - return out; + return out; } static const gbtree_vinfo tinfo = { - gbt_t_bit, - FALSE, - TRUE, - gbt_bitgt, - gbt_bitge, - gbt_biteq, - gbt_bitle, - gbt_bitlt, - gbt_bitcmp, - gbt_bit_l2n + gbt_t_bit, + FALSE, + TRUE, + gbt_bitgt, + gbt_bitge, + gbt_biteq, + gbt_bitle, + gbt_bitlt, + gbt_bitcmp, + gbt_bit_l2n }; @@ -108,40 +115,40 @@ static const gbtree_vinfo tinfo = **************************************************/ Datum -gbt_bit_compress (PG_FUNCTION_ARGS) +gbt_bit_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - PG_RETURN_POINTER ( gbt_var_compress( entry, &tinfo ) ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_var_compress(entry, &tinfo)); } Datum gbt_bit_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer ( entry->key ) ; - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer ( PG_DETOAST_DATUM( entry->key ) ); - void *qtst = ( void * ) DatumGetPointer( PG_GETARG_DATUM(1) ); - void *query = ( void * ) DatumGetByteaP ( PG_GETARG_DATUM(1) ); - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - bool retval = FALSE; - GBT_VARKEY_R r = gbt_var_key_readable ( key ); - - if ( GIST_LEAF(entry) ) - { - retval = gbt_var_consistent( &r, query, &strategy, TRUE, &tinfo ); - } else { - bytea * q = gbt_bit_xfrm ( ( bytea * ) query ); - retval = gbt_var_consistent( &r, (void*)q, &strategy, FALSE, &tinfo ); - pfree(q); - } - - if ( ktst != key ){ - pfree ( key ); - } - if ( qtst != query ){ - pfree ( query ); - } - PG_RETURN_BOOL(retval); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer(entry->key); + GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + void *qtst = (void *) DatumGetPointer(PG_GETARG_DATUM(1)); + void *query = (void *) DatumGetByteaP(PG_GETARG_DATUM(1)); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + bool retval = FALSE; + GBT_VARKEY_R r = gbt_var_key_readable(key); + + if (GIST_LEAF(entry)) + retval = gbt_var_consistent(&r, query, &strategy, TRUE, &tinfo); + else + { + bytea *q = gbt_bit_xfrm((bytea *) query); + + retval = gbt_var_consistent(&r, (void *) q, &strategy, FALSE, &tinfo); + pfree(q); + } + + if (ktst != key) + pfree(key); + if (qtst != query) + pfree(query); + PG_RETURN_BOOL(retval); } @@ -149,37 +156,40 @@ gbt_bit_consistent(PG_FUNCTION_ARGS) Datum gbt_bit_union(PG_FUNCTION_ARGS) { - GistEntryVector * entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - int32 * size = (int *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_union ( entryvec , size , &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + int32 *size = (int *) PG_GETARG_POINTER(1); + + PG_RETURN_POINTER(gbt_var_union(entryvec, size, &tinfo)); } - + Datum gbt_bit_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); - gbt_var_picksplit ( entryvec, v, &tinfo ); - PG_RETURN_POINTER(v); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); + + gbt_var_picksplit(entryvec, v, &tinfo); + PG_RETURN_POINTER(v); } Datum gbt_bit_same(PG_FUNCTION_ARGS) { - Datum d1 = PG_GETARG_DATUM(0); - 
Datum d2 = PG_GETARG_DATUM(1); - bool *result = (bool *) PG_GETARG_POINTER(2); - PG_RETURN_POINTER( gbt_var_same ( result, d1 , d2 , &tinfo )); + Datum d1 = PG_GETARG_DATUM(0); + Datum d2 = PG_GETARG_DATUM(1); + bool *result = (bool *) PG_GETARG_POINTER(2); + + PG_RETURN_POINTER(gbt_var_same(result, d1, d2, &tinfo)); } Datum gbt_bit_penalty(PG_FUNCTION_ARGS) { - float *result = (float *) PG_GETARG_POINTER(2); - GISTENTRY * o = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY * n = (GISTENTRY *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_penalty ( result ,o , n, &tinfo ) ); -} + float *result = (float *) PG_GETARG_POINTER(2); + GISTENTRY *o = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *n = (GISTENTRY *) PG_GETARG_POINTER(1); + PG_RETURN_POINTER(gbt_var_penalty(result, o, n, &tinfo)); +} diff --git a/contrib/btree_gist/btree_bytea.c b/contrib/btree_gist/btree_bytea.c index da8a7f405b5..3671acd24c8 100644 --- a/contrib/btree_gist/btree_bytea.c +++ b/contrib/btree_gist/btree_bytea.c @@ -13,61 +13,67 @@ PG_FUNCTION_INFO_V1(gbt_bytea_consistent); PG_FUNCTION_INFO_V1(gbt_bytea_penalty); PG_FUNCTION_INFO_V1(gbt_bytea_same); -Datum gbt_bytea_compress(PG_FUNCTION_ARGS); -Datum gbt_bytea_union(PG_FUNCTION_ARGS); -Datum gbt_bytea_picksplit(PG_FUNCTION_ARGS); -Datum gbt_bytea_consistent(PG_FUNCTION_ARGS); -Datum gbt_bytea_penalty(PG_FUNCTION_ARGS); -Datum gbt_bytea_same(PG_FUNCTION_ARGS); +Datum gbt_bytea_compress(PG_FUNCTION_ARGS); +Datum gbt_bytea_union(PG_FUNCTION_ARGS); +Datum gbt_bytea_picksplit(PG_FUNCTION_ARGS); +Datum gbt_bytea_consistent(PG_FUNCTION_ARGS); +Datum gbt_bytea_penalty(PG_FUNCTION_ARGS); +Datum gbt_bytea_same(PG_FUNCTION_ARGS); /* define for comparison */ -static bool gbt_byteagt (const void *a, const void *b) +static bool +gbt_byteagt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( byteagt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(byteagt, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_byteage (const void *a, const void *b) +static bool +gbt_byteage(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( byteage ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(byteage, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_byteaeq (const void *a, const void *b) +static bool +gbt_byteaeq(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( byteaeq ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(byteaeq, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_byteale (const void *a, const void *b) +static bool +gbt_byteale(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( byteale ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(byteale, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_bytealt (const void *a, const void *b) +static bool +gbt_bytealt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( bytealt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(bytealt, PointerGetDatum(a), PointerGetDatum(b)))); } - -static int32 gbt_byteacmp ( const bytea * a , const bytea * b ) + +static int32 +gbt_byteacmp(const bytea *a, const bytea *b) { - return - ( DatumGetInt32(DirectFunctionCall2(byteacmp,PointerGetDatum(a),PointerGetDatum(b) ) ) ); + return + 
(DatumGetInt32(DirectFunctionCall2(byteacmp, PointerGetDatum(a), PointerGetDatum(b)))); } static const gbtree_vinfo tinfo = { - gbt_t_bytea, - FALSE, - TRUE, - gbt_byteagt, - gbt_byteage, - gbt_byteaeq, - gbt_byteale, - gbt_bytealt, - gbt_byteacmp, - NULL + gbt_t_bytea, + FALSE, + TRUE, + gbt_byteagt, + gbt_byteage, + gbt_byteaeq, + gbt_byteale, + gbt_bytealt, + gbt_byteacmp, + NULL }; @@ -77,10 +83,11 @@ static const gbtree_vinfo tinfo = Datum -gbt_bytea_compress (PG_FUNCTION_ARGS) +gbt_bytea_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - PG_RETURN_POINTER ( gbt_var_compress( entry, &tinfo ) ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_var_compress(entry, &tinfo)); } @@ -88,24 +95,22 @@ gbt_bytea_compress (PG_FUNCTION_ARGS) Datum gbt_bytea_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer ( entry->key ) ; - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer ( PG_DETOAST_DATUM( entry->key ) ); - void *qtst = ( void * ) DatumGetPointer( PG_GETARG_DATUM(1) ); - void *query = ( void * ) DatumGetByteaP ( PG_GETARG_DATUM(1) ); - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - bool retval = FALSE; - GBT_VARKEY_R r = gbt_var_key_readable ( key ); - - retval = gbt_var_consistent( &r, query, &strategy, GIST_LEAF(entry), &tinfo ); - - if ( ktst != key ){ - pfree ( key ); - } - if ( qtst != query ){ - pfree ( query ); - } - PG_RETURN_BOOL(retval); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer(entry->key); + GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + void *qtst = (void *) DatumGetPointer(PG_GETARG_DATUM(1)); + void *query = (void *) DatumGetByteaP(PG_GETARG_DATUM(1)); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + bool retval = FALSE; + GBT_VARKEY_R r = gbt_var_key_readable(key); + + retval = gbt_var_consistent(&r, query, &strategy, GIST_LEAF(entry), &tinfo); + + if (ktst != key) + pfree(key); + if (qtst != query) + pfree(query); + PG_RETURN_BOOL(retval); } @@ -113,37 +118,40 @@ gbt_bytea_consistent(PG_FUNCTION_ARGS) Datum gbt_bytea_union(PG_FUNCTION_ARGS) { - GistEntryVector * entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - int32 * size = (int *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_union ( entryvec , size , &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + int32 *size = (int *) PG_GETARG_POINTER(1); + + PG_RETURN_POINTER(gbt_var_union(entryvec, size, &tinfo)); } - + Datum gbt_bytea_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); - gbt_var_picksplit ( entryvec, v, &tinfo ); - PG_RETURN_POINTER(v); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); + + gbt_var_picksplit(entryvec, v, &tinfo); + PG_RETURN_POINTER(v); } Datum gbt_bytea_same(PG_FUNCTION_ARGS) { - Datum d1 = PG_GETARG_DATUM(0); - Datum d2 = PG_GETARG_DATUM(1); - bool *result = (bool *) PG_GETARG_POINTER(2); - PG_RETURN_POINTER( gbt_var_same ( result, d1 , d2 , &tinfo )); + Datum d1 = PG_GETARG_DATUM(0); + Datum d2 = PG_GETARG_DATUM(1); + bool *result = (bool *) PG_GETARG_POINTER(2); + + PG_RETURN_POINTER(gbt_var_same(result, d1, d2, &tinfo)); } Datum gbt_bytea_penalty(PG_FUNCTION_ARGS) 
{ - float *result = (float *) PG_GETARG_POINTER(2); - GISTENTRY * o = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY * n = (GISTENTRY *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_penalty ( result ,o , n, &tinfo ) ); -} + float *result = (float *) PG_GETARG_POINTER(2); + GISTENTRY *o = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *n = (GISTENTRY *) PG_GETARG_POINTER(1); + PG_RETURN_POINTER(gbt_var_penalty(result, o, n, &tinfo)); +} diff --git a/contrib/btree_gist/btree_cash.c b/contrib/btree_gist/btree_cash.c index 6c5acb3e7e1..84e46101181 100644 --- a/contrib/btree_gist/btree_cash.c +++ b/contrib/btree_gist/btree_cash.c @@ -4,9 +4,9 @@ typedef struct { - Cash lower; - Cash upper; -} cashKEY; + Cash lower; + Cash upper; +} cashKEY; /* ** Cash ops @@ -18,59 +18,62 @@ PG_FUNCTION_INFO_V1(gbt_cash_consistent); PG_FUNCTION_INFO_V1(gbt_cash_penalty); PG_FUNCTION_INFO_V1(gbt_cash_same); -Datum gbt_cash_compress(PG_FUNCTION_ARGS); -Datum gbt_cash_union(PG_FUNCTION_ARGS); -Datum gbt_cash_picksplit(PG_FUNCTION_ARGS); -Datum gbt_cash_consistent(PG_FUNCTION_ARGS); -Datum gbt_cash_penalty(PG_FUNCTION_ARGS); -Datum gbt_cash_same(PG_FUNCTION_ARGS); +Datum gbt_cash_compress(PG_FUNCTION_ARGS); +Datum gbt_cash_union(PG_FUNCTION_ARGS); +Datum gbt_cash_picksplit(PG_FUNCTION_ARGS); +Datum gbt_cash_consistent(PG_FUNCTION_ARGS); +Datum gbt_cash_penalty(PG_FUNCTION_ARGS); +Datum gbt_cash_same(PG_FUNCTION_ARGS); -static bool gbt_cashgt (const void *a, const void *b) +static bool +gbt_cashgt(const void *a, const void *b) { - return ( *((Cash*)a) > *((Cash*)b) ); + return (*((Cash *) a) > *((Cash *) b)); } -static bool gbt_cashge (const void *a, const void *b) +static bool +gbt_cashge(const void *a, const void *b) { - return ( *((Cash*)a) >= *((Cash*)b) ); + return (*((Cash *) a) >= *((Cash *) b)); } -static bool gbt_casheq (const void *a, const void *b) +static bool +gbt_casheq(const void *a, const void *b) { - return ( *((Cash*)a) == *((Cash*)b) ); + return (*((Cash *) a) == *((Cash *) b)); } -static bool gbt_cashle (const void *a, const void *b) +static bool +gbt_cashle(const void *a, const void *b) { - return ( *((Cash*)a) <= *((Cash*)b) ); + return (*((Cash *) a) <= *((Cash *) b)); } -static bool gbt_cashlt (const void *a, const void *b) +static bool +gbt_cashlt(const void *a, const void *b) { - return ( *((Cash*)a) < *((Cash*)b) ); + return (*((Cash *) a) < *((Cash *) b)); } static int gbt_cashkey_cmp(const void *a, const void *b) { - if ( *(Cash*)&(((Nsrt *) a)->t[0]) > *(Cash*)&(((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(Cash*)&(((Nsrt *) a)->t[0]) < *(Cash*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(Cash *) &(((Nsrt *) a)->t[0]) > *(Cash *) &(((Nsrt *) b)->t[0])) + return 1; + else if (*(Cash *) &(((Nsrt *) a)->t[0]) < *(Cash *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = +static const gbtree_ninfo tinfo = { - gbt_t_cash, - sizeof(Cash), - gbt_cashgt, - gbt_cashge, - gbt_casheq, - gbt_cashle, - gbt_cashlt, - gbt_cashkey_cmp + gbt_t_cash, + sizeof(Cash), + gbt_cashgt, + gbt_cashge, + gbt_casheq, + gbt_cashle, + gbt_cashlt, + gbt_cashkey_cmp }; @@ -82,81 +85,83 @@ static const gbtree_ninfo tinfo = Datum gbt_cash_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, 
&tinfo)); } Datum gbt_cash_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - Cash query = (*((Cash *) PG_GETARG_POINTER(1))); - cashKEY *kkk = (cashKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + Cash query = (*((Cash *) PG_GETARG_POINTER(1))); + cashKEY *kkk = (cashKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_cash_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(cashKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(cashKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(cashKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(cashKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_cash_penalty(PG_FUNCTION_ARGS) { - cashKEY *origentry = (cashKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - cashKEY *newentry = (cashKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); + cashKEY *origentry = (cashKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + cashKEY *newentry = (cashKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + + Cash res; - Cash res ; + *result = 0.0; - *result = 0.0; - - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower, newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_cash_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_cash_same(PG_FUNCTION_ARGS) { - cashKEY *b1 = (cashKEY *) PG_GETARG_POINTER(0); - cashKEY *b2 = (cashKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + cashKEY *b1 = (cashKEY *) PG_GETARG_POINTER(0); + cashKEY *b2 = (cashKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + 
PG_RETURN_POINTER(result); } - - diff --git a/contrib/btree_gist/btree_date.c b/contrib/btree_gist/btree_date.c index 9522d386252..d3ce33c7172 100644 --- a/contrib/btree_gist/btree_date.c +++ b/contrib/btree_gist/btree_date.c @@ -4,9 +4,9 @@ typedef struct { - DateADT lower; - DateADT upper; -} dateKEY; + DateADT lower; + DateADT upper; +} dateKEY; /* ** date ops @@ -18,46 +18,51 @@ PG_FUNCTION_INFO_V1(gbt_date_consistent); PG_FUNCTION_INFO_V1(gbt_date_penalty); PG_FUNCTION_INFO_V1(gbt_date_same); -Datum gbt_date_compress(PG_FUNCTION_ARGS); -Datum gbt_date_union(PG_FUNCTION_ARGS); -Datum gbt_date_picksplit(PG_FUNCTION_ARGS); -Datum gbt_date_consistent(PG_FUNCTION_ARGS); -Datum gbt_date_penalty(PG_FUNCTION_ARGS); -Datum gbt_date_same(PG_FUNCTION_ARGS); +Datum gbt_date_compress(PG_FUNCTION_ARGS); +Datum gbt_date_union(PG_FUNCTION_ARGS); +Datum gbt_date_picksplit(PG_FUNCTION_ARGS); +Datum gbt_date_consistent(PG_FUNCTION_ARGS); +Datum gbt_date_penalty(PG_FUNCTION_ARGS); +Datum gbt_date_same(PG_FUNCTION_ARGS); -static bool gbt_dategt (const void *a, const void *b) +static bool +gbt_dategt(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(date_gt,DateADTGetDatum( *((DateADT*)a) ), DateADTGetDatum( *((DateADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(date_gt, DateADTGetDatum(*((DateADT *) a)), DateADTGetDatum(*((DateADT *) b))) + ); } -static bool gbt_datege (const void *a, const void *b) +static bool +gbt_datege(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(date_ge,DateADTGetDatum( *((DateADT*)a) ), DateADTGetDatum( *((DateADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(date_ge, DateADTGetDatum(*((DateADT *) a)), DateADTGetDatum(*((DateADT *) b))) + ); } -static bool gbt_dateeq (const void *a, const void *b) +static bool +gbt_dateeq(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(date_eq,DateADTGetDatum( *((DateADT*)a) ), DateADTGetDatum( *((DateADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(date_eq, DateADTGetDatum(*((DateADT *) a)), DateADTGetDatum(*((DateADT *) b))) + ); } -static bool gbt_datele (const void *a, const void *b) +static bool +gbt_datele(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(date_le,DateADTGetDatum( *((DateADT*)a) ), DateADTGetDatum( *((DateADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(date_le, DateADTGetDatum(*((DateADT *) a)), DateADTGetDatum(*((DateADT *) b))) + ); } -static bool gbt_datelt (const void *a, const void *b) +static bool +gbt_datelt(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(date_lt,DateADTGetDatum( *((DateADT*)a) ), DateADTGetDatum( *((DateADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(date_lt, DateADTGetDatum(*((DateADT *) a)), DateADTGetDatum(*((DateADT *) b))) + ); } @@ -65,26 +70,24 @@ static bool gbt_datelt (const void *a, const void *b) static int gbt_datekey_cmp(const void *a, const void *b) { - if ( gbt_dategt( (void*)&(((Nsrt *) a)->t[0]) , (void*)&(((Nsrt *) b)->t[0]) ) ){ - return 1; - } else - if ( gbt_datelt( (void*)&(((Nsrt *) a)->t[0]) , (void*)&(((Nsrt *) b)->t[0]) ) ){ - return -1; - } - return 0; + if (gbt_dategt((void *) &(((Nsrt *) a)->t[0]), (void *) &(((Nsrt *) b)->t[0]))) + return 1; + else if (gbt_datelt((void *) &(((Nsrt *) a)->t[0]), (void *) &(((Nsrt *) b)->t[0]))) + return -1; + return 0; } -static const gbtree_ninfo tinfo = +static const gbtree_ninfo tinfo = { - gbt_t_date, - sizeof(DateADT), - 
gbt_dategt, - gbt_datege, - gbt_dateeq, - gbt_datele, - gbt_datelt, - gbt_datekey_cmp + gbt_t_date, + sizeof(DateADT), + gbt_dategt, + gbt_datege, + gbt_dateeq, + gbt_datele, + gbt_datelt, + gbt_datekey_cmp }; @@ -97,9 +100,10 @@ static const gbtree_ninfo tinfo = Datum gbt_date_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } @@ -107,86 +111,89 @@ gbt_date_compress(PG_FUNCTION_ARGS) Datum gbt_date_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - DateADT query = PG_GETARG_DATEADT( 1 ); - dateKEY *kkk = (dateKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + DateADT query = PG_GETARG_DATEADT(1); + dateKEY *kkk = (dateKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_date_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(dateKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(dateKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(dateKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(dateKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_date_penalty(PG_FUNCTION_ARGS) { - dateKEY *origentry = (dateKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - dateKEY *newentry = (dateKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - int32 diff, res ; - - diff = DatumGetInt32(DirectFunctionCall2( - date_mi, - DateADTGetDatum(newentry->upper), - DateADTGetDatum(origentry->upper))); - - res = Max(diff, 0); - - diff = DatumGetInt32(DirectFunctionCall2( - date_mi, - DateADTGetDatum(origentry->lower), - DateADTGetDatum(newentry->lower))); - - res += Max(diff, 0); - - *result = 0.0; - - if ( res > 0 ){ - diff = DatumGetInt32(DirectFunctionCall2( - date_mi, - DateADTGetDatum(origentry->upper), - DateADTGetDatum(origentry->lower))); - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + diff ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } - - PG_RETURN_POINTER(result); + dateKEY *origentry = (dateKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + dateKEY *newentry = (dateKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + int32 diff, + res; + + diff = DatumGetInt32(DirectFunctionCall2( + date_mi, + DateADTGetDatum(newentry->upper), + DateADTGetDatum(origentry->upper))); + + res = Max(diff, 0); + + diff = DatumGetInt32(DirectFunctionCall2( + 
date_mi, + DateADTGetDatum(origentry->lower), + DateADTGetDatum(newentry->lower))); + + res += Max(diff, 0); + + *result = 0.0; + + if (res > 0) + { + diff = DatumGetInt32(DirectFunctionCall2( + date_mi, + DateADTGetDatum(origentry->upper), + DateADTGetDatum(origentry->lower))); + *result += FLT_MIN; + *result += (float) (res / ((double) (res + diff))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } + + PG_RETURN_POINTER(result); } Datum gbt_date_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_date_same(PG_FUNCTION_ARGS) { - dateKEY *b1 = (dateKEY *) PG_GETARG_POINTER(0); - dateKEY *b2 = (dateKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + dateKEY *b1 = (dateKEY *) PG_GETARG_POINTER(0); + dateKEY *b2 = (dateKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } diff --git a/contrib/btree_gist/btree_float4.c b/contrib/btree_gist/btree_float4.c index 621532d9a99..b36178e4541 100644 --- a/contrib/btree_gist/btree_float4.c +++ b/contrib/btree_gist/btree_float4.c @@ -3,9 +3,9 @@ typedef struct float4key { - float4 lower; - float4 upper; -} float4KEY; + float4 lower; + float4 upper; +} float4KEY; /* ** float4 ops @@ -17,59 +17,62 @@ PG_FUNCTION_INFO_V1(gbt_float4_consistent); PG_FUNCTION_INFO_V1(gbt_float4_penalty); PG_FUNCTION_INFO_V1(gbt_float4_same); -Datum gbt_float4_compress(PG_FUNCTION_ARGS); -Datum gbt_float4_union(PG_FUNCTION_ARGS); -Datum gbt_float4_picksplit(PG_FUNCTION_ARGS); -Datum gbt_float4_consistent(PG_FUNCTION_ARGS); -Datum gbt_float4_penalty(PG_FUNCTION_ARGS); -Datum gbt_float4_same(PG_FUNCTION_ARGS); +Datum gbt_float4_compress(PG_FUNCTION_ARGS); +Datum gbt_float4_union(PG_FUNCTION_ARGS); +Datum gbt_float4_picksplit(PG_FUNCTION_ARGS); +Datum gbt_float4_consistent(PG_FUNCTION_ARGS); +Datum gbt_float4_penalty(PG_FUNCTION_ARGS); +Datum gbt_float4_same(PG_FUNCTION_ARGS); -static bool gbt_float4gt (const void *a, const void *b) +static bool +gbt_float4gt(const void *a, const void *b) { - return ( *((float4*)a) > *((float4*)b) ); + return (*((float4 *) a) > *((float4 *) b)); } -static bool gbt_float4ge (const void *a, const void *b) +static bool +gbt_float4ge(const void *a, const void *b) { - return ( *((float4*)a) >= *((float4*)b) ); + return (*((float4 *) a) >= *((float4 *) b)); } -static bool gbt_float4eq (const void *a, const void *b) +static bool +gbt_float4eq(const void *a, const void *b) { - return ( *((float4*)a) == *((float4*)b) ); + return (*((float4 *) a) == *((float4 *) b)); } -static bool gbt_float4le (const void *a, const void *b) +static bool +gbt_float4le(const void *a, const void *b) { - return ( *((float4*)a) <= *((float4*)b) ); + return (*((float4 *) a) <= *((float4 *) b)); } -static bool gbt_float4lt (const void *a, const void *b) +static bool +gbt_float4lt(const void *a, const void *b) { - return ( *((float4*)a) < *((float4*)b) ); + return (*((float4 *) a) < *((float4 *) b)); } static int gbt_float4key_cmp(const void *a, const void *b) { - if ( *(float4*)&(((Nsrt *) a)->t[0]) > *(float4*)&(((Nsrt *) b)->t[0]) ){ - return 
1; - } else - if ( *(float4*)&(((Nsrt *) a)->t[0]) < *(float4*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(float4 *) &(((Nsrt *) a)->t[0]) > *(float4 *) &(((Nsrt *) b)->t[0])) + return 1; + else if (*(float4 *) &(((Nsrt *) a)->t[0]) < *(float4 *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = -{ - gbt_t_float4, - sizeof(float4), - gbt_float4gt, - gbt_float4ge, - gbt_float4eq, - gbt_float4le, - gbt_float4lt, - gbt_float4key_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_float4, + sizeof(float4), + gbt_float4gt, + gbt_float4ge, + gbt_float4eq, + gbt_float4le, + gbt_float4lt, + gbt_float4key_cmp }; @@ -81,80 +84,83 @@ static const gbtree_ninfo tinfo = Datum gbt_float4_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } Datum gbt_float4_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - float4 query = PG_GETARG_FLOAT4(1); - float4KEY *kkk = (float4KEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + float4 query = PG_GETARG_FLOAT4(1); + float4KEY *kkk = (float4KEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_float4_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(float4KEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(float4KEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(float4KEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(float4KEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_float4_penalty(PG_FUNCTION_ARGS) { - float4KEY *origentry = (float4KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - float4KEY *newentry = (float4KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); + float4KEY *origentry = (float4KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + float4KEY *newentry = (float4KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); - float4 res ; + float4 res; - *result = 0.0; - - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower, newentry->upper ); + *result = 0.0; - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - 
PG_RETURN_POINTER(result); + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } + + PG_RETURN_POINTER(result); } Datum gbt_float4_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_float4_same(PG_FUNCTION_ARGS) { - float4KEY *b1 = (float4KEY *) PG_GETARG_POINTER(0); - float4KEY *b2 = (float4KEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + float4KEY *b1 = (float4KEY *) PG_GETARG_POINTER(0); + float4KEY *b2 = (float4KEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } - diff --git a/contrib/btree_gist/btree_float8.c b/contrib/btree_gist/btree_float8.c index e19632fd78c..a5511d5abfe 100644 --- a/contrib/btree_gist/btree_float8.c +++ b/contrib/btree_gist/btree_float8.c @@ -3,9 +3,9 @@ typedef struct float8key { - float8 lower; - float8 upper; -} float8KEY; + float8 lower; + float8 upper; +} float8KEY; /* ** float8 ops @@ -17,60 +17,63 @@ PG_FUNCTION_INFO_V1(gbt_float8_consistent); PG_FUNCTION_INFO_V1(gbt_float8_penalty); PG_FUNCTION_INFO_V1(gbt_float8_same); -Datum gbt_float8_compress(PG_FUNCTION_ARGS); -Datum gbt_float8_union(PG_FUNCTION_ARGS); -Datum gbt_float8_picksplit(PG_FUNCTION_ARGS); -Datum gbt_float8_consistent(PG_FUNCTION_ARGS); -Datum gbt_float8_penalty(PG_FUNCTION_ARGS); -Datum gbt_float8_same(PG_FUNCTION_ARGS); +Datum gbt_float8_compress(PG_FUNCTION_ARGS); +Datum gbt_float8_union(PG_FUNCTION_ARGS); +Datum gbt_float8_picksplit(PG_FUNCTION_ARGS); +Datum gbt_float8_consistent(PG_FUNCTION_ARGS); +Datum gbt_float8_penalty(PG_FUNCTION_ARGS); +Datum gbt_float8_same(PG_FUNCTION_ARGS); -static bool gbt_float8gt (const void *a, const void *b) +static bool +gbt_float8gt(const void *a, const void *b) { - return ( *((float8*)a) > *((float8*)b) ); + return (*((float8 *) a) > *((float8 *) b)); } -static bool gbt_float8ge (const void *a, const void *b) +static bool +gbt_float8ge(const void *a, const void *b) { - return ( *((float8*)a) >= *((float8*)b) ); + return (*((float8 *) a) >= *((float8 *) b)); } -static bool gbt_float8eq (const void *a, const void *b) +static bool +gbt_float8eq(const void *a, const void *b) { - return ( *((float8*)a) == *((float8*)b) ); + return (*((float8 *) a) == *((float8 *) b)); } -static bool gbt_float8le (const void *a, const void *b) +static bool +gbt_float8le(const void *a, const void *b) { - return ( *((float8*)a) <= *((float8*)b) ); + return (*((float8 *) a) <= *((float8 *) b)); } -static bool gbt_float8lt (const void *a, const void *b) +static bool +gbt_float8lt(const void *a, const void *b) { - return ( *((float8*)a) < *((float8*)b) ); + return (*((float8 *) a) < *((float8 *) b)); } static int gbt_float8key_cmp(const void *a, const void *b) { - if ( *(float8*)&(((Nsrt *) a)->t[0]) > *(float8*)&(((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(float8*)&(((Nsrt *) a)->t[0]) < *(float8*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(float8 *) &(((Nsrt *) a)->t[0]) > *(float8 *) &(((Nsrt 
*) b)->t[0])) + return 1; + else if (*(float8 *) &(((Nsrt *) a)->t[0]) < *(float8 *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = -{ - gbt_t_float8, - sizeof(float8), - gbt_float8gt, - gbt_float8ge, - gbt_float8eq, - gbt_float8le, - gbt_float8lt, - gbt_float8key_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_float8, + sizeof(float8), + gbt_float8gt, + gbt_float8ge, + gbt_float8eq, + gbt_float8le, + gbt_float8lt, + gbt_float8key_cmp }; @@ -82,9 +85,10 @@ static const gbtree_ninfo tinfo = Datum gbt_float8_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } @@ -92,70 +96,73 @@ Datum gbt_float8_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - float8 query = PG_GETARG_FLOAT8(1); - float8KEY *kkk = (float8KEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + float8 query = PG_GETARG_FLOAT8(1); + float8KEY *kkk = (float8KEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_float8_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(float8KEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(float8KEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(float8KEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(float8KEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_float8_penalty(PG_FUNCTION_ARGS) { - float8KEY *origentry = (float8KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - float8KEY *newentry = (float8KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); + float8KEY *origentry = (float8KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + float8KEY *newentry = (float8KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + + float8 res; - float8 res ; + *result = 0.0; - *result = 0.0; - - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower, newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) 
PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_float8_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_float8_same(PG_FUNCTION_ARGS) { - float8KEY *b1 = (float8KEY *) PG_GETARG_POINTER(0); - float8KEY *b2 = (float8KEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + float8KEY *b1 = (float8KEY *) PG_GETARG_POINTER(0); + float8KEY *b2 = (float8KEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } diff --git a/contrib/btree_gist/btree_gist.c b/contrib/btree_gist/btree_gist.c index 5e5a6988bb9..a64a6cb7c12 100644 --- a/contrib/btree_gist/btree_gist.c +++ b/contrib/btree_gist/btree_gist.c @@ -1,24 +1,24 @@ #include "btree_gist.h" PG_FUNCTION_INFO_V1(gbt_decompress); -PG_FUNCTION_INFO_V1(gbtreekey_in); +PG_FUNCTION_INFO_V1(gbtreekey_in); PG_FUNCTION_INFO_V1(gbtreekey_out); -Datum gbt_decompress(PG_FUNCTION_ARGS); +Datum gbt_decompress(PG_FUNCTION_ARGS); /************************************************** * In/Out for keys **************************************************/ - + Datum gbtreekey_in(PG_FUNCTION_ARGS) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("<datatype>key_in() not implemented"))); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("<datatype>key_in() not implemented"))); - PG_RETURN_POINTER(NULL); + PG_RETURN_POINTER(NULL); } #include "btree_utils_var.h" @@ -26,10 +26,10 @@ gbtreekey_in(PG_FUNCTION_ARGS) Datum gbtreekey_out(PG_FUNCTION_ARGS) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("<datatype>key_out() not implemented"))); - PG_RETURN_POINTER(NULL); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("<datatype>key_out() not implemented"))); + PG_RETURN_POINTER(NULL); } @@ -40,6 +40,5 @@ gbtreekey_out(PG_FUNCTION_ARGS) Datum gbt_decompress(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(PG_GETARG_POINTER(0)); + PG_RETURN_POINTER(PG_GETARG_POINTER(0)); } - diff --git a/contrib/btree_gist/btree_gist.h b/contrib/btree_gist/btree_gist.h index 1b4fe837d26..f7db9206e7c 100644 --- a/contrib/btree_gist/btree_gist.h +++ b/contrib/btree_gist/btree_gist.h @@ -7,25 +7,25 @@ enum gbtree_type { - gbt_t_var , - gbt_t_int2 , - gbt_t_int4 , - gbt_t_int8 , - gbt_t_float4 , - gbt_t_float8 , - gbt_t_numeric, - gbt_t_ts, - gbt_t_cash, - gbt_t_oid, - gbt_t_time, - gbt_t_date, - gbt_t_intv, - gbt_t_macad, - gbt_t_text, - gbt_t_bpchar, - gbt_t_bytea, - gbt_t_bit, - gbt_t_inet + gbt_t_var, + gbt_t_int2, + gbt_t_int4, + gbt_t_int8, + gbt_t_float4, + gbt_t_float8, + gbt_t_numeric, + gbt_t_ts, + gbt_t_cash, + gbt_t_oid, + gbt_t_time, + gbt_t_date, + gbt_t_intv, + gbt_t_macad, + gbt_t_text, + gbt_t_bpchar, + gbt_t_bytea, + gbt_t_bit, + gbt_t_inet }; @@ -34,6 +34,6 @@ enum gbtree_type * Generic btree functions */ -Datum gbtreekey_in (PG_FUNCTION_ARGS); +Datum gbtreekey_in(PG_FUNCTION_ARGS); -Datum gbtreekey_out(PG_FUNCTION_ARGS); +Datum gbtreekey_out(PG_FUNCTION_ARGS); diff --git a/contrib/btree_gist/btree_inet.c b/contrib/btree_gist/btree_inet.c index 
5b3f66789de..fad053a36f7 100644 --- a/contrib/btree_gist/btree_inet.c +++ b/contrib/btree_gist/btree_inet.c @@ -6,9 +6,9 @@ typedef struct inetkey { - double lower; - double upper; -} inetKEY; + double lower; + double upper; +} inetKEY; /* ** inet ops @@ -22,62 +22,65 @@ PG_FUNCTION_INFO_V1(gbt_cidr_consistent); PG_FUNCTION_INFO_V1(gbt_inet_penalty); PG_FUNCTION_INFO_V1(gbt_inet_same); -Datum gbt_inet_compress(PG_FUNCTION_ARGS); -Datum gbt_cidr_compress(PG_FUNCTION_ARGS); -Datum gbt_inet_union(PG_FUNCTION_ARGS); -Datum gbt_inet_picksplit(PG_FUNCTION_ARGS); -Datum gbt_inet_consistent(PG_FUNCTION_ARGS); -Datum gbt_cidr_consistent(PG_FUNCTION_ARGS); -Datum gbt_inet_penalty(PG_FUNCTION_ARGS); -Datum gbt_inet_same(PG_FUNCTION_ARGS); +Datum gbt_inet_compress(PG_FUNCTION_ARGS); +Datum gbt_cidr_compress(PG_FUNCTION_ARGS); +Datum gbt_inet_union(PG_FUNCTION_ARGS); +Datum gbt_inet_picksplit(PG_FUNCTION_ARGS); +Datum gbt_inet_consistent(PG_FUNCTION_ARGS); +Datum gbt_cidr_consistent(PG_FUNCTION_ARGS); +Datum gbt_inet_penalty(PG_FUNCTION_ARGS); +Datum gbt_inet_same(PG_FUNCTION_ARGS); -static bool gbt_inetgt (const void *a, const void *b) +static bool +gbt_inetgt(const void *a, const void *b) { - return ( *((double*)a) > *((double*)b) ); + return (*((double *) a) > *((double *) b)); } -static bool gbt_inetge (const void *a, const void *b) +static bool +gbt_inetge(const void *a, const void *b) { - return ( *((double*)a) >= *((double*)b) ); + return (*((double *) a) >= *((double *) b)); } -static bool gbt_ineteq (const void *a, const void *b) +static bool +gbt_ineteq(const void *a, const void *b) { - return ( *((double*)a) == *((double*)b) ); + return (*((double *) a) == *((double *) b)); } -static bool gbt_inetle (const void *a, const void *b) +static bool +gbt_inetle(const void *a, const void *b) { - return ( *((double*)a) <= *((double*)b) ); + return (*((double *) a) <= *((double *) b)); } -static bool gbt_inetlt (const void *a, const void *b) +static bool +gbt_inetlt(const void *a, const void *b) { - return ( *((double*)a) < *((double*)b) ); + return (*((double *) a) < *((double *) b)); } static int gbt_inetkey_cmp(const void *a, const void *b) { - if ( *(double*)(&((Nsrt *) a)->t[0]) > *(double*)(&((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(double*)(&((Nsrt *) a)->t[0]) < *(double*)(&((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(double *) (&((Nsrt *) a)->t[0]) > *(double *) (&((Nsrt *) b)->t[0])) + return 1; + else if (*(double *) (&((Nsrt *) a)->t[0]) < *(double *) (&((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = +static const gbtree_ninfo tinfo = { - gbt_t_inet, - sizeof(double), - gbt_inetgt, - gbt_inetge, - gbt_ineteq, - gbt_inetle, - gbt_inetlt, - gbt_inetkey_cmp + gbt_t_inet, + sizeof(double), + gbt_inetgt, + gbt_inetge, + gbt_ineteq, + gbt_inetle, + gbt_inetlt, + gbt_inetkey_cmp }; @@ -87,24 +90,25 @@ static const gbtree_ninfo tinfo = -static GISTENTRY * -gbt_inet_compress_inetrnal(GISTENTRY *retval , GISTENTRY *entry , Oid typid) +static GISTENTRY * +gbt_inet_compress_inetrnal(GISTENTRY *retval, GISTENTRY *entry, Oid typid) { - - if (entry->leafkey) - { - inetKEY *r = (inetKEY *) palloc(sizeof(inetKEY)); - retval = palloc(sizeof(GISTENTRY)); - r->lower = convert_network_to_scalar(entry->key, typid ); - r->upper = r->lower ; - gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, - entry->offset, sizeof(inetKEY), FALSE); - } - else - retval = entry; - return ( retval ); + if (entry->leafkey) + { + inetKEY *r = (inetKEY 
*) palloc(sizeof(inetKEY)); + + retval = palloc(sizeof(GISTENTRY)); + r->lower = convert_network_to_scalar(entry->key, typid); + r->upper = r->lower; + gistentryinit(*retval, PointerGetDatum(r), + entry->rel, entry->page, + entry->offset, sizeof(inetKEY), FALSE); + } + else + retval = entry; + + return (retval); } @@ -112,113 +116,118 @@ gbt_inet_compress_inetrnal(GISTENTRY *retval , GISTENTRY *entry , Oid typid) Datum gbt_inet_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_inet_compress_inetrnal(retval ,entry ,INETOID ) ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_inet_compress_inetrnal(retval, entry, INETOID)); } Datum gbt_cidr_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_inet_compress_inetrnal(retval ,entry ,CIDROID ) ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_inet_compress_inetrnal(retval, entry, CIDROID)); } static bool -gbt_inet_consistent_internal ( - const GISTENTRY * entry, - const double * query, - const StrategyNumber * strategy -){ - inetKEY *kkk = (inetKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; +gbt_inet_consistent_internal( + const GISTENTRY *entry, + const double *query, + const StrategyNumber *strategy +) +{ + inetKEY *kkk = (inetKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - return ( - gbt_num_consistent( &key, (void*)query,strategy,GIST_LEAF(entry),&tinfo) - ); + return ( + gbt_num_consistent(&key, (void *) query, strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_inet_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - double query = convert_network_to_scalar( PG_GETARG_DATUM(1) ,INETOID ); - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + double query = convert_network_to_scalar(PG_GETARG_DATUM(1), INETOID); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - PG_RETURN_BOOL( - gbt_inet_consistent_internal ( entry, &query, &strategy ) - ); + PG_RETURN_BOOL( + gbt_inet_consistent_internal(entry, &query, &strategy) + ); } Datum gbt_cidr_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - double query = convert_network_to_scalar( PG_GETARG_DATUM(1) ,CIDROID ); - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + double query = convert_network_to_scalar(PG_GETARG_DATUM(1), CIDROID); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - PG_RETURN_BOOL( - gbt_inet_consistent_internal ( entry, &query, &strategy ) - ); + PG_RETURN_BOOL( + gbt_inet_consistent_internal(entry, &query, &strategy) + ); } Datum gbt_inet_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(inetKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(inetKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(inetKEY)); + + *(int *) PG_GETARG_POINTER(1) = 
sizeof(inetKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_inet_penalty(PG_FUNCTION_ARGS) { - inetKEY *origentry = (inetKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - inetKEY *newentry = (inetKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); + inetKEY *origentry = (inetKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + inetKEY *newentry = (inetKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); - double res ; + double res; - *result = 0.0; + *result = 0.0; - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower, newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_inet_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_inet_same(PG_FUNCTION_ARGS) { - inetKEY *b1 = (inetKEY *) PG_GETARG_POINTER(0); - inetKEY *b2 = (inetKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + inetKEY *b1 = (inetKEY *) PG_GETARG_POINTER(0); + inetKEY *b2 = (inetKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } diff --git a/contrib/btree_gist/btree_int2.c b/contrib/btree_gist/btree_int2.c index 9b791df2c37..3eb5801a7e9 100644 --- a/contrib/btree_gist/btree_int2.c +++ b/contrib/btree_gist/btree_int2.c @@ -3,9 +3,9 @@ typedef struct int16key { - int16 lower; - int16 upper; -} int16KEY; + int16 lower; + int16 upper; +} int16KEY; /* ** int16 ops @@ -17,59 +17,62 @@ PG_FUNCTION_INFO_V1(gbt_int2_consistent); PG_FUNCTION_INFO_V1(gbt_int2_penalty); PG_FUNCTION_INFO_V1(gbt_int2_same); -Datum gbt_int2_compress(PG_FUNCTION_ARGS); -Datum gbt_int2_union(PG_FUNCTION_ARGS); -Datum gbt_int2_picksplit(PG_FUNCTION_ARGS); -Datum gbt_int2_consistent(PG_FUNCTION_ARGS); -Datum gbt_int2_penalty(PG_FUNCTION_ARGS); -Datum gbt_int2_same(PG_FUNCTION_ARGS); +Datum gbt_int2_compress(PG_FUNCTION_ARGS); +Datum gbt_int2_union(PG_FUNCTION_ARGS); +Datum gbt_int2_picksplit(PG_FUNCTION_ARGS); +Datum gbt_int2_consistent(PG_FUNCTION_ARGS); +Datum gbt_int2_penalty(PG_FUNCTION_ARGS); +Datum gbt_int2_same(PG_FUNCTION_ARGS); -static bool gbt_int2gt (const void *a, const void *b) +static bool +gbt_int2gt(const void *a, const void *b) { - return ( *((int16*)a) > *((int16*)b) ); + return (*((int16 *) a) > *((int16 *) b)); } -static bool gbt_int2ge (const void *a, const void *b) +static bool +gbt_int2ge(const void *a, const void *b) { - return ( *((int16*)a) >= 
*((int16*)b) ); + return (*((int16 *) a) >= *((int16 *) b)); } -static bool gbt_int2eq (const void *a, const void *b) +static bool +gbt_int2eq(const void *a, const void *b) { - return ( *((int16*)a) == *((int16*)b) ); + return (*((int16 *) a) == *((int16 *) b)); } -static bool gbt_int2le (const void *a, const void *b) +static bool +gbt_int2le(const void *a, const void *b) { - return ( *((int16*)a) <= *((int16*)b) ); + return (*((int16 *) a) <= *((int16 *) b)); } -static bool gbt_int2lt (const void *a, const void *b) +static bool +gbt_int2lt(const void *a, const void *b) { - return ( *((int16*)a) < *((int16*)b) ); + return (*((int16 *) a) < *((int16 *) b)); } static int gbt_int2key_cmp(const void *a, const void *b) { - if ( *(int16*)(&((Nsrt *) a)->t[0]) > *(int16*)&(((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(int16*)&(((Nsrt *) a)->t[0]) < *(int16*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(int16 *) (&((Nsrt *) a)->t[0]) > *(int16 *) &(((Nsrt *) b)->t[0])) + return 1; + else if (*(int16 *) &(((Nsrt *) a)->t[0]) < *(int16 *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = -{ - gbt_t_int2, - sizeof(int16), - gbt_int2gt, - gbt_int2ge, - gbt_int2eq, - gbt_int2le, - gbt_int2lt, - gbt_int2key_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_int2, + sizeof(int16), + gbt_int2gt, + gbt_int2ge, + gbt_int2eq, + gbt_int2le, + gbt_int2lt, + gbt_int2key_cmp }; @@ -85,77 +88,81 @@ static const gbtree_ninfo tinfo = Datum gbt_int2_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } Datum gbt_int2_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - int16 query = PG_GETARG_INT16(1); - int16KEY *kkk = (int16KEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + int16 query = PG_GETARG_INT16(1); + int16KEY *kkk = (int16KEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_int2_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(int16KEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(int16KEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(int16KEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(int16KEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_int2_penalty(PG_FUNCTION_ARGS) { - int16KEY *origentry = (int16KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - int16KEY *newentry = (int16KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - int2 res ; 
+ int16KEY *origentry = (int16KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + int16KEY *newentry = (int16KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + int2 res; - *result = 0.0; + *result = 0.0; - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower, newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_int2_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_int2_same(PG_FUNCTION_ARGS) { - int16KEY *b1 = (int16KEY *) PG_GETARG_POINTER(0); - int16KEY *b2 = (int16KEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + int16KEY *b1 = (int16KEY *) PG_GETARG_POINTER(0); + int16KEY *b2 = (int16KEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } diff --git a/contrib/btree_gist/btree_int4.c b/contrib/btree_gist/btree_int4.c index 6e26752e5f3..6693185c98d 100644 --- a/contrib/btree_gist/btree_int4.c +++ b/contrib/btree_gist/btree_int4.c @@ -3,9 +3,9 @@ typedef struct int32key { - int32 lower; - int32 upper; -} int32KEY; + int32 lower; + int32 upper; +} int32KEY; /* ** int32 ops @@ -17,60 +17,63 @@ PG_FUNCTION_INFO_V1(gbt_int4_consistent); PG_FUNCTION_INFO_V1(gbt_int4_penalty); PG_FUNCTION_INFO_V1(gbt_int4_same); -Datum gbt_int4_compress(PG_FUNCTION_ARGS); -Datum gbt_int4_union(PG_FUNCTION_ARGS); -Datum gbt_int4_picksplit(PG_FUNCTION_ARGS); -Datum gbt_int4_consistent(PG_FUNCTION_ARGS); -Datum gbt_int4_penalty(PG_FUNCTION_ARGS); -Datum gbt_int4_same(PG_FUNCTION_ARGS); +Datum gbt_int4_compress(PG_FUNCTION_ARGS); +Datum gbt_int4_union(PG_FUNCTION_ARGS); +Datum gbt_int4_picksplit(PG_FUNCTION_ARGS); +Datum gbt_int4_consistent(PG_FUNCTION_ARGS); +Datum gbt_int4_penalty(PG_FUNCTION_ARGS); +Datum gbt_int4_same(PG_FUNCTION_ARGS); -static bool gbt_int4gt (const void *a, const void *b) +static bool +gbt_int4gt(const void *a, const void *b) { - return ( *((int32*)a) > *((int32*)b) ); + return (*((int32 *) a) > *((int32 *) b)); } -static bool gbt_int4ge (const void *a, const void *b) +static bool +gbt_int4ge(const void *a, const void *b) { - return ( *((int32*)a) >= *((int32*)b) ); + return (*((int32 *) a) >= *((int32 *) b)); } -static bool gbt_int4eq (const void *a, const void *b) +static bool +gbt_int4eq(const void *a, const void *b) { - return ( *((int32*)a) == *((int32*)b) ); + return (*((int32 *) a) == *((int32 *) b)); } -static bool gbt_int4le (const void *a, const void *b) +static bool +gbt_int4le(const void *a, const void *b) { - 
return ( *((int32*)a) <= *((int32*)b) ); + return (*((int32 *) a) <= *((int32 *) b)); } -static bool gbt_int4lt (const void *a, const void *b) +static bool +gbt_int4lt(const void *a, const void *b) { - return ( *((int32*)a) < *((int32*)b) ); + return (*((int32 *) a) < *((int32 *) b)); } static int gbt_int4key_cmp(const void *a, const void *b) { - if ( *(int32*)&(((Nsrt *) a)->t[0]) > *(int32*)&(((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(int32*)&(((Nsrt *) a)->t[0]) < *(int32*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(int32 *) &(((Nsrt *) a)->t[0]) > *(int32 *) &(((Nsrt *) b)->t[0])) + return 1; + else if (*(int32 *) &(((Nsrt *) a)->t[0]) < *(int32 *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = -{ - gbt_t_int4, - sizeof(int32), - gbt_int4gt, - gbt_int4ge, - gbt_int4eq, - gbt_int4le, - gbt_int4lt, - gbt_int4key_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_int4, + sizeof(int32), + gbt_int4gt, + gbt_int4ge, + gbt_int4eq, + gbt_int4le, + gbt_int4lt, + gbt_int4key_cmp }; @@ -82,9 +85,10 @@ static const gbtree_ninfo tinfo = Datum gbt_int4_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } @@ -92,68 +96,71 @@ Datum gbt_int4_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - int32 query = PG_GETARG_INT32(1); - int32KEY *kkk = (int32KEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + int32 query = PG_GETARG_INT32(1); + int32KEY *kkk = (int32KEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_int4_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc( sizeof(int32KEY) ); - *(int *) PG_GETARG_POINTER(1) = sizeof(int32KEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(int32KEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(int32KEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_int4_penalty(PG_FUNCTION_ARGS) { - int32KEY *origentry = (int32KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - int32KEY *newentry = (int32KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - int4 res ; + int32KEY *origentry = (int32KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + int32KEY *newentry = (int32KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + int4 res; - *result = 0.0; + *result = 0.0; - penalty_range_enlarge ( origentry->lower, origentry->upper, 
newentry->lower, newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_int4_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_int4_same(PG_FUNCTION_ARGS) { - int32KEY *b1 = (int32KEY *) PG_GETARG_POINTER(0); - int32KEY *b2 = (int32KEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + int32KEY *b1 = (int32KEY *) PG_GETARG_POINTER(0); + int32KEY *b2 = (int32KEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } diff --git a/contrib/btree_gist/btree_int8.c b/contrib/btree_gist/btree_int8.c index c12d078f4cd..0741193c6f0 100644 --- a/contrib/btree_gist/btree_int8.c +++ b/contrib/btree_gist/btree_int8.c @@ -3,9 +3,9 @@ typedef struct int64key { - int64 lower; - int64 upper; -} int64KEY; + int64 lower; + int64 upper; +} int64KEY; /* ** int64 ops @@ -17,60 +17,63 @@ PG_FUNCTION_INFO_V1(gbt_int8_consistent); PG_FUNCTION_INFO_V1(gbt_int8_penalty); PG_FUNCTION_INFO_V1(gbt_int8_same); -Datum gbt_int8_compress(PG_FUNCTION_ARGS); -Datum gbt_int8_union(PG_FUNCTION_ARGS); -Datum gbt_int8_picksplit(PG_FUNCTION_ARGS); -Datum gbt_int8_consistent(PG_FUNCTION_ARGS); -Datum gbt_int8_penalty(PG_FUNCTION_ARGS); -Datum gbt_int8_same(PG_FUNCTION_ARGS); +Datum gbt_int8_compress(PG_FUNCTION_ARGS); +Datum gbt_int8_union(PG_FUNCTION_ARGS); +Datum gbt_int8_picksplit(PG_FUNCTION_ARGS); +Datum gbt_int8_consistent(PG_FUNCTION_ARGS); +Datum gbt_int8_penalty(PG_FUNCTION_ARGS); +Datum gbt_int8_same(PG_FUNCTION_ARGS); -static bool gbt_int8gt (const void *a, const void *b) +static bool +gbt_int8gt(const void *a, const void *b) { - return ( *((int64*)a) > *((int64*)b) ); + return (*((int64 *) a) > *((int64 *) b)); } -static bool gbt_int8ge (const void *a, const void *b) +static bool +gbt_int8ge(const void *a, const void *b) { - return ( *((int64*)a) >= *((int64*)b) ); + return (*((int64 *) a) >= *((int64 *) b)); } -static bool gbt_int8eq (const void *a, const void *b) +static bool +gbt_int8eq(const void *a, const void *b) { - return ( *((int64*)a) == *((int64*)b) ); + return (*((int64 *) a) == *((int64 *) b)); } -static bool gbt_int8le (const void *a, const void *b) +static bool +gbt_int8le(const void *a, const void *b) { - return ( *((int64*)a) <= *((int64*)b) ); + return (*((int64 *) a) <= *((int64 *) b)); } -static bool gbt_int8lt (const void *a, const void *b) +static bool +gbt_int8lt(const void *a, const void *b) { - return ( *((int64*)a) < *((int64*)b) ); + return (*((int64 *) a) < *((int64 *) b)); } static int gbt_int8key_cmp(const void *a, const void *b) { - if ( 
*(int64*)&(((Nsrt *) a)->t[0]) > *(int64*)&(((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(int64*)&(((Nsrt *) a)->t[0]) < *(int64*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(int64 *) &(((Nsrt *) a)->t[0]) > *(int64 *) &(((Nsrt *) b)->t[0])) + return 1; + else if (*(int64 *) &(((Nsrt *) a)->t[0]) < *(int64 *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = +static const gbtree_ninfo tinfo = { - gbt_t_int8, - sizeof(int64), - gbt_int8gt, - gbt_int8ge, - gbt_int8eq, - gbt_int8le, - gbt_int8lt, - gbt_int8key_cmp + gbt_t_int8, + sizeof(int64), + gbt_int8gt, + gbt_int8ge, + gbt_int8eq, + gbt_int8le, + gbt_int8lt, + gbt_int8key_cmp }; @@ -82,79 +85,82 @@ static const gbtree_ninfo tinfo = Datum gbt_int8_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } Datum gbt_int8_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - int64 query = PG_GETARG_INT64(1); - int64KEY *kkk = (int64KEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + int64 query = PG_GETARG_INT64(1); + int64KEY *kkk = (int64KEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_int8_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(int64KEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(int64KEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(int64KEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(int64KEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_int8_penalty(PG_FUNCTION_ARGS) { - int64KEY *origentry = (int64KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - int64KEY *newentry = (int64KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - int64 res ; + int64KEY *origentry = (int64KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + int64KEY *newentry = (int64KEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + int64 res; - *result = 0.0; + *result = 0.0; - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower, newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + 
*result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_int8_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_int8_same(PG_FUNCTION_ARGS) { - int64KEY *b1 = (int64KEY *) PG_GETARG_POINTER(0); - int64KEY *b2 = (int64KEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + int64KEY *b1 = (int64KEY *) PG_GETARG_POINTER(0); + int64KEY *b2 = (int64KEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } - diff --git a/contrib/btree_gist/btree_interval.c b/contrib/btree_gist/btree_interval.c index 9dd37010846..97374a40544 100644 --- a/contrib/btree_gist/btree_interval.c +++ b/contrib/btree_gist/btree_interval.c @@ -3,8 +3,9 @@ typedef struct { - Interval lower, upper; -} intvKEY; + Interval lower, + upper; +} intvKEY; /* @@ -18,81 +19,88 @@ PG_FUNCTION_INFO_V1(gbt_intv_consistent); PG_FUNCTION_INFO_V1(gbt_intv_penalty); PG_FUNCTION_INFO_V1(gbt_intv_same); -Datum gbt_intv_compress(PG_FUNCTION_ARGS); -Datum gbt_intv_decompress(PG_FUNCTION_ARGS); -Datum gbt_intv_union(PG_FUNCTION_ARGS); -Datum gbt_intv_picksplit(PG_FUNCTION_ARGS); -Datum gbt_intv_consistent(PG_FUNCTION_ARGS); -Datum gbt_intv_penalty(PG_FUNCTION_ARGS); -Datum gbt_intv_same(PG_FUNCTION_ARGS); +Datum gbt_intv_compress(PG_FUNCTION_ARGS); +Datum gbt_intv_decompress(PG_FUNCTION_ARGS); +Datum gbt_intv_union(PG_FUNCTION_ARGS); +Datum gbt_intv_picksplit(PG_FUNCTION_ARGS); +Datum gbt_intv_consistent(PG_FUNCTION_ARGS); +Datum gbt_intv_penalty(PG_FUNCTION_ARGS); +Datum gbt_intv_same(PG_FUNCTION_ARGS); -static bool gbt_intvgt (const void *a, const void *b) +static bool +gbt_intvgt(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2 ( interval_gt , IntervalPGetDatum ( a ) , IntervalPGetDatum ( b ) )); + return DatumGetBool(DirectFunctionCall2(interval_gt, IntervalPGetDatum(a), IntervalPGetDatum(b))); } -static bool gbt_intvge (const void *a, const void *b) +static bool +gbt_intvge(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2 ( interval_ge , IntervalPGetDatum ( a ) , IntervalPGetDatum ( b ) )); + return DatumGetBool(DirectFunctionCall2(interval_ge, IntervalPGetDatum(a), IntervalPGetDatum(b))); } -static bool gbt_intveq (const void *a, const void *b) +static bool +gbt_intveq(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2 ( interval_eq , IntervalPGetDatum ( a ) , IntervalPGetDatum ( b ) )); + return DatumGetBool(DirectFunctionCall2(interval_eq, IntervalPGetDatum(a), IntervalPGetDatum(b))); } -static bool gbt_intvle (const void *a, const void *b) +static bool +gbt_intvle(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2 ( interval_le , IntervalPGetDatum ( a ) , IntervalPGetDatum ( b ) )); + return DatumGetBool(DirectFunctionCall2(interval_le, IntervalPGetDatum(a), IntervalPGetDatum(b))); } -static bool gbt_intvlt (const void *a, const void *b) +static bool 
+gbt_intvlt(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2 ( interval_lt , IntervalPGetDatum ( a ) , IntervalPGetDatum ( b ) )); + return DatumGetBool(DirectFunctionCall2(interval_lt, IntervalPGetDatum(a), IntervalPGetDatum(b))); } static int gbt_intvkey_cmp(const void *a, const void *b) { - return DatumGetInt32 ( - DirectFunctionCall2 ( interval_cmp , - IntervalPGetDatum(((Nsrt *) a)->t) , - IntervalPGetDatum(((Nsrt *) b)->t) - ) - ); + return DatumGetInt32( + DirectFunctionCall2(interval_cmp, + IntervalPGetDatum(((Nsrt *) a)->t), + IntervalPGetDatum(((Nsrt *) b)->t) + ) + ); } -static double intr2num ( const Interval * i ) +static double +intr2num(const Interval *i) { - double ret = 0.0; - struct pg_tm tm; - fsec_t fsec; - interval2tm( *i, &tm, &fsec); - ret += ( tm.tm_year * 360.0 * 86400.0 ) ; - ret += ( tm.tm_mon * 12.0 * 86400.0 ) ; - ret += ( tm.tm_mday * 86400.0 ) ; - ret += ( tm.tm_hour * 3600.0 ) ; - ret += ( tm.tm_min * 60.0 ) ; - ret += ( tm.tm_sec ) ; - ret += ( fsec / 1000000.0 ); + double ret = 0.0; + struct pg_tm tm; + fsec_t fsec; - return ( ret ); + interval2tm(*i, &tm, &fsec); + ret += (tm.tm_year * 360.0 * 86400.0); + ret += (tm.tm_mon * 12.0 * 86400.0); + ret += (tm.tm_mday * 86400.0); + ret += (tm.tm_hour * 3600.0); + ret += (tm.tm_min * 60.0); + ret += (tm.tm_sec); + ret += (fsec / 1000000.0); + + return (ret); } #define INTERVALSIZE 12 -static const gbtree_ninfo tinfo = +static const gbtree_ninfo tinfo = { - gbt_t_intv, - sizeof(Interval), - gbt_intvgt, - gbt_intvge, - gbt_intveq, - gbt_intvle, - gbt_intvlt, - gbt_intvkey_cmp + gbt_t_intv, + sizeof(Interval), + gbt_intvgt, + gbt_intvge, + gbt_intveq, + gbt_intvle, + gbt_intvlt, + gbt_intvkey_cmp }; @@ -104,126 +112,137 @@ static const gbtree_ninfo tinfo = Datum gbt_intv_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = entry; - if ( entry->leafkey || INTERVALSIZE != sizeof(Interval) ) { - char *r = ( char * ) palloc(2 * INTERVALSIZE); - - retval = palloc(sizeof(GISTENTRY)); - - if ( entry->leafkey ) { - Interval *key = DatumGetIntervalP(entry->key); - memcpy( (void*) r , (void*)key, INTERVALSIZE); - memcpy( (void*)(r + INTERVALSIZE), (void*)key, INTERVALSIZE); - } else { - intvKEY *key = ( intvKEY * ) DatumGetPointer(entry->key); - memcpy(r, &key->lower, INTERVALSIZE); - memcpy(r + INTERVALSIZE, &key->upper, INTERVALSIZE); - } - gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, - entry->offset, 2 * INTERVALSIZE, FALSE); - } - - PG_RETURN_POINTER(retval); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = entry; + + if (entry->leafkey || INTERVALSIZE != sizeof(Interval)) + { + char *r = (char *) palloc(2 * INTERVALSIZE); + + retval = palloc(sizeof(GISTENTRY)); + + if (entry->leafkey) + { + Interval *key = DatumGetIntervalP(entry->key); + + memcpy((void *) r, (void *) key, INTERVALSIZE); + memcpy((void *) (r + INTERVALSIZE), (void *) key, INTERVALSIZE); + } + else + { + intvKEY *key = (intvKEY *) DatumGetPointer(entry->key); + + memcpy(r, &key->lower, INTERVALSIZE); + memcpy(r + INTERVALSIZE, &key->upper, INTERVALSIZE); + } + gistentryinit(*retval, PointerGetDatum(r), + entry->rel, entry->page, + entry->offset, 2 * INTERVALSIZE, FALSE); + } + + PG_RETURN_POINTER(retval); } Datum gbt_intv_decompress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = entry; + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = entry; 
- if ( INTERVALSIZE != sizeof(Interval) ) { - intvKEY *r = palloc(sizeof(intvKEY)); - char *key = DatumGetPointer(entry->key); + if (INTERVALSIZE != sizeof(Interval)) + { + intvKEY *r = palloc(sizeof(intvKEY)); + char *key = DatumGetPointer(entry->key); - retval = palloc(sizeof(GISTENTRY)); - memcpy( &r->lower, key, INTERVALSIZE); - memcpy( &r->upper, key+ INTERVALSIZE, INTERVALSIZE); + retval = palloc(sizeof(GISTENTRY)); + memcpy(&r->lower, key, INTERVALSIZE); + memcpy(&r->upper, key + INTERVALSIZE, INTERVALSIZE); - gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, - entry->offset, sizeof(intvKEY), FALSE); - } - PG_RETURN_POINTER(retval); + gistentryinit(*retval, PointerGetDatum(r), + entry->rel, entry->page, + entry->offset, sizeof(intvKEY), FALSE); + } + PG_RETURN_POINTER(retval); } Datum gbt_intv_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - Interval *query = PG_GETARG_INTERVAL_P(1); - intvKEY *kkk = (intvKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)query ,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + Interval *query = PG_GETARG_INTERVAL_P(1); + intvKEY *kkk = (intvKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_intv_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(intvKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(intvKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(intvKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(intvKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_intv_penalty(PG_FUNCTION_ARGS) { - intvKEY *origentry = (intvKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - intvKEY *newentry = (intvKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - double iorg[2], inew[2], res; + intvKEY *origentry = (intvKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + intvKEY *newentry = (intvKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + double iorg[2], + inew[2], + res; - iorg[0] = intr2num ( &origentry->lower ); - iorg[1] = intr2num ( &origentry->upper ); - inew[0] = intr2num ( &newentry->lower ); - inew[1] = intr2num ( &newentry->upper ); + iorg[0] = intr2num(&origentry->lower); + iorg[1] = intr2num(&origentry->upper); + inew[0] = intr2num(&newentry->lower); + inew[1] = intr2num(&newentry->upper); - penalty_range_enlarge ( iorg[0], iorg[1], inew[0], inew[1] ); + penalty_range_enlarge(iorg[0], iorg[1], inew[0], inew[1]); - *result = 0.0; + *result = 0.0; - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( res + iorg[1] - iorg[0] ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + 
*result += FLT_MIN; + *result += (float) (res / (res + iorg[1] - iorg[0])); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_intv_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_intv_same(PG_FUNCTION_ARGS) { - intvKEY *b1 = (intvKEY *) PG_GETARG_POINTER(0); - intvKEY *b2 = (intvKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + intvKEY *b1 = (intvKEY *) PG_GETARG_POINTER(0); + intvKEY *b2 = (intvKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } - diff --git a/contrib/btree_gist/btree_macaddr.c b/contrib/btree_gist/btree_macaddr.c index b6e9c672f1d..3f08a586556 100644 --- a/contrib/btree_gist/btree_macaddr.c +++ b/contrib/btree_gist/btree_macaddr.c @@ -5,9 +5,9 @@ typedef struct { - macaddr lower; - macaddr upper; -} macKEY; + macaddr lower; + macaddr upper; +} macKEY; /* ** OID ops @@ -19,62 +19,67 @@ PG_FUNCTION_INFO_V1(gbt_macad_consistent); PG_FUNCTION_INFO_V1(gbt_macad_penalty); PG_FUNCTION_INFO_V1(gbt_macad_same); -Datum gbt_macad_compress(PG_FUNCTION_ARGS); -Datum gbt_macad_union(PG_FUNCTION_ARGS); -Datum gbt_macad_picksplit(PG_FUNCTION_ARGS); -Datum gbt_macad_consistent(PG_FUNCTION_ARGS); -Datum gbt_macad_penalty(PG_FUNCTION_ARGS); -Datum gbt_macad_same(PG_FUNCTION_ARGS); +Datum gbt_macad_compress(PG_FUNCTION_ARGS); +Datum gbt_macad_union(PG_FUNCTION_ARGS); +Datum gbt_macad_picksplit(PG_FUNCTION_ARGS); +Datum gbt_macad_consistent(PG_FUNCTION_ARGS); +Datum gbt_macad_penalty(PG_FUNCTION_ARGS); +Datum gbt_macad_same(PG_FUNCTION_ARGS); -static bool gbt_macadgt (const void *a, const void *b) +static bool +gbt_macadgt(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2(macaddr_gt,PointerGetDatum(a),PointerGetDatum(b))); + return DatumGetBool(DirectFunctionCall2(macaddr_gt, PointerGetDatum(a), PointerGetDatum(b))); } -static bool gbt_macadge (const void *a, const void *b) +static bool +gbt_macadge(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2(macaddr_ge,PointerGetDatum(a),PointerGetDatum(b))); + return DatumGetBool(DirectFunctionCall2(macaddr_ge, PointerGetDatum(a), PointerGetDatum(b))); } -static bool gbt_macadeq (const void *a, const void *b) +static bool +gbt_macadeq(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2(macaddr_eq,PointerGetDatum(a),PointerGetDatum(b))); + return DatumGetBool(DirectFunctionCall2(macaddr_eq, PointerGetDatum(a), PointerGetDatum(b))); } -static bool gbt_macadle (const void *a, const void *b) +static bool +gbt_macadle(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2(macaddr_le,PointerGetDatum(a),PointerGetDatum(b))); + return DatumGetBool(DirectFunctionCall2(macaddr_le, PointerGetDatum(a), PointerGetDatum(b))); } -static bool gbt_macadlt (const void *a, const void *b) +static bool +gbt_macadlt(const void *a, const void *b) { - return DatumGetBool(DirectFunctionCall2(macaddr_lt,PointerGetDatum(a),PointerGetDatum(b))); + return 
DatumGetBool(DirectFunctionCall2(macaddr_lt, PointerGetDatum(a), PointerGetDatum(b))); } static int gbt_macadkey_cmp(const void *a, const void *b) { - return DatumGetInt32( - DirectFunctionCall2( - macaddr_cmp , - PointerGetDatum (&((Nsrt *) a)->t[0]), - PointerGetDatum (&((Nsrt *) b)->t[0]) - ) - ); + return DatumGetInt32( + DirectFunctionCall2( + macaddr_cmp, + PointerGetDatum(&((Nsrt *) a)->t[0]), + PointerGetDatum(&((Nsrt *) b)->t[0]) + ) + ); } -static const gbtree_ninfo tinfo = -{ - gbt_t_macad, - sizeof(macaddr), - gbt_macadgt, - gbt_macadge, - gbt_macadeq, - gbt_macadle, - gbt_macadlt, - gbt_macadkey_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_macad, + sizeof(macaddr), + gbt_macadgt, + gbt_macadge, + gbt_macadeq, + gbt_macadle, + gbt_macadlt, + gbt_macadkey_cmp }; @@ -84,14 +89,16 @@ static const gbtree_ninfo tinfo = -static uint64 mac_2_uint64 ( macaddr * m ){ - unsigned char * mi = ( unsigned char * ) m; - uint64 res = 0; - int i; - for (i=0; i<6; i++ ){ - res += ( ( (uint64) mi[i] ) << ( (uint64) ( (5-i)*8 ) ) ); - } - return res; +static uint64 +mac_2_uint64(macaddr *m) +{ + unsigned char *mi = (unsigned char *) m; + uint64 res = 0; + int i; + + for (i = 0; i < 6; i++) + res += (((uint64) mi[i]) << ((uint64) ((5 - i) * 8))); + return res; } @@ -99,9 +106,10 @@ static uint64 mac_2_uint64 ( macaddr * m ){ Datum gbt_macad_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } @@ -109,76 +117,79 @@ Datum gbt_macad_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - macaddr *query = (macaddr *) PG_GETARG_POINTER(1); - macKEY *kkk = (macKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + macaddr *query = (macaddr *) PG_GETARG_POINTER(1); + macKEY *kkk = (macKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_macad_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(macKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(macKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(macKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(macKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_macad_penalty(PG_FUNCTION_ARGS) { - macKEY *origentry = (macKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - macKEY *newentry = (macKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - uint64 iorg[2], inew[2]; - uint64 res; + macKEY *origentry = (macKEY *) DatumGetPointer(((GISTENTRY *) 
PG_GETARG_POINTER(0))->key); + macKEY *newentry = (macKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + uint64 iorg[2], + inew[2]; + uint64 res; - iorg[0] = mac_2_uint64 ( &origentry->lower ); - iorg[1] = mac_2_uint64 ( &origentry->upper ); - inew[0] = mac_2_uint64 ( &newentry->lower ); - inew[1] = mac_2_uint64 ( &newentry->upper ); + iorg[0] = mac_2_uint64(&origentry->lower); + iorg[1] = mac_2_uint64(&origentry->upper); + inew[0] = mac_2_uint64(&newentry->lower); + inew[1] = mac_2_uint64(&newentry->upper); - penalty_range_enlarge ( iorg[0], iorg[1], inew[0], inew[1] ); + penalty_range_enlarge(iorg[0], iorg[1], inew[0], inew[1]); - *result = 0.0; + *result = 0.0; - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( ( (double)res ) / ( (double)res + (double)iorg[1] - (double)iorg[0] ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (((double) res) / ((double) res + (double) iorg[1] - (double) iorg[0])); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_macad_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_macad_same(PG_FUNCTION_ARGS) { - macKEY *b1 = (macKEY *) PG_GETARG_POINTER(0); - macKEY *b2 = (macKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + macKEY *b1 = (macKEY *) PG_GETARG_POINTER(0); + macKEY *b2 = (macKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } - diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c index f46cfa4fef8..8b54b931d5b 100644 --- a/contrib/btree_gist/btree_numeric.c +++ b/contrib/btree_gist/btree_numeric.c @@ -13,61 +13,67 @@ PG_FUNCTION_INFO_V1(gbt_numeric_consistent); PG_FUNCTION_INFO_V1(gbt_numeric_penalty); PG_FUNCTION_INFO_V1(gbt_numeric_same); -Datum gbt_numeric_compress(PG_FUNCTION_ARGS); -Datum gbt_numeric_union(PG_FUNCTION_ARGS); -Datum gbt_numeric_picksplit(PG_FUNCTION_ARGS); -Datum gbt_numeric_consistent(PG_FUNCTION_ARGS); -Datum gbt_numeric_penalty(PG_FUNCTION_ARGS); -Datum gbt_numeric_same(PG_FUNCTION_ARGS); +Datum gbt_numeric_compress(PG_FUNCTION_ARGS); +Datum gbt_numeric_union(PG_FUNCTION_ARGS); +Datum gbt_numeric_picksplit(PG_FUNCTION_ARGS); +Datum gbt_numeric_consistent(PG_FUNCTION_ARGS); +Datum gbt_numeric_penalty(PG_FUNCTION_ARGS); +Datum gbt_numeric_same(PG_FUNCTION_ARGS); /* define for comparison */ -static bool gbt_numeric_gt (const void *a, const void *b) +static bool +gbt_numeric_gt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( numeric_gt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(numeric_gt, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_numeric_ge (const void *a, const void *b) +static bool +gbt_numeric_ge(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( numeric_ge ,PointerGetDatum( 
a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(numeric_ge, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_numeric_eq (const void *a, const void *b) +static bool +gbt_numeric_eq(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( numeric_eq ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(numeric_eq, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_numeric_le (const void *a, const void *b) +static bool +gbt_numeric_le(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( numeric_le ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(numeric_le, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_numeric_lt (const void *a, const void *b) +static bool +gbt_numeric_lt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( numeric_lt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(numeric_lt, PointerGetDatum(a), PointerGetDatum(b)))); } - -static int32 gbt_numeric_cmp ( const bytea * a , const bytea * b ) + +static int32 +gbt_numeric_cmp(const bytea *a, const bytea *b) { - return - ( DatumGetInt32(DirectFunctionCall2(numeric_cmp,PointerGetDatum(a),PointerGetDatum(b) ) ) ); + return + (DatumGetInt32(DirectFunctionCall2(numeric_cmp, PointerGetDatum(a), PointerGetDatum(b)))); } static const gbtree_vinfo tinfo = { - gbt_t_numeric, - FALSE, - FALSE, - gbt_numeric_gt, - gbt_numeric_ge, - gbt_numeric_eq, - gbt_numeric_le, - gbt_numeric_lt, - gbt_numeric_cmp, - NULL + gbt_t_numeric, + FALSE, + FALSE, + gbt_numeric_gt, + gbt_numeric_ge, + gbt_numeric_eq, + gbt_numeric_le, + gbt_numeric_lt, + gbt_numeric_cmp, + NULL }; @@ -77,10 +83,11 @@ static const gbtree_vinfo tinfo = Datum -gbt_numeric_compress (PG_FUNCTION_ARGS) +gbt_numeric_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - PG_RETURN_POINTER ( gbt_var_compress( entry, &tinfo ) ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_var_compress(entry, &tinfo)); } @@ -89,24 +96,22 @@ Datum gbt_numeric_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer ( entry->key ) ; - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer ( PG_DETOAST_DATUM( entry->key ) ); - void *qtst = ( void * ) DatumGetPointer ( PG_GETARG_DATUM(1) ); - void *query = ( void * ) DatumGetNumeric ( PG_GETARG_DATUM(1) ); - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - bool retval = FALSE; - GBT_VARKEY_R r = gbt_var_key_readable ( key ); - - retval = gbt_var_consistent( &r, query, &strategy, GIST_LEAF(entry), &tinfo ); - - if ( ktst != key ){ - pfree ( key ); - } - if ( qtst != query ){ - pfree ( query ); - } - PG_RETURN_BOOL(retval); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer(entry->key); + GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + void *qtst = (void *) DatumGetPointer(PG_GETARG_DATUM(1)); + void *query = (void *) DatumGetNumeric(PG_GETARG_DATUM(1)); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + bool retval = FALSE; + GBT_VARKEY_R r = gbt_var_key_readable(key); + + retval = gbt_var_consistent(&r, query, &strategy, GIST_LEAF(entry), &tinfo); + + if (ktst != key) + pfree(key); + if (qtst != query) + pfree(query); + 
PG_RETURN_BOOL(retval); } @@ -114,104 +119,110 @@ gbt_numeric_consistent(PG_FUNCTION_ARGS) Datum gbt_numeric_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - int32 * size = (int *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_union ( entryvec , size , &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + int32 *size = (int *) PG_GETARG_POINTER(1); + + PG_RETURN_POINTER(gbt_var_union(entryvec, size, &tinfo)); } - + Datum gbt_numeric_same(PG_FUNCTION_ARGS) { - Datum d1 = PG_GETARG_DATUM(0); - Datum d2 = PG_GETARG_DATUM(1); - bool *result = (bool *) PG_GETARG_POINTER(2); - PG_RETURN_POINTER( gbt_var_same ( result, d1 , d2 , &tinfo )); + Datum d1 = PG_GETARG_DATUM(0); + Datum d2 = PG_GETARG_DATUM(1); + bool *result = (bool *) PG_GETARG_POINTER(2); + + PG_RETURN_POINTER(gbt_var_same(result, d1, d2, &tinfo)); } Datum -gbt_numeric_penalty (PG_FUNCTION_ARGS) +gbt_numeric_penalty(PG_FUNCTION_ARGS) { - GISTENTRY * o = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY * n = (GISTENTRY *) PG_GETARG_POINTER(1); - float *result = (float *) PG_GETARG_POINTER(2); + GISTENTRY *o = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *n = (GISTENTRY *) PG_GETARG_POINTER(1); + float *result = (float *) PG_GETARG_POINTER(2); - Numeric us, os, ds ; + Numeric us, + os, + ds; - GBT_VARKEY *org = (GBT_VARKEY *) DatumGetPointer(o->key); - GBT_VARKEY *newe = (GBT_VARKEY *) DatumGetPointer(n->key); - Datum uni ; - GBT_VARKEY_R rk , ok, uk ; + GBT_VARKEY *org = (GBT_VARKEY *) DatumGetPointer(o->key); + GBT_VARKEY *newe = (GBT_VARKEY *) DatumGetPointer(n->key); + Datum uni; + GBT_VARKEY_R rk, + ok, + uk; - rk = gbt_var_key_readable ( org ); - uni = PointerGetDatum( gbt_var_key_copy( &rk, TRUE ) ); - gbt_var_bin_union ( &uni , newe, &tinfo ); - ok = gbt_var_key_readable ( org ); - uk = gbt_var_key_readable ( (GBT_VARKEY *) DatumGetPointer(uni) ); + rk = gbt_var_key_readable(org); + uni = PointerGetDatum(gbt_var_key_copy(&rk, TRUE)); + gbt_var_bin_union(&uni, newe, &tinfo); + ok = gbt_var_key_readable(org); + uk = gbt_var_key_readable((GBT_VARKEY *) DatumGetPointer(uni)); - us = DatumGetNumeric(DirectFunctionCall2( - numeric_sub, - PointerGetDatum(uk.upper), - PointerGetDatum(uk.lower) - )); + us = DatumGetNumeric(DirectFunctionCall2( + numeric_sub, + PointerGetDatum(uk.upper), + PointerGetDatum(uk.lower) + )); - pfree ( DatumGetPointer(uni) ); + pfree(DatumGetPointer(uni)); - os = DatumGetNumeric(DirectFunctionCall2( - numeric_sub, - PointerGetDatum(ok.upper), - PointerGetDatum(ok.lower) - )); + os = DatumGetNumeric(DirectFunctionCall2( + numeric_sub, + PointerGetDatum(ok.upper), + PointerGetDatum(ok.lower) + )); - ds = DatumGetNumeric(DirectFunctionCall2( - numeric_sub, - NumericGetDatum(us), - NumericGetDatum(os) - )); + ds = DatumGetNumeric(DirectFunctionCall2( + numeric_sub, + NumericGetDatum(us), + NumericGetDatum(os) + )); - pfree ( os ); + pfree(os); - if ( NUMERIC_IS_NAN( us ) ) - { + if (NUMERIC_IS_NAN(us)) + { - if ( NUMERIC_IS_NAN( os ) ) - { - *result = 0.0; - } else { - *result = 1.0; - } + if (NUMERIC_IS_NAN(os)) + *result = 0.0; + else + *result = 1.0; - } else { + } + else + { - Numeric nul = DatumGetNumeric(DirectFunctionCall1( int4_numeric , Int32GetDatum (0) ) ); + Numeric nul = DatumGetNumeric(DirectFunctionCall1(int4_numeric, Int32GetDatum(0))); - *result = 0.0; + *result = 0.0; - if ( DirectFunctionCall2( numeric_gt , NumericGetDatum(ds), NumericGetDatum(nul) ) ) - { + if (DirectFunctionCall2(numeric_gt, 
NumericGetDatum(ds), NumericGetDatum(nul))) + { - *result += FLT_MIN ; - os = DatumGetNumeric(DirectFunctionCall2( - numeric_div, - NumericGetDatum(ds), - NumericGetDatum(us) - )); - *result += ( float4 ) DatumGetFloat8( DirectFunctionCall1( numeric_float8_no_overflow , NumericGetDatum(os) ) ); - pfree ( os ); + *result += FLT_MIN; + os = DatumGetNumeric(DirectFunctionCall2( + numeric_div, + NumericGetDatum(ds), + NumericGetDatum(us) + )); + *result += (float4) DatumGetFloat8(DirectFunctionCall1(numeric_float8_no_overflow, NumericGetDatum(os))); + pfree(os); - } + } - pfree ( nul ); - } + pfree(nul); + } - if ( *result > 0 ) - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); + if (*result > 0) + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); - pfree ( us ); - pfree ( ds ); + pfree(us); + pfree(ds); - PG_RETURN_POINTER( result ); + PG_RETURN_POINTER(result); } @@ -219,9 +230,9 @@ gbt_numeric_penalty (PG_FUNCTION_ARGS) Datum gbt_numeric_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); - gbt_var_picksplit ( entryvec, v, &tinfo ); - PG_RETURN_POINTER(v); -} + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); + gbt_var_picksplit(entryvec, v, &tinfo); + PG_RETURN_POINTER(v); +} diff --git a/contrib/btree_gist/btree_oid.c b/contrib/btree_gist/btree_oid.c index d7d98e4a7b3..db4182fc047 100644 --- a/contrib/btree_gist/btree_oid.c +++ b/contrib/btree_gist/btree_oid.c @@ -3,9 +3,9 @@ typedef struct { - Oid lower; - Oid upper; -} oidKEY; + Oid lower; + Oid upper; +} oidKEY; /* ** OID ops @@ -17,60 +17,63 @@ PG_FUNCTION_INFO_V1(gbt_oid_consistent); PG_FUNCTION_INFO_V1(gbt_oid_penalty); PG_FUNCTION_INFO_V1(gbt_oid_same); -Datum gbt_oid_compress(PG_FUNCTION_ARGS); -Datum gbt_oid_union(PG_FUNCTION_ARGS); -Datum gbt_oid_picksplit(PG_FUNCTION_ARGS); -Datum gbt_oid_consistent(PG_FUNCTION_ARGS); -Datum gbt_oid_penalty(PG_FUNCTION_ARGS); -Datum gbt_oid_same(PG_FUNCTION_ARGS); +Datum gbt_oid_compress(PG_FUNCTION_ARGS); +Datum gbt_oid_union(PG_FUNCTION_ARGS); +Datum gbt_oid_picksplit(PG_FUNCTION_ARGS); +Datum gbt_oid_consistent(PG_FUNCTION_ARGS); +Datum gbt_oid_penalty(PG_FUNCTION_ARGS); +Datum gbt_oid_same(PG_FUNCTION_ARGS); -static bool gbt_oidgt (const void *a, const void *b) +static bool +gbt_oidgt(const void *a, const void *b) { - return ( *((Oid*)a) > *((Oid*)b) ); + return (*((Oid *) a) > *((Oid *) b)); } -static bool gbt_oidge (const void *a, const void *b) +static bool +gbt_oidge(const void *a, const void *b) { - return ( *((Oid*)a) >= *((Oid*)b) ); + return (*((Oid *) a) >= *((Oid *) b)); } -static bool gbt_oideq (const void *a, const void *b) +static bool +gbt_oideq(const void *a, const void *b) { - return ( *((Oid*)a) == *((Oid*)b) ); + return (*((Oid *) a) == *((Oid *) b)); } -static bool gbt_oidle (const void *a, const void *b) +static bool +gbt_oidle(const void *a, const void *b) { - return ( *((Oid*)a) <= *((Oid*)b) ); + return (*((Oid *) a) <= *((Oid *) b)); } -static bool gbt_oidlt (const void *a, const void *b) +static bool +gbt_oidlt(const void *a, const void *b) { - return ( *((Oid*)a) < *((Oid*)b) ); + return (*((Oid *) a) < *((Oid *) b)); } static int gbt_oidkey_cmp(const void *a, const void *b) { - if ( *(Oid*)&(((Nsrt *) a)->t[0]) > *(Oid*)&(((Nsrt *) b)->t[0]) ){ - return 1; - } else - if ( *(Oid*)&(((Nsrt *) 
a)->t[0]) < *(Oid*)&(((Nsrt *) b)->t[0]) ){ - return -1; - } - return 0; + if (*(Oid *) &(((Nsrt *) a)->t[0]) > *(Oid *) &(((Nsrt *) b)->t[0])) + return 1; + else if (*(Oid *) &(((Nsrt *) a)->t[0]) < *(Oid *) &(((Nsrt *) b)->t[0])) + return -1; + return 0; } -static const gbtree_ninfo tinfo = -{ - gbt_t_oid, - sizeof(Oid), - gbt_oidgt, - gbt_oidge, - gbt_oideq, - gbt_oidle, - gbt_oidlt, - gbt_oidkey_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_oid, + sizeof(Oid), + gbt_oidgt, + gbt_oidge, + gbt_oideq, + gbt_oidle, + gbt_oidlt, + gbt_oidkey_cmp }; @@ -82,9 +85,10 @@ static const gbtree_ninfo tinfo = Datum gbt_oid_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } @@ -92,69 +96,72 @@ Datum gbt_oid_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - Oid query = PG_GETARG_OID(1); - oidKEY *kkk = (oidKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + Oid query = PG_GETARG_OID(1); + oidKEY *kkk = (oidKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_oid_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(oidKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(oidKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(oidKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(oidKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_oid_penalty(PG_FUNCTION_ARGS) { - oidKEY *origentry = (oidKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - oidKEY *newentry = (oidKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); + oidKEY *origentry = (oidKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + oidKEY *newentry = (oidKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + + Oid res = 0; - Oid res = 0 ; + *result = 0.0; - *result = 0.0; - - penalty_range_enlarge ( origentry->lower, origentry->upper, newentry->lower , newentry->upper ); + penalty_range_enlarge(origentry->lower, origentry->upper, newentry->lower, newentry->upper); - if ( res > 0 ){ - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + origentry->upper - origentry->lower ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - } + if (res > 0) + { + *result += FLT_MIN; + *result += (float) (res / ((double) (res + origentry->upper - origentry->lower))); + *result *= (FLT_MAX / (((GISTENTRY *) 
PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } Datum gbt_oid_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_oid_same(PG_FUNCTION_ARGS) { - oidKEY *b1 = (oidKEY *) PG_GETARG_POINTER(0); - oidKEY *b2 = (oidKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + oidKEY *b1 = (oidKEY *) PG_GETARG_POINTER(0); + oidKEY *b2 = (oidKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } diff --git a/contrib/btree_gist/btree_text.c b/contrib/btree_gist/btree_text.c index fac9f554c0f..d2603e1d911 100644 --- a/contrib/btree_gist/btree_text.c +++ b/contrib/btree_gist/btree_text.c @@ -14,95 +14,102 @@ PG_FUNCTION_INFO_V1(gbt_bpchar_consistent); PG_FUNCTION_INFO_V1(gbt_text_penalty); PG_FUNCTION_INFO_V1(gbt_text_same); -Datum gbt_text_compress(PG_FUNCTION_ARGS); -Datum gbt_bpchar_compress(PG_FUNCTION_ARGS); -Datum gbt_text_union(PG_FUNCTION_ARGS); -Datum gbt_text_picksplit(PG_FUNCTION_ARGS); -Datum gbt_text_consistent(PG_FUNCTION_ARGS); -Datum gbt_bpchar_consistent(PG_FUNCTION_ARGS); -Datum gbt_text_penalty(PG_FUNCTION_ARGS); -Datum gbt_text_same(PG_FUNCTION_ARGS); +Datum gbt_text_compress(PG_FUNCTION_ARGS); +Datum gbt_bpchar_compress(PG_FUNCTION_ARGS); +Datum gbt_text_union(PG_FUNCTION_ARGS); +Datum gbt_text_picksplit(PG_FUNCTION_ARGS); +Datum gbt_text_consistent(PG_FUNCTION_ARGS); +Datum gbt_bpchar_consistent(PG_FUNCTION_ARGS); +Datum gbt_text_penalty(PG_FUNCTION_ARGS); +Datum gbt_text_same(PG_FUNCTION_ARGS); /* define for comparison */ -static bool gbt_textgt (const void *a, const void *b) +static bool +gbt_textgt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( text_gt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(text_gt, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_textge (const void *a, const void *b) +static bool +gbt_textge(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( text_ge ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(text_ge, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_texteq (const void *a, const void *b) +static bool +gbt_texteq(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( texteq ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(texteq, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_textle (const void *a, const void *b) +static bool +gbt_textle(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( text_le ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(text_le, PointerGetDatum(a), PointerGetDatum(b)))); } -static bool gbt_textlt (const void *a, const void *b) +static bool +gbt_textlt(const void *a, const void *b) { - return ( DatumGetBool(DirectFunctionCall2( text_lt ,PointerGetDatum( a ),PointerGetDatum( b ) ) ) ); + return (DatumGetBool(DirectFunctionCall2(text_lt, PointerGetDatum(a), 
PointerGetDatum(b)))); } -static int32 gbt_textcmp ( const bytea * a , const bytea * b ) +static int32 +gbt_textcmp(const bytea *a, const bytea *b) { - return strcmp( VARDATA(a), VARDATA(b) ); + return strcmp(VARDATA(a), VARDATA(b)); } - + /* * Converts data of leaf using strxfrm ( locale support ) */ static bytea * -gbt_text_xfrm ( bytea * leaf ) +gbt_text_xfrm(bytea *leaf) { - bytea * out = leaf; - int32 ilen = VARSIZE (leaf) - VARHDRSZ; - int32 olen ; - char * sin; - char * sou; + bytea *out = leaf; + int32 ilen = VARSIZE(leaf) - VARHDRSZ; + int32 olen; + char *sin; + char *sou; sin = palloc(ilen + 1); - memcpy (sin, (void*) VARDATA(leaf) ,ilen ); - sin[ilen] = '\0'; - - olen = strxfrm ( NULL, &sin[0], 0 ) + 1; - sou = palloc ( olen ); - olen = strxfrm ( sou , &sin[0] , olen ); - olen += VARHDRSZ; - out = palloc ( olen + 1 ); - out->vl_len = olen+1; - memcpy( (void*) VARDATA(out), sou, olen-VARHDRSZ ); - ((char*)out)[olen] = '\0'; - - pfree(sou); - pfree(sin); - - return out; + memcpy(sin, (void *) VARDATA(leaf), ilen); + sin[ilen] = '\0'; + + olen = strxfrm(NULL, &sin[0], 0) + 1; + sou = palloc(olen); + olen = strxfrm(sou, &sin[0], olen); + olen += VARHDRSZ; + out = palloc(olen + 1); + out->vl_len = olen + 1; + memcpy((void *) VARDATA(out), sou, olen - VARHDRSZ); + ((char *) out)[olen] = '\0'; + + pfree(sou); + pfree(sin); + + return out; } -static GBT_VARKEY * gbt_text_l2n ( GBT_VARKEY * leaf ) +static GBT_VARKEY * +gbt_text_l2n(GBT_VARKEY * leaf) { - - GBT_VARKEY *out = leaf ; - GBT_VARKEY_R r = gbt_var_key_readable ( leaf ); - bytea * o ; - o = gbt_text_xfrm ( r.lower ); - r.lower = r.upper = o; - out = gbt_var_key_copy ( &r , TRUE ); - pfree(o); + GBT_VARKEY *out = leaf; + GBT_VARKEY_R r = gbt_var_key_readable(leaf); + bytea *o; + + o = gbt_text_xfrm(r.lower); + r.lower = r.upper = o; + out = gbt_var_key_copy(&r, TRUE); + pfree(o); - return out; + return out; } @@ -112,16 +119,16 @@ static GBT_VARKEY * gbt_text_l2n ( GBT_VARKEY * leaf ) static const gbtree_vinfo tinfo = { - gbt_t_text, - TRUE, - TRUE, - gbt_textgt, - gbt_textge, - gbt_texteq, - gbt_textle, - gbt_textlt, - gbt_textcmp, - gbt_text_l2n + gbt_t_text, + TRUE, + TRUE, + gbt_textgt, + gbt_textge, + gbt_texteq, + gbt_textle, + gbt_textlt, + gbt_textcmp, + gbt_text_l2n }; @@ -132,36 +139,38 @@ static const gbtree_vinfo tinfo = Datum -gbt_text_compress (PG_FUNCTION_ARGS) +gbt_text_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - PG_RETURN_POINTER ( gbt_var_compress( entry, &tinfo ) ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + + PG_RETURN_POINTER(gbt_var_compress(entry, &tinfo)); } Datum -gbt_bpchar_compress (PG_FUNCTION_ARGS) +gbt_bpchar_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY * retval ; + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval; - if (entry->leafkey) - { + if (entry->leafkey) + { - Datum d = DirectFunctionCall1 ( rtrim1, entry->key ); - GISTENTRY * trim = palloc(sizeof(GISTENTRY)); + Datum d = DirectFunctionCall1(rtrim1, entry->key); + GISTENTRY *trim = palloc(sizeof(GISTENTRY)); - gistentryinit(*trim, d , - entry->rel, entry->page, - entry->offset, VARSIZE(DatumGetPointer(d)), TRUE); - retval = gbt_var_compress( trim , &tinfo ) ; + gistentryinit(*trim, d, + entry->rel, entry->page, + entry->offset, VARSIZE(DatumGetPointer(d)), TRUE); + retval = gbt_var_compress(trim, &tinfo); - pfree ( trim ); - pfree ( DatumGetPointer(d) ); - } else - retval = entry; + pfree(trim); + 
pfree(DatumGetPointer(d)); + } + else + retval = entry; - PG_RETURN_POINTER ( retval ); + PG_RETURN_POINTER(retval); } @@ -169,68 +178,66 @@ gbt_bpchar_compress (PG_FUNCTION_ARGS) Datum gbt_text_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer ( entry->key ) ; - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer ( PG_DETOAST_DATUM( entry->key ) ); - void *qtst = ( void * ) DatumGetPointer( PG_GETARG_DATUM(1) ); - void *query = ( void * ) DatumGetTextP ( PG_GETARG_DATUM(1) ); - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - bool retval = FALSE; - GBT_VARKEY_R r = gbt_var_key_readable ( key ); - - if ( GIST_LEAF(entry) ) - { - retval = gbt_var_consistent( &r, query, &strategy, TRUE, &tinfo ); - } else { - bytea * q = gbt_text_xfrm ( ( bytea * ) query ); - retval = gbt_var_consistent( &r, (void*)q, &strategy, FALSE, &tinfo ); - if ( q != query ) - pfree(q); - } - - if ( ktst != key ){ - pfree ( key ); - } - if ( qtst != query ){ - pfree ( query ); - } - - PG_RETURN_BOOL(retval); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer(entry->key); + GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + void *qtst = (void *) DatumGetPointer(PG_GETARG_DATUM(1)); + void *query = (void *) DatumGetTextP(PG_GETARG_DATUM(1)); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + bool retval = FALSE; + GBT_VARKEY_R r = gbt_var_key_readable(key); + + if (GIST_LEAF(entry)) + retval = gbt_var_consistent(&r, query, &strategy, TRUE, &tinfo); + else + { + bytea *q = gbt_text_xfrm((bytea *) query); + + retval = gbt_var_consistent(&r, (void *) q, &strategy, FALSE, &tinfo); + if (q != query) + pfree(q); + } + + if (ktst != key) + pfree(key); + if (qtst != query) + pfree(query); + + PG_RETURN_BOOL(retval); } Datum gbt_bpchar_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer ( entry->key ) ; - GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer ( PG_DETOAST_DATUM( entry->key ) ); - void *qtst = ( void * ) DatumGetPointer ( PG_GETARG_DATUM(1) ); - void *query = ( void * ) DatumGetPointer (PG_DETOAST_DATUM( PG_GETARG_DATUM(1) ) ); - void *trim = ( void * ) DatumGetPointer ( DirectFunctionCall1 ( rtrim1, PointerGetDatum ( query ) ) ) ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - bool retval = FALSE; - GBT_VARKEY_R r = gbt_var_key_readable ( key ); - - if ( GIST_LEAF(entry) ) - { - retval = gbt_var_consistent( &r, trim , &strategy, TRUE, &tinfo ); - } else { - bytea * q = gbt_text_xfrm ( ( bytea * ) trim ); - retval = gbt_var_consistent( &r, (void*)q, &strategy, FALSE, &tinfo ); - if ( q != trim ) - pfree(q); - } - - pfree(trim); - - if ( ktst != key ){ - pfree ( key ); - } - if ( qtst != query ){ - pfree ( query ); - } - PG_RETURN_BOOL(retval); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GBT_VARKEY *ktst = (GBT_VARKEY *) DatumGetPointer(entry->key); + GBT_VARKEY *key = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + void *qtst = (void *) DatumGetPointer(PG_GETARG_DATUM(1)); + void *query = (void *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1))); + void *trim = (void *) DatumGetPointer(DirectFunctionCall1(rtrim1, PointerGetDatum(query))); + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + bool retval = FALSE; + GBT_VARKEY_R r = gbt_var_key_readable(key); 
+ + if (GIST_LEAF(entry)) + retval = gbt_var_consistent(&r, trim, &strategy, TRUE, &tinfo); + else + { + bytea *q = gbt_text_xfrm((bytea *) trim); + + retval = gbt_var_consistent(&r, (void *) q, &strategy, FALSE, &tinfo); + if (q != trim) + pfree(q); + } + + pfree(trim); + + if (ktst != key) + pfree(key); + if (qtst != query) + pfree(query); + PG_RETURN_BOOL(retval); } @@ -239,37 +246,40 @@ gbt_bpchar_consistent(PG_FUNCTION_ARGS) Datum gbt_text_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - int32 *size = (int *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_union ( entryvec , size , &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + int32 *size = (int *) PG_GETARG_POINTER(1); + + PG_RETURN_POINTER(gbt_var_union(entryvec, size, &tinfo)); } - + Datum gbt_text_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); - gbt_var_picksplit ( entryvec, v, &tinfo ); - PG_RETURN_POINTER(v); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); + + gbt_var_picksplit(entryvec, v, &tinfo); + PG_RETURN_POINTER(v); } Datum gbt_text_same(PG_FUNCTION_ARGS) { - Datum d1 = PG_GETARG_DATUM(0); - Datum d2 = PG_GETARG_DATUM(1); - bool *result = (bool *) PG_GETARG_POINTER(2); - PG_RETURN_POINTER( gbt_var_same ( result, d1 , d2 , &tinfo )); + Datum d1 = PG_GETARG_DATUM(0); + Datum d2 = PG_GETARG_DATUM(1); + bool *result = (bool *) PG_GETARG_POINTER(2); + + PG_RETURN_POINTER(gbt_var_same(result, d1, d2, &tinfo)); } Datum gbt_text_penalty(PG_FUNCTION_ARGS) { - float *result = (float *) PG_GETARG_POINTER(2); - GISTENTRY * o = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY * n = (GISTENTRY *) PG_GETARG_POINTER(1); - PG_RETURN_POINTER( gbt_var_penalty ( result ,o , n, &tinfo ) ); -} + float *result = (float *) PG_GETARG_POINTER(2); + GISTENTRY *o = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *n = (GISTENTRY *) PG_GETARG_POINTER(1); + PG_RETURN_POINTER(gbt_var_penalty(result, o, n, &tinfo)); +} diff --git a/contrib/btree_gist/btree_time.c b/contrib/btree_gist/btree_time.c index 410bec0fe43..cbbe05dc271 100644 --- a/contrib/btree_gist/btree_time.c +++ b/contrib/btree_gist/btree_time.c @@ -4,9 +4,9 @@ typedef struct { - TimeADT lower; - TimeADT upper; -} timeKEY; + TimeADT lower; + TimeADT upper; +} timeKEY; /* ** time ops @@ -20,49 +20,54 @@ PG_FUNCTION_INFO_V1(gbt_timetz_consistent); PG_FUNCTION_INFO_V1(gbt_time_penalty); PG_FUNCTION_INFO_V1(gbt_time_same); -Datum gbt_time_compress(PG_FUNCTION_ARGS); -Datum gbt_timetz_compress(PG_FUNCTION_ARGS); -Datum gbt_time_union(PG_FUNCTION_ARGS); -Datum gbt_time_picksplit(PG_FUNCTION_ARGS); -Datum gbt_time_consistent(PG_FUNCTION_ARGS); -Datum gbt_timetz_consistent(PG_FUNCTION_ARGS); -Datum gbt_time_penalty(PG_FUNCTION_ARGS); -Datum gbt_time_same(PG_FUNCTION_ARGS); +Datum gbt_time_compress(PG_FUNCTION_ARGS); +Datum gbt_timetz_compress(PG_FUNCTION_ARGS); +Datum gbt_time_union(PG_FUNCTION_ARGS); +Datum gbt_time_picksplit(PG_FUNCTION_ARGS); +Datum gbt_time_consistent(PG_FUNCTION_ARGS); +Datum gbt_timetz_consistent(PG_FUNCTION_ARGS); +Datum gbt_time_penalty(PG_FUNCTION_ARGS); +Datum gbt_time_same(PG_FUNCTION_ARGS); -static bool gbt_timegt (const void *a, const void *b) +static bool +gbt_timegt(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(time_gt,TimeADTGetDatum( 
*((TimeADT*)a) ), TimeADTGetDatum( *((TimeADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(time_gt, TimeADTGetDatum(*((TimeADT *) a)), TimeADTGetDatum(*((TimeADT *) b))) + ); } -static bool gbt_timege (const void *a, const void *b) +static bool +gbt_timege(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(time_ge,TimeADTGetDatum( *((TimeADT*)a) ), TimeADTGetDatum( *((TimeADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(time_ge, TimeADTGetDatum(*((TimeADT *) a)), TimeADTGetDatum(*((TimeADT *) b))) + ); } -static bool gbt_timeeq (const void *a, const void *b) +static bool +gbt_timeeq(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(time_eq,TimeADTGetDatum( *((TimeADT*)a) ), TimeADTGetDatum( *((TimeADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(time_eq, TimeADTGetDatum(*((TimeADT *) a)), TimeADTGetDatum(*((TimeADT *) b))) + ); } -static bool gbt_timele (const void *a, const void *b) +static bool +gbt_timele(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(time_le,TimeADTGetDatum( *((TimeADT*)a) ), TimeADTGetDatum( *((TimeADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(time_le, TimeADTGetDatum(*((TimeADT *) a)), TimeADTGetDatum(*((TimeADT *) b))) + ); } -static bool gbt_timelt (const void *a, const void *b) +static bool +gbt_timelt(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(time_lt,TimeADTGetDatum( *((TimeADT*)a) ), TimeADTGetDatum( *((TimeADT*)b) ) ) - ); + return DatumGetBool( + DirectFunctionCall2(time_lt, TimeADTGetDatum(*((TimeADT *) a)), TimeADTGetDatum(*((TimeADT *) b))) + ); } @@ -70,26 +75,24 @@ static bool gbt_timelt (const void *a, const void *b) static int gbt_timekey_cmp(const void *a, const void *b) { - if ( gbt_timegt( (void*)&(((Nsrt *) a)->t[0]) , (void*)&(((Nsrt *) b)->t[0]) ) ){ - return 1; - } else - if ( gbt_timelt( (void*)&(((Nsrt *) a)->t[0]) , (void*)&(((Nsrt *) b)->t[0]) ) ){ - return -1; - } - return 0; + if (gbt_timegt((void *) &(((Nsrt *) a)->t[0]), (void *) &(((Nsrt *) b)->t[0]))) + return 1; + else if (gbt_timelt((void *) &(((Nsrt *) a)->t[0]), (void *) &(((Nsrt *) b)->t[0]))) + return -1; + return 0; } -static const gbtree_ninfo tinfo = +static const gbtree_ninfo tinfo = { - gbt_t_time, - sizeof(TimeADT), - gbt_timegt, - gbt_timege, - gbt_timeeq, - gbt_timele, - gbt_timelt, - gbt_timekey_cmp + gbt_t_time, + sizeof(TimeADT), + gbt_timegt, + gbt_timege, + gbt_timeeq, + gbt_timele, + gbt_timelt, + gbt_timekey_cmp }; @@ -102,150 +105,154 @@ static const gbtree_ninfo tinfo = Datum gbt_time_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } Datum gbt_timetz_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval; - - if (entry->leafkey) - { - timeKEY *r = (timeKEY *) palloc(sizeof(timeKEY)); - TimeTzADT *tz = DatumGetTimeTzADTP(entry->key); - - retval = palloc(sizeof(GISTENTRY)); - - /* We are using the time + zone only to compress */ - r->lower = r->upper = ( tz->time + tz->zone ) ; - gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, - entry->offset, sizeof(timeKEY), FALSE); - } - else - retval = entry; - PG_RETURN_POINTER(retval); + GISTENTRY *entry = 
(GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval; + + if (entry->leafkey) + { + timeKEY *r = (timeKEY *) palloc(sizeof(timeKEY)); + TimeTzADT *tz = DatumGetTimeTzADTP(entry->key); + + retval = palloc(sizeof(GISTENTRY)); + + /* We are using the time + zone only to compress */ + r->lower = r->upper = (tz->time + tz->zone); + gistentryinit(*retval, PointerGetDatum(r), + entry->rel, entry->page, + entry->offset, sizeof(timeKEY), FALSE); + } + else + retval = entry; + PG_RETURN_POINTER(retval); } Datum gbt_time_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - TimeADT query = PG_GETARG_TIMEADT( 1 ); - timeKEY *kkk = (timeKEY*) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + TimeADT query = PG_GETARG_TIMEADT(1); + timeKEY *kkk = (timeKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_timetz_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - TimeTzADT *query = PG_GETARG_TIMETZADT_P( 1 ); - TimeADT qqq = query->time + query->zone ; - timeKEY *kkk = (timeKEY*) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&qqq, &strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + TimeTzADT *query = PG_GETARG_TIMETZADT_P(1); + TimeADT qqq = query->time + query->zone; + timeKEY *kkk = (timeKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &qqq, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_time_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(timeKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(timeKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(timeKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(timeKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } Datum gbt_time_penalty(PG_FUNCTION_ARGS) { - timeKEY *origentry = (timeKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - timeKEY *newentry = (timeKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - Interval *intr; + timeKEY *origentry = (timeKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + timeKEY *newentry = (timeKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + Interval *intr; + #ifdef HAVE_INT64_TIMESTAMP - int64 res; + int64 
res; + #else - double res; + double res; #endif - intr = DatumGetIntervalP(DirectFunctionCall2( - time_mi_time, - TimeADTGetDatum(newentry->upper), - TimeADTGetDatum(origentry->upper))); - - /* see interval_larger */ - res = Max(intr->time + intr->month * (30 * 86400), 0); - pfree(intr); - - intr = DatumGetIntervalP(DirectFunctionCall2( - time_mi_time, - TimeADTGetDatum(origentry->lower), - TimeADTGetDatum(newentry->lower))); - - /* see interval_larger */ - res += Max(intr->time + intr->month * (30 * 86400), 0); - pfree(intr); - - *result = 0.0; - - if ( res > 0 ){ - intr = DatumGetIntervalP(DirectFunctionCall2( - time_mi_time, - TimeADTGetDatum(origentry->upper), - TimeADTGetDatum(origentry->lower))); - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + intr->time + intr->month * (30 * 86400) ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - pfree ( intr ); - } - - PG_RETURN_POINTER(result); + intr = DatumGetIntervalP(DirectFunctionCall2( + time_mi_time, + TimeADTGetDatum(newentry->upper), + TimeADTGetDatum(origentry->upper))); + + /* see interval_larger */ + res = Max(intr->time + intr->month * (30 * 86400), 0); + pfree(intr); + + intr = DatumGetIntervalP(DirectFunctionCall2( + time_mi_time, + TimeADTGetDatum(origentry->lower), + TimeADTGetDatum(newentry->lower))); + + /* see interval_larger */ + res += Max(intr->time + intr->month * (30 * 86400), 0); + pfree(intr); + + *result = 0.0; + + if (res > 0) + { + intr = DatumGetIntervalP(DirectFunctionCall2( + time_mi_time, + TimeADTGetDatum(origentry->upper), + TimeADTGetDatum(origentry->lower))); + *result += FLT_MIN; + *result += (float) (res / ((double) (res + intr->time + intr->month * (30 * 86400)))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + pfree(intr); + } + + PG_RETURN_POINTER(result); } Datum gbt_time_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_time_same(PG_FUNCTION_ARGS) { - timeKEY *b1 = (timeKEY *) PG_GETARG_POINTER(0); - timeKEY *b2 = (timeKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + timeKEY *b1 = (timeKEY *) PG_GETARG_POINTER(0); + timeKEY *b2 = (timeKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } - diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c index 97f209ed74a..f9d801a3fc1 100644 --- a/contrib/btree_gist/btree_ts.c +++ b/contrib/btree_gist/btree_ts.c @@ -3,9 +3,9 @@ typedef struct { - Timestamp lower; - Timestamp upper; -} tsKEY; + Timestamp lower; + Timestamp upper; +} tsKEY; /* ** timestamp ops @@ -19,75 +19,78 @@ PG_FUNCTION_INFO_V1(gbt_tstz_consistent); PG_FUNCTION_INFO_V1(gbt_ts_penalty); PG_FUNCTION_INFO_V1(gbt_ts_same); -Datum gbt_ts_compress(PG_FUNCTION_ARGS); -Datum gbt_tstz_compress(PG_FUNCTION_ARGS); -Datum gbt_ts_union(PG_FUNCTION_ARGS); -Datum gbt_ts_picksplit(PG_FUNCTION_ARGS); -Datum gbt_ts_consistent(PG_FUNCTION_ARGS); -Datum gbt_tstz_consistent(PG_FUNCTION_ARGS); -Datum gbt_ts_penalty(PG_FUNCTION_ARGS); -Datum gbt_ts_same(PG_FUNCTION_ARGS); +Datum 
gbt_ts_compress(PG_FUNCTION_ARGS); +Datum gbt_tstz_compress(PG_FUNCTION_ARGS); +Datum gbt_ts_union(PG_FUNCTION_ARGS); +Datum gbt_ts_picksplit(PG_FUNCTION_ARGS); +Datum gbt_ts_consistent(PG_FUNCTION_ARGS); +Datum gbt_tstz_consistent(PG_FUNCTION_ARGS); +Datum gbt_ts_penalty(PG_FUNCTION_ARGS); +Datum gbt_ts_same(PG_FUNCTION_ARGS); -static bool gbt_tsgt (const void *a, const void *b) +static bool +gbt_tsgt(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(timestamp_gt,PointerGetDatum( a ), PointerGetDatum( b ) ) - ); + return DatumGetBool( + DirectFunctionCall2(timestamp_gt, PointerGetDatum(a), PointerGetDatum(b)) + ); } -static bool gbt_tsge (const void *a, const void *b) +static bool +gbt_tsge(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(timestamp_ge,PointerGetDatum( a ), PointerGetDatum( b ) ) - ); + return DatumGetBool( + DirectFunctionCall2(timestamp_ge, PointerGetDatum(a), PointerGetDatum(b)) + ); } -static bool gbt_tseq (const void *a, const void *b) +static bool +gbt_tseq(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(timestamp_eq,PointerGetDatum( a ), PointerGetDatum( b ) ) - ); + return DatumGetBool( + DirectFunctionCall2(timestamp_eq, PointerGetDatum(a), PointerGetDatum(b)) + ); } -static bool gbt_tsle (const void *a, const void *b) +static bool +gbt_tsle(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(timestamp_le,PointerGetDatum( a ), PointerGetDatum( b ) ) - ); + return DatumGetBool( + DirectFunctionCall2(timestamp_le, PointerGetDatum(a), PointerGetDatum(b)) + ); } -static bool gbt_tslt (const void *a, const void *b) +static bool +gbt_tslt(const void *a, const void *b) { - return DatumGetBool( - DirectFunctionCall2(timestamp_lt,PointerGetDatum( a ), PointerGetDatum( b ) ) - ); + return DatumGetBool( + DirectFunctionCall2(timestamp_lt, PointerGetDatum(a), PointerGetDatum(b)) + ); } static int gbt_tskey_cmp(const void *a, const void *b) { - if ( gbt_tsgt( (void*)&(((Nsrt *) a)->t[0]) , (void*)&(((Nsrt *) b)->t[0]) ) ){ - return 1; - } else - if ( gbt_tslt( (void*)&(((Nsrt *) a)->t[0]) , (void*)&(((Nsrt *) b)->t[0]) ) ){ - return -1; - } - return 0; + if (gbt_tsgt((void *) &(((Nsrt *) a)->t[0]), (void *) &(((Nsrt *) b)->t[0]))) + return 1; + else if (gbt_tslt((void *) &(((Nsrt *) a)->t[0]), (void *) &(((Nsrt *) b)->t[0]))) + return -1; + return 0; } -static const gbtree_ninfo tinfo = -{ - gbt_t_ts, - sizeof(Timestamp), - gbt_tsgt, - gbt_tsge, - gbt_tseq, - gbt_tsle, - gbt_tslt, - gbt_tskey_cmp +static const gbtree_ninfo tinfo = +{ + gbt_t_ts, + sizeof(Timestamp), + gbt_tsgt, + gbt_tsge, + gbt_tseq, + gbt_tsle, + gbt_tslt, + gbt_tskey_cmp }; @@ -97,26 +100,28 @@ static const gbtree_ninfo tinfo = -static Timestamp * tstz_to_ts_gmt ( Timestamp * gmt, TimestampTz * ts ) +static Timestamp * +tstz_to_ts_gmt(Timestamp *gmt, TimestampTz *ts) { - int val, tz ; + int val, + tz; + + *gmt = *ts; + DecodeSpecial(0, "gmt", &val); - *gmt = *ts; - DecodeSpecial(0, "gmt", &val); - - if ( ! 
TIMESTAMP_NOT_FINITE(*ts)) - { - tz = val * 60; + if (!TIMESTAMP_NOT_FINITE(*ts)) + { + tz = val * 60; #ifdef HAVE_INT64_TIMESTAMP - *gmt -= (tz * INT64CONST(1000000)); + *gmt -= (tz * INT64CONST(1000000)); #else - *gmt -= tz; - *gmt = JROUND(*gmt); + *gmt -= tz; + *gmt = JROUND(*gmt); #endif - - } - return gmt; + + } + return gmt; } @@ -125,84 +130,86 @@ static Timestamp * tstz_to_ts_gmt ( Timestamp * gmt, TimestampTz * ts ) Datum gbt_ts_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval = NULL; - PG_RETURN_POINTER( gbt_num_compress( retval , entry , &tinfo )); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval = NULL; + + PG_RETURN_POINTER(gbt_num_compress(retval, entry, &tinfo)); } Datum gbt_tstz_compress(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - GISTENTRY *retval ; + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + GISTENTRY *retval; - if (entry->leafkey) - { - tsKEY *r = (tsKEY *) palloc(sizeof(tsKEY)); + if (entry->leafkey) + { + tsKEY *r = (tsKEY *) palloc(sizeof(tsKEY)); - TimestampTz ts = *(TimestampTz *) DatumGetPointer(entry->key); - Timestamp gmt ; + TimestampTz ts = *(TimestampTz *) DatumGetPointer(entry->key); + Timestamp gmt; - tstz_to_ts_gmt ( &gmt, &ts ); + tstz_to_ts_gmt(&gmt, &ts); - retval = palloc(sizeof(GISTENTRY)); - r->lower = r->upper = gmt ; - gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, - entry->offset, sizeof(tsKEY), FALSE); - } - else - retval = entry; + retval = palloc(sizeof(GISTENTRY)); + r->lower = r->upper = gmt; + gistentryinit(*retval, PointerGetDatum(r), + entry->rel, entry->page, + entry->offset, sizeof(tsKEY), FALSE); + } + else + retval = entry; - PG_RETURN_POINTER( retval ); + PG_RETURN_POINTER(retval); } Datum gbt_ts_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - Timestamp *query = (Timestamp *) PG_GETARG_POINTER(1); - tsKEY *kkk = (tsKEY *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - - key.lower = (GBT_NUMKEY*) &kkk->lower ; - key.upper = (GBT_NUMKEY*) &kkk->upper ; - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)query,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + Timestamp *query = (Timestamp *) PG_GETARG_POINTER(1); + tsKEY *kkk = (tsKEY *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); + + key.lower = (GBT_NUMKEY *) & kkk->lower; + key.upper = (GBT_NUMKEY *) & kkk->upper; + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) query, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_tstz_consistent(PG_FUNCTION_ARGS) { - GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - TimestampTz *query = (Timestamp *) PG_GETARG_POINTER(1); - char *kkk = (char *) DatumGetPointer(entry->key); - GBT_NUMKEY_R key ; - StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); - Timestamp qqq ; - - key.lower = (GBT_NUMKEY*) &kkk[0]; - key.upper = (GBT_NUMKEY*) &kkk[MAXALIGN(tinfo.size)]; - tstz_to_ts_gmt ( &qqq, query ); - - PG_RETURN_BOOL( - gbt_num_consistent( &key, (void*)&qqq,&strategy,GIST_LEAF(entry),&tinfo) - ); + GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); + TimestampTz *query = (Timestamp *) PG_GETARG_POINTER(1); + char *kkk = (char *) DatumGetPointer(entry->key); + GBT_NUMKEY_R key; + StrategyNumber strategy = (StrategyNumber) 
PG_GETARG_UINT16(2); + Timestamp qqq; + + key.lower = (GBT_NUMKEY *) & kkk[0]; + key.upper = (GBT_NUMKEY *) & kkk[MAXALIGN(tinfo.size)]; + tstz_to_ts_gmt(&qqq, query); + + PG_RETURN_BOOL( + gbt_num_consistent(&key, (void *) &qqq, &strategy, GIST_LEAF(entry), &tinfo) + ); } Datum gbt_ts_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - void *out = palloc(sizeof(tsKEY)); - *(int *) PG_GETARG_POINTER(1) = sizeof(tsKEY); - PG_RETURN_POINTER( gbt_num_union ( (void*)out, entryvec, &tinfo ) ); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + void *out = palloc(sizeof(tsKEY)); + + *(int *) PG_GETARG_POINTER(1) = sizeof(tsKEY); + PG_RETURN_POINTER(gbt_num_union((void *) out, entryvec, &tinfo)); } @@ -210,52 +217,55 @@ Datum gbt_ts_penalty(PG_FUNCTION_ARGS) { - tsKEY *origentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); - tsKEY *newentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); - float *result = (float *) PG_GETARG_POINTER(2); - Interval *intr; + tsKEY *origentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + tsKEY *newentry = (tsKEY *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); + float *result = (float *) PG_GETARG_POINTER(2); + Interval *intr; + #ifdef HAVE_INT64_TIMESTAMP - int64 res; + int64 res; + #else - double res; + double res; #endif - intr = DatumGetIntervalP(DirectFunctionCall2( - timestamp_mi, - TimestampGetDatum(newentry->upper), - TimestampGetDatum(origentry->upper) - )); + intr = DatumGetIntervalP(DirectFunctionCall2( + timestamp_mi, + TimestampGetDatum(newentry->upper), + TimestampGetDatum(origentry->upper) + )); - /* see interval_larger */ + /* see interval_larger */ - res = Max(intr->time + intr->month * (30 * 86400), 0); - pfree(intr); + res = Max(intr->time + intr->month * (30 * 86400), 0); + pfree(intr); - intr = DatumGetIntervalP(DirectFunctionCall2( - timestamp_mi, - TimestampGetDatum(origentry->lower), - TimestampGetDatum(newentry->lower) - )); + intr = DatumGetIntervalP(DirectFunctionCall2( + timestamp_mi, + TimestampGetDatum(origentry->lower), + TimestampGetDatum(newentry->lower) + )); - /* see interval_larger */ - res += Max(intr->time + intr->month * (30 * 86400), 0); - pfree(intr); + /* see interval_larger */ + res += Max(intr->time + intr->month * (30 * 86400), 0); + pfree(intr); - *result = 0.0; + *result = 0.0; - if ( res > 0 ){ - intr = DatumGetIntervalP(DirectFunctionCall2( - timestamp_mi, - TimestampGetDatum(origentry->upper), - TimestampGetDatum(origentry->lower) - )); - *result += FLT_MIN ; - *result += (float) ( res / ( (double) ( res + intr->time + intr->month * (30 * 86400) ) ) ); - *result *= ( FLT_MAX / ( ( (GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1 ) ); - pfree(intr); - } + if (res > 0) + { + intr = DatumGetIntervalP(DirectFunctionCall2( + timestamp_mi, + TimestampGetDatum(origentry->upper), + TimestampGetDatum(origentry->lower) + )); + *result += FLT_MIN; + *result += (float) (res / ((double) (res + intr->time + intr->month * (30 * 86400)))); + *result *= (FLT_MAX / (((GISTENTRY *) PG_GETARG_POINTER(0))->rel->rd_att->natts + 1)); + pfree(intr); + } - PG_RETURN_POINTER(result); + PG_RETURN_POINTER(result); } @@ -263,21 +273,20 @@ gbt_ts_penalty(PG_FUNCTION_ARGS) Datum gbt_ts_picksplit(PG_FUNCTION_ARGS) { - PG_RETURN_POINTER(gbt_num_picksplit( - (GistEntryVector *) PG_GETARG_POINTER(0), - (GIST_SPLITVEC *) PG_GETARG_POINTER(1), - &tinfo - )); + 
PG_RETURN_POINTER(gbt_num_picksplit( + (GistEntryVector *) PG_GETARG_POINTER(0), + (GIST_SPLITVEC *) PG_GETARG_POINTER(1), + &tinfo + )); } Datum gbt_ts_same(PG_FUNCTION_ARGS) { - tsKEY *b1 = (tsKEY *) PG_GETARG_POINTER(0); - tsKEY *b2 = (tsKEY *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + tsKEY *b1 = (tsKEY *) PG_GETARG_POINTER(0); + tsKEY *b2 = (tsKEY *) PG_GETARG_POINTER(1); + bool *result = (bool *) PG_GETARG_POINTER(2); - *result = gbt_num_same ( (void*)b1, (void*)b2, &tinfo ); - PG_RETURN_POINTER(result); + *result = gbt_num_same((void *) b1, (void *) b2, &tinfo); + PG_RETURN_POINTER(result); } - diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c index bc104de96b3..f5244274245 100644 --- a/contrib/btree_gist/btree_utils_num.c +++ b/contrib/btree_gist/btree_utils_num.c @@ -3,58 +3,60 @@ #include "utils/date.h" extern GISTENTRY * -gbt_num_compress( GISTENTRY *retval , GISTENTRY *entry , const gbtree_ninfo * tinfo ) +gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, const gbtree_ninfo * tinfo) { - if (entry->leafkey) - { - - union { - int16 i2; - int32 i4; - TimeADT ts; - DateADT dt; - } v ; - - GBT_NUMKEY *r = ( GBT_NUMKEY * ) palloc(2 * tinfo->size ); - void *leaf = NULL; - - switch ( tinfo->t ) - { - case gbt_t_int2 : - v.i2 = DatumGetInt16(entry->key); - leaf = &v.i2; - break; - case gbt_t_int4 : - v.i4 = DatumGetInt32(entry->key); - leaf = &v.i4; - break; - case gbt_t_oid : - v.i4 = DatumGetObjectId(entry->key); - leaf = &v.i4; - break; - case gbt_t_time : - v.ts = DatumGetTimeADT(entry->key); - leaf = &v.ts; - break; - case gbt_t_date : - v.dt = DatumGetDateADT(entry->key); - leaf = &v.dt; - break; - default : - leaf = DatumGetPointer(entry->key); - } - - memset ( (void*) &r[0] , 0 , 2*tinfo->size ); - memcpy ( (void*) &r[0] , leaf, tinfo->size ); - memcpy ( (void*) &r[tinfo->size] , leaf, tinfo->size ); - retval = palloc(sizeof(GISTENTRY)); - gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, - entry->offset,( 2 * tinfo->size ), FALSE); - } else - retval = entry; - - return retval; + if (entry->leafkey) + { + + union + { + int16 i2; + int32 i4; + TimeADT ts; + DateADT dt; + } v; + + GBT_NUMKEY *r = (GBT_NUMKEY *) palloc(2 * tinfo->size); + void *leaf = NULL; + + switch (tinfo->t) + { + case gbt_t_int2: + v.i2 = DatumGetInt16(entry->key); + leaf = &v.i2; + break; + case gbt_t_int4: + v.i4 = DatumGetInt32(entry->key); + leaf = &v.i4; + break; + case gbt_t_oid: + v.i4 = DatumGetObjectId(entry->key); + leaf = &v.i4; + break; + case gbt_t_time: + v.ts = DatumGetTimeADT(entry->key); + leaf = &v.ts; + break; + case gbt_t_date: + v.dt = DatumGetDateADT(entry->key); + leaf = &v.dt; + break; + default: + leaf = DatumGetPointer(entry->key); + } + + memset((void *) &r[0], 0, 2 * tinfo->size); + memcpy((void *) &r[0], leaf, tinfo->size); + memcpy((void *) &r[tinfo->size], leaf, tinfo->size); + retval = palloc(sizeof(GISTENTRY)); + gistentryinit(*retval, PointerGetDatum(r), entry->rel, entry->page, + entry->offset, (2 * tinfo->size), FALSE); + } + else + retval = entry; + + return retval; } @@ -65,34 +67,35 @@ gbt_num_compress( GISTENTRY *retval , GISTENTRY *entry , const gbtree_ninfo * */ extern void * -gbt_num_union( GBT_NUMKEY * out, const GistEntryVector * entryvec, const gbtree_ninfo * tinfo ) +gbt_num_union(GBT_NUMKEY * out, const GistEntryVector *entryvec, const gbtree_ninfo * tinfo) { - int i, - numranges; - GBT_NUMKEY * cur ; - GBT_NUMKEY_R o, c; + int i, + numranges; + GBT_NUMKEY *cur; + 
GBT_NUMKEY_R o, + c; - numranges = entryvec->n; - cur = (GBT_NUMKEY *) DatumGetPointer((entryvec->vector[0].key)); + numranges = entryvec->n; + cur = (GBT_NUMKEY *) DatumGetPointer((entryvec->vector[0].key)); - o.lower = &((GBT_NUMKEY *)out)[0]; - o.upper = &((GBT_NUMKEY *)out)[tinfo->size]; + o.lower = &((GBT_NUMKEY *) out)[0]; + o.upper = &((GBT_NUMKEY *) out)[tinfo->size]; - memcpy( (void*)out, (void*) cur, 2*tinfo->size ); + memcpy((void *) out, (void *) cur, 2 * tinfo->size); - for (i = 1; i < numranges; i++) - { - cur = (GBT_NUMKEY *) DatumGetPointer((entryvec->vector[i].key)); - c.lower = &cur[0]; - c.upper = &cur[tinfo->size]; - if ( (*tinfo->f_gt)(o.lower, c.lower) ) /* out->lower > cur->lower */ - memcpy( (void* ) o.lower, (void*) c.lower, tinfo->size ); - if ( (*tinfo->f_lt)(o.upper, c.upper) ) /* out->upper < cur->upper */ - memcpy( (void*) o.upper, (void*) c.upper, tinfo->size ); - } + for (i = 1; i < numranges; i++) + { + cur = (GBT_NUMKEY *) DatumGetPointer((entryvec->vector[i].key)); + c.lower = &cur[0]; + c.upper = &cur[tinfo->size]; + if ((*tinfo->f_gt) (o.lower, c.lower)) /* out->lower > cur->lower */ + memcpy((void *) o.lower, (void *) c.lower, tinfo->size); + if ((*tinfo->f_lt) (o.upper, c.upper)) /* out->upper < cur->upper */ + memcpy((void *) o.upper, (void *) c.upper, tinfo->size); + } - return out; + return out; } @@ -101,51 +104,54 @@ gbt_num_union( GBT_NUMKEY * out, const GistEntryVector * entryvec, const gbtree_ ** The GiST same method for numerical values */ -extern bool gbt_num_same ( const GBT_NUMKEY * a, const GBT_NUMKEY * b, const gbtree_ninfo * tinfo ) +extern bool +gbt_num_same(const GBT_NUMKEY * a, const GBT_NUMKEY * b, const gbtree_ninfo * tinfo) { - GBT_NUMKEY_R b1, b2 ; + GBT_NUMKEY_R b1, + b2; - b1.lower = &(((GBT_NUMKEY *)a)[0]); - b1.upper = &(((GBT_NUMKEY *)a)[tinfo->size]); - b2.lower = &(((GBT_NUMKEY *)b)[0]); - b2.upper = &(((GBT_NUMKEY *)b)[tinfo->size]); + b1.lower = &(((GBT_NUMKEY *) a)[0]); + b1.upper = &(((GBT_NUMKEY *) a)[tinfo->size]); + b2.lower = &(((GBT_NUMKEY *) b)[0]); + b2.upper = &(((GBT_NUMKEY *) b)[tinfo->size]); + + if ( + (*tinfo->f_eq) (b1.lower, b2.lower) && + (*tinfo->f_eq) (b1.upper, b2.upper) + ) + return TRUE; + return FALSE; - if ( - (*tinfo->f_eq)( b1.lower, b2.lower) && - (*tinfo->f_eq)( b1.upper, b2.upper) - ) - return TRUE; - return FALSE; - } extern void -gbt_num_bin_union(Datum * u , GBT_NUMKEY * e , const gbtree_ninfo * tinfo ) +gbt_num_bin_union(Datum *u, GBT_NUMKEY * e, const gbtree_ninfo * tinfo) { - GBT_NUMKEY_R rd; - - rd.lower = &e[0]; - rd.upper = &e[tinfo->size]; - - if (!DatumGetPointer(*u)) - { - *u = PointerGetDatum(palloc(2 * tinfo->size)); - memcpy( (void* ) &( ( (GBT_NUMKEY *) DatumGetPointer(*u) )[0] ) , (void*)rd.lower , tinfo->size ); - memcpy( (void* ) &( ( (GBT_NUMKEY *) DatumGetPointer(*u) )[tinfo->size]) , (void*)rd.upper , tinfo->size ); - } - else - { - GBT_NUMKEY_R ur ; - ur.lower = &( ( (GBT_NUMKEY *) DatumGetPointer(*u) )[0] ) ; - ur.upper = &( ( (GBT_NUMKEY *) DatumGetPointer(*u) )[tinfo->size]) ; - if ( (*tinfo->f_gt)((void*)ur.lower, (void*)rd.lower) ) - memcpy( (void*) ur.lower, (void*) rd.lower, tinfo->size ); - if ( (*tinfo->f_lt)((void*)ur.upper, (void*)rd.upper) ) - memcpy( (void*) ur.upper, (void*) rd.upper, tinfo->size ); - } + GBT_NUMKEY_R rd; + + rd.lower = &e[0]; + rd.upper = &e[tinfo->size]; + + if (!DatumGetPointer(*u)) + { + *u = PointerGetDatum(palloc(2 * tinfo->size)); + memcpy((void *) &(((GBT_NUMKEY *) DatumGetPointer(*u))[0]), (void *) rd.lower, tinfo->size); + 
memcpy((void *) &(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]), (void *) rd.upper, tinfo->size); + } + else + { + GBT_NUMKEY_R ur; + + ur.lower = &(((GBT_NUMKEY *) DatumGetPointer(*u))[0]); + ur.upper = &(((GBT_NUMKEY *) DatumGetPointer(*u))[tinfo->size]); + if ((*tinfo->f_gt) ((void *) ur.lower, (void *) rd.lower)) + memcpy((void *) ur.lower, (void *) rd.lower, tinfo->size); + if ((*tinfo->f_lt) ((void *) ur.upper, (void *) rd.upper)) + memcpy((void *) ur.upper, (void *) rd.upper, tinfo->size); + } } @@ -154,98 +160,98 @@ gbt_num_bin_union(Datum * u , GBT_NUMKEY * e , const gbtree_ninfo * tinfo ) ** The GiST consistent method */ -extern bool +extern bool gbt_num_consistent( - const GBT_NUMKEY_R * key, - const void * query, - const StrategyNumber * strategy, - bool is_leaf, - const gbtree_ninfo * tinfo + const GBT_NUMKEY_R * key, + const void *query, + const StrategyNumber *strategy, + bool is_leaf, + const gbtree_ninfo * tinfo ) { - bool retval = FALSE; - - switch (*strategy) - { - case BTLessEqualStrategyNumber: - retval = (*tinfo->f_ge)(query, key->lower); - break; - case BTLessStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_gt)(query, key->lower); - else - retval = (*tinfo->f_ge)(query, key->lower); - break; - case BTEqualStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_eq)(query, key->lower); - else - retval = (*tinfo->f_le)(key->lower, query) && (*tinfo->f_le)(query, key->upper ); - break; - case BTGreaterStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_lt)(query, key->upper); - else - retval = (*tinfo->f_le)(query, key->upper); - break; - case BTGreaterEqualStrategyNumber: - retval = (*tinfo->f_le)(query, key->upper); - break; - default: - retval = FALSE; - } - - return (retval); + bool retval = FALSE; + + switch (*strategy) + { + case BTLessEqualStrategyNumber: + retval = (*tinfo->f_ge) (query, key->lower); + break; + case BTLessStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_gt) (query, key->lower); + else + retval = (*tinfo->f_ge) (query, key->lower); + break; + case BTEqualStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_eq) (query, key->lower); + else + retval = (*tinfo->f_le) (key->lower, query) && (*tinfo->f_le) (query, key->upper); + break; + case BTGreaterStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_lt) (query, key->upper); + else + retval = (*tinfo->f_le) (query, key->upper); + break; + case BTGreaterEqualStrategyNumber: + retval = (*tinfo->f_le) (query, key->upper); + break; + default: + retval = FALSE; + } + + return (retval); } GIST_SPLITVEC * -gbt_num_picksplit( const GistEntryVector *entryvec, GIST_SPLITVEC *v, - const gbtree_ninfo * tinfo ) +gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v, + const gbtree_ninfo * tinfo) { - OffsetNumber i, - maxoff = entryvec->n - 1; - Nsrt *arr; - int nbytes; - - arr = (Nsrt *) palloc((maxoff+1) * sizeof(Nsrt)); - nbytes = (maxoff + 2) * sizeof(OffsetNumber); - v->spl_left = (OffsetNumber *) palloc(nbytes); - v->spl_right = (OffsetNumber *) palloc(nbytes); - v->spl_ldatum = PointerGetDatum(0); - v->spl_rdatum = PointerGetDatum(0); - v->spl_nleft = 0; - v->spl_nright = 0; - - /* Sort entries */ - - for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) - { - arr[i].t = (GBT_NUMKEY *) DatumGetPointer((entryvec->vector[i].key)); - arr[i].i = i; - } - qsort ( (void*) &arr[FirstOffsetNumber], maxoff-FirstOffsetNumber+1,sizeof(Nsrt), tinfo->f_cmp ); - - /* We do simply create two parts */ - - for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) - { - if 
(i <= (maxoff - FirstOffsetNumber + 1) / 2) - { - gbt_num_bin_union(&v->spl_ldatum, arr[i].t, tinfo); - v->spl_left[v->spl_nleft] = arr[i].i; - v->spl_nleft++; - } - else - { - gbt_num_bin_union(&v->spl_rdatum, arr[i].t, tinfo); - v->spl_right[v->spl_nright] = arr[i].i; - v->spl_nright++; - } - } + OffsetNumber i, + maxoff = entryvec->n - 1; + Nsrt *arr; + int nbytes; + + arr = (Nsrt *) palloc((maxoff + 1) * sizeof(Nsrt)); + nbytes = (maxoff + 2) * sizeof(OffsetNumber); + v->spl_left = (OffsetNumber *) palloc(nbytes); + v->spl_right = (OffsetNumber *) palloc(nbytes); + v->spl_ldatum = PointerGetDatum(0); + v->spl_rdatum = PointerGetDatum(0); + v->spl_nleft = 0; + v->spl_nright = 0; + + /* Sort entries */ + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + arr[i].t = (GBT_NUMKEY *) DatumGetPointer((entryvec->vector[i].key)); + arr[i].i = i; + } + qsort((void *) &arr[FirstOffsetNumber], maxoff - FirstOffsetNumber + 1, sizeof(Nsrt), tinfo->f_cmp); + + /* We do simply create two parts */ + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + if (i <= (maxoff - FirstOffsetNumber + 1) / 2) + { + gbt_num_bin_union(&v->spl_ldatum, arr[i].t, tinfo); + v->spl_left[v->spl_nleft] = arr[i].i; + v->spl_nleft++; + } + else + { + gbt_num_bin_union(&v->spl_rdatum, arr[i].t, tinfo); + v->spl_right[v->spl_nright] = arr[i].i; + v->spl_nright++; + } + } pfree(arr); - return v; + return v; } diff --git a/contrib/btree_gist/btree_utils_num.h b/contrib/btree_gist/btree_utils_num.h index 122fba379a1..714144f676e 100644 --- a/contrib/btree_gist/btree_utils_num.h +++ b/contrib/btree_gist/btree_utils_num.h @@ -4,16 +4,17 @@ typedef char GBT_NUMKEY; /* Better readable key */ typedef struct { - const GBT_NUMKEY * lower, * upper; -} GBT_NUMKEY_R; + const GBT_NUMKEY *lower, + *upper; +} GBT_NUMKEY_R; /* for sorting */ typedef struct { - int i; - GBT_NUMKEY * t; -} Nsrt; + int i; + GBT_NUMKEY *t; +} Nsrt; /* type description */ @@ -21,53 +22,53 @@ typedef struct typedef struct { - /* Attribs */ + /* Attribs */ - enum gbtree_type t ; /* data type */ - int32 size ; /* size of type , 0 means variable */ + enum gbtree_type t; /* data type */ + int32 size; /* size of type , 0 means variable */ - /* Methods */ + /* Methods */ - bool (*f_gt) ( const void * , const void * ); /* greater then */ - bool (*f_ge) ( const void * , const void * ); /* greater equal */ - bool (*f_eq) ( const void * , const void * ); /* equal */ - bool (*f_le) ( const void * , const void * ); /* less equal */ - bool (*f_lt) ( const void * , const void * ); /* less then */ - int (*f_cmp) ( const void * , const void * ); /* key compare function */ -} gbtree_ninfo; + bool (*f_gt) (const void *, const void *); /* greater then */ + bool (*f_ge) (const void *, const void *); /* greater equal */ + bool (*f_eq) (const void *, const void *); /* equal */ + bool (*f_le) (const void *, const void *); /* less equal */ + bool (*f_lt) (const void *, const void *); /* less then */ + int (*f_cmp) (const void *, const void *); /* key compare function */ +} gbtree_ninfo; /* - * Numeric btree functions + * Numeric btree functions */ #define penalty_range_enlarge(olower,oupper,nlower,nupper) do { \ res = 0; \ if ( (nupper) > (oupper) ) \ - res += ( (nupper) - (oupper) ); \ - if ( (olower) > (nlower) ) \ - res += ( (olower) - (nlower) ); \ + res += ( (nupper) - (oupper) ); \ + if ( (olower) > (nlower) ) \ + res += ( (olower) - (nlower) ); \ } while (0); -extern bool gbt_num_consistent( const GBT_NUMKEY_R * key , const void * query, - 
const StrategyNumber * strategy , bool is_leaf, - const gbtree_ninfo * tinfo ); +extern bool gbt_num_consistent(const GBT_NUMKEY_R * key, const void *query, + const StrategyNumber *strategy, bool is_leaf, + const gbtree_ninfo * tinfo); -extern GIST_SPLITVEC *gbt_num_picksplit ( const GistEntryVector *entryvec, GIST_SPLITVEC *v, - const gbtree_ninfo * tinfo ); +extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v, + const gbtree_ninfo * tinfo); -extern GISTENTRY *gbt_num_compress( GISTENTRY *retval , GISTENTRY *entry , - const gbtree_ninfo * tinfo ); +extern GISTENTRY *gbt_num_compress(GISTENTRY *retval, GISTENTRY *entry, + const gbtree_ninfo * tinfo); -extern void *gbt_num_union ( GBT_NUMKEY * out, const GistEntryVector * entryvec, - const gbtree_ninfo * tinfo ); +extern void *gbt_num_union(GBT_NUMKEY * out, const GistEntryVector *entryvec, + const gbtree_ninfo * tinfo); -extern bool gbt_num_same ( const GBT_NUMKEY * a, const GBT_NUMKEY * b, - const gbtree_ninfo * tinfo ); +extern bool gbt_num_same(const GBT_NUMKEY * a, const GBT_NUMKEY * b, + const gbtree_ninfo * tinfo); -extern void gbt_num_bin_union(Datum * u , GBT_NUMKEY * e , - const gbtree_ninfo * tinfo ); +extern void gbt_num_bin_union(Datum *u, GBT_NUMKEY * e, + const gbtree_ninfo * tinfo); diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c index f8ced122267..01e29038163 100644 --- a/contrib/btree_gist/btree_utils_var.c +++ b/contrib/btree_gist/btree_utils_var.c @@ -4,52 +4,59 @@ /* Returns a better readable representaion of variable key ( sets pointer ) */ -extern GBT_VARKEY_R gbt_var_key_readable ( const GBT_VARKEY * k ){ - - GBT_VARKEY_R r ; - r.lower = ( bytea * ) &(((char*)k)[VARHDRSZ] ) ; - if ( VARSIZE(k) > ( VARHDRSZ+(VARSIZE(r.lower)) ) ) - r.upper = ( bytea * ) &(((char*)k)[VARHDRSZ+INTALIGN(VARSIZE(r.lower))] ) ; - else - r.upper = r.lower; - return r; +extern GBT_VARKEY_R +gbt_var_key_readable(const GBT_VARKEY * k) +{ + + GBT_VARKEY_R r; + + r.lower = (bytea *) &(((char *) k)[VARHDRSZ]); + if (VARSIZE(k) > (VARHDRSZ + (VARSIZE(r.lower)))) + r.upper = (bytea *) &(((char *) k)[VARHDRSZ + INTALIGN(VARSIZE(r.lower))]); + else + r.upper = r.lower; + return r; } -extern GBT_VARKEY * gbt_var_key_copy ( const GBT_VARKEY_R * u , bool force_node ){ +extern GBT_VARKEY * +gbt_var_key_copy(const GBT_VARKEY_R * u, bool force_node) +{ - GBT_VARKEY * r = NULL; + GBT_VARKEY *r = NULL; - if ( u->lower == u->upper && !force_node ){ /* leaf key mode */ + if (u->lower == u->upper && !force_node) + { /* leaf key mode */ - r = (GBT_VARKEY *) palloc(VARSIZE(u->lower) + VARHDRSZ ); - memcpy ( (void*) VARDATA(r), (void*) u->lower , VARSIZE(u->lower) ); - r->vl_len = VARSIZE(u->lower) + VARHDRSZ ; + r = (GBT_VARKEY *) palloc(VARSIZE(u->lower) + VARHDRSZ); + memcpy((void *) VARDATA(r), (void *) u->lower, VARSIZE(u->lower)); + r->vl_len = VARSIZE(u->lower) + VARHDRSZ; - } else { /* node key mode */ + } + else + { /* node key mode */ - r = (GBT_VARKEY *) palloc(INTALIGN(VARSIZE(u->lower)) + VARSIZE(u->upper) + VARHDRSZ ); - memcpy ( (void*) VARDATA(r) , (void*) u->lower , VARSIZE(u->lower) ); - memcpy ( (void*)&(((char *)r)[VARHDRSZ+INTALIGN(VARSIZE(u->lower))]), (void*) u->upper , VARSIZE(u->upper) ); - r->vl_len = INTALIGN(VARSIZE(u->lower)) + VARSIZE(u->upper) + VARHDRSZ ; + r = (GBT_VARKEY *) palloc(INTALIGN(VARSIZE(u->lower)) + VARSIZE(u->upper) + VARHDRSZ); + memcpy((void *) VARDATA(r), (void *) u->lower, VARSIZE(u->lower)); + memcpy((void *) &(((char *) r)[VARHDRSZ + 
INTALIGN(VARSIZE(u->lower))]), (void *) u->upper, VARSIZE(u->upper)); + r->vl_len = INTALIGN(VARSIZE(u->lower)) + VARSIZE(u->upper) + VARHDRSZ; - } - return r; + } + return r; } -static GBT_VARKEY * gbt_var_leaf2node ( GBT_VARKEY * leaf, const gbtree_vinfo * tinfo ) +static GBT_VARKEY * +gbt_var_leaf2node(GBT_VARKEY * leaf, const gbtree_vinfo * tinfo) { - GBT_VARKEY *out = leaf ; + GBT_VARKEY *out = leaf; - if ( tinfo->f_l2n ) - { - out = (*tinfo->f_l2n) (leaf); - } + if (tinfo->f_l2n) + out = (*tinfo->f_l2n) (leaf); - return out; + return out; } @@ -57,28 +64,27 @@ static GBT_VARKEY * gbt_var_leaf2node ( GBT_VARKEY * leaf, const gbtree_vinfo * /* * returns the common prefix length of a node key */ -static int32 gbt_var_node_cp_len ( const GBT_VARKEY * node , const gbtree_vinfo * tinfo ) +static int32 +gbt_var_node_cp_len(const GBT_VARKEY * node, const gbtree_vinfo * tinfo) { - int32 i ; - int32 s = (tinfo->str)?(1):(0); - GBT_VARKEY_R r = gbt_var_key_readable ( node ); - int32 t1len = VARSIZE(r.lower) - VARHDRSZ - s; - int32 t2len = VARSIZE(r.upper) - VARHDRSZ - s; - int32 ml = Min(t1len,t2len) ; - - char * p1 = VARDATA(r.lower) , - * p2 = VARDATA(r.upper) ; - - for ( i=0 ; i<ml; i++ ) - { - if ( *p1 != *p2 ) - { - return i; - } - p1++; - p2++; - } - return ( ml ); + int32 i; + int32 s = (tinfo->str) ? (1) : (0); + GBT_VARKEY_R r = gbt_var_key_readable(node); + int32 t1len = VARSIZE(r.lower) - VARHDRSZ - s; + int32 t2len = VARSIZE(r.upper) - VARHDRSZ - s; + int32 ml = Min(t1len, t2len); + + char *p1 = VARDATA(r.lower), + *p2 = VARDATA(r.upper); + + for (i = 0; i < ml; i++) + { + if (*p1 != *p2) + return i; + p1++; + p2++; + } + return (ml); } @@ -87,34 +93,38 @@ static int32 gbt_var_node_cp_len ( const GBT_VARKEY * node , const gbtree_vinfo * returns true, if query matches prefix using common prefix */ -static bool gbt_bytea_pf_match ( const bytea * pf , const bytea * query , const gbtree_vinfo * tinfo ) +static bool +gbt_bytea_pf_match(const bytea *pf, const bytea *query, const gbtree_vinfo * tinfo) { - int k ; - int32 s = (tinfo->str)?(1):(0); - bool out = FALSE ; - int32 qlen = VARSIZE(query) - VARHDRSZ - s ; - int32 nlen = VARSIZE(pf) - VARHDRSZ - s ; - if ( nlen <= qlen ) - { - char *q = VARDATA(query) ; - char *n = VARDATA(pf) ; - out = TRUE; - for ( k=0 ; k<nlen; k++ ) - { - if ( *n != *q ){ - out = FALSE; - break; - } - if ( k < (nlen-1) ) - { - q++; - n++; - } - } - } - - return out; + int k; + int32 s = (tinfo->str) ? 
(1) : (0); + bool out = FALSE; + int32 qlen = VARSIZE(query) - VARHDRSZ - s; + int32 nlen = VARSIZE(pf) - VARHDRSZ - s; + + if (nlen <= qlen) + { + char *q = VARDATA(query); + char *n = VARDATA(pf); + + out = TRUE; + for (k = 0; k < nlen; k++) + { + if (*n != *q) + { + out = FALSE; + break; + } + if (k < (nlen - 1)) + { + q++; + n++; + } + } + } + + return out; } @@ -124,13 +134,14 @@ static bool gbt_bytea_pf_match ( const bytea * pf , const bytea * query , const * returns true, if query matches node using common prefix */ -static bool gbt_var_node_pf_match ( const GBT_VARKEY_R * node , const bytea * query , const gbtree_vinfo * tinfo ) +static bool +gbt_var_node_pf_match(const GBT_VARKEY_R * node, const bytea *query, const gbtree_vinfo * tinfo) { - return ( - gbt_bytea_pf_match ( node->lower, query , tinfo ) || - gbt_bytea_pf_match ( node->upper, query , tinfo ) - ); + return ( + gbt_bytea_pf_match(node->lower, query, tinfo) || + gbt_bytea_pf_match(node->upper, query, tinfo) + ); } @@ -138,280 +149,299 @@ static bool gbt_var_node_pf_match ( const GBT_VARKEY_R * node , const bytea * qu /* * truncates / compresses the node key */ -static GBT_VARKEY * gbt_var_node_truncate ( const GBT_VARKEY * node , int32 length , const gbtree_vinfo * tinfo ) +static GBT_VARKEY * +gbt_var_node_truncate(const GBT_VARKEY * node, int32 length, const gbtree_vinfo * tinfo) { - int32 s = (tinfo->str)?(1):(0); - GBT_VARKEY * out = NULL; - GBT_VARKEY_R r = gbt_var_key_readable ( node ); - int32 len1 = VARSIZE(r.lower) - VARHDRSZ; - int32 len2 = VARSIZE(r.upper) - VARHDRSZ; - int32 si = 0; - - if (tinfo->str) - length++; /* because of tailing '\0' */ - - len1 = Min( len1, length ) ; - len2 = Min( len2, length ) ; - si = 2*VARHDRSZ + INTALIGN(VARHDRSZ+len1) + len2; - out = (GBT_VARKEY *) palloc ( si ); - out->vl_len = si; - memcpy ( (void*) &(((char*)out)[VARHDRSZ]) , (void*)r.lower, len1+VARHDRSZ-s ); - memcpy ( (void*) &(((char*)out)[VARHDRSZ+INTALIGN(VARHDRSZ+len1)]) , (void*)r.upper, len2+VARHDRSZ-s ); - - if (tinfo->str) - { - ((char*)out)[VARHDRSZ+INTALIGN(VARHDRSZ+len1)-1] = '\0'; - ((char*)out)[2*VARHDRSZ+INTALIGN(VARHDRSZ+len1)+len2-1] = '\0'; - } - *((int32*)&(((char*)out)[VARHDRSZ])) = len1 + VARHDRSZ; - *((int32*)&(((char*)out)[VARHDRSZ+INTALIGN(VARHDRSZ+len1)])) = len2 + VARHDRSZ; - - return out; + int32 s = (tinfo->str) ? 
(1) : (0); + GBT_VARKEY *out = NULL; + GBT_VARKEY_R r = gbt_var_key_readable(node); + int32 len1 = VARSIZE(r.lower) - VARHDRSZ; + int32 len2 = VARSIZE(r.upper) - VARHDRSZ; + int32 si = 0; + + if (tinfo->str) + length++; /* because of tailing '\0' */ + + len1 = Min(len1, length); + len2 = Min(len2, length); + si = 2 * VARHDRSZ + INTALIGN(VARHDRSZ + len1) + len2; + out = (GBT_VARKEY *) palloc(si); + out->vl_len = si; + memcpy((void *) &(((char *) out)[VARHDRSZ]), (void *) r.lower, len1 + VARHDRSZ - s); + memcpy((void *) &(((char *) out)[VARHDRSZ + INTALIGN(VARHDRSZ + len1)]), (void *) r.upper, len2 + VARHDRSZ - s); + + if (tinfo->str) + { + ((char *) out)[VARHDRSZ + INTALIGN(VARHDRSZ + len1) - 1] = '\0'; + ((char *) out)[2 * VARHDRSZ + INTALIGN(VARHDRSZ + len1) + len2 - 1] = '\0'; + } + *((int32 *) &(((char *) out)[VARHDRSZ])) = len1 + VARHDRSZ; + *((int32 *) &(((char *) out)[VARHDRSZ + INTALIGN(VARHDRSZ + len1)])) = len2 + VARHDRSZ; + + return out; } extern void -gbt_var_bin_union ( Datum * u , GBT_VARKEY * e , const gbtree_vinfo * tinfo ) +gbt_var_bin_union(Datum *u, GBT_VARKEY * e, const gbtree_vinfo * tinfo) { - GBT_VARKEY * nk = NULL; - GBT_VARKEY * tmp = NULL; - GBT_VARKEY_R nr ; - GBT_VARKEY_R eo = gbt_var_key_readable ( e ); - - - if ( eo.lower == eo.upper ) /* leaf */ - { - tmp = gbt_var_leaf2node ( e , tinfo ); - if ( tmp != e ) - eo = gbt_var_key_readable ( tmp ); - } - - if ( DatumGetPointer(*u)) - { - - GBT_VARKEY_R ro = gbt_var_key_readable ( ( GBT_VARKEY *) DatumGetPointer (*u) ); - - if ( (*tinfo->f_cmp) ( (bytea*)ro.lower, (bytea*)eo.lower ) > 0 ) { - nr.lower = eo.lower; - nr.upper = ro.upper; - nk = gbt_var_key_copy ( &nr, TRUE ); - } - if ( (*tinfo->f_cmp) ( (bytea*)ro.upper, (bytea*)eo.upper ) < 0 ) { - nr.upper = eo.upper; - nr.lower = ro.lower; - nk = gbt_var_key_copy ( &nr, TRUE ); - } - if ( nk ) - { - pfree( DatumGetPointer (*u) ); - *u = PointerGetDatum(nk); - } - - - - } - else - { - nr.lower = eo.lower; - nr.upper = eo.upper; - *u = PointerGetDatum( gbt_var_key_copy ( &nr, TRUE ) ); - } - - if ( tmp && tmp != e ) - pfree ( tmp ); + GBT_VARKEY *nk = NULL; + GBT_VARKEY *tmp = NULL; + GBT_VARKEY_R nr; + GBT_VARKEY_R eo = gbt_var_key_readable(e); + + + if (eo.lower == eo.upper) /* leaf */ + { + tmp = gbt_var_leaf2node(e, tinfo); + if (tmp != e) + eo = gbt_var_key_readable(tmp); + } + + if (DatumGetPointer(*u)) + { + + GBT_VARKEY_R ro = gbt_var_key_readable((GBT_VARKEY *) DatumGetPointer(*u)); + + if ((*tinfo->f_cmp) ((bytea *) ro.lower, (bytea *) eo.lower) > 0) + { + nr.lower = eo.lower; + nr.upper = ro.upper; + nk = gbt_var_key_copy(&nr, TRUE); + } + if ((*tinfo->f_cmp) ((bytea *) ro.upper, (bytea *) eo.upper) < 0) + { + nr.upper = eo.upper; + nr.lower = ro.lower; + nk = gbt_var_key_copy(&nr, TRUE); + } + if (nk) + { + pfree(DatumGetPointer(*u)); + *u = PointerGetDatum(nk); + } + + + + } + else + { + nr.lower = eo.lower; + nr.upper = eo.upper; + *u = PointerGetDatum(gbt_var_key_copy(&nr, TRUE)); + } + + if (tmp && tmp != e) + pfree(tmp); } -extern GISTENTRY * -gbt_var_compress ( GISTENTRY *entry , const gbtree_vinfo * tinfo ) +extern GISTENTRY * +gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo * tinfo) { - GISTENTRY * retval; + GISTENTRY *retval; - if (entry->leafkey) - { - GBT_VARKEY * r = NULL; - bytea * tstd = ( bytea * ) DatumGetPointer ( entry->key ); /* toasted */ - bytea * leaf = ( bytea * ) DatumGetPointer ( PG_DETOAST_DATUM ( entry->key ) ); /* untoasted */ - GBT_VARKEY_R u ; + if (entry->leafkey) + { + GBT_VARKEY *r = NULL; + bytea *tstd = (bytea *) 
DatumGetPointer(entry->key); /* toasted */ + bytea *leaf = (bytea *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); /* untoasted */ + GBT_VARKEY_R u; - u.lower = u.upper = leaf; - r = gbt_var_key_copy ( &u , FALSE ); + u.lower = u.upper = leaf; + r = gbt_var_key_copy(&u, FALSE); - if ( tstd != leaf ){ - pfree(leaf); - } - retval = palloc(sizeof(GISTENTRY)); - gistentryinit(*retval, PointerGetDatum(r), - entry->rel, entry->page, - entry->offset, VARSIZE(r), TRUE); - } else { - retval = entry; + if (tstd != leaf) + pfree(leaf); + retval = palloc(sizeof(GISTENTRY)); + gistentryinit(*retval, PointerGetDatum(r), + entry->rel, entry->page, + entry->offset, VARSIZE(r), TRUE); + } + else + { + retval = entry; - } + } - return (retval); + return (retval); } extern GBT_VARKEY * -gbt_var_union ( const GistEntryVector * entryvec , int32 * size , const gbtree_vinfo * tinfo ) +gbt_var_union(const GistEntryVector *entryvec, int32 *size, const gbtree_vinfo * tinfo) { - int i = 0, - numranges = entryvec->n; - GBT_VARKEY *cur, - *tst=NULL; - Datum out; - GBT_VARKEY_R rk; + int i = 0, + numranges = entryvec->n; + GBT_VARKEY *cur, + *tst = NULL; + Datum out; + GBT_VARKEY_R rk; - *size = sizeof(GBT_VARKEY); + *size = sizeof(GBT_VARKEY); - tst = (GBT_VARKEY *) DatumGetPointer((entryvec->vector[0].key)); - cur = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM((entryvec->vector[0].key))); - rk = gbt_var_key_readable ( cur ); - out = PointerGetDatum ( gbt_var_key_copy( &rk, TRUE ) ); - if ( tst != cur ) pfree ( cur ); + tst = (GBT_VARKEY *) DatumGetPointer((entryvec->vector[0].key)); + cur = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM((entryvec->vector[0].key))); + rk = gbt_var_key_readable(cur); + out = PointerGetDatum(gbt_var_key_copy(&rk, TRUE)); + if (tst != cur) + pfree(cur); - for (i = 1; i < numranges; i++) - { - tst = (GBT_VARKEY *) DatumGetPointer((entryvec->vector[i].key)); - cur = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM((entryvec->vector[i].key))); - gbt_var_bin_union ( &out , cur , tinfo ); - if ( tst != cur ) pfree ( cur ); - } + for (i = 1; i < numranges; i++) + { + tst = (GBT_VARKEY *) DatumGetPointer((entryvec->vector[i].key)); + cur = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM((entryvec->vector[i].key))); + gbt_var_bin_union(&out, cur, tinfo); + if (tst != cur) + pfree(cur); + } - /* Truncate (=compress) key */ + /* Truncate (=compress) key */ - if ( tinfo->trnc ) - { - int32 plen ; - GBT_VARKEY *trc = NULL; + if (tinfo->trnc) + { + int32 plen; + GBT_VARKEY *trc = NULL; - plen = gbt_var_node_cp_len ( (GBT_VARKEY *) DatumGetPointer(out) , tinfo ); - trc = gbt_var_node_truncate ( (GBT_VARKEY *) DatumGetPointer(out) , plen+1 , tinfo ) ; + plen = gbt_var_node_cp_len((GBT_VARKEY *) DatumGetPointer(out), tinfo); + trc = gbt_var_node_truncate((GBT_VARKEY *) DatumGetPointer(out), plen + 1, tinfo); - pfree ( DatumGetPointer(out) ); - out = PointerGetDatum ( trc ); - } + pfree(DatumGetPointer(out)); + out = PointerGetDatum(trc); + } - return ( (GBT_VARKEY *) DatumGetPointer ( out ) ); + return ((GBT_VARKEY *) DatumGetPointer(out)); } -extern bool gbt_var_same ( bool * result, const Datum d1 , const Datum d2 , const gbtree_vinfo * tinfo ){ - - GBT_VARKEY *tst1 = (GBT_VARKEY *) DatumGetPointer(d1); - GBT_VARKEY *t1 = (GBT_VARKEY *) DatumGetPointer( PG_DETOAST_DATUM(d1) ); - GBT_VARKEY *tst2 = (GBT_VARKEY *) DatumGetPointer(d2); - GBT_VARKEY *t2 = (GBT_VARKEY *) DatumGetPointer( PG_DETOAST_DATUM(d2) ); - GBT_VARKEY_R r1, r2; - r1 = gbt_var_key_readable ( t1 ); - r2 = gbt_var_key_readable ( t2 
); - - if (t1 && t2){ - *result = ( ( (*tinfo->f_cmp ) ( (bytea*)r1.lower, (bytea*)r2.lower) == 0 - && (*tinfo->f_cmp) ( (bytea*)r1.upper, (bytea*)r2.upper) == 0 ) ? TRUE : FALSE ); - } else - *result = (t1 == NULL && t2 == NULL) ? TRUE : FALSE; - - if ( tst1 != t1 ) pfree (t1); - if ( tst2 != t2 ) pfree (t2); +extern bool +gbt_var_same(bool *result, const Datum d1, const Datum d2, const gbtree_vinfo * tinfo) +{ - PG_RETURN_POINTER(result); + GBT_VARKEY *tst1 = (GBT_VARKEY *) DatumGetPointer(d1); + GBT_VARKEY *t1 = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(d1)); + GBT_VARKEY *tst2 = (GBT_VARKEY *) DatumGetPointer(d2); + GBT_VARKEY *t2 = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(d2)); + GBT_VARKEY_R r1, + r2; + + r1 = gbt_var_key_readable(t1); + r2 = gbt_var_key_readable(t2); + + if (t1 && t2) + { + *result = (((*tinfo->f_cmp) ((bytea *) r1.lower, (bytea *) r2.lower) == 0 + && (*tinfo->f_cmp) ((bytea *) r1.upper, (bytea *) r2.upper) == 0) ? TRUE : FALSE); + } + else + *result = (t1 == NULL && t2 == NULL) ? TRUE : FALSE; + + if (tst1 != t1) + pfree(t1); + if (tst2 != t2) + pfree(t2); + + PG_RETURN_POINTER(result); } extern float * -gbt_var_penalty ( float * res , const GISTENTRY * o , const GISTENTRY * n, const gbtree_vinfo * tinfo ) +gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n, const gbtree_vinfo * tinfo) { - GBT_VARKEY *orgt = (GBT_VARKEY *) DatumGetPointer(o->key); - GBT_VARKEY *orge = (GBT_VARKEY *) DatumGetPointer( PG_DETOAST_DATUM(o->key) ); - GBT_VARKEY *newt = (GBT_VARKEY *) DatumGetPointer(n->key); - GBT_VARKEY *newe = (GBT_VARKEY *) DatumGetPointer( PG_DETOAST_DATUM(n->key) ); - GBT_VARKEY_R ok , nk; - GBT_VARKEY *tmp = NULL; - int32 s = (tinfo->str)?(1):(0); - - *res = 0.0; - - nk = gbt_var_key_readable ( newe ); - if ( nk.lower == nk.upper ) /* leaf */ - { - tmp = gbt_var_leaf2node ( newe , tinfo ); - if ( tmp != newe ) - nk = gbt_var_key_readable ( tmp ); - } - ok = gbt_var_key_readable ( orge ); - - if ( ( VARSIZE(ok.lower) - VARHDRSZ ) == s && ( VARSIZE(ok.upper) - VARHDRSZ ) == s ) - { - *res = 0.0; - } else - if ( ! 
( - ( - ( (*tinfo->f_cmp) (nk.lower, ok.lower)>=0 || gbt_bytea_pf_match(ok.lower, nk.lower, tinfo ) ) && - ( (*tinfo->f_cmp) (nk.upper, ok.upper)<=0 || gbt_bytea_pf_match(ok.upper, nk.upper, tinfo ) ) - ) - ) ) - { - Datum d = PointerGetDatum (0); - double dres = 0.0; - int32 ol, ul; - - gbt_var_bin_union ( &d , orge , tinfo ); - ol = gbt_var_node_cp_len ( ( GBT_VARKEY *) DatumGetPointer(d), tinfo ); - gbt_var_bin_union ( &d , newe , tinfo ); - ul = gbt_var_node_cp_len ( ( GBT_VARKEY *) DatumGetPointer(d), tinfo ); - - if ( ul < ol ) { - dres = ( ol-ul ) ; /* lost of common prefix len */ - } else { - GBT_VARKEY_R uk = gbt_var_key_readable ( ( GBT_VARKEY *) DatumGetPointer(d) ); - if ( tinfo->str ) - { - dres = ( VARDATA(ok.lower)[ul]-VARDATA(uk.lower)[ul] ) + - ( VARDATA(uk.upper)[ul]-VARDATA(ok.upper)[ul] ); - } else { - char tmp[4]; - tmp[0] = ( ( VARSIZE(ok.lower) - VARHDRSZ ) == ul )?(CHAR_MIN):(VARDATA(ok.lower)[ul]); - tmp[1] = ( ( VARSIZE(uk.lower) - VARHDRSZ ) == ul )?(CHAR_MIN):(VARDATA(uk.lower)[ul]); - tmp[2] = ( ( VARSIZE(ok.upper) - VARHDRSZ ) == ul )?(CHAR_MIN):(VARDATA(ok.upper)[ul]); - tmp[3] = ( ( VARSIZE(uk.upper) - VARHDRSZ ) == ul )?(CHAR_MIN):(VARDATA(uk.upper)[ul]); - dres = ( tmp[0] - tmp[1] ) + - ( tmp[3] - tmp[2] ); - } - dres /= 256.0; - } - pfree ( DatumGetPointer(d) ); - - *res += FLT_MIN ; - *res += (float) ( dres / ( (double) ( ol +1 ) ) ); - *res *= ( FLT_MAX / ( o->rel->rd_att->natts + 1 ) ); - - } - - if ( tmp && tmp != newe ) - pfree (tmp); - - if ( newe != newt ){ - pfree ( newe ); - } - - if ( orge != orgt ){ - pfree ( orge ); - } - return res ; + GBT_VARKEY *orgt = (GBT_VARKEY *) DatumGetPointer(o->key); + GBT_VARKEY *orge = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(o->key)); + GBT_VARKEY *newt = (GBT_VARKEY *) DatumGetPointer(n->key); + GBT_VARKEY *newe = (GBT_VARKEY *) DatumGetPointer(PG_DETOAST_DATUM(n->key)); + GBT_VARKEY_R ok, + nk; + GBT_VARKEY *tmp = NULL; + int32 s = (tinfo->str) ? (1) : (0); + + *res = 0.0; + + nk = gbt_var_key_readable(newe); + if (nk.lower == nk.upper) /* leaf */ + { + tmp = gbt_var_leaf2node(newe, tinfo); + if (tmp != newe) + nk = gbt_var_key_readable(tmp); + } + ok = gbt_var_key_readable(orge); + + if ((VARSIZE(ok.lower) - VARHDRSZ) == s && (VARSIZE(ok.upper) - VARHDRSZ) == s) + *res = 0.0; + else if (!( + ( + ((*tinfo->f_cmp) (nk.lower, ok.lower) >= 0 || gbt_bytea_pf_match(ok.lower, nk.lower, tinfo)) && + ((*tinfo->f_cmp) (nk.upper, ok.upper) <= 0 || gbt_bytea_pf_match(ok.upper, nk.upper, tinfo)) + ) + )) + { + Datum d = PointerGetDatum(0); + double dres = 0.0; + int32 ol, + ul; + + gbt_var_bin_union(&d, orge, tinfo); + ol = gbt_var_node_cp_len((GBT_VARKEY *) DatumGetPointer(d), tinfo); + gbt_var_bin_union(&d, newe, tinfo); + ul = gbt_var_node_cp_len((GBT_VARKEY *) DatumGetPointer(d), tinfo); + + if (ul < ol) + { + dres = (ol - ul); /* lost of common prefix len */ + } + else + { + GBT_VARKEY_R uk = gbt_var_key_readable((GBT_VARKEY *) DatumGetPointer(d)); + + if (tinfo->str) + { + dres = (VARDATA(ok.lower)[ul] - VARDATA(uk.lower)[ul]) + + (VARDATA(uk.upper)[ul] - VARDATA(ok.upper)[ul]); + } + else + { + char tmp[4]; + + tmp[0] = ((VARSIZE(ok.lower) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(ok.lower)[ul]); + tmp[1] = ((VARSIZE(uk.lower) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(uk.lower)[ul]); + tmp[2] = ((VARSIZE(ok.upper) - VARHDRSZ) == ul) ? (CHAR_MIN) : (VARDATA(ok.upper)[ul]); + tmp[3] = ((VARSIZE(uk.upper) - VARHDRSZ) == ul) ? 
(CHAR_MIN) : (VARDATA(uk.upper)[ul]); + dres = (tmp[0] - tmp[1]) + + (tmp[3] - tmp[2]); + } + dres /= 256.0; + } + pfree(DatumGetPointer(d)); + + *res += FLT_MIN; + *res += (float) (dres / ((double) (ol + 1))); + *res *= (FLT_MAX / (o->rel->rd_att->natts + 1)); + + } + + if (tmp && tmp != newe) + pfree(tmp); + + if (newe != newt) + pfree(newe); + + if (orge != orgt) + pfree(orge); + return res; } @@ -419,198 +449,194 @@ gbt_var_penalty ( float * res , const GISTENTRY * o , const GISTENTRY * n, const /* * Fortunately, this sort comparsion routine needn't be reentrant... */ -static const gbtree_vinfo * gbt_vsrt_cmp_tinfo; +static const gbtree_vinfo *gbt_vsrt_cmp_tinfo; static int gbt_vsrt_cmp(const void *a, const void *b) { - GBT_VARKEY_R ar = gbt_var_key_readable ( ((const Vsrt *) a)->t ); - GBT_VARKEY_R br = gbt_var_key_readable ( ((const Vsrt *) b)->t ); + GBT_VARKEY_R ar = gbt_var_key_readable(((const Vsrt *) a)->t); + GBT_VARKEY_R br = gbt_var_key_readable(((const Vsrt *) b)->t); - return (*gbt_vsrt_cmp_tinfo->f_cmp) ( ar.lower, br.lower ); + return (*gbt_vsrt_cmp_tinfo->f_cmp) (ar.lower, br.lower); } extern GIST_SPLITVEC * -gbt_var_picksplit( const GistEntryVector *entryvec, GIST_SPLITVEC *v, const gbtree_vinfo * tinfo ) +gbt_var_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v, const gbtree_vinfo * tinfo) { - OffsetNumber i, - maxoff = entryvec->n - 1; - Vsrt *arr; - int pfrcntr = 0 , - svcntr = 0 , - nbytes ; - char * tst , - * cur ; - char **pfr = NULL ; - GBT_VARKEY **sv = NULL; - - arr = (Vsrt *) palloc((maxoff+1) * sizeof(Vsrt)); - nbytes = (maxoff + 2) * sizeof(OffsetNumber); - v->spl_left = (OffsetNumber *) palloc(nbytes); - v->spl_right = (OffsetNumber *) palloc(nbytes); - v->spl_ldatum = PointerGetDatum(0); - v->spl_rdatum = PointerGetDatum(0); - v->spl_nleft = 0; - v->spl_nright = 0; - - pfr = palloc ( sizeof ( GBT_VARKEY* ) * (maxoff+1) ); - sv = palloc ( sizeof ( bytea * ) * (maxoff+1) ); - - /* Sort entries */ - - for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) - { - GBT_VARKEY_R ro; - tst = (char *) DatumGetPointer((entryvec->vector[i].key)); - cur = (char *) DatumGetPointer(PG_DETOAST_DATUM((entryvec->vector[i].key))); - if ( tst != cur ){ - pfr[pfrcntr] = cur ; - pfrcntr++; - } - ro = gbt_var_key_readable( ( GBT_VARKEY *) cur ); - if ( ro.lower == ro.upper ) /* leaf */ - { - sv[svcntr] = gbt_var_leaf2node ( ( GBT_VARKEY *) cur , tinfo ); - arr[i].t = sv[svcntr]; - if ( sv[svcntr] != ( GBT_VARKEY *) cur ) - svcntr++; - } else { - arr[i].t = ( GBT_VARKEY *) cur; - } - arr[i].i = i; - } - - /* sort */ + OffsetNumber i, + maxoff = entryvec->n - 1; + Vsrt *arr; + int pfrcntr = 0, + svcntr = 0, + nbytes; + char *tst, + *cur; + char **pfr = NULL; + GBT_VARKEY **sv = NULL; + + arr = (Vsrt *) palloc((maxoff + 1) * sizeof(Vsrt)); + nbytes = (maxoff + 2) * sizeof(OffsetNumber); + v->spl_left = (OffsetNumber *) palloc(nbytes); + v->spl_right = (OffsetNumber *) palloc(nbytes); + v->spl_ldatum = PointerGetDatum(0); + v->spl_rdatum = PointerGetDatum(0); + v->spl_nleft = 0; + v->spl_nright = 0; + + pfr = palloc(sizeof(GBT_VARKEY *) * (maxoff + 1)); + sv = palloc(sizeof(bytea *) * (maxoff + 1)); + + /* Sort entries */ + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + GBT_VARKEY_R ro; + + tst = (char *) DatumGetPointer((entryvec->vector[i].key)); + cur = (char *) DatumGetPointer(PG_DETOAST_DATUM((entryvec->vector[i].key))); + if (tst != cur) + { + pfr[pfrcntr] = cur; + pfrcntr++; + } + ro = gbt_var_key_readable((GBT_VARKEY *) cur); 
+ if (ro.lower == ro.upper) /* leaf */ + { + sv[svcntr] = gbt_var_leaf2node((GBT_VARKEY *) cur, tinfo); + arr[i].t = sv[svcntr]; + if (sv[svcntr] != (GBT_VARKEY *) cur) + svcntr++; + } + else + arr[i].t = (GBT_VARKEY *) cur; + arr[i].i = i; + } + + /* sort */ gbt_vsrt_cmp_tinfo = tinfo; - qsort((void*) &arr[FirstOffsetNumber], - maxoff-FirstOffsetNumber+1, + qsort((void *) &arr[FirstOffsetNumber], + maxoff - FirstOffsetNumber + 1, sizeof(Vsrt), gbt_vsrt_cmp); - /* We do simply create two parts */ - - for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) - { - if (i <= (maxoff - FirstOffsetNumber + 1) / 2) - { - gbt_var_bin_union(&v->spl_ldatum, arr[i].t, tinfo); - v->spl_left[v->spl_nleft] = arr[i].i; - v->spl_nleft++; - } - else - { - gbt_var_bin_union(&v->spl_rdatum, arr[i].t, tinfo); - v->spl_right[v->spl_nright] = arr[i].i; - v->spl_nright++; - } - } - - /* Free detoasted keys */ - for ( i=0 ; i<pfrcntr; i++ ){ - pfree( pfr[i] ); - } - - /* Free strxfrm'ed leafs */ - for ( i=0 ; i<svcntr; i++ ){ - pfree( sv[i] ); - } - - if ( pfr ) - { - pfree (pfr); - } - - if ( sv ) - { - pfree (sv); - } - - /* Truncate (=compress) key */ - - if ( tinfo->trnc ) - { - - int32 ll = gbt_var_node_cp_len ( (GBT_VARKEY *) DatumGetPointer(v->spl_ldatum) , tinfo ); - int32 lr = gbt_var_node_cp_len ( (GBT_VARKEY *) DatumGetPointer(v->spl_rdatum) , tinfo ); - GBT_VARKEY * dl ; - GBT_VARKEY * dr ; - - ll = Max (ll,lr); - ll++; - - dl = gbt_var_node_truncate ( (GBT_VARKEY *) DatumGetPointer(v->spl_ldatum) , ll, tinfo ) ; - dr = gbt_var_node_truncate ( (GBT_VARKEY *) DatumGetPointer(v->spl_rdatum) , ll, tinfo ) ; - pfree( DatumGetPointer(v->spl_ldatum) ); - pfree( DatumGetPointer(v->spl_rdatum) ); - v->spl_ldatum = PointerGetDatum ( dl ); - v->spl_rdatum = PointerGetDatum ( dr ); - - } + /* We do simply create two parts */ + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) + { + if (i <= (maxoff - FirstOffsetNumber + 1) / 2) + { + gbt_var_bin_union(&v->spl_ldatum, arr[i].t, tinfo); + v->spl_left[v->spl_nleft] = arr[i].i; + v->spl_nleft++; + } + else + { + gbt_var_bin_union(&v->spl_rdatum, arr[i].t, tinfo); + v->spl_right[v->spl_nright] = arr[i].i; + v->spl_nright++; + } + } + + /* Free detoasted keys */ + for (i = 0; i < pfrcntr; i++) + pfree(pfr[i]); + + /* Free strxfrm'ed leafs */ + for (i = 0; i < svcntr; i++) + pfree(sv[i]); + + if (pfr) + pfree(pfr); + + if (sv) + pfree(sv); + + /* Truncate (=compress) key */ + + if (tinfo->trnc) + { + + int32 ll = gbt_var_node_cp_len((GBT_VARKEY *) DatumGetPointer(v->spl_ldatum), tinfo); + int32 lr = gbt_var_node_cp_len((GBT_VARKEY *) DatumGetPointer(v->spl_rdatum), tinfo); + GBT_VARKEY *dl; + GBT_VARKEY *dr; + + ll = Max(ll, lr); + ll++; + + dl = gbt_var_node_truncate((GBT_VARKEY *) DatumGetPointer(v->spl_ldatum), ll, tinfo); + dr = gbt_var_node_truncate((GBT_VARKEY *) DatumGetPointer(v->spl_rdatum), ll, tinfo); + pfree(DatumGetPointer(v->spl_ldatum)); + pfree(DatumGetPointer(v->spl_rdatum)); + v->spl_ldatum = PointerGetDatum(dl); + v->spl_rdatum = PointerGetDatum(dr); + + } pfree(arr); - return v; + return v; } /* * The GiST consistent method */ -extern bool -gbt_var_consistent( - GBT_VARKEY_R * key, - const void * query, - const StrategyNumber * strategy, - bool is_leaf, - const gbtree_vinfo * tinfo +extern bool +gbt_var_consistent( + GBT_VARKEY_R * key, + const void *query, + const StrategyNumber *strategy, + bool is_leaf, + const gbtree_vinfo * tinfo ) { - bool retval = FALSE; - - switch (*strategy) - { - case BTLessEqualStrategyNumber: - 
if ( is_leaf ) - retval = (*tinfo->f_ge)(query, (void*) key->lower); - else - retval = (*tinfo->f_cmp)((bytea*) query, key->lower) >= 0 - || gbt_var_node_pf_match( key ,query, tinfo ); - break; - case BTLessStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_gt)(query, (void*) key->lower); - else - retval = (*tinfo->f_cmp)((bytea*)query, key->lower) >= 0 - || gbt_var_node_pf_match( key, query , tinfo ); - break; - case BTEqualStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_eq)(query, (void*) key->lower); - else - retval = ( - ( - (*tinfo->f_cmp) (key->lower,(bytea*) query)<=0 && - (*tinfo->f_cmp) ((bytea*)query, (void*) key->upper)<=0 - ) || gbt_var_node_pf_match( key, query, tinfo ) - ); - break; - case BTGreaterStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_lt)(query, (void*) key->upper); - else - retval = (*tinfo->f_cmp)((bytea*)query, key->upper)<=0 - || gbt_var_node_pf_match( key, query, tinfo ); - break; - case BTGreaterEqualStrategyNumber: - if ( is_leaf ) - retval = (*tinfo->f_le)(query, (void*) key->upper); - else - retval = (*tinfo->f_cmp)((bytea*) query, key->upper)<=0 - || gbt_var_node_pf_match( key, query, tinfo ); - break; - default: - retval = FALSE; - } - - return (retval); + bool retval = FALSE; + + switch (*strategy) + { + case BTLessEqualStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_ge) (query, (void *) key->lower); + else + retval = (*tinfo->f_cmp) ((bytea *) query, key->lower) >= 0 + || gbt_var_node_pf_match(key, query, tinfo); + break; + case BTLessStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_gt) (query, (void *) key->lower); + else + retval = (*tinfo->f_cmp) ((bytea *) query, key->lower) >= 0 + || gbt_var_node_pf_match(key, query, tinfo); + break; + case BTEqualStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_eq) (query, (void *) key->lower); + else + retval = ( + ( + (*tinfo->f_cmp) (key->lower, (bytea *) query) <= 0 && + (*tinfo->f_cmp) ((bytea *) query, (void *) key->upper) <= 0 + ) || gbt_var_node_pf_match(key, query, tinfo) + ); + break; + case BTGreaterStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_lt) (query, (void *) key->upper); + else + retval = (*tinfo->f_cmp) ((bytea *) query, key->upper) <= 0 + || gbt_var_node_pf_match(key, query, tinfo); + break; + case BTGreaterEqualStrategyNumber: + if (is_leaf) + retval = (*tinfo->f_le) (query, (void *) key->upper); + else + retval = (*tinfo->f_cmp) ((bytea *) query, key->upper) <= 0 + || gbt_var_node_pf_match(key, query, tinfo); + break; + default: + retval = FALSE; + } + + return (retval); } diff --git a/contrib/btree_gist/btree_utils_var.h b/contrib/btree_gist/btree_utils_var.h index 086891b158f..ecb3f3f3f13 100644 --- a/contrib/btree_gist/btree_utils_var.h +++ b/contrib/btree_gist/btree_utils_var.h @@ -5,15 +5,16 @@ typedef bytea GBT_VARKEY; /* Better readable key */ typedef struct { - bytea * lower, * upper; -} GBT_VARKEY_R; + bytea *lower, + *upper; +} GBT_VARKEY_R; /* used for key sorting */ typedef struct { - int i ; - GBT_VARKEY * t ; -} Vsrt ; + int i; + GBT_VARKEY *t; +} Vsrt; /* type description @@ -23,45 +24,45 @@ typedef struct typedef struct { - /* Attribs */ + /* Attribs */ - enum gbtree_type t ; /* data type */ - bool str ; /* true, if string ( else binary ) */ - bool trnc ; /* truncate (=compress) key */ + enum gbtree_type t; /* data type */ + bool str; /* true, if string ( else binary ) */ + bool trnc; /* truncate (=compress) key */ - /* Methods */ + /* Methods */ - bool (*f_gt) ( const void * , const void * ); /* greater then */ - bool (*f_ge) ( const 
void * , const void * ); /* greater equal */ - bool (*f_eq) ( const void * , const void * ); /* equal */ - bool (*f_le) ( const void * , const void * ); /* less equal */ - bool (*f_lt) ( const void * , const void * ); /* less then */ - int32 (*f_cmp) ( const bytea * , const bytea * ); /* node compare */ - GBT_VARKEY* (*f_l2n) ( GBT_VARKEY * ); /* convert leaf to node */ -} gbtree_vinfo; + bool (*f_gt) (const void *, const void *); /* greater then */ + bool (*f_ge) (const void *, const void *); /* greater equal */ + bool (*f_eq) (const void *, const void *); /* equal */ + bool (*f_le) (const void *, const void *); /* less equal */ + bool (*f_lt) (const void *, const void *); /* less then */ + int32 (*f_cmp) (const bytea *, const bytea *); /* node compare */ + GBT_VARKEY *(*f_l2n) (GBT_VARKEY *); /* convert leaf to node */ +} gbtree_vinfo; -extern GBT_VARKEY_R gbt_var_key_readable ( const GBT_VARKEY * k ); +extern GBT_VARKEY_R gbt_var_key_readable(const GBT_VARKEY * k); -extern GBT_VARKEY *gbt_var_key_copy ( const GBT_VARKEY_R * u, bool force_node ); +extern GBT_VARKEY *gbt_var_key_copy(const GBT_VARKEY_R * u, bool force_node); -extern GISTENTRY *gbt_var_compress ( GISTENTRY *entry , const gbtree_vinfo * tinfo ); +extern GISTENTRY *gbt_var_compress(GISTENTRY *entry, const gbtree_vinfo * tinfo); -extern GBT_VARKEY *gbt_var_union ( const GistEntryVector * entryvec , int32 * size , - const gbtree_vinfo * tinfo ); +extern GBT_VARKEY *gbt_var_union(const GistEntryVector *entryvec, int32 *size, + const gbtree_vinfo * tinfo); -extern bool gbt_var_same ( bool * result, const Datum d1 , const Datum d2 , - const gbtree_vinfo * tinfo ); +extern bool gbt_var_same(bool *result, const Datum d1, const Datum d2, + const gbtree_vinfo * tinfo); -extern float *gbt_var_penalty ( float * res , const GISTENTRY * o , const GISTENTRY * n, - const gbtree_vinfo * tinfo ); +extern float *gbt_var_penalty(float *res, const GISTENTRY *o, const GISTENTRY *n, + const gbtree_vinfo * tinfo); -extern bool gbt_var_consistent( GBT_VARKEY_R * key , const void * query, - const StrategyNumber * strategy , bool is_leaf, - const gbtree_vinfo * tinfo ); +extern bool gbt_var_consistent(GBT_VARKEY_R * key, const void *query, + const StrategyNumber *strategy, bool is_leaf, + const gbtree_vinfo * tinfo); -extern GIST_SPLITVEC *gbt_var_picksplit ( const GistEntryVector *entryvec, GIST_SPLITVEC *v, - const gbtree_vinfo * tinfo ); -extern void gbt_var_bin_union ( Datum * u , GBT_VARKEY * e , - const gbtree_vinfo * tinfo ); +extern GIST_SPLITVEC *gbt_var_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v, + const gbtree_vinfo * tinfo); +extern void gbt_var_bin_union(Datum *u, GBT_VARKEY * e, + const gbtree_vinfo * tinfo); diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c index b2cf5cf551f..66b7334543b 100644 --- a/contrib/cube/cube.c +++ b/contrib/cube/cube.c @@ -230,7 +230,7 @@ g_cube_union(GistEntryVector *entryvec, int *sizep) for (i = 1; i < entryvec->n; i++) { out = g_cube_binary_union(tmp, (NDBOX *) - DatumGetPointer(entryvec->vector[i].key), + DatumGetPointer(entryvec->vector[i].key), sizep); if (i > 1) pfree(tmp); diff --git a/contrib/dbase/dbf2pg.c b/contrib/dbase/dbf2pg.c index 1c01f7d1d66..9a1f89ea1f8 100644 --- a/contrib/dbase/dbf2pg.c +++ b/contrib/dbase/dbf2pg.c @@ -700,8 +700,8 @@ main(int argc, char **argv) if (verbose > 1) printf("Opening dbf-file\n"); - setlocale(LC_ALL, ""); /* fix for isprint() */ - + setlocale(LC_ALL, ""); /* fix for isprint() */ + if ((dbh = dbf_open(argv[0], O_RDONLY)) == (dbhead *) - 1) 
{ fprintf(stderr, "Couldn't open xbase-file %s\n", argv[0]); diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index 217497bc996..17f36f1306d 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -571,8 +571,8 @@ dblink_fetch(PG_FUNCTION_ARGS) rsinfo->expectedDesc == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("function returning record called in context " - "that cannot accept type record"))); + errmsg("function returning record called in context " + "that cannot accept type record"))); /* get the requested return tuple description */ tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); @@ -777,8 +777,8 @@ dblink_record(PG_FUNCTION_ARGS) rsinfo->expectedDesc == NULL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("function returning record called in context " - "that cannot accept type record"))); + errmsg("function returning record called in context " + "that cannot accept type record"))); /* get the requested return tuple description */ tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); diff --git a/contrib/dbmirror/pending.c b/contrib/dbmirror/pending.c index 6cb4155dbda..1102b3a0961 100644 --- a/contrib/dbmirror/pending.c +++ b/contrib/dbmirror/pending.c @@ -1,7 +1,7 @@ /**************************************************************************** * pending.c - * $Id: pending.c,v 1.18 2004/05/26 00:08:26 wieck Exp $ - * $PostgreSQL: pgsql/contrib/dbmirror/pending.c,v 1.18 2004/05/26 00:08:26 wieck Exp $ + * $Id: pending.c,v 1.19 2004/08/29 05:06:35 momjian Exp $ + * $PostgreSQL: pgsql/contrib/dbmirror/pending.c,v 1.19 2004/08/29 05:06:35 momjian Exp $ * * This file contains a trigger for Postgresql-7.x to record changes to tables * to a pending table for mirroring. @@ -43,17 +43,17 @@ enum FieldUsage }; int storePending(char *cpTableName, HeapTuple tBeforeTuple, - HeapTuple tAfterTuple, - TupleDesc tTupdesc, - Oid tableOid, - char cOp); + HeapTuple tAfterTuple, + TupleDesc tTupdesc, + Oid tableOid, + char cOp); int storeKeyInfo(char *cpTableName, HeapTuple tTupleData, TupleDesc tTuplDesc, - Oid tableOid); -int storeData(char *cpTableName, HeapTuple tTupleData, - TupleDesc tTupleDesc,Oid tableOid,int iIncludeKeyData); + Oid tableOid); +int storeData(char *cpTableName, HeapTuple tTupleData, + TupleDesc tTupleDesc, Oid tableOid, int iIncludeKeyData); int2vector *getPrimaryKey(Oid tblOid); @@ -74,10 +74,9 @@ PG_FUNCTION_INFO_V1(recordchange); #define debug_msg(x) elog(NOTICE,x) #define debug_msg3(x,y,z) elog(NOTICE,x,y,z) #else -#define debug_msg2(x,y) +#define debug_msg2(x,y) #define debug_msg(x) #define debug_msg3(x,y,z) - #endif @@ -85,8 +84,8 @@ PG_FUNCTION_INFO_V1(recordchange); extern Datum nextval(PG_FUNCTION_ARGS); extern Datum setval(PG_FUNCTION_ARGS); -int saveSequenceUpdate(const text * sequenceName, - int nextSequenceValue); +int saveSequenceUpdate(const text *sequenceName, + int nextSequenceValue); /***************************************************************************** @@ -107,15 +106,15 @@ recordchange(PG_FUNCTION_ARGS) char op = 0; char *schemaname; char *fullyqualtblname; - char *pkxpress=NULL; + char *pkxpress = NULL; if (fcinfo->context != NULL) { if (SPI_connect() < 0) { - ereport(ERROR,(errcode(ERRCODE_CONNECTION_FAILURE), - errmsg("dbmirror:recordchange could not connect to SPI"))); + ereport(ERROR, (errcode(ERRCODE_CONNECTION_FAILURE), + errmsg("dbmirror:recordchange could not connect to SPI"))); return -1; } trigdata = (TriggerData *) fcinfo->context; @@ -154,13 +153,13 @@ 
recordchange(PG_FUNCTION_ARGS) } else { - ereport(ERROR,(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION), - errmsg("dbmirror:recordchange Unknown operation"))); - + ereport(ERROR, (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION), + errmsg("dbmirror:recordchange Unknown operation"))); + } - if (storePending(fullyqualtblname, beforeTuple, afterTuple, - tupdesc, retTuple->t_tableOid, op)) + if (storePending(fullyqualtblname, beforeTuple, afterTuple, + tupdesc, retTuple->t_tableOid, op)) { /* An error occoured. Skip the operation. */ ereport(ERROR, @@ -173,8 +172,8 @@ recordchange(PG_FUNCTION_ARGS) debug_msg("dbmirror:recordchange returning on success"); SPI_pfree(fullyqualtblname); - if(pkxpress != NULL) - SPI_pfree(pkxpress); + if (pkxpress != NULL) + SPI_pfree(pkxpress); SPI_finish(); return PointerGetDatum(retTuple); } @@ -196,20 +195,20 @@ int storePending(char *cpTableName, HeapTuple tBeforeTuple, HeapTuple tAfterTuple, TupleDesc tTupDesc, - Oid tableOid, + Oid tableOid, char cOp) { char *cpQueryBase = "INSERT INTO dbmirror_pending (TableName,Op,XID) VALUES ($1,$2,$3)"; int iResult = 0; HeapTuple tCurTuple; - char nulls[3]=" "; + char nulls[3] = " "; /* Points the current tuple(before or after) */ Datum saPlanData[3]; - Oid taPlanArgTypes[4] = {NAMEOID, - CHAROID, - INT4OID}; + Oid taPlanArgTypes[4] = {NAMEOID, + CHAROID, + INT4OID}; void *vpPlan; tCurTuple = tBeforeTuple ? tBeforeTuple : tAfterTuple; @@ -218,8 +217,8 @@ storePending(char *cpTableName, HeapTuple tBeforeTuple, vpPlan = SPI_prepare(cpQueryBase, 3, taPlanArgTypes); if (vpPlan == NULL) - ereport(ERROR,(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION), - errmsg("dbmirror:storePending error creating plan"))); + ereport(ERROR, (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION), + errmsg("dbmirror:storePending error creating plan"))); saPlanData[0] = PointerGetDatum(cpTableName); @@ -228,8 +227,8 @@ storePending(char *cpTableName, HeapTuple tBeforeTuple, iResult = SPI_execp(vpPlan, saPlanData, nulls, 1); if (iResult < 0) - elog(NOTICE, "storedPending fired (%s) returned %d", - cpQueryBase, iResult); + elog(NOTICE, "storedPending fired (%s) returned %d", + cpQueryBase, iResult); @@ -242,8 +241,8 @@ storePending(char *cpTableName, HeapTuple tBeforeTuple, * This is a record of a delete operation. * Just store the key data. */ - iResult = storeKeyInfo(cpTableName, - tBeforeTuple, tTupDesc, tableOid); + iResult = storeKeyInfo(cpTableName, + tBeforeTuple, tTupDesc, tableOid); } else if (cOp == 'i') { @@ -251,18 +250,18 @@ storePending(char *cpTableName, HeapTuple tBeforeTuple, * An Insert operation. * Store all data */ - iResult = storeData(cpTableName, tAfterTuple, - tTupDesc, tableOid,TRUE); + iResult = storeData(cpTableName, tAfterTuple, + tTupDesc, tableOid, TRUE); } else { /* op must be an update. */ - iResult = storeKeyInfo(cpTableName, tBeforeTuple, - tTupDesc, tableOid); - iResult = iResult ? iResult : - storeData(cpTableName, tAfterTuple, tTupDesc, - tableOid,TRUE); + iResult = storeKeyInfo(cpTableName, tBeforeTuple, + tTupDesc, tableOid); + iResult = iResult ? 
iResult : + storeData(cpTableName, tAfterTuple, tTupDesc, + tableOid, TRUE); } @@ -292,7 +291,7 @@ storeKeyInfo(char *cpTableName, HeapTuple tTupleData, } /* pplan = SPI_saveplan(pplan); */ - cpKeyData = packageData(tTupleData, tTupleDesc,tableOid, PRIMARY); + cpKeyData = packageData(tTupleData, tTupleDesc, tableOid, PRIMARY); if (cpKeyData == NULL) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -312,8 +311,8 @@ storeKeyInfo(char *cpTableName, HeapTuple tTupleData, if (iRetCode != SPI_OK_INSERT) { - ereport(ERROR,(errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION) - ,errmsg("error inserting row in pendingDelete"))); + ereport(ERROR, (errcode(ERRCODE_TRIGGERED_ACTION_EXCEPTION) + ,errmsg("error inserting row in pendingDelete"))); return -1; } @@ -360,8 +359,8 @@ getPrimaryKey(Oid tblOid) * Stores a copy of the non-key data for the row. *****************************************************************************/ int -storeData(char *cpTableName, HeapTuple tTupleData, - TupleDesc tTupleDesc,Oid tableOid, int iIncludeKeyData) +storeData(char *cpTableName, HeapTuple tTupleData, + TupleDesc tTupleDesc, Oid tableOid, int iIncludeKeyData) { Oid planArgTypes[1] = {NAMEOID}; @@ -380,10 +379,10 @@ storeData(char *cpTableName, HeapTuple tTupleData, /* pplan = SPI_saveplan(pplan); */ if (iIncludeKeyData == 0) - cpKeyData = packageData(tTupleData, tTupleDesc, - tableOid, NONPRIMARY); + cpKeyData = packageData(tTupleData, tTupleDesc, + tableOid, NONPRIMARY); else - cpKeyData = packageData(tTupleData, tTupleDesc,tableOid, ALL); + cpKeyData = packageData(tTupleData, tTupleDesc, tableOid, ALL); planData[0] = PointerGetDatum(cpKeyData); iRetValue = SPI_execp(pplan, planData, NULL, 1); @@ -439,10 +438,10 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc, Oid tableOid, } if (tpPKeys != NULL) - { - debug_msg("dbmirror:packageData have primary keys"); + { + debug_msg("dbmirror:packageData have primary keys"); - } + } cpDataBlock = SPI_palloc(BUFFER_SIZE); iDataBlockSize = BUFFER_SIZE; @@ -463,18 +462,18 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc, Oid tableOid, /* Determine if this is a primary key or not. */ iIsPrimaryKey = 0; for (iPrimaryKeyIndex = 0; - (*tpPKeys)[iPrimaryKeyIndex] != 0; + (*tpPKeys)[iPrimaryKeyIndex] != 0; iPrimaryKeyIndex++) { - if ((*tpPKeys)[iPrimaryKeyIndex] - == iColumnCounter) + if ((*tpPKeys)[iPrimaryKeyIndex] + == iColumnCounter) { iIsPrimaryKey = 1; break; } } - if (iIsPrimaryKey ? (eKeyUsage != PRIMARY) : - (eKeyUsage != NONPRIMARY)) + if (iIsPrimaryKey ? (eKeyUsage != PRIMARY) : + (eKeyUsage != NONPRIMARY)) { /** * Don't use. @@ -486,34 +485,34 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc, Oid tableOid, } } /* KeyUsage!=ALL */ - if(tTupleDesc->attrs[iColumnCounter-1]->attisdropped) - { - /** - * This column has been dropped. - * Do not mirror it. - */ - continue; - } + if (tTupleDesc->attrs[iColumnCounter - 1]->attisdropped) + { + /** + * This column has been dropped. + * Do not mirror it. 
+ */ + continue; + } cpFieldName = DatumGetPointer(NameGetDatum - - (&tTupleDesc->attrs - [iColumnCounter - 1]->attname)); + + (&tTupleDesc->attrs + [iColumnCounter - 1]->attname)); debug_msg2("dbmirror:packageData field name: %s", cpFieldName); - while (iDataBlockSize - iUsedDataBlock < - strlen(cpFieldName) + 6) + while (iDataBlockSize - iUsedDataBlock < + strlen(cpFieldName) + 6) { - cpDataBlock = SPI_repalloc(cpDataBlock, - iDataBlockSize + - BUFFER_SIZE); + cpDataBlock = SPI_repalloc(cpDataBlock, + iDataBlockSize + + BUFFER_SIZE); iDataBlockSize = iDataBlockSize + BUFFER_SIZE; } sprintf(cpDataBlock + iUsedDataBlock, "\"%s\"=", cpFieldName); iUsedDataBlock = iUsedDataBlock + strlen(cpFieldName) + 3; - cpFieldData = SPI_getvalue(tTupleData, tTupleDesc, - iColumnCounter); + cpFieldData = SPI_getvalue(tTupleData, tTupleDesc, + iColumnCounter); cpUnFormatedPtr = cpFieldData; cpFormatedPtr = cpDataBlock + iUsedDataBlock; @@ -531,17 +530,17 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc, Oid tableOid, continue; } - debug_msg2("dbmirror:packageData field data: \"%s\"", - cpFieldData); + debug_msg2("dbmirror:packageData field data: \"%s\"", + cpFieldData); debug_msg("dbmirror:packageData starting format loop"); while (*cpUnFormatedPtr != 0) { while (iDataBlockSize - iUsedDataBlock < 2) { - cpDataBlock = SPI_repalloc(cpDataBlock, - iDataBlockSize - + BUFFER_SIZE); + cpDataBlock = SPI_repalloc(cpDataBlock, + iDataBlockSize + + BUFFER_SIZE); iDataBlockSize = iDataBlockSize + BUFFER_SIZE; cpFormatedPtr = cpDataBlock + iUsedDataBlock; } @@ -561,25 +560,25 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc, Oid tableOid, while (iDataBlockSize - iUsedDataBlock < 3) { - cpDataBlock = SPI_repalloc(cpDataBlock, - iDataBlockSize + - BUFFER_SIZE); + cpDataBlock = SPI_repalloc(cpDataBlock, + iDataBlockSize + + BUFFER_SIZE); iDataBlockSize = iDataBlockSize + BUFFER_SIZE; cpFormatedPtr = cpDataBlock + iUsedDataBlock; } sprintf(cpFormatedPtr, "' "); iUsedDataBlock = iUsedDataBlock + 2; - debug_msg2("dbmirror:packageData data block: \"%s\"", - cpDataBlock); + debug_msg2("dbmirror:packageData data block: \"%s\"", + cpDataBlock); } /* for iColumnCounter */ if (tpPKeys != NULL) SPI_pfree(tpPKeys); - debug_msg3("dbmirror:packageData returning DataBlockSize:%d iUsedDataBlock:%d", - iDataBlockSize, - iUsedDataBlock); + debug_msg3("dbmirror:packageData returning DataBlockSize:%d iUsedDataBlock:%d", + iDataBlockSize, + iUsedDataBlock); memset(cpDataBlock + iUsedDataBlock, 0, iDataBlockSize - iUsedDataBlock); @@ -590,54 +589,55 @@ packageData(HeapTuple tTupleData, TupleDesc tTupleDesc, Oid tableOid, PG_FUNCTION_INFO_V1(setval); -Datum setval(PG_FUNCTION_ARGS) +Datum +setval(PG_FUNCTION_ARGS) { - text * sequenceName; - - Oid setvalArgTypes[2] = {TEXTOID,INT4OID}; - int nextValue; - void * setvalPlan=NULL; - Datum setvalData[2]; - const char * setvalQuery = "SELECT setval_pg($1,$2)"; - int ret; - - sequenceName = PG_GETARG_TEXT_P(0); - nextValue = PG_GETARG_INT32(1); - - setvalData[0] = PointerGetDatum(sequenceName); - setvalData[1] = Int32GetDatum(nextValue); - - if (SPI_connect() < 0) - { - ereport(ERROR,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), - errmsg("dbmirror:setval could not connect to SPI"))); - return -1; - } - - setvalPlan = SPI_prepare(setvalQuery,2,setvalArgTypes); - if(setvalPlan == NULL) - { - ereport(ERROR,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), - errmsg("dbmirror:setval could not prepare plan"))); - return -1; - } - - ret = SPI_execp(setvalPlan,setvalData,NULL,1); - - if(ret 
!= SPI_OK_SELECT || SPI_processed != 1) - return -1; - - debug_msg2("dbmirror:setval: setval_pg returned ok:%d",nextValue); - - ret = saveSequenceUpdate(sequenceName,nextValue); - - SPI_pfree(setvalPlan); - - SPI_finish(); - debug_msg("dbmirror:setval about to return"); - return Int64GetDatum(nextValue); + text *sequenceName; + + Oid setvalArgTypes[2] = {TEXTOID, INT4OID}; + int nextValue; + void *setvalPlan = NULL; + Datum setvalData[2]; + const char *setvalQuery = "SELECT setval_pg($1,$2)"; + int ret; + + sequenceName = PG_GETARG_TEXT_P(0); + nextValue = PG_GETARG_INT32(1); + + setvalData[0] = PointerGetDatum(sequenceName); + setvalData[1] = Int32GetDatum(nextValue); + + if (SPI_connect() < 0) + { + ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("dbmirror:setval could not connect to SPI"))); + return -1; + } + + setvalPlan = SPI_prepare(setvalQuery, 2, setvalArgTypes); + if (setvalPlan == NULL) + { + ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("dbmirror:setval could not prepare plan"))); + return -1; + } + + ret = SPI_execp(setvalPlan, setvalData, NULL, 1); + + if (ret != SPI_OK_SELECT || SPI_processed != 1) + return -1; + + debug_msg2("dbmirror:setval: setval_pg returned ok:%d", nextValue); + + ret = saveSequenceUpdate(sequenceName, nextValue); + + SPI_pfree(setvalPlan); + + SPI_finish(); + debug_msg("dbmirror:setval about to return"); + return Int64GetDatum(nextValue); } @@ -645,134 +645,131 @@ Datum setval(PG_FUNCTION_ARGS) PG_FUNCTION_INFO_V1(nextval); -Datum +Datum nextval(PG_FUNCTION_ARGS) { - text * sequenceName; - - const char * nextvalQuery = "SELECT nextval_pg($1)"; - Oid nextvalArgTypes[1] = {TEXTOID}; - void * nextvalPlan=NULL; - Datum nextvalData[1]; - - - int ret; - HeapTuple resTuple; - char isNull; - int nextSequenceValue; - + text *sequenceName; + const char *nextvalQuery = "SELECT nextval_pg($1)"; + Oid nextvalArgTypes[1] = {TEXTOID}; + void *nextvalPlan = NULL; + Datum nextvalData[1]; - debug_msg("dbmirror:nextval Starting pending.so:nextval"); + int ret; + HeapTuple resTuple; + char isNull; + int nextSequenceValue; - sequenceName = PG_GETARG_TEXT_P(0); - if (SPI_connect() < 0) - { - ereport(ERROR,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), - errmsg("dbmirror:nextval could not connect to SPI"))); - return -1; - } - - nextvalPlan = SPI_prepare(nextvalQuery,1,nextvalArgTypes); - - debug_msg("prepared plan to call nextval_pg"); + debug_msg("dbmirror:nextval Starting pending.so:nextval"); - if(nextvalPlan==NULL) - { - ereport(ERROR,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), - errmsg("dbmirror:nextval error creating plan"))); - return -1; - } - nextvalData[0] = PointerGetDatum(sequenceName); + sequenceName = PG_GETARG_TEXT_P(0); - ret = SPI_execp(nextvalPlan,nextvalData,NULL,1); + if (SPI_connect() < 0) + { + ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("dbmirror:nextval could not connect to SPI"))); + return -1; + } - debug_msg("dbmirror:Executed call to nextval_pg"); + nextvalPlan = SPI_prepare(nextvalQuery, 1, nextvalArgTypes); - if(ret != SPI_OK_SELECT || SPI_processed != 1) - return -1; + debug_msg("prepared plan to call nextval_pg"); - resTuple = SPI_tuptable->vals[0]; - debug_msg("dbmirror:nextval Set resTuple"); + if (nextvalPlan == NULL) + { + ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("dbmirror:nextval error creating plan"))); + return -1; + } + nextvalData[0] = PointerGetDatum(sequenceName); - nextSequenceValue =* (unsigned int 
*)(DatumGetPointer(SPI_getbinval(resTuple, - SPI_tuptable->tupdesc, - 1,&isNull))); + ret = SPI_execp(nextvalPlan, nextvalData, NULL, 1); - + debug_msg("dbmirror:Executed call to nextval_pg"); - debug_msg2("dbmirror:nextval Set SPI_getbinval:%d",nextSequenceValue); - - saveSequenceUpdate(sequenceName,nextSequenceValue); - SPI_pfree(resTuple); - SPI_pfree(nextvalPlan); + if (ret != SPI_OK_SELECT || SPI_processed != 1) + return -1; + + resTuple = SPI_tuptable->vals[0]; + + debug_msg("dbmirror:nextval Set resTuple"); + + nextSequenceValue = *(unsigned int *) (DatumGetPointer(SPI_getbinval(resTuple, + SPI_tuptable->tupdesc, + 1, &isNull))); + + + + debug_msg2("dbmirror:nextval Set SPI_getbinval:%d", nextSequenceValue); - SPI_finish(); - return Int64GetDatum(nextSequenceValue); + saveSequenceUpdate(sequenceName, nextSequenceValue); + SPI_pfree(resTuple); + SPI_pfree(nextvalPlan); + + SPI_finish(); + + return Int64GetDatum(nextSequenceValue); } int -saveSequenceUpdate(const text * sequenceName, - int nextSequenceVal) +saveSequenceUpdate(const text *sequenceName, + int nextSequenceVal) { - Oid insertArgTypes[2] = {TEXTOID,INT4OID}; - Oid insertDataArgTypes[1] = {NAMEOID}; - void * insertPlan=NULL; - void * insertDataPlan=NULL; - Datum insertDatum[2]; - Datum insertDataDatum[1]; - char nextSequenceText[32]; + Oid insertArgTypes[2] = {TEXTOID, INT4OID}; + Oid insertDataArgTypes[1] = {NAMEOID}; + void *insertPlan = NULL; + void *insertDataPlan = NULL; + Datum insertDatum[2]; + Datum insertDataDatum[1]; + char nextSequenceText[32]; + + const char *insertQuery = + "INSERT INTO dbmirror_Pending (TableName,Op,XID) VALUES" \ + "($1,'s',$2)"; + const char *insertDataQuery = + "INSERT INTO dbmirror_PendingData(SeqId,IsKey,Data) VALUES " \ + "(currval('dbmirror_pending_seqid_seq'),'t',$1)"; + + int ret; - const char * insertQuery = - "INSERT INTO dbmirror_Pending (TableName,Op,XID) VALUES" \ - "($1,'s',$2)"; - const char * insertDataQuery = - "INSERT INTO dbmirror_PendingData(SeqId,IsKey,Data) VALUES " \ - "(currval('dbmirror_pending_seqid_seq'),'t',$1)"; - - int ret; + insertPlan = SPI_prepare(insertQuery, 2, insertArgTypes); + insertDataPlan = SPI_prepare(insertDataQuery, 1, insertDataArgTypes); - insertPlan = SPI_prepare(insertQuery,2,insertArgTypes); - insertDataPlan = SPI_prepare(insertDataQuery,1,insertDataArgTypes); + debug_msg("Prepared insert query"); - debug_msg("Prepared insert query"); + if (insertPlan == NULL || insertDataPlan == NULL) + ereport(ERROR, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), errmsg("dbmirror:nextval error creating plan"))); - if(insertPlan == NULL || insertDataPlan == NULL) - { - ereport(ERROR,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),errmsg("dbmirror:nextval error creating plan"))); - } + insertDatum[1] = Int32GetDatum(GetCurrentTransactionId()); + insertDatum[0] = PointerGetDatum(sequenceName); - insertDatum[1] = Int32GetDatum(GetCurrentTransactionId()); - insertDatum[0] = PointerGetDatum(sequenceName); + sprintf(nextSequenceText, "%d", nextSequenceVal); + insertDataDatum[0] = PointerGetDatum(nextSequenceText); + debug_msg2("dbmirror:savesequenceupdate: Setting value %s", + nextSequenceText); - sprintf(nextSequenceText,"%d",nextSequenceVal); - insertDataDatum[0] = PointerGetDatum(nextSequenceText); - debug_msg2("dbmirror:savesequenceupdate: Setting value %s", - nextSequenceText); + debug_msg("dbmirror:About to execute insert query"); - debug_msg("dbmirror:About to execute insert query"); + ret = SPI_execp(insertPlan, insertDatum, NULL, 1); - ret = 
SPI_execp(insertPlan,insertDatum,NULL,1); - - ret = SPI_execp(insertDataPlan,insertDataDatum,NULL,1); + ret = SPI_execp(insertDataPlan, insertDataDatum, NULL, 1); - debug_msg("dbmirror:Insert query finished"); - SPI_pfree(insertPlan); - SPI_pfree(insertDataPlan); - - return ret; + debug_msg("dbmirror:Insert query finished"); + SPI_pfree(insertPlan); + SPI_pfree(insertDataPlan); -} + return ret; +} diff --git a/contrib/dbsize/dbsize.c b/contrib/dbsize/dbsize.c index b425a3979b4..9cf2c0f9369 100644 --- a/contrib/dbsize/dbsize.c +++ b/contrib/dbsize/dbsize.c @@ -16,7 +16,7 @@ static int64 -get_tablespace_size(Oid dbid, Oid spcid, bool baddirOK); + get_tablespace_size(Oid dbid, Oid spcid, bool baddirOK); static char * psnprintf(size_t len, const char *fmt,...) @@ -50,10 +50,11 @@ database_size(PG_FUNCTION_ARGS) Oid dbid; int64 totalsize; + #ifdef SYMLINK - Relation dbrel; - HeapScanDesc scan; - HeapTuple tuple; + Relation dbrel; + HeapScanDesc scan; + HeapTuple tuple; #endif dbid = get_database_oid(NameStr(*dbname)); @@ -62,17 +63,18 @@ database_size(PG_FUNCTION_ARGS) (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database \"%s\" does not exist", NameStr(*dbname)))); -#ifdef SYMLINK +#ifdef SYMLINK - dbrel = heap_openr(TableSpaceRelationName, AccessShareLock); + dbrel = heap_openr(TableSpaceRelationName, AccessShareLock); scan = heap_beginscan(dbrel, SnapshotNow, 0, (ScanKey) NULL); totalsize = 0; - while((tuple = heap_getnext(scan, ForwardScanDirection))) + while ((tuple = heap_getnext(scan, ForwardScanDirection))) { - Oid spcid = HeapTupleGetOid(tuple); - if(spcid != GLOBALTABLESPACE_OID) + Oid spcid = HeapTupleGetOid(tuple); + + if (spcid != GLOBALTABLESPACE_OID) totalsize += get_tablespace_size(dbid, spcid, true); } heap_endscan(scan); @@ -94,8 +96,8 @@ database_size(PG_FUNCTION_ARGS) static int64 get_tablespace_size(Oid dbid, Oid spcid, bool baddirOK) { - char *dbpath; - DIR *dirdesc; + char *dbpath; + DIR *dirdesc; struct dirent *direntry; int64 totalsize; @@ -104,11 +106,11 @@ get_tablespace_size(Oid dbid, Oid spcid, bool baddirOK) dirdesc = AllocateDir(dbpath); if (!dirdesc) { - if(baddirOK) + if (baddirOK) return 0; else ereport(ERROR, - (errcode_for_file_access(), + (errcode_for_file_access(), errmsg("could not open directory \"%s\": %m", dbpath))); } totalsize = 0; diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c index 49cd284e755..331b71df729 100644 --- a/contrib/fuzzystrmatch/dmetaphone.c +++ b/contrib/fuzzystrmatch/dmetaphone.c @@ -1,36 +1,36 @@ /* * This is a port of the Double Metaphone algorithm for use in PostgreSQL. - * + * * Double Metaphone computes 2 "sounds like" strings - a primary and an * alternate. In most cases they are the same, but for foreign names * especially they can be a bit different, depending on pronunciation. * * Information on using Double Metaphone can be found at - * http://www.codeproject.com/useritems/dmetaphone1.asp + * http://www.codeproject.com/useritems/dmetaphone1.asp * and the original article describing it can be found at - * http://www.cuj.com/documents/s=8038/cuj0006philips/ + * http://www.cuj.com/documents/s=8038/cuj0006philips/ * * For PostgrSQL we provide 2 functions - one for the primary and one for * the alternate. That way the functions are pure text->text mappings that * are useful in functional indexes. These are 'dmetaphone' for the * primary and 'dmetaphone_alt' for the alternate. 
* - * Assuming that dmetaphone.so is in $libdir, the SQL to set up the + * Assuming that dmetaphone.so is in $libdir, the SQL to set up the * functions looks like this: * - * CREATE FUNCTION dmetaphone (text) RETURNS text - * LANGUAGE C IMMUTABLE STRICT - * AS '$libdir/dmetaphone', 'dmetaphone'; + * CREATE FUNCTION dmetaphone (text) RETURNS text + * LANGUAGE C IMMUTABLE STRICT + * AS '$libdir/dmetaphone', 'dmetaphone'; * - * CREATE FUNCTION dmetaphone_alt (text) RETURNS text - * LANGUAGE C IMMUTABLE STRICT - * AS '$libdir/dmetaphone', 'dmetaphone_alt'; + * CREATE FUNCTION dmetaphone_alt (text) RETURNS text + * LANGUAGE C IMMUTABLE STRICT + * AS '$libdir/dmetaphone', 'dmetaphone_alt'; * * Note that you have to declare the functions IMMUTABLE if you want to * use them in functional indexes, and you have to declare them as STRICT - * as they do not check for NULL input, and will segfault if given NULL input. - * (See below for alternative ) Declaring them as STRICT means PostgreSQL - * will never call them with NULL, but instead assume the result is NULL, + * as they do not check for NULL input, and will segfault if given NULL input. + * (See below for alternative ) Declaring them as STRICT means PostgreSQL + * will never call them with NULL, but instead assume the result is NULL, * which is what we (I) want. * * Alternatively, compile with -DDMETAPHONE_NOSTRICT and the functions @@ -42,25 +42,25 @@ * need. That's the way the perl module was written, because perl can handle * a list return more easily than we can in PostgreSQL. The result has been * fast enough for my needs, but it could maybe be optimized a bit to remove - * that behaviour. + * that behaviour. * */ /* - * $Revision: 1.2 $ - * $Id: dmetaphone.c,v 1.2 2004/08/20 19:48:14 momjian Exp $ + * $Revision: 1.3 $ + * $Id: dmetaphone.c,v 1.3 2004/08/29 05:06:35 momjian Exp $ */ /***************************** COPYRIGHT NOTICES *********************** Most of this code is directly from the Text::DoubleMetaphone perl module -version 0.05 available from http://www.cpan.org. +version 0.05 available from http://www.cpan.org. It bears this copyright notice: - Copyright 2000, Maurice Aubrey <maurice@hevanet.com>. + Copyright 2000, Maurice Aubrey <maurice@hevanet.com>. All rights reserved. This code is based heavily on the C++ implementation by @@ -73,7 +73,7 @@ It bears this copyright notice: The remaining code is authored by Andrew Dunstan <amdunstan@ncshp.org> and <andrew@dunslane.net> and is covered this copyright: - Copyright 2003, North Carolina State Highway Patrol. + Copyright 2003, North Carolina State Highway Patrol. All rights reserved. Permission to use, copy, modify, and distribute this software and its @@ -81,14 +81,14 @@ The remaining code is authored by Andrew Dunstan <amdunstan@ncshp.org> and is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies. - IN NO EVENT SHALL THE NORTH CAROLINA STATE HIGHWAY PATROL BE LIABLE TO ANY - PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, + IN NO EVENT SHALL THE NORTH CAROLINA STATE HIGHWAY PATROL BE LIABLE TO ANY + PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS - DOCUMENTATION, EVEN IF THE NORTH CAROLINA STATE HIGHWAY PATROL HAS BEEN + DOCUMENTATION, EVEN IF THE NORTH CAROLINA STATE HIGHWAY PATROL HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- THE NORTH CAROLINA STATE HIGHWAY PATROL SPECIFICALLY DISCLAIMS ANY - WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + THE NORTH CAROLINA STATE HIGHWAY PATROL SPECIFICALLY DISCLAIMS ANY + WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE NORTH CAROLINA STATE HIGHWAY PATROL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR @@ -109,7 +109,6 @@ The remaining code is authored by Andrew Dunstan <amdunstan@ncshp.org> and /* turn off assertions for embedded function */ #define NDEBUG - #endif #include <stdio.h> @@ -124,7 +123,7 @@ extern Datum dmetaphone_alt(PG_FUNCTION_ARGS); /* prototype for the main function we got from the perl module */ static void -DoubleMetaphone(char *, char **); + DoubleMetaphone(char *, char **); #ifndef DMETAPHONE_MAIN @@ -138,37 +137,41 @@ PG_FUNCTION_INFO_V1(dmetaphone); Datum dmetaphone(PG_FUNCTION_ARGS) { - text * arg, * result; - int alen, rsize; - char * aptr, *codes[2], * code, * rptr; + text *arg, + *result; + int alen, + rsize; + char *aptr, + *codes[2], + *code, + *rptr; #ifdef DMETAPHONE_NOSTRICT if (PG_ARGISNULL(0)) PG_RETURNNULL(); #endif arg = PG_GETARG_TEXT_P(0); - alen = VARSIZE(arg)-VARHDRSZ; - - /* - * Postgres' string values might not have trailing nuls. - * The VARSIZE will not include the nul in any case - * so we copy things out and add a trailing nul. - * When we copy back we ignore the nul - * (and we don't make space for it). + alen = VARSIZE(arg) - VARHDRSZ; + + /* + * Postgres' string values might not have trailing nuls. The VARSIZE + * will not include the nul in any case so we copy things out and add + * a trailing nul. When we copy back we ignore the nul (and we don't + * make space for it). 
*/ - aptr = palloc(alen+1); - memcpy(aptr,VARDATA(arg),alen); - aptr[alen]=0; - DoubleMetaphone(aptr,codes); + aptr = palloc(alen + 1); + memcpy(aptr, VARDATA(arg), alen); + aptr[alen] = 0; + DoubleMetaphone(aptr, codes); code = codes[0]; if (!code) code = ""; - rsize = VARHDRSZ + strlen(code) ; + rsize = VARHDRSZ + strlen(code); result = (text *) palloc(rsize); - memset(result,0,rsize); + memset(result, 0, rsize); rptr = VARDATA(result); - memcpy(rptr,code,strlen(code)); + memcpy(rptr, code, strlen(code)); VARATT_SIZEP(result) = rsize; PG_RETURN_TEXT_P(result); } @@ -183,28 +186,33 @@ PG_FUNCTION_INFO_V1(dmetaphone_alt); Datum dmetaphone_alt(PG_FUNCTION_ARGS) { - text * arg, * result; - int alen, rsize; - char * aptr, * codes[2], * code, * rptr; + text *arg, + *result; + int alen, + rsize; + char *aptr, + *codes[2], + *code, + *rptr; #ifdef DMETAPHONE_NOSTRICT if (PG_ARGISNULL(0)) PG_RETURNNULL(); #endif arg = PG_GETARG_TEXT_P(0); - alen = VARSIZE(arg)-VARHDRSZ; - aptr = palloc(alen+1); - memcpy(aptr,VARDATA(arg),alen); - aptr[alen]=0; - DoubleMetaphone(aptr,codes); + alen = VARSIZE(arg) - VARHDRSZ; + aptr = palloc(alen + 1); + memcpy(aptr, VARDATA(arg), alen); + aptr[alen] = 0; + DoubleMetaphone(aptr, codes); code = codes[1]; if (!code) code = ""; - rsize = VARHDRSZ + strlen(code) ; + rsize = VARHDRSZ + strlen(code); result = (text *) palloc(rsize); - memset(result,0,rsize); + memset(result, 0, rsize); rptr = VARDATA(result); - memcpy(rptr,code,strlen(code)); + memcpy(rptr, code, strlen(code)); VARATT_SIZEP(result) = rsize; PG_RETURN_TEXT_P(result); } @@ -215,141 +223,139 @@ dmetaphone_alt(PG_FUNCTION_ARGS) /* all memory handling is done with these macros */ #define META_MALLOC(v,n,t) \ - (v = (t*)palloc(((n)*sizeof(t)))) + (v = (t*)palloc(((n)*sizeof(t)))) #define META_REALLOC(v,n,t) \ - (v = (t*)repalloc((v),((n)*sizeof(t)))) + (v = (t*)repalloc((v),((n)*sizeof(t)))) -/* +/* * Don't do pfree - it seems to cause a segv sometimes - which might have just * been caused by reloading the module in development. * So we rely on context cleanup - Tom Lane says pfree shouldn't be necessary * in a case like this. */ -#define META_FREE(x) /* pfree((x)) */ +#define META_FREE(x) /* pfree((x)) */ -#else /* not defined DMETAPHONE_MAIN */ +#else /* not defined DMETAPHONE_MAIN */ /* use the standard malloc library when not running in PostgreSQL */ #define META_MALLOC(v,n,t) \ - (v = (t*)malloc(((n)*sizeof(t)))) + (v = (t*)malloc(((n)*sizeof(t)))) #define META_REALLOC(v,n,t) \ - (v = (t*)realloc((v),((n)*sizeof(t)))) + (v = (t*)realloc((v),((n)*sizeof(t)))) #define META_FREE(x) free((x)) - -#endif /* defined DMETAPHONE_MAIN */ +#endif /* defined DMETAPHONE_MAIN */ -/* this typedef was orignally in the perl module's .h file */ +/* this typedef was orignally in the perl module's .h file */ typedef struct { - char *str; - int length; - int bufsize; - int free_string_on_destroy; + char *str; + int length; + int bufsize; + int free_string_on_destroy; } -metastring; -/* + metastring; + +/* * remaining perl module funcs unchanged except for declaring them static * and reformatting to PostgreSQL indentation and to fit in 80 cols. 
* - */ + */ static metastring * NewMetaString(char *init_str) { - metastring *s; - char empty_string[] = ""; + metastring *s; + char empty_string[] = ""; - META_MALLOC(s, 1, metastring); - assert( s != NULL ); + META_MALLOC(s, 1, metastring); + assert(s != NULL); - if (init_str == NULL) + if (init_str == NULL) init_str = empty_string; - s->length = strlen(init_str); - /* preallocate a bit more for potential growth */ - s->bufsize = s->length + 7; + s->length = strlen(init_str); + /* preallocate a bit more for potential growth */ + s->bufsize = s->length + 7; + + META_MALLOC(s->str, s->bufsize, char); + assert(s->str != NULL); - META_MALLOC(s->str, s->bufsize, char); - assert( s->str != NULL ); - - strncpy(s->str, init_str, s->length + 1); - s->free_string_on_destroy = 1; + strncpy(s->str, init_str, s->length + 1); + s->free_string_on_destroy = 1; - return s; + return s; } static void DestroyMetaString(metastring * s) { - if (s == NULL) + if (s == NULL) return; - if (s->free_string_on_destroy && (s->str != NULL)) + if (s->free_string_on_destroy && (s->str != NULL)) META_FREE(s->str); - META_FREE(s); + META_FREE(s); } static void IncreaseBuffer(metastring * s, int chars_needed) { - META_REALLOC(s->str, (s->bufsize + chars_needed + 10), char); - assert( s->str != NULL ); - s->bufsize = s->bufsize + chars_needed + 10; + META_REALLOC(s->str, (s->bufsize + chars_needed + 10), char); + assert(s->str != NULL); + s->bufsize = s->bufsize + chars_needed + 10; } static void MakeUpper(metastring * s) { - char *i; + char *i; - for (i = s->str; *i; i++) - { + for (i = s->str; *i; i++) *i = toupper(*i); - } } static int IsVowel(metastring * s, int pos) { - char c; + char c; - if ((pos < 0) || (pos >= s->length)) + if ((pos < 0) || (pos >= s->length)) return 0; - c = *(s->str + pos); - if ((c == 'A') || (c == 'E') || (c == 'I') || (c =='O') || - (c =='U') || (c == 'Y')) + c = *(s->str + pos); + if ((c == 'A') || (c == 'E') || (c == 'I') || (c == 'O') || + (c == 'U') || (c == 'Y')) return 1; - return 0; + return 0; } static int SlavoGermanic(metastring * s) { - if ((char *) strstr(s->str, "W")) + if ((char *) strstr(s->str, "W")) return 1; - else if ((char *) strstr(s->str, "K")) + else if ((char *) strstr(s->str, "K")) return 1; - else if ((char *) strstr(s->str, "CZ")) + else if ((char *) strstr(s->str, "CZ")) return 1; - else if ((char *) strstr(s->str, "WITZ")) + else if ((char *) strstr(s->str, "WITZ")) return 1; - else + else return 0; } @@ -357,117 +363,115 @@ SlavoGermanic(metastring * s) static char GetAt(metastring * s, int pos) { - if ((pos < 0) || (pos >= s->length)) + if ((pos < 0) || (pos >= s->length)) return '\0'; - return ((char) *(s->str + pos)); + return ((char) *(s->str + pos)); } static void SetAt(metastring * s, int pos, char c) { - if ((pos < 0) || (pos >= s->length)) + if ((pos < 0) || (pos >= s->length)) return; - *(s->str + pos) = c; + *(s->str + pos) = c; } -/* +/* Caveats: the START value is 0 based */ static int -StringAt(metastring * s, int start, int length, ...) +StringAt(metastring * s, int start, int length,...) 
{ - char *test; - char *pos; - va_list ap; + char *test; + char *pos; + va_list ap; - if ((start < 0) || (start >= s->length)) - return 0; + if ((start < 0) || (start >= s->length)) + return 0; - pos = (s->str + start); - va_start(ap, length); + pos = (s->str + start); + va_start(ap, length); - do + do { test = va_arg(ap, char *); if (*test && (strncmp(pos, test, length) == 0)) return 1; } - while (strcmp(test, "")); + while (strcmp(test, "")); - va_end(ap); + va_end(ap); - return 0; + return 0; } static void MetaphAdd(metastring * s, char *new_str) { - int add_length; + int add_length; - if (new_str == NULL) + if (new_str == NULL) return; - add_length = strlen(new_str); - if ((s->length + add_length) > (s->bufsize - 1)) - { + add_length = strlen(new_str); + if ((s->length + add_length) > (s->bufsize - 1)) IncreaseBuffer(s, add_length); - } - strcat(s->str, new_str); - s->length += add_length; + strcat(s->str, new_str); + s->length += add_length; } static void DoubleMetaphone(char *str, char **codes) { - int length; - metastring *original; - metastring *primary; - metastring *secondary; - int current; - int last; - - current = 0; - /* we need the real length and last prior to padding */ - length = strlen(str); - last = length - 1; - original = NewMetaString(str); - /* Pad original so we can index beyond end */ - MetaphAdd(original, " "); - - primary = NewMetaString(""); - secondary = NewMetaString(""); - primary->free_string_on_destroy = 0; - secondary->free_string_on_destroy = 0; - - MakeUpper(original); - - /* skip these when at start of word */ - if (StringAt(original, 0, 2, "GN", "KN", "PN", "WR", "PS", "")) + int length; + metastring *original; + metastring *primary; + metastring *secondary; + int current; + int last; + + current = 0; + /* we need the real length and last prior to padding */ + length = strlen(str); + last = length - 1; + original = NewMetaString(str); + /* Pad original so we can index beyond end */ + MetaphAdd(original, " "); + + primary = NewMetaString(""); + secondary = NewMetaString(""); + primary->free_string_on_destroy = 0; + secondary->free_string_on_destroy = 0; + + MakeUpper(original); + + /* skip these when at start of word */ + if (StringAt(original, 0, 2, "GN", "KN", "PN", "WR", "PS", "")) current += 1; - /* Initial 'X' is pronounced 'Z' e.g. 'Xavier' */ - if (GetAt(original, 0) == 'X') + /* Initial 'X' is pronounced 'Z' e.g. 'Xavier' */ + if (GetAt(original, 0) == 'X') { MetaphAdd(primary, "S"); /* 'Z' maps to 'S' */ MetaphAdd(secondary, "S"); current += 1; } - /* main loop */ - while ((primary->length < 4) || (secondary->length < 4)) + /* main loop */ + while ((primary->length < 4) || (secondary->length < 4)) { if (current >= length) break; switch (GetAt(original, current)) - { + { case 'A': case 'E': case 'I': @@ -508,8 +512,8 @@ DoubleMetaphone(char *str, char **codes) && StringAt(original, (current - 1), 3, "ACH", "") && ((GetAt(original, current + 2) != 'I') && ((GetAt(original, current + 2) != 'E') - || StringAt(original, (current - 2), 6, "BACHER", - "MACHER", "")))) + || StringAt(original, (current - 2), 6, "BACHER", + "MACHER", "")))) { MetaphAdd(primary, "K"); MetaphAdd(secondary, "K"); @@ -550,7 +554,7 @@ DoubleMetaphone(char *str, char **codes) /* greek roots e.g. 
'chemistry', 'chorus' */ if ((current == 0) - && (StringAt(original, (current + 1), 5, + && (StringAt(original, (current + 1), 5, "HARAC", "HARIS", "") || StringAt(original, (current + 1), 3, "HOR", "HYM", "HIA", "HEM", "")) @@ -566,19 +570,21 @@ DoubleMetaphone(char *str, char **codes) if ( (StringAt(original, 0, 4, "VAN ", "VON ", "") || StringAt(original, 0, 3, "SCH", "")) - /* 'architect but not 'arch', 'orchestra', 'orchid' */ + /* 'architect but not 'arch', 'orchestra', 'orchid' */ || StringAt(original, (current - 2), 6, "ORCHES", "ARCHIT", "ORCHID", "") || StringAt(original, (current + 2), 1, "T", "S", "") - || ((StringAt(original, (current - 1), 1, - "A", "O", "U", "E", "") + || ((StringAt(original, (current - 1), 1, + "A", "O", "U", "E", "") || (current == 0)) - /* e.g., 'wachtler', 'wechsler', - but not 'tichner' */ - && StringAt(original, (current + 2), 1, "L", "R", - "N", "M", "B", "H", "F", "V", "W", - " ", ""))) + + /* + * e.g., 'wachtler', 'wechsler', but not 'tichner' + */ + && StringAt(original, (current + 2), 1, "L", "R", + "N", "M", "B", "H", "F", "V", "W", + " ", ""))) { MetaphAdd(primary, "K"); MetaphAdd(secondary, "K"); @@ -633,14 +639,14 @@ DoubleMetaphone(char *str, char **codes) { /* 'bellocchio' but not 'bacchus' */ if (StringAt(original, (current + 2), 1, "I", "E", "H", "") - && !StringAt(original, (current + 2), 2, "HU", "")) + && !StringAt(original, (current + 2), 2, "HU", "")) { /* 'accident', 'accede' 'succeed' */ if ( ((current == 1) && (GetAt(original, current - 1) == 'A')) - || StringAt(original, (current - 1), 5, "UCCEE", - "UCCES", "")) + || StringAt(original, (current - 1), 5, "UCCEE", + "UCCES", "")) { MetaphAdd(primary, "KS"); MetaphAdd(secondary, "KS"); @@ -655,7 +661,7 @@ DoubleMetaphone(char *str, char **codes) break; } else - { /* Pierce's rule */ + { /* Pierce's rule */ MetaphAdd(primary, "K"); MetaphAdd(secondary, "K"); current += 2; @@ -696,19 +702,18 @@ DoubleMetaphone(char *str, char **codes) /* name sent in 'mac caffrey', 'mac gregor */ if (StringAt(original, (current + 1), 2, " C", " Q", " G", "")) current += 3; + else if (StringAt(original, (current + 1), 1, "C", "K", "Q", "") + && !StringAt(original, (current + 1), 2, + "CE", "CI", "")) + current += 2; else - if (StringAt(original, (current + 1), 1, "C", "K", "Q", "") - && !StringAt(original, (current + 1), 2, - "CE", "CI", "")) - current += 2; - else - current += 1; + current += 1; break; case 'D': if (StringAt(original, current, 2, "DG", "")) { - if (StringAt(original, (current + 2), 1, + if (StringAt(original, (current + 2), 1, "I", "E", "Y", "")) { /* e.g. 
'edge' */ @@ -780,19 +785,22 @@ DoubleMetaphone(char *str, char **codes) break; } } - /* Parker's rule (with some further refinements) - - e.g., 'hugh' */ + + /* + * Parker's rule (with some further refinements) - + * e.g., 'hugh' + */ if ( ((current > 1) - && StringAt(original, (current - 2), 1, + && StringAt(original, (current - 2), 1, "B", "H", "D", "")) - /* e.g., 'bough' */ + /* e.g., 'bough' */ || ((current > 2) - && StringAt(original, (current - 3), 1, + && StringAt(original, (current - 3), 1, "B", "H", "D", "")) - /* e.g., 'broughton' */ + /* e.g., 'broughton' */ || ((current > 3) - && StringAt(original, (current - 4), 1, + && StringAt(original, (current - 4), 1, "B", "H", ""))) { current += 2; @@ -800,8 +808,10 @@ DoubleMetaphone(char *str, char **codes) } else { - /* e.g., 'laugh', 'McLaughlin', 'cough', - 'gough', 'rough', 'tough' */ + /* + * e.g., 'laugh', 'McLaughlin', 'cough', 'gough', + * 'rough', 'tough' + */ if ((current > 2) && (GetAt(original, current - 1) == 'U') && StringAt(original, (current - 3), 1, "C", @@ -837,15 +847,15 @@ DoubleMetaphone(char *str, char **codes) if (!StringAt(original, (current + 2), 2, "EY", "") && (GetAt(original, current + 1) != 'Y') && !SlavoGermanic(original)) - { - MetaphAdd(primary, "N"); - MetaphAdd(secondary, "KN"); - } - else - { - MetaphAdd(primary, "KN"); - MetaphAdd(secondary, "KN"); - } + { + MetaphAdd(primary, "N"); + MetaphAdd(secondary, "KN"); + } + else + { + MetaphAdd(primary, "KN"); + MetaphAdd(secondary, "KN"); + } current += 2; break; } @@ -863,9 +873,9 @@ DoubleMetaphone(char *str, char **codes) /* -ges-,-gep-,-gel-, -gie- at beginning */ if ((current == 0) && ((GetAt(original, current + 1) == 'Y') - || StringAt(original, (current + 1), 2, "ES", "EP", - "EB", "EL", "EY", "IB", "IL", "IN", "IE", - "EI", "ER", ""))) + || StringAt(original, (current + 1), 2, "ES", "EP", + "EB", "EL", "EY", "IB", "IL", "IN", "IE", + "EI", "ER", ""))) { MetaphAdd(primary, "K"); MetaphAdd(secondary, "J"); @@ -873,11 +883,11 @@ DoubleMetaphone(char *str, char **codes) break; } - /* -ger-, -gy- */ + /* -ger-, -gy- */ if ( (StringAt(original, (current + 1), 2, "ER", "") || (GetAt(original, current + 1) == 'Y')) - && !StringAt(original, 0, 6, + && !StringAt(original, 0, 6, "DANGER", "RANGER", "MANGER", "") && !StringAt(original, (current - 1), 1, "E", "I", "") && !StringAt(original, (current - 1), 3, "RGY", "OGY", @@ -889,9 +899,9 @@ DoubleMetaphone(char *str, char **codes) break; } - /* italian e.g, 'biaggi' */ + /* italian e.g, 'biaggi' */ if (StringAt(original, (current + 1), 1, "E", "I", "Y", "") - || StringAt(original, (current - 1), 4, + || StringAt(original, (current - 1), 4, "AGGI", "OGGI", "")) { /* obvious germanic */ @@ -939,7 +949,8 @@ DoubleMetaphone(char *str, char **codes) MetaphAdd(secondary, "H"); current += 2; } - else /* also takes care of 'HH' */ + else +/* also takes care of 'HH' */ current += 1; break; @@ -991,9 +1002,9 @@ DoubleMetaphone(char *str, char **codes) else { if (!StringAt(original, (current + 1), 1, "L", "T", - "K", "S", "N", "M", "B", "Z", "") + "K", "S", "N", "M", "B", "Z", "") && !StringAt(original, (current - 1), 1, - "S", "K", "L", "")) + "S", "K", "L", "")) { MetaphAdd(primary, "J"); MetaphAdd(secondary, "J"); @@ -1002,7 +1013,7 @@ DoubleMetaphone(char *str, char **codes) } } - if (GetAt(original, current + 1) == 'J') /* it could happen! */ + if (GetAt(original, current + 1) == 'J') /* it could happen! 
*/ current += 2; else current += 1; @@ -1024,10 +1035,10 @@ DoubleMetaphone(char *str, char **codes) if (((current == (length - 3)) && StringAt(original, (current - 1), 4, "ILLO", "ILLA", "ALLE", "")) - || ((StringAt(original, (last - 1), 2, "AS", "OS", "") - || StringAt(original, last, 1, "A", "O", "")) - && StringAt(original, (current - 1), 4, - "ALLE", ""))) + || ((StringAt(original, (last - 1), 2, "AS", "OS", "") + || StringAt(original, last, 1, "A", "O", "")) + && StringAt(original, (current - 1), 4, + "ALLE", ""))) { MetaphAdd(primary, "L"); MetaphAdd(secondary, ""); @@ -1045,8 +1056,8 @@ DoubleMetaphone(char *str, char **codes) case 'M': if ((StringAt(original, (current - 1), 3, "UMB", "") && (((current + 1) == last) - || StringAt(original, (current + 2), 2, "ER", ""))) - /* 'dumb','thumb' */ + || StringAt(original, (current + 2), 2, "ER", ""))) + /* 'dumb','thumb' */ || (GetAt(original, current + 1) == 'M')) current += 2; else @@ -1102,7 +1113,7 @@ DoubleMetaphone(char *str, char **codes) if ((current == last) && !SlavoGermanic(original) && StringAt(original, (current - 2), 2, "IE", "") - && !StringAt(original, (current - 4), 2, "ME", "MA", "")) + && !StringAt(original, (current - 4), 2, "ME", "MA", "")) { MetaphAdd(primary, ""); MetaphAdd(secondary, "R"); @@ -1141,8 +1152,8 @@ DoubleMetaphone(char *str, char **codes) { /* germanic */ if (StringAt - (original, (current + 1), 4, "HEIM", "HOEK", "HOLM", - "HOLZ", "")) + (original, (current + 1), 4, "HEIM", "HOEK", "HOLM", + "HOLZ", "")) { MetaphAdd(primary, "S"); MetaphAdd(secondary, "S"); @@ -1174,12 +1185,13 @@ DoubleMetaphone(char *str, char **codes) break; } - /* german & anglicisations, e.g. 'smith' match 'schmidt', - 'snider' match 'schneider' - also, -sz- in slavic language altho in hungarian it is - pronounced 's' */ + /* + * german & anglicisations, e.g. 'smith' match 'schmidt', + * 'snider' match 'schneider' also, -sz- in slavic + * language altho in hungarian it is pronounced 's' + */ if (((current == 0) - && StringAt(original, (current + 1), 1, + && StringAt(original, (current + 1), 1, "M", "N", "L", "W", "")) || StringAt(original, (current + 1), 1, "Z", "")) { @@ -1198,12 +1210,12 @@ DoubleMetaphone(char *str, char **codes) if (GetAt(original, current + 2) == 'H') { /* dutch origin, e.g. 'school', 'schooner' */ - if (StringAt(original, (current + 3), 2, + if (StringAt(original, (current + 3), 2, "OO", "ER", "EN", "UY", "ED", "EM", "")) { /* 'schermerhorn', 'schenker' */ - if (StringAt(original, (current + 3), 2, + if (StringAt(original, (current + 3), 2, "ER", "EN", "")) { MetaphAdd(primary, "X"); @@ -1235,7 +1247,7 @@ DoubleMetaphone(char *str, char **codes) } } - if (StringAt(original, (current + 2), 1, + if (StringAt(original, (current + 2), 1, "I", "E", "Y", "")) { MetaphAdd(primary, "S"); @@ -1252,7 +1264,7 @@ DoubleMetaphone(char *str, char **codes) /* french e.g. 
'resnais', 'artois' */ if ((current == last) - && StringAt(original, (current - 2), 2, "AI", "OI", "")) + && StringAt(original, (current - 2), 2, "AI", "OI", "")) { MetaphAdd(primary, ""); MetaphAdd(secondary, "S"); @@ -1353,8 +1365,8 @@ DoubleMetaphone(char *str, char **codes) /* Arnow should match Arnoff */ if (((current == last) && IsVowel(original, current - 1)) - || StringAt(original, (current - 1), 5, "EWSKI", "EWSKY", - "OWSKI", "OWSKY", "") + || StringAt(original, (current - 1), 5, "EWSKI", "EWSKY", + "OWSKI", "OWSKY", "") || StringAt(original, 0, 3, "SCH", "")) { MetaphAdd(primary, ""); @@ -1379,15 +1391,15 @@ DoubleMetaphone(char *str, char **codes) case 'X': /* french e.g. breaux */ if (!((current == last) - && (StringAt(original, (current - 3), 3, + && (StringAt(original, (current - 3), 3, "IAU", "EAU", "") - || StringAt(original, (current - 2), 2, + || StringAt(original, (current - 2), 2, "AU", "OU", "")))) { MetaphAdd(primary, "KS"); MetaphAdd(secondary, "KS"); } - + if (StringAt(original, (current + 1), 1, "C", "X", "")) current += 2; @@ -1404,7 +1416,7 @@ DoubleMetaphone(char *str, char **codes) current += 2; break; } - else if (StringAt(original, (current + 1), 2, + else if (StringAt(original, (current + 1), 2, "ZO", "ZI", "ZA", "") || (SlavoGermanic(original) && ((current > 0) @@ -1427,38 +1439,42 @@ DoubleMetaphone(char *str, char **codes) default: current += 1; - } - /* printf("PRIMARY: %s\n", primary->str); - printf("SECONDARY: %s\n", secondary->str); */ + } + + /* + * printf("PRIMARY: %s\n", primary->str); printf("SECONDARY: + * %s\n", secondary->str); + */ } - if (primary->length > 4) + if (primary->length > 4) SetAt(primary, 4, '\0'); - if (secondary->length > 4) + if (secondary->length > 4) SetAt(secondary, 4, '\0'); - *codes = primary->str; - *++codes = secondary->str; + *codes = primary->str; + *++codes = secondary->str; - DestroyMetaString(original); - DestroyMetaString(primary); - DestroyMetaString(secondary); + DestroyMetaString(original); + DestroyMetaString(primary); + DestroyMetaString(secondary); } #ifdef DMETAPHONE_MAIN /* just for testing - not part of the perl code */ -main(int argc, char ** argv) +main(int argc, char **argv) { - char * codes[2]; + char *codes[2]; + if (argc > 1) { - DoubleMetaphone(argv[1],codes); - printf("%s|%s\n",codes[0],codes[1]); - } + DoubleMetaphone(argv[1], codes); + printf("%s|%s\n", codes[0], codes[1]); + } } #endif diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c index ebf4ba3166c..e4b923523b1 100644 --- a/contrib/intarray/_int_gist.c +++ b/contrib/intarray/_int_gist.c @@ -87,7 +87,7 @@ g_int_consistent(PG_FUNCTION_ARGS) Datum g_int_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); int *size = (int *) PG_GETARG_POINTER(1); int4 i; ArrayType *res; @@ -317,7 +317,7 @@ comparecost(const void *a, const void *b) Datum g_int_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber i, j; @@ -359,10 +359,10 @@ g_int_picksplit(PG_FUNCTION_ARGS) waste = 0.0; for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i)) { - datum_alpha = GETENTRY(entryvec,i); + datum_alpha = GETENTRY(entryvec, i); for (j = OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j)) { - datum_beta = GETENTRY(entryvec,j); + 
datum_beta = GETENTRY(entryvec, j); /* compute the wasted space by unioning these guys */ /* size_waste = size_union - size_inter; */ @@ -402,10 +402,10 @@ g_int_picksplit(PG_FUNCTION_ARGS) seed_2 = 2; } - datum_alpha = GETENTRY(entryvec,seed_1); + datum_alpha = GETENTRY(entryvec, seed_1); datum_l = copy_intArrayType(datum_alpha); rt__int_size(datum_l, &size_l); - datum_beta = GETENTRY(entryvec,seed_2); + datum_beta = GETENTRY(entryvec, seed_2); datum_r = copy_intArrayType(datum_beta); rt__int_size(datum_r, &size_r); @@ -418,7 +418,7 @@ g_int_picksplit(PG_FUNCTION_ARGS) for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { costvector[i - 1].pos = i; - datum_alpha = GETENTRY(entryvec,i); + datum_alpha = GETENTRY(entryvec, i); union_d = inner_int_union(datum_l, datum_alpha); rt__int_size(union_d, &size_alpha); pfree(union_d); @@ -466,7 +466,7 @@ g_int_picksplit(PG_FUNCTION_ARGS) } /* okay, which page needs least enlargement? */ - datum_alpha = GETENTRY(entryvec,i); + datum_alpha = GETENTRY(entryvec, i); union_dl = inner_int_union(datum_l, datum_alpha); union_dr = inner_int_union(datum_r, datum_alpha); rt__int_size(union_dl, &size_alpha); diff --git a/contrib/intarray/_intbig_gist.c b/contrib/intarray/_intbig_gist.c index 04d26d90f97..efd70758dc2 100644 --- a/contrib/intarray/_intbig_gist.c +++ b/contrib/intarray/_intbig_gist.c @@ -20,52 +20,55 @@ Datum g_intbig_picksplit(PG_FUNCTION_ARGS); Datum g_intbig_union(PG_FUNCTION_ARGS); Datum g_intbig_same(PG_FUNCTION_ARGS); -#define SUMBIT(val) ( \ - GETBITBYTE((val),0) + \ - GETBITBYTE((val),1) + \ - GETBITBYTE((val),2) + \ - GETBITBYTE((val),3) + \ - GETBITBYTE((val),4) + \ - GETBITBYTE((val),5) + \ - GETBITBYTE((val),6) + \ - GETBITBYTE((val),7) \ +#define SUMBIT(val) ( \ + GETBITBYTE((val),0) + \ + GETBITBYTE((val),1) + \ + GETBITBYTE((val),2) + \ + GETBITBYTE((val),3) + \ + GETBITBYTE((val),4) + \ + GETBITBYTE((val),5) + \ + GETBITBYTE((val),6) + \ + GETBITBYTE((val),7) \ ) PG_FUNCTION_INFO_V1(_intbig_in); -Datum _intbig_in(PG_FUNCTION_ARGS); - +Datum _intbig_in(PG_FUNCTION_ARGS); + PG_FUNCTION_INFO_V1(_intbig_out); -Datum _intbig_out(PG_FUNCTION_ARGS); - - +Datum _intbig_out(PG_FUNCTION_ARGS); + + Datum -_intbig_in(PG_FUNCTION_ARGS) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("_intbig_in() not implemented"))); - PG_RETURN_DATUM(0); +_intbig_in(PG_FUNCTION_ARGS) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("_intbig_in() not implemented"))); + PG_RETURN_DATUM(0); } - + Datum -_intbig_out(PG_FUNCTION_ARGS) { - ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("_intbig_out() not implemented"))); - PG_RETURN_DATUM(0); -} +_intbig_out(PG_FUNCTION_ARGS) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("_intbig_out() not implemented"))); + PG_RETURN_DATUM(0); +} /********************************************************************* ** intbig functions *********************************************************************/ static bool -_intbig_overlap(GISTTYPE *a, ArrayType *b) +_intbig_overlap(GISTTYPE * a, ArrayType *b) { - int num=ARRNELEMS(b); - int4 *ptr=ARRPTR(b); + int num = ARRNELEMS(b); + int4 *ptr = ARRPTR(b); - while(num--) { - if (GETBIT(GETSIGN(a),HASHVAL(*ptr))) + while (num--) + { + if (GETBIT(GETSIGN(a), HASHVAL(*ptr))) return true; ptr++; } @@ -74,13 +77,14 @@ _intbig_overlap(GISTTYPE *a, ArrayType *b) } static bool -_intbig_contains(GISTTYPE *a, ArrayType *b) +_intbig_contains(GISTTYPE * a, ArrayType *b) { - int 
num=ARRNELEMS(b); - int4 *ptr=ARRPTR(b); + int num = ARRNELEMS(b); + int4 *ptr = ARRPTR(b); - while(num--) { - if (!GETBIT(GETSIGN(a),HASHVAL(*ptr))) + while (num--) + { + if (!GETBIT(GETSIGN(a), HASHVAL(*ptr))) return false; ptr++; } @@ -89,10 +93,11 @@ _intbig_contains(GISTTYPE *a, ArrayType *b) } Datum -g_intbig_same(PG_FUNCTION_ARGS) { +g_intbig_same(PG_FUNCTION_ARGS) +{ GISTTYPE *a = (GISTTYPE *) PG_GETARG_POINTER(0); GISTTYPE *b = (GISTTYPE *) PG_GETARG_POINTER(1); - bool *result = (bool *) PG_GETARG_POINTER(2); + bool *result = (bool *) PG_GETARG_POINTER(2); if (ISALLTRUE(a) && ISALLTRUE(b)) *result = true; @@ -100,16 +105,19 @@ g_intbig_same(PG_FUNCTION_ARGS) { *result = false; else if (ISALLTRUE(b)) *result = false; - else { - int4 i; - BITVECP sa = GETSIGN(a), - sb = GETSIGN(b); + else + { + int4 i; + BITVECP sa = GETSIGN(a), + sb = GETSIGN(b); + *result = true; LOOPBYTE( - if (sa[i] != sb[i]) { - *result = false; - break; - } + if (sa[i] != sb[i]) + { + *result = false; + break; + } ); } PG_RETURN_POINTER(result); @@ -120,93 +128,105 @@ g_intbig_compress(PG_FUNCTION_ARGS) { GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0); - if (entry->leafkey) { + if (entry->leafkey) + { GISTENTRY *retval; - ArrayType *in = (ArrayType *) PG_DETOAST_DATUM(entry->key); - int4 *ptr; - int num; - GISTTYPE *res=(GISTTYPE*)palloc(CALCGTSIZE(0)); + ArrayType *in = (ArrayType *) PG_DETOAST_DATUM(entry->key); + int4 *ptr; + int num; + GISTTYPE *res = (GISTTYPE *) palloc(CALCGTSIZE(0)); ARRISVOID(in); - ptr=ARRPTR(in); - num=ARRNELEMS(in); - memset(res,0,CALCGTSIZE(0)); - res->len=CALCGTSIZE(0); + ptr = ARRPTR(in); + num = ARRNELEMS(in); + memset(res, 0, CALCGTSIZE(0)); + res->len = CALCGTSIZE(0); - while(num--) { - HASH(GETSIGN(res),*ptr); + while (num--) + { + HASH(GETSIGN(res), *ptr); ptr++; } retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(res), - entry->rel, entry->page, - entry->offset, res->len, FALSE); - - if ( in!=(ArrayType *) PG_DETOAST_DATUM(entry->key) ) + entry->rel, entry->page, + entry->offset, res->len, FALSE); + + if (in != (ArrayType *) PG_DETOAST_DATUM(entry->key)) pfree(in); PG_RETURN_POINTER(retval); - } else if ( !ISALLTRUE(DatumGetPointer(entry->key)) ) { + } + else if (!ISALLTRUE(DatumGetPointer(entry->key))) + { GISTENTRY *retval; - int i; - BITVECP sign = GETSIGN(DatumGetPointer(entry->key)); + int i; + BITVECP sign = GETSIGN(DatumGetPointer(entry->key)); GISTTYPE *res; LOOPBYTE( - if ((sign[i] & 0xff) != 0xff) - PG_RETURN_POINTER(entry); + if ((sign[i] & 0xff) != 0xff) + PG_RETURN_POINTER(entry); ); - res=(GISTTYPE*)palloc(CALCGTSIZE(ALLISTRUE)); - res->len=CALCGTSIZE(ALLISTRUE); + res = (GISTTYPE *) palloc(CALCGTSIZE(ALLISTRUE)); + res->len = CALCGTSIZE(ALLISTRUE); res->flag = ALLISTRUE; retval = (GISTENTRY *) palloc(sizeof(GISTENTRY)); gistentryinit(*retval, PointerGetDatum(res), - entry->rel, entry->page, - entry->offset, res->len, FALSE); - + entry->rel, entry->page, + entry->offset, res->len, FALSE); + PG_RETURN_POINTER(retval); } - + PG_RETURN_POINTER(entry); } static int4 -sizebitvec(BITVECP sign) { - int4 size = 0, i; +sizebitvec(BITVECP sign) +{ + int4 size = 0, + i; + LOOPBYTE( - size += SUMBIT(sign); - sign = (BITVECP) (((char *) sign) + 1); + size += SUMBIT(sign); + sign = (BITVECP) (((char *) sign) + 1); ); - return size; + return size; } static int -hemdistsign(BITVECP a, BITVECP b) { - int i,dist=0; - - LOOPBIT( - if ( GETBIT(a,i) != GETBIT(b,i) ) - dist++; - ); - return dist; +hemdistsign(BITVECP a, BITVECP b) +{ + 
int i, + dist = 0; + + LOOPBIT( + if (GETBIT(a, i) != GETBIT(b, i)) + dist++; + ); + return dist; } static int -hemdist(GISTTYPE *a, GISTTYPE *b) { - if ( ISALLTRUE(a) ) { - if (ISALLTRUE(b)) - return 0; - else - return SIGLENBIT-sizebitvec(GETSIGN(b)); - } else if (ISALLTRUE(b)) - return SIGLENBIT-sizebitvec(GETSIGN(a)); - - return hemdistsign( GETSIGN(a), GETSIGN(b) ); +hemdist(GISTTYPE * a, GISTTYPE * b) +{ + if (ISALLTRUE(a)) + { + if (ISALLTRUE(b)) + return 0; + else + return SIGLENBIT - sizebitvec(GETSIGN(b)); + } + else if (ISALLTRUE(b)) + return SIGLENBIT - sizebitvec(GETSIGN(a)); + + return hemdistsign(GETSIGN(a), GETSIGN(b)); } Datum @@ -218,29 +238,33 @@ g_intbig_decompress(PG_FUNCTION_ARGS) static int4 unionkey(BITVECP sbase, GISTTYPE * add) { - int4 i; - BITVECP sadd = GETSIGN(add); + int4 i; + BITVECP sadd = GETSIGN(add); if (ISALLTRUE(add)) return 1; LOOPBYTE( - sbase[i] |= sadd[i]; + sbase[i] |= sadd[i]; ); return 0; } Datum -g_intbig_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - int *size = (int *) PG_GETARG_POINTER(1); - BITVEC base; - int4 i, len; - int4 flag = 0; +g_intbig_union(PG_FUNCTION_ARGS) +{ + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + int *size = (int *) PG_GETARG_POINTER(1); + BITVEC base; + int4 i, + len; + int4 flag = 0; GISTTYPE *result; MemSet((void *) base, 0, sizeof(BITVEC)); - for (i = 0; i < entryvec->n; i++) { - if (unionkey(base, GETENTRY(entryvec, i))) { + for (i = 0; i < entryvec->n; i++) + { + if (unionkey(base, GETENTRY(entryvec, i))) + { flag = ALLISTRUE; break; } @@ -252,187 +276,201 @@ g_intbig_union(PG_FUNCTION_ARGS) { result->flag = flag; if (!ISALLTRUE(result)) memcpy((void *) GETSIGN(result), (void *) base, sizeof(BITVEC)); - + PG_RETURN_POINTER(result); } Datum -g_intbig_penalty(PG_FUNCTION_ARGS) { +g_intbig_penalty(PG_FUNCTION_ARGS) +{ GISTENTRY *origentry = (GISTENTRY *) PG_GETARG_POINTER(0); /* always ISSIGNKEY */ GISTENTRY *newentry = (GISTENTRY *) PG_GETARG_POINTER(1); - float *penalty = (float *) PG_GETARG_POINTER(2); + float *penalty = (float *) PG_GETARG_POINTER(2); GISTTYPE *origval = (GISTTYPE *) DatumGetPointer(origentry->key); GISTTYPE *newval = (GISTTYPE *) DatumGetPointer(newentry->key); - *penalty=hemdist(origval,newval); + *penalty = hemdist(origval, newval); PG_RETURN_POINTER(penalty); } -typedef struct { +typedef struct +{ OffsetNumber pos; - int4 cost; + int4 cost; } SPLITCOST; static int -comparecost(const void *a, const void *b) { +comparecost(const void *a, const void *b) +{ return ((SPLITCOST *) a)->cost - ((SPLITCOST *) b)->cost; } Datum -g_intbig_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); - OffsetNumber k, - j; - GISTTYPE *datum_l, - *datum_r; - BITVECP union_l, - union_r; - int4 size_alpha, size_beta; - int4 size_waste, - waste = -1; - int4 nbytes; - OffsetNumber seed_1 = 0, - seed_2 = 0; - OffsetNumber *left, - *right; - OffsetNumber maxoff; - BITVECP ptr; - int i; - SPLITCOST *costvector; - GISTTYPE *_k, - *_j; - - maxoff = entryvec->n - 2; - nbytes = (maxoff + 2) * sizeof(OffsetNumber); - v->spl_left = (OffsetNumber *) palloc(nbytes); - v->spl_right = (OffsetNumber *) palloc(nbytes); - - for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) { - _k = GETENTRY(entryvec, k); - for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) { - size_waste=hemdist(_k, GETENTRY(entryvec, j)); - if 
(size_waste > waste ) { - waste = size_waste; - seed_1 = k; - seed_2 = j; - } - } - } - - left = v->spl_left; - v->spl_nleft = 0; - right = v->spl_right; - v->spl_nright = 0; - - if (seed_1 == 0 || seed_2 == 0) - { - seed_1 = 1; - seed_2 = 2; - } - - /* form initial .. */ - if (ISALLTRUE(GETENTRY(entryvec, seed_1))) - { - datum_l = (GISTTYPE *) palloc(GTHDRSIZE); - datum_l->len = GTHDRSIZE; - datum_l->flag = ALLISTRUE; - } - else - { - datum_l = (GISTTYPE *) palloc(GTHDRSIZE + SIGLEN); - datum_l->len = GTHDRSIZE + SIGLEN; - datum_l->flag = 0; - memcpy((void *) GETSIGN(datum_l), (void *) GETSIGN(GETENTRY(entryvec, seed_1)), sizeof(BITVEC)); - } - if (ISALLTRUE(GETENTRY(entryvec, seed_2))) - { - datum_r = (GISTTYPE *) palloc(GTHDRSIZE); - datum_r->len = GTHDRSIZE; - datum_r->flag = ALLISTRUE; - } - else - { - datum_r = (GISTTYPE *) palloc(GTHDRSIZE + SIGLEN); - datum_r->len = GTHDRSIZE + SIGLEN; - datum_r->flag = 0; - memcpy((void *) GETSIGN(datum_r), (void *) GETSIGN(GETENTRY(entryvec, seed_2)), sizeof(BITVEC)); - } - - maxoff = OffsetNumberNext(maxoff); - /* sort before ... */ - costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff); - for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) - { - costvector[j - 1].pos = j; - _j = GETENTRY(entryvec, j); - size_alpha = hemdist(datum_l,_j); - size_beta = hemdist(datum_r,_j); - costvector[j - 1].cost = abs(size_alpha - size_beta); - } - qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost); - - union_l=GETSIGN(datum_l); - union_r=GETSIGN(datum_r); - - for (k = 0; k < maxoff; k++) - { - j = costvector[k].pos; - if (j == seed_1) - { - *left++ = j; - v->spl_nleft++; - continue; - } - else if (j == seed_2) - { - *right++ = j; - v->spl_nright++; - continue; - } - _j = GETENTRY(entryvec, j); - size_alpha = hemdist(datum_l,_j); - size_beta = hemdist(datum_r,_j); - - if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.00001)) - { - if (ISALLTRUE(datum_l) || ISALLTRUE(_j) ) { - if (!ISALLTRUE(datum_l)) - MemSet((void *) union_l, 0xff, sizeof(BITVEC)); - } else { - ptr=GETSIGN(_j); - LOOPBYTE( - union_l[i] |= ptr[i]; - ); - } - *left++ = j; - v->spl_nleft++; - } - else - { - if (ISALLTRUE(datum_r) || ISALLTRUE(_j) ) { - if (!ISALLTRUE(datum_r)) - MemSet((void *) union_r, 0xff, sizeof(BITVEC)); - } else { - ptr=GETSIGN(_j); - LOOPBYTE( - union_r[i] |= ptr[i]; - ); - } - *right++ = j; - v->spl_nright++; - } - } - - *right = *left = FirstOffsetNumber; - pfree(costvector); - - v->spl_ldatum = PointerGetDatum(datum_l); - v->spl_rdatum = PointerGetDatum(datum_r); - - PG_RETURN_POINTER(v); +g_intbig_picksplit(PG_FUNCTION_ARGS) +{ + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); + OffsetNumber k, + j; + GISTTYPE *datum_l, + *datum_r; + BITVECP union_l, + union_r; + int4 size_alpha, + size_beta; + int4 size_waste, + waste = -1; + int4 nbytes; + OffsetNumber seed_1 = 0, + seed_2 = 0; + OffsetNumber *left, + *right; + OffsetNumber maxoff; + BITVECP ptr; + int i; + SPLITCOST *costvector; + GISTTYPE *_k, + *_j; + + maxoff = entryvec->n - 2; + nbytes = (maxoff + 2) * sizeof(OffsetNumber); + v->spl_left = (OffsetNumber *) palloc(nbytes); + v->spl_right = (OffsetNumber *) palloc(nbytes); + + for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) + { + _k = GETENTRY(entryvec, k); + for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) + { + size_waste = hemdist(_k, GETENTRY(entryvec, j)); + if (size_waste > waste) + { + 
waste = size_waste; + seed_1 = k; + seed_2 = j; + } + } + } + + left = v->spl_left; + v->spl_nleft = 0; + right = v->spl_right; + v->spl_nright = 0; + + if (seed_1 == 0 || seed_2 == 0) + { + seed_1 = 1; + seed_2 = 2; + } + + /* form initial .. */ + if (ISALLTRUE(GETENTRY(entryvec, seed_1))) + { + datum_l = (GISTTYPE *) palloc(GTHDRSIZE); + datum_l->len = GTHDRSIZE; + datum_l->flag = ALLISTRUE; + } + else + { + datum_l = (GISTTYPE *) palloc(GTHDRSIZE + SIGLEN); + datum_l->len = GTHDRSIZE + SIGLEN; + datum_l->flag = 0; + memcpy((void *) GETSIGN(datum_l), (void *) GETSIGN(GETENTRY(entryvec, seed_1)), sizeof(BITVEC)); + } + if (ISALLTRUE(GETENTRY(entryvec, seed_2))) + { + datum_r = (GISTTYPE *) palloc(GTHDRSIZE); + datum_r->len = GTHDRSIZE; + datum_r->flag = ALLISTRUE; + } + else + { + datum_r = (GISTTYPE *) palloc(GTHDRSIZE + SIGLEN); + datum_r->len = GTHDRSIZE + SIGLEN; + datum_r->flag = 0; + memcpy((void *) GETSIGN(datum_r), (void *) GETSIGN(GETENTRY(entryvec, seed_2)), sizeof(BITVEC)); + } + + maxoff = OffsetNumberNext(maxoff); + /* sort before ... */ + costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff); + for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) + { + costvector[j - 1].pos = j; + _j = GETENTRY(entryvec, j); + size_alpha = hemdist(datum_l, _j); + size_beta = hemdist(datum_r, _j); + costvector[j - 1].cost = abs(size_alpha - size_beta); + } + qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost); + + union_l = GETSIGN(datum_l); + union_r = GETSIGN(datum_r); + + for (k = 0; k < maxoff; k++) + { + j = costvector[k].pos; + if (j == seed_1) + { + *left++ = j; + v->spl_nleft++; + continue; + } + else if (j == seed_2) + { + *right++ = j; + v->spl_nright++; + continue; + } + _j = GETENTRY(entryvec, j); + size_alpha = hemdist(datum_l, _j); + size_beta = hemdist(datum_r, _j); + + if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.00001)) + { + if (ISALLTRUE(datum_l) || ISALLTRUE(_j)) + { + if (!ISALLTRUE(datum_l)) + MemSet((void *) union_l, 0xff, sizeof(BITVEC)); + } + else + { + ptr = GETSIGN(_j); + LOOPBYTE( + union_l[i] |= ptr[i]; + ); + } + *left++ = j; + v->spl_nleft++; + } + else + { + if (ISALLTRUE(datum_r) || ISALLTRUE(_j)) + { + if (!ISALLTRUE(datum_r)) + MemSet((void *) union_r, 0xff, sizeof(BITVEC)); + } + else + { + ptr = GETSIGN(_j); + LOOPBYTE( + union_r[i] |= ptr[i]; + ); + } + *right++ = j; + v->spl_nright++; + } + } + + *right = *left = FirstOffsetNumber; + pfree(costvector); + + v->spl_ldatum = PointerGetDatum(datum_l); + v->spl_rdatum = PointerGetDatum(datum_r); + + PG_RETURN_POINTER(v); } Datum @@ -443,12 +481,13 @@ g_intbig_consistent(PG_FUNCTION_ARGS) StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2); bool retval; - if ( ISALLTRUE(DatumGetPointer(entry->key)) ) + if (ISALLTRUE(DatumGetPointer(entry->key))) PG_RETURN_BOOL(true); - - if (strategy == BooleanSearchStrategy) { + + if (strategy == BooleanSearchStrategy) + { PG_RETURN_BOOL(signconsistent((QUERYTYPE *) query, - GETSIGN(DatumGetPointer(entry->key)), + GETSIGN(DatumGetPointer(entry->key)), false)); } @@ -462,58 +501,72 @@ g_intbig_consistent(PG_FUNCTION_ARGS) retval = _intbig_overlap((GISTTYPE *) DatumGetPointer(entry->key), query); break; case RTSameStrategyNumber: - if (GIST_LEAF(entry)) { - int i,num=ARRNELEMS(query); - int4 *ptr=ARRPTR(query); - BITVEC qp; - BITVECP dq, de; - memset(qp,0,sizeof(BITVEC)); - - while(num--) { + if (GIST_LEAF(entry)) + { + int i, + num = ARRNELEMS(query); + int4 *ptr = ARRPTR(query); + BITVEC qp; + BITVECP dq, + 
de; + + memset(qp, 0, sizeof(BITVEC)); + + while (num--) + { HASH(qp, *ptr); ptr++; } - de=GETSIGN((GISTTYPE *) DatumGetPointer(entry->key)); - dq=qp; - retval=true; + de = GETSIGN((GISTTYPE *) DatumGetPointer(entry->key)); + dq = qp; + retval = true; LOOPBYTE( - if ( de[i] != dq[i] ) { - retval=false; - break; - } + if (de[i] != dq[i]) + { + retval = false; + break; + } ); - } else + } + else retval = _intbig_contains((GISTTYPE *) DatumGetPointer(entry->key), query); break; case RTContainsStrategyNumber: retval = _intbig_contains((GISTTYPE *) DatumGetPointer(entry->key), query); break; case RTContainedByStrategyNumber: - if (GIST_LEAF(entry)) { - int i,num=ARRNELEMS(query); - int4 *ptr=ARRPTR(query); - BITVEC qp; - BITVECP dq, de; - memset(qp,0,sizeof(BITVEC)); - - while(num--) { + if (GIST_LEAF(entry)) + { + int i, + num = ARRNELEMS(query); + int4 *ptr = ARRPTR(query); + BITVEC qp; + BITVECP dq, + de; + + memset(qp, 0, sizeof(BITVEC)); + + while (num--) + { HASH(qp, *ptr); ptr++; } - de=GETSIGN((GISTTYPE *) DatumGetPointer(entry->key)); - dq=qp; - retval=true; + de = GETSIGN((GISTTYPE *) DatumGetPointer(entry->key)); + dq = qp; + retval = true; LOOPBYTE( - if ( de[i] & ~dq[i] ) { - retval=false; - break; - } + if (de[i] & ~dq[i]) + { + retval = false; + break; + } ); - } else + } + else retval = _intbig_overlap((GISTTYPE *) DatumGetPointer(entry->key), query); break; default: @@ -521,5 +574,3 @@ g_intbig_consistent(PG_FUNCTION_ARGS) } PG_RETURN_BOOL(retval); } - - diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c index f1b2efb157c..f0e01bbf471 100644 --- a/contrib/ltree/_ltree_gist.c +++ b/contrib/ltree/_ltree_gist.c @@ -75,8 +75,8 @@ _ltree_compress(PG_FUNCTION_ARGS) if (ARR_NDIM(val) != 1) ereport(ERROR, - (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), - errmsg("array must be one-dimensional"))); + (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), + errmsg("array must be one-dimensional"))); key = (ltree_gist *) palloc(len); key->len = len; @@ -98,7 +98,7 @@ _ltree_compress(PG_FUNCTION_ARGS) entry->rel, entry->page, entry->offset, key->len, FALSE); } - else if ( !LTG_ISALLTRUE(entry->key) ) + else if (!LTG_ISALLTRUE(entry->key)) { int4 i, len; @@ -107,7 +107,7 @@ _ltree_compress(PG_FUNCTION_ARGS) BITVECP sign = LTG_SIGN(DatumGetPointer(entry->key)); ALOOPBYTE( - if ((sign[i]&0xff) != 0xff) + if ((sign[i] & 0xff) != 0xff) PG_RETURN_POINTER(retval); ); len = LTG_HDRSIZE; @@ -172,10 +172,11 @@ unionkey(BITVECP sbase, ltree_gist * add) Datum _ltree_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); int *size = (int *) PG_GETARG_POINTER(1); ABITVEC base; - int4 i,len; + int4 i, + len; int4 flag = 0; ltree_gist *result; @@ -212,28 +213,33 @@ sizebitvec(BITVECP sign) return size; } -static int -hemdistsign(BITVECP a, BITVECP b) { - int i,dist=0; - +static int +hemdistsign(BITVECP a, BITVECP b) +{ + int i, + dist = 0; + ALOOPBIT( - if ( GETBIT(a,i) != GETBIT(b,i) ) - dist++; + if (GETBIT(a, i) != GETBIT(b, i)) + dist++; ); return dist; } static int -hemdist(ltree_gist *a, ltree_gist *b) { - if ( LTG_ISALLTRUE(a) ) { - if (LTG_ISALLTRUE(b)) - return 0; - else - return ASIGLENBIT-sizebitvec(LTG_SIGN(b)); - } else if (LTG_ISALLTRUE(b)) - return ASIGLENBIT-sizebitvec(LTG_SIGN(a)); - - return hemdistsign( LTG_SIGN(a), LTG_SIGN(b) ); +hemdist(ltree_gist * a, ltree_gist * b) +{ + if (LTG_ISALLTRUE(a)) + { + if (LTG_ISALLTRUE(b)) + return 0; + else + return ASIGLENBIT - 
sizebitvec(LTG_SIGN(b)); + } + else if (LTG_ISALLTRUE(b)) + return ASIGLENBIT - sizebitvec(LTG_SIGN(a)); + + return hemdistsign(LTG_SIGN(a), LTG_SIGN(b)); } @@ -244,7 +250,7 @@ _ltree_penalty(PG_FUNCTION_ARGS) ltree_gist *newval = (ltree_gist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key); float *penalty = (float *) PG_GETARG_POINTER(2); - *penalty=hemdist(origval,newval); + *penalty = hemdist(origval, newval); PG_RETURN_POINTER(penalty); } @@ -263,7 +269,7 @@ comparecost(const void *a, const void *b) Datum _ltree_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber k, j; @@ -271,7 +277,8 @@ _ltree_picksplit(PG_FUNCTION_ARGS) *datum_r; BITVECP union_l, union_r; - int4 size_alpha, size_beta; + int4 size_alpha, + size_beta; int4 size_waste, waste = -1; int4 nbytes; @@ -291,11 +298,14 @@ _ltree_picksplit(PG_FUNCTION_ARGS) v->spl_left = (OffsetNumber *) palloc(nbytes); v->spl_right = (OffsetNumber *) palloc(nbytes); - for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) { + for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) + { _k = GETENTRY(entryvec, k); - for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) { - size_waste=hemdist(_k, GETENTRY(entryvec, j)); - if (size_waste > waste ) { + for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) + { + size_waste = hemdist(_k, GETENTRY(entryvec, j)); + if (size_waste > waste) + { waste = size_waste; seed_1 = k; seed_2 = j; @@ -349,15 +359,15 @@ _ltree_picksplit(PG_FUNCTION_ARGS) { costvector[j - 1].pos = j; _j = GETENTRY(entryvec, j); - size_alpha = hemdist(datum_l,_j); - size_beta = hemdist(datum_r,_j); + size_alpha = hemdist(datum_l, _j); + size_beta = hemdist(datum_r, _j); costvector[j - 1].cost = abs(size_alpha - size_beta); } qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost); - union_l=LTG_SIGN(datum_l); - union_r=LTG_SIGN(datum_r); - + union_l = LTG_SIGN(datum_l); + union_r = LTG_SIGN(datum_r); + for (k = 0; k < maxoff; k++) { j = costvector[k].pos; @@ -374,18 +384,21 @@ _ltree_picksplit(PG_FUNCTION_ARGS) continue; } _j = GETENTRY(entryvec, j); - size_alpha = hemdist(datum_l,_j); - size_beta = hemdist(datum_r,_j); + size_alpha = hemdist(datum_l, _j); + size_beta = hemdist(datum_r, _j); - if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.00001)) + if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.00001)) { - if (LTG_ISALLTRUE(datum_l) || LTG_ISALLTRUE(_j) ) { + if (LTG_ISALLTRUE(datum_l) || LTG_ISALLTRUE(_j)) + { if (!LTG_ISALLTRUE(datum_l)) - MemSet((void *) union_l, 0xff, sizeof(ABITVEC)); - } else { - ptr=LTG_SIGN(_j); + MemSet((void *) union_l, 0xff, sizeof(ABITVEC)); + } + else + { + ptr = LTG_SIGN(_j); ALOOPBYTE( - union_l[i] |= ptr[i]; + union_l[i] |= ptr[i]; ); } *left++ = j; @@ -393,13 +406,16 @@ _ltree_picksplit(PG_FUNCTION_ARGS) } else { - if (LTG_ISALLTRUE(datum_r) || LTG_ISALLTRUE(_j) ) { + if (LTG_ISALLTRUE(datum_r) || LTG_ISALLTRUE(_j)) + { if (!LTG_ISALLTRUE(datum_r)) - MemSet((void *) union_r, 0xff, sizeof(ABITVEC)); - } else { - ptr=LTG_SIGN(_j); + MemSet((void *) union_r, 0xff, sizeof(ABITVEC)); + } + else + { + ptr = LTG_SIGN(_j); ALOOPBYTE( - union_r[i] |= ptr[i]; + union_r[i] |= ptr[i]; ); } *right++ = j; @@ -498,22 +514,24 @@ gist_qe(ltree_gist * key, lquery * query) } static bool -_arrq_cons(ltree_gist *key, 
ArrayType *_query) { - lquery *query = (lquery *) ARR_DATA_PTR(_query); - int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query)); +_arrq_cons(ltree_gist * key, ArrayType *_query) +{ + lquery *query = (lquery *) ARR_DATA_PTR(_query); + int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query)); - if (ARR_NDIM(_query) != 1) - ereport(ERROR, + if (ARR_NDIM(_query) != 1) + ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("array must be one-dimensional"))); - while (num > 0) { - if ( gist_qe(key, query) ) - return true; - num--; - query = (lquery*)NEXTVAL(query); - } - return false; + while (num > 0) + { + if (gist_qe(key, query)) + return true; + num--; + query = (lquery *) NEXTVAL(query); + } + return false; } Datum diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c index eb091f99271..7630b4f60c3 100644 --- a/contrib/ltree/ltree_gist.c +++ b/contrib/ltree/ltree_gist.c @@ -166,7 +166,7 @@ hashing(BITVECP sign, ltree * t) Datum ltree_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); int *size = (int *) PG_GETARG_POINTER(1); BITVEC base; int4 i, @@ -277,14 +277,14 @@ treekey_cmp(const void *a, const void *b) return ltree_compare( ((RIX *) a)->r, ((RIX *) b)->r - ); + ); } Datum ltree_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber j; int4 i; @@ -602,21 +602,23 @@ gist_qtxt(ltree_gist * key, ltxtquery * query) } static bool -arrq_cons(ltree_gist *key, ArrayType *_query) { - lquery *query = (lquery *) ARR_DATA_PTR(_query); - int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query)); +arrq_cons(ltree_gist * key, ArrayType *_query) +{ + lquery *query = (lquery *) ARR_DATA_PTR(_query); + int num = ArrayGetNItems(ARR_NDIM(_query), ARR_DIMS(_query)); - if (ARR_NDIM(_query) != 1) - ereport(ERROR, + if (ARR_NDIM(_query) != 1) + ereport(ERROR, (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR), errmsg("array must be one-dimensional"))); - while (num > 0) { - if ( gist_qe(key, query) && gist_between(key, query) ) - return true; - num--; - query = NEXTVAL(query); - } + while (num > 0) + { + if (gist_qe(key, query) && gist_between(key, query)) + return true; + num--; + query = NEXTVAL(query); + } return false; } @@ -700,7 +702,7 @@ ltree_consistent(PG_FUNCTION_ARGS) if (GIST_LEAF(entry)) res = DatumGetBool(DirectFunctionCall2(lt_q_regex, PointerGetDatum(LTG_NODE(key)), - PointerGetDatum((ArrayType *) query) + PointerGetDatum((ArrayType *) query) )); else res = arrq_cons(key, (ArrayType *) query); diff --git a/contrib/pg_autovacuum/pg_autovacuum.c b/contrib/pg_autovacuum/pg_autovacuum.c index 191b91ba384..6ea76515cfb 100644 --- a/contrib/pg_autovacuum/pg_autovacuum.c +++ b/contrib/pg_autovacuum/pg_autovacuum.c @@ -228,20 +228,20 @@ update_table_list(db_info * dbi) if (res != NULL) { t = PQntuples(res); - + /* - * First: use the tbl_list as the outer loop and the result set as - * the inner loop, this will determine what tables should be - * removed - */ + * First: use the tbl_list as the outer loop and the result + * set as the inner loop, this will determine what tables + * should be removed + */ while (tbl_elem != NULL) { tbl = ((tbl_info *) DLE_VAL(tbl_elem)); found_match = 0; - + for (i = 0; i < t; i++) - { /* loop through result set looking for a - * match */ + 
{ /* loop through result set looking for a + * match */ if (tbl->relid == atooid(PQgetvalue(res, i, PQfnumber(res, "oid")))) { found_match = 1; @@ -249,22 +249,22 @@ update_table_list(db_info * dbi) } } if (found_match == 0) - { /* then we didn't find this tbl_elem in - * the result set */ + { /* then we didn't find this tbl_elem in + * the result set */ Dlelem *elem_to_remove = tbl_elem; - + tbl_elem = DLGetSucc(tbl_elem); remove_table_from_list(elem_to_remove); } else tbl_elem = DLGetSucc(tbl_elem); - } /* Done removing dropped tables from the - * table_list */ - + } /* Done removing dropped tables from the + * table_list */ + /* - * Then loop use result set as outer loop and tbl_list as the - * inner loop to determine what tables are new - */ + * Then loop use result set as outer loop and tbl_list as the + * inner loop to determine what tables are new + */ for (i = 0; i < t; i++) { tbl_elem = DLGetHead(dbi->table_list); @@ -279,8 +279,8 @@ update_table_list(db_info * dbi) } tbl_elem = DLGetSucc(tbl_elem); } - if (found_match == 0) /* then we didn't find this result - * now in the tbl_list */ + if (found_match == 0) /* then we didn't find this result + * now in the tbl_list */ { DLAddTail(dbi->table_list, DLNewElem(init_table_info(res, i, dbi))); if (args->debug >= 1) @@ -290,7 +290,7 @@ update_table_list(db_info * dbi) log_entry(logbuffer); } } - } /* end of for loop that adds tables */ + } /* end of for loop that adds tables */ } fflush(LOGOUTPUT); PQclear(res); @@ -419,7 +419,7 @@ init_db_list() dbs->age = atol(PQgetvalue(res, 0, PQfnumber(res, "age"))); if (res) PQclear(res); - + if (args->debug >= 2) print_db_list(db_list, 0); } @@ -499,49 +499,49 @@ update_db_list(Dllist *db_list) if (res != NULL) { t = PQntuples(res); - + /* - * First: use the db_list as the outer loop and the result set as - * the inner loop, this will determine what databases should be - * removed - */ + * First: use the db_list as the outer loop and the result set + * as the inner loop, this will determine what databases + * should be removed + */ while (db_elem != NULL) { dbi = ((db_info *) DLE_VAL(db_elem)); found_match = 0; - + for (i = 0; i < t; i++) - { /* loop through result set looking for a - * match */ + { /* loop through result set looking for a + * match */ if (dbi->oid == atooid(PQgetvalue(res, i, PQfnumber(res, "oid")))) { found_match = 1; - + /* - * update the dbi->age so that we ensure - * xid_wraparound won't happen - */ + * update the dbi->age so that we ensure + * xid_wraparound won't happen + */ dbi->age = atol(PQgetvalue(res, i, PQfnumber(res, "age"))); break; } } if (found_match == 0) - { /* then we didn't find this db_elem in the - * result set */ + { /* then we didn't find this db_elem in the + * result set */ Dlelem *elem_to_remove = db_elem; - + db_elem = DLGetSucc(db_elem); remove_db_from_list(elem_to_remove); } else db_elem = DLGetSucc(db_elem); - } /* Done removing dropped databases from - * the table_list */ - + } /* Done removing dropped databases from + * the table_list */ + /* - * Then loop use result set as outer loop and db_list as the inner - * loop to determine what databases are new - */ + * Then loop use result set as outer loop and db_list as the + * inner loop to determine what databases are new + */ for (i = 0; i < t; i++) { db_elem = DLGetHead(db_list); @@ -556,20 +556,20 @@ update_db_list(Dllist *db_list) } db_elem = DLGetSucc(db_elem); } - if (found_match == 0) /* then we didn't find this result - * now in the tbl_list */ + if (found_match == 0) /* then we didn't find 
this result + * now in the tbl_list */ { DLAddTail(db_list, DLNewElem(init_dbinfo - (PQgetvalue(res, i, PQfnumber(res, "datname")), - atooid(PQgetvalue(res, i, PQfnumber(res, "oid"))), - atol(PQgetvalue(res, i, PQfnumber(res, "age")))))); + (PQgetvalue(res, i, PQfnumber(res, "datname")), + atooid(PQgetvalue(res, i, PQfnumber(res, "oid"))), + atol(PQgetvalue(res, i, PQfnumber(res, "age")))))); if (args->debug >= 1) { sprintf(logbuffer, "added database: %s", ((db_info *) DLE_VAL(DLGetTail(db_list)))->dbname); log_entry(logbuffer); } } - } /* end of for loop that adds tables */ + } /* end of for loop that adds tables */ } fflush(LOGOUTPUT); PQclear(res); @@ -604,16 +604,14 @@ xid_wraparound_check(db_info * dbi) * 500million xacts to work with so we should be able to spread the * load of full database vacuums a bit */ - if (dbi->age > 1500000000 ) + if (dbi->age > 1500000000) { PGresult *res = NULL; res = send_query("VACUUM", dbi); /* FIXME: Perhaps should add a check for PQ_COMMAND_OK */ if (res != NULL) - { PQclear(res); - } return 1; } return 0; @@ -697,16 +695,16 @@ print_db_info(db_info * dbi, int print_tbl_list) { sprintf(logbuffer, "dbname: %s", (dbi->dbname) ? dbi->dbname : "(null)"); log_entry(logbuffer); - + sprintf(logbuffer, " oid: %u", dbi->oid); log_entry(logbuffer); - + sprintf(logbuffer, " username: %s", (dbi->username) ? dbi->username : "(null)"); log_entry(logbuffer); - + sprintf(logbuffer, " password: %s", (dbi->password) ? dbi->password : "(null)"); log_entry(logbuffer); - + if (dbi->conn != NULL) log_entry(" conn is valid, (connected)"); else @@ -714,10 +712,10 @@ print_db_info(db_info * dbi, int print_tbl_list) sprintf(logbuffer, " default_analyze_threshold: %li", dbi->analyze_threshold); log_entry(logbuffer); - + sprintf(logbuffer, " default_vacuum_threshold: %li", dbi->vacuum_threshold); log_entry(logbuffer); - + fflush(LOGOUTPUT); if (print_tbl_list > 0) print_table_list(dbi->table_list); @@ -1036,7 +1034,7 @@ main(int argc, char *argv[]) db_list = init_db_list(); if (db_list == NULL) return 1; - + if (check_stats_enabled(((db_info *) DLE_VAL(DLGetHead(db_list)))) != 0) { log_entry("Error: GUC variable stats_row_level must be enabled."); @@ -1096,38 +1094,40 @@ main(int argc, char *argv[]) if (res != NULL) { for (j = 0; j < PQntuples(res); j++) - { /* loop through result set */ - tbl_elem = DLGetHead(dbs->table_list); /* Reset tbl_elem to top - * of dbs->table_list */ + { /* loop through result set */ + tbl_elem = DLGetHead(dbs->table_list); /* Reset tbl_elem to top + * of dbs->table_list */ while (tbl_elem != NULL) - { /* Loop through tables in list */ - tbl = ((tbl_info *) DLE_VAL(tbl_elem)); /* set tbl_info = - * current_table */ + { /* Loop through tables in list */ + tbl = ((tbl_info *) DLE_VAL(tbl_elem)); /* set tbl_info = + * current_table */ if (tbl->relid == atooid(PQgetvalue(res, j, PQfnumber(res, "oid")))) { tbl->curr_analyze_count = (atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_ins"))) + - atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_upd"))) + - atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_del")))); + atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_upd"))) + + atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_del")))); tbl->curr_vacuum_count = (atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_del"))) + - atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_upd")))); - + atol(PQgetvalue(res, j, PQfnumber(res, "n_tup_upd")))); + /* - * Check numDeletes to see if we need to - * vacuum, if so: Run vacuum analyze - * (adding analyze is small so we might as - * well) Update 
table thresholds and - * related information if numDeletes is - * not big enough for vacuum then check - * numInserts for analyze - */ + * Check numDeletes to see if we need + * to vacuum, if so: Run vacuum + * analyze (adding analyze is small so + * we might as well) Update table + * thresholds and related information + * if numDeletes is not big enough for + * vacuum then check numInserts for + * analyze + */ if (tbl->curr_vacuum_count - tbl->CountAtLastVacuum >= tbl->vacuum_threshold) { /* - * if relisshared = t and database != - * template1 then only do an analyze - */ + * if relisshared = t and database + * != template1 then only do an + * analyze + */ if (tbl->relisshared > 0 && strcmp("template1", dbs->dbname)) snprintf(buf, sizeof(buf), "ANALYZE %s", tbl->table_name); else @@ -1157,19 +1157,20 @@ main(int argc, char *argv[]) if (args->debug >= 2) print_table_info(tbl); } - - break; /* once we have found a match, no - * need to keep checking. */ + + break; /* once we have found a + * match, no need to keep + * checking. */ } - + /* - * Advance the table pointers for the next - * loop - */ + * Advance the table pointers for the next + * loop + */ tbl_elem = DLGetSucc(tbl_elem); - - } /* end for table while loop */ - } /* end for j loop (tuples in PGresult) */ + + } /* end for table while loop */ + } /* end for j loop (tuples in PGresult) */ } /* end if (res != NULL) */ } /* close of if(xid_wraparound_check()) */ /* Done working on this db, Clean up, then advance cur_db */ diff --git a/contrib/pg_dumplo/utils.c b/contrib/pg_dumplo/utils.c index 352b50ac8d0..644781b79ce 100644 --- a/contrib/pg_dumplo/utils.c +++ b/contrib/pg_dumplo/utils.c @@ -1,7 +1,7 @@ /* ------------------------------------------------------------------------- * pg_dumplo * - * $PostgreSQL: pgsql/contrib/pg_dumplo/utils.c,v 1.8 2003/11/29 19:51:35 pgsql Exp $ + * $PostgreSQL: pgsql/contrib/pg_dumplo/utils.c,v 1.9 2004/08/29 05:06:36 momjian Exp $ * * Karel Zak 1999-2000 * ------------------------------------------------------------------------- @@ -30,7 +30,7 @@ void index_file(LODumpMaster * pgLO) { char path[BUFSIZ]; - int sz; + int sz; if (pgLO->action == ACTION_SHOW) return; @@ -51,7 +51,7 @@ index_file(LODumpMaster * pgLO) } sz = strlen(path); - strncat(path, "/lo_dump.index", BUFSIZ-sz); + strncat(path, "/lo_dump.index", BUFSIZ - sz); if ((pgLO->index = fopen(path, "w")) == NULL) { @@ -63,7 +63,7 @@ index_file(LODumpMaster * pgLO) else if (pgLO->action != ACTION_NONE) { sz = strlen(path); - strncat(path, "/lo_dump.index", BUFSIZ-sz); + strncat(path, "/lo_dump.index", BUFSIZ - sz); if ((pgLO->index = fopen(path, "r")) == NULL) { diff --git a/contrib/pg_trgm/trgm.h b/contrib/pg_trgm/trgm.h index a56edca1297..73c4cb6aa6f 100644 --- a/contrib/pg_trgm/trgm.h +++ b/contrib/pg_trgm/trgm.h @@ -11,53 +11,54 @@ #include "storage/bufpage.h" /* options */ -#define LPADDING 2 -#define RPADDING 1 +#define LPADDING 2 +#define RPADDING 1 #define KEEPONLYALNUM -#define IGNORECASE +#define IGNORECASE #define DIVUNION typedef char trgm[3]; #define CMPCHAR(a,b) ( ((a)==(b)) ? 0 : ( ((a)<(b)) ? -1 : 1 ) ) -#define CMPPCHAR(a,b,i) CMPCHAR( *(((char*)(a))+i), *(((char*)(b))+i) ) +#define CMPPCHAR(a,b,i) CMPCHAR( *(((char*)(a))+i), *(((char*)(b))+i) ) #define CMPTRGM(a,b) ( CMPPCHAR(a,b,0) ? CMPPCHAR(a,b,0) : ( CMPPCHAR(a,b,1) ? 
CMPPCHAR(a,b,1) : CMPPCHAR(a,b,2) ) ) -#define CPTRGM(a,b) do { \ +#define CPTRGM(a,b) do { \ *(((char*)(a))+0) = *(((char*)(b))+0); \ *(((char*)(a))+1) = *(((char*)(b))+1); \ *(((char*)(a))+2) = *(((char*)(b))+2); \ } while(0); -typedef struct { - int4 len; - uint8 flag; - char data[1]; -} TRGM; +typedef struct +{ + int4 len; + uint8 flag; + char data[1]; +} TRGM; -#define TRGMHRDSIZE (sizeof(int4)+sizeof(uint8)) +#define TRGMHRDSIZE (sizeof(int4)+sizeof(uint8)) /* gist */ #define BITBYTE 8 -#define SIGLENINT 3 /* >122 => key will toast, so very slow!!! */ -#define SIGLEN ( sizeof(int)*SIGLENINT ) +#define SIGLENINT 3 /* >122 => key will toast, so very slow!!! */ +#define SIGLEN ( sizeof(int)*SIGLENINT ) -#define SIGLENBIT (SIGLEN*BITBYTE - 1) /* see makesign */ +#define SIGLENBIT (SIGLEN*BITBYTE - 1) /* see makesign */ typedef char BITVEC[SIGLEN]; typedef char *BITVECP; #define LOOPBYTE(a) \ - for(i=0;i<SIGLEN;i++) {\ - a;\ - } + for(i=0;i<SIGLEN;i++) {\ + a;\ + } #define LOOPBIT(a) \ - for(i=0;i<SIGLENBIT;i++) {\ - a;\ - } + for(i=0;i<SIGLENBIT;i++) {\ + a;\ + } #define GETBYTE(x,i) ( *( (BITVECP)(x) + (int)( (i) / BITBYTE ) ) ) #define GETBITBYTE(x,i) ( ((char)(x)) >> i & 0x01 ) @@ -68,21 +69,21 @@ typedef char *BITVECP; #define HASHVAL(val) (((unsigned int)(val)) % SIGLENBIT) #define HASH(sign, val) SETBIT((sign), HASHVAL(val)) -#define ARRKEY 0x01 -#define SIGNKEY 0x02 -#define ALLISTRUE 0x04 - +#define ARRKEY 0x01 +#define SIGNKEY 0x02 +#define ALLISTRUE 0x04 + #define ISARRKEY(x) ( ((TRGM*)x)->flag & ARRKEY ) -#define ISSIGNKEY(x) ( ((TRGM*)x)->flag & SIGNKEY ) -#define ISALLTRUE(x) ( ((TRGM*)x)->flag & ALLISTRUE ) +#define ISSIGNKEY(x) ( ((TRGM*)x)->flag & SIGNKEY ) +#define ISALLTRUE(x) ( ((TRGM*)x)->flag & ALLISTRUE ) #define CALCGTSIZE(flag, len) ( TRGMHRDSIZE + ( ( (flag) & ARRKEY ) ? ((len)*sizeof(trgm)) : (((flag) & ALLISTRUE) ? 
0 : SIGLEN) ) ) -#define GETSIGN(x) ( (BITVECP)( (char*)x+TRGMHRDSIZE ) ) -#define GETARR(x) ( (trgm*)( (char*)x+TRGMHRDSIZE ) ) +#define GETSIGN(x) ( (BITVECP)( (char*)x+TRGMHRDSIZE ) ) +#define GETARR(x) ( (trgm*)( (char*)x+TRGMHRDSIZE ) ) #define ARRNELEM(x) ( ( ((TRGM*)x)->len - TRGMHRDSIZE )/sizeof(trgm) ) extern float4 trgm_limit; -TRGM* generate_trgm(char *str, int slen); -float4 cnt_sml(TRGM *trg1, TRGM *trg2); +TRGM *generate_trgm(char *str, int slen); +float4 cnt_sml(TRGM * trg1, TRGM * trg2); #endif diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c index 6f30a442448..3ae8f4b3aa2 100644 --- a/contrib/pg_trgm/trgm_gist.c +++ b/contrib/pg_trgm/trgm_gist.c @@ -71,12 +71,13 @@ makesign(BITVECP sign, TRGM * a) int4 k, len = ARRNELEM(a); trgm *ptr = GETARR(a); - int4 tmp=0; + int4 tmp = 0; MemSet((void *) sign, 0, sizeof(BITVEC)); - SETBIT(sign, SIGLENBIT); /*set last unused bit*/ - for (k = 0; k < len; k++) { - CPTRGM( ((char*)&tmp), ptr+k ); + SETBIT(sign, SIGLENBIT); /* set last unused bit */ + for (k = 0; k < len; k++) + { + CPTRGM(((char *) &tmp), ptr + k); HASH(sign, tmp); } } @@ -89,7 +90,7 @@ gtrgm_compress(PG_FUNCTION_ARGS) if (entry->leafkey) { /* trgm */ - TRGM *res; + TRGM *res; text *toastedval = (text *) DatumGetPointer(entry->key); text *val = (text *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); @@ -107,7 +108,7 @@ gtrgm_compress(PG_FUNCTION_ARGS) { int4 i, len; - TRGM *res; + TRGM *res; BITVECP sign = GETSIGN(DatumGetPointer(entry->key)); LOOPBYTE( @@ -137,37 +138,45 @@ gtrgm_decompress(PG_FUNCTION_ARGS) Datum gtrgm_consistent(PG_FUNCTION_ARGS) { - text *query = (text *) PG_GETARG_TEXT_P(1); - TRGM *key = (TRGM *) DatumGetPointer( ((GISTENTRY *) PG_GETARG_POINTER(0))->key ); - TRGM *qtrg = generate_trgm(VARDATA(query), VARSIZE(query) - VARHDRSZ); - int res=false; - - if ( GIST_LEAF( (GISTENTRY *) PG_GETARG_POINTER(0) ) ) { /* all leafs contains orig trgm */ - float4 tmpsml = cnt_sml(key,qtrg); - /* strange bug at freebsd 5.2.1 and gcc 3.3.3 */ - res = ( *(int*)&tmpsml==*(int*)&trgm_limit || tmpsml > trgm_limit ) ? true : false; - } else if ( ISALLTRUE(key) ) { /* non-leaf contains signature */ + text *query = (text *) PG_GETARG_TEXT_P(1); + TRGM *key = (TRGM *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key); + TRGM *qtrg = generate_trgm(VARDATA(query), VARSIZE(query) - VARHDRSZ); + int res = false; + + if (GIST_LEAF((GISTENTRY *) PG_GETARG_POINTER(0))) + { /* all leafs contains orig trgm */ + float4 tmpsml = cnt_sml(key, qtrg); + + /* strange bug at freebsd 5.2.1 and gcc 3.3.3 */ + res = (*(int *) &tmpsml == *(int *) &trgm_limit || tmpsml > trgm_limit) ? true : false; + } + else if (ISALLTRUE(key)) + { /* non-leaf contains signature */ res = true; - } else { /* non-leaf contains signature */ - int4 count=0; - int4 k, len = ARRNELEM(qtrg); + } + else + { /* non-leaf contains signature */ + int4 count = 0; + int4 k, + len = ARRNELEM(qtrg); trgm *ptr = GETARR(qtrg); - BITVECP sign = GETSIGN(key); - int4 tmp=0; + BITVECP sign = GETSIGN(key); + int4 tmp = 0; - for (k = 0; k < len; k++) { - CPTRGM( ((char*)&tmp), ptr+k ); + for (k = 0; k < len; k++) + { + CPTRGM(((char *) &tmp), ptr + k); count += GETBIT(sign, HASHVAL(tmp)); } #ifdef DIVUNION - res = ( len==count ) ? true : ( ( ( ( ((float4)count) / ((float4)(len-count)) ) ) >= trgm_limit ) ? true : false ); + res = (len == count) ? true : ((((((float4) count) / ((float4) (len - count)))) >= trgm_limit) ? true : false); #else - res = (len==0) ? 
false : ( ( ( ( ((float4)count) / ((float4)len) ) ) >= trgm_limit ) ? true : false ); + res = (len == 0) ? false : ((((((float4) count) / ((float4) len))) >= trgm_limit) ? true : false); #endif } - PG_FREE_IF_COPY(query,1); - pfree(qtrg); + PG_FREE_IF_COPY(query, 1); + pfree(qtrg); PG_RETURN_BOOL(res); } @@ -191,10 +200,11 @@ unionkey(BITVECP sbase, TRGM * add) else { trgm *ptr = GETARR(add); - int4 tmp=0; + int4 tmp = 0; - for (i = 0; i < ARRNELEM(add); i++) { - CPTRGM( ((char*)&tmp), ptr+i ); + for (i = 0; i < ARRNELEM(add); i++) + { + CPTRGM(((char *) &tmp), ptr + i); HASH(sbase, tmp); } } @@ -205,13 +215,13 @@ unionkey(BITVECP sbase, TRGM * add) Datum gtrgm_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - int4 len = entryvec->n; + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + int4 len = entryvec->n; int *size = (int *) PG_GETARG_POINTER(1); BITVEC base; int4 i; int4 flag = 0; - TRGM *result; + TRGM *result; MemSet((void *) base, 0, sizeof(BITVEC)); for (i = 0; i < len; i++) @@ -237,8 +247,8 @@ gtrgm_union(PG_FUNCTION_ARGS) Datum gtrgm_same(PG_FUNCTION_ARGS) { - TRGM *a = (TRGM *) PG_GETARG_POINTER(0); - TRGM *b = (TRGM *) PG_GETARG_POINTER(1); + TRGM *a = (TRGM *) PG_GETARG_POINTER(0); + TRGM *b = (TRGM *) PG_GETARG_POINTER(1); bool *result = (bool *) PG_GETARG_POINTER(2); if (ISSIGNKEY(a)) @@ -280,7 +290,7 @@ gtrgm_same(PG_FUNCTION_ARGS) *result = true; for (i = 0; i < lena; i++) - if (CMPTRGM(ptra+i, ptrb+i)) + if (CMPTRGM(ptra + i, ptrb + i)) { *result = false; break; @@ -298,34 +308,39 @@ sizebitvec(BITVECP sign) i; LOOPBYTE( - size += SUMBIT(*(char *) sign); - sign = (BITVECP) (((char *) sign) + 1); + size += SUMBIT(*(char *) sign); + sign = (BITVECP) (((char *) sign) + 1); ); return size; } static int -hemdistsign(BITVECP a, BITVECP b) { - int i,dist=0; +hemdistsign(BITVECP a, BITVECP b) +{ + int i, + dist = 0; LOOPBIT( - if ( GETBIT(a,i) != GETBIT(b,i) ) + if (GETBIT(a, i) != GETBIT(b, i)) dist++; ); return dist; } static int -hemdist(TRGM *a, TRGM *b) { - if ( ISALLTRUE(a) ) { +hemdist(TRGM * a, TRGM * b) +{ + if (ISALLTRUE(a)) + { if (ISALLTRUE(b)) return 0; else - return SIGLENBIT-sizebitvec(GETSIGN(b)); - } else if (ISALLTRUE(b)) - return SIGLENBIT-sizebitvec(GETSIGN(a)); + return SIGLENBIT - sizebitvec(GETSIGN(b)); + } + else if (ISALLTRUE(b)) + return SIGLENBIT - sizebitvec(GETSIGN(a)); - return hemdistsign( GETSIGN(a), GETSIGN(b) ); + return hemdistsign(GETSIGN(a), GETSIGN(b)); } Datum @@ -334,23 +349,25 @@ gtrgm_penalty(PG_FUNCTION_ARGS) GISTENTRY *origentry = (GISTENTRY *) PG_GETARG_POINTER(0); /* always ISSIGNKEY */ GISTENTRY *newentry = (GISTENTRY *) PG_GETARG_POINTER(1); float *penalty = (float *) PG_GETARG_POINTER(2); - TRGM *origval = (TRGM *) DatumGetPointer(origentry->key); - TRGM *newval = (TRGM *) DatumGetPointer(newentry->key); + TRGM *origval = (TRGM *) DatumGetPointer(origentry->key); + TRGM *newval = (TRGM *) DatumGetPointer(newentry->key); BITVECP orig = GETSIGN(origval); *penalty = 0.0; - if (ISARRKEY(newval)) { - BITVEC sign; + if (ISARRKEY(newval)) + { + BITVEC sign; + makesign(sign, newval); - if ( ISALLTRUE(origval) ) - *penalty=((float)(SIGLENBIT-sizebitvec(sign)))/(float)(SIGLENBIT+1); - else - *penalty=hemdistsign(sign,orig); - } else { - *penalty=hemdist(origval,newval); + if (ISALLTRUE(origval)) + *penalty = ((float) (SIGLENBIT - sizebitvec(sign))) / (float) (SIGLENBIT + 1); + else + *penalty = hemdistsign(sign, orig); } + else + *penalty = hemdist(origval, newval); 
PG_RETURN_POINTER(penalty); } @@ -390,27 +407,30 @@ comparecost(const void *a, const void *b) static int -hemdistcache(CACHESIGN *a, CACHESIGN *b) { - if ( a->allistrue ) { +hemdistcache(CACHESIGN * a, CACHESIGN * b) +{ + if (a->allistrue) + { if (b->allistrue) return 0; else - return SIGLENBIT-sizebitvec(b->sign); - } else if (b->allistrue) - return SIGLENBIT-sizebitvec(a->sign); + return SIGLENBIT - sizebitvec(b->sign); + } + else if (b->allistrue) + return SIGLENBIT - sizebitvec(a->sign); - return hemdistsign( a->sign, b->sign ); + return hemdistsign(a->sign, b->sign); } Datum gtrgm_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); - OffsetNumber maxoff = entryvec->n - 2; + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + OffsetNumber maxoff = entryvec->n - 2; GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber k, j; - TRGM *datum_l, + TRGM *datum_l, *datum_r; BITVECP union_l, union_r; @@ -435,13 +455,16 @@ gtrgm_picksplit(PG_FUNCTION_ARGS) cache = (CACHESIGN *) palloc(sizeof(CACHESIGN) * (maxoff + 2)); fillcache(&cache[FirstOffsetNumber], GETENTRY(entryvec, FirstOffsetNumber)); - for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) { - for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) { + for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) + { + for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) + { if (k == FirstOffsetNumber) fillcache(&cache[j], GETENTRY(entryvec, j)); - size_waste=hemdistcache(&(cache[j]),&(cache[k])); - if (size_waste > waste) { + size_waste = hemdistcache(&(cache[j]), &(cache[k])); + if (size_waste > waste) + { waste = size_waste; seed_1 = k; seed_2 = j; @@ -454,101 +477,124 @@ gtrgm_picksplit(PG_FUNCTION_ARGS) right = v->spl_right; v->spl_nright = 0; - if (seed_1 == 0 || seed_2 == 0) { + if (seed_1 == 0 || seed_2 == 0) + { seed_1 = 1; seed_2 = 2; } /* form initial .. */ - if (cache[seed_1].allistrue) { + if (cache[seed_1].allistrue) + { datum_l = (TRGM *) palloc(CALCGTSIZE(SIGNKEY | ALLISTRUE, 0)); datum_l->len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0); datum_l->flag = SIGNKEY | ALLISTRUE; - } else { + } + else + { datum_l = (TRGM *) palloc(CALCGTSIZE(SIGNKEY, 0)); datum_l->len = CALCGTSIZE(SIGNKEY, 0); datum_l->flag = SIGNKEY; memcpy((void *) GETSIGN(datum_l), (void *) cache[seed_1].sign, sizeof(BITVEC)); } - if (cache[seed_2].allistrue) { + if (cache[seed_2].allistrue) + { datum_r = (TRGM *) palloc(CALCGTSIZE(SIGNKEY | ALLISTRUE, 0)); datum_r->len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0); datum_r->flag = SIGNKEY | ALLISTRUE; - } else { + } + else + { datum_r = (TRGM *) palloc(CALCGTSIZE(SIGNKEY, 0)); datum_r->len = CALCGTSIZE(SIGNKEY, 0); datum_r->flag = SIGNKEY; memcpy((void *) GETSIGN(datum_r), (void *) cache[seed_2].sign, sizeof(BITVEC)); } - union_l=GETSIGN(datum_l); - union_r=GETSIGN(datum_r); + union_l = GETSIGN(datum_l); + union_r = GETSIGN(datum_r); maxoff = OffsetNumberNext(maxoff); fillcache(&cache[maxoff], GETENTRY(entryvec, maxoff)); /* sort before ... 
*/ costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff); - for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) { + for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) + { costvector[j - 1].pos = j; size_alpha = hemdistcache(&(cache[seed_1]), &(cache[j])); - size_beta = hemdistcache(&(cache[seed_2]), &(cache[j])); + size_beta = hemdistcache(&(cache[seed_2]), &(cache[j])); costvector[j - 1].cost = abs(size_alpha - size_beta); } qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost); - for (k = 0; k < maxoff; k++) { + for (k = 0; k < maxoff; k++) + { j = costvector[k].pos; - if (j == seed_1) { + if (j == seed_1) + { *left++ = j; v->spl_nleft++; continue; - } else if (j == seed_2) { + } + else if (j == seed_2) + { *right++ = j; v->spl_nright++; continue; } - if (ISALLTRUE(datum_l) || cache[j].allistrue) { - if ( ISALLTRUE(datum_l) && cache[j].allistrue ) - size_alpha=0; + if (ISALLTRUE(datum_l) || cache[j].allistrue) + { + if (ISALLTRUE(datum_l) && cache[j].allistrue) + size_alpha = 0; else - size_alpha = SIGLENBIT-sizebitvec( - ( cache[j].allistrue ) ? GETSIGN(datum_l) : GETSIGN(cache[j].sign) - ); - } else { - size_alpha=hemdistsign(cache[j].sign,GETSIGN(datum_l)); + size_alpha = SIGLENBIT - sizebitvec( + (cache[j].allistrue) ? GETSIGN(datum_l) : GETSIGN(cache[j].sign) + ); } + else + size_alpha = hemdistsign(cache[j].sign, GETSIGN(datum_l)); - if (ISALLTRUE(datum_r) || cache[j].allistrue) { - if ( ISALLTRUE(datum_r) && cache[j].allistrue ) - size_beta=0; + if (ISALLTRUE(datum_r) || cache[j].allistrue) + { + if (ISALLTRUE(datum_r) && cache[j].allistrue) + size_beta = 0; else - size_beta = SIGLENBIT-sizebitvec( - ( cache[j].allistrue ) ? GETSIGN(datum_r) : GETSIGN(cache[j].sign) - ); - } else { - size_beta=hemdistsign(cache[j].sign,GETSIGN(datum_r)); + size_beta = SIGLENBIT - sizebitvec( + (cache[j].allistrue) ? GETSIGN(datum_r) : GETSIGN(cache[j].sign) + ); } + else + size_beta = hemdistsign(cache[j].sign, GETSIGN(datum_r)); - if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.1)) { - if (ISALLTRUE(datum_l) || cache[j].allistrue) { - if (! ISALLTRUE(datum_l) ) + if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.1)) + { + if (ISALLTRUE(datum_l) || cache[j].allistrue) + { + if (!ISALLTRUE(datum_l)) MemSet((void *) GETSIGN(datum_l), 0xff, sizeof(BITVEC)); - } else { - ptr=cache[j].sign; + } + else + { + ptr = cache[j].sign; LOOPBYTE( - union_l[i] |= ptr[i]; + union_l[i] |= ptr[i]; ); } *left++ = j; v->spl_nleft++; - } else { - if (ISALLTRUE(datum_r) || cache[j].allistrue) { - if (! 
ISALLTRUE(datum_r) ) + } + else + { + if (ISALLTRUE(datum_r) || cache[j].allistrue) + { + if (!ISALLTRUE(datum_r)) MemSet((void *) GETSIGN(datum_r), 0xff, sizeof(BITVEC)); - } else { - ptr=cache[j].sign; + } + else + { + ptr = cache[j].sign; LOOPBYTE( - union_r[i] |= ptr[i]; + union_r[i] |= ptr[i]; ); } *right++ = j; diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index 01ece90cdf5..57fb944a569 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -3,136 +3,162 @@ #include "utils/array.h" #include "catalog/pg_type.h" -float4 trgm_limit = 0.3; +float4 trgm_limit = 0.3; PG_FUNCTION_INFO_V1(set_limit); -Datum set_limit(PG_FUNCTION_ARGS); +Datum set_limit(PG_FUNCTION_ARGS); Datum -set_limit(PG_FUNCTION_ARGS) { - float4 nlimit = PG_GETARG_FLOAT4(0); - if ( nlimit < 0 || nlimit > 1.0 ) - elog(ERROR,"Wrong limit, should be between 0 and 1"); +set_limit(PG_FUNCTION_ARGS) +{ + float4 nlimit = PG_GETARG_FLOAT4(0); + + if (nlimit < 0 || nlimit > 1.0) + elog(ERROR, "Wrong limit, should be between 0 and 1"); trgm_limit = nlimit; PG_RETURN_FLOAT4(trgm_limit); } PG_FUNCTION_INFO_V1(show_limit); -Datum show_limit(PG_FUNCTION_ARGS); +Datum show_limit(PG_FUNCTION_ARGS); Datum -show_limit(PG_FUNCTION_ARGS) { +show_limit(PG_FUNCTION_ARGS) +{ PG_RETURN_FLOAT4(trgm_limit); } -#define WORDWAIT 0 -#define INWORD 1 +#define WORDWAIT 0 +#define INWORD 1 static int -comp_trgm(const void *a, const void *b) { - return CMPTRGM(a,b); +comp_trgm(const void *a, const void *b) +{ + return CMPTRGM(a, b); } static int -unique_array (trgm *a, int len) { - trgm *curend, *tmp; +unique_array(trgm * a, int len) +{ + trgm *curend, + *tmp; curend = tmp = a; while (tmp - a < len) - if ( CMPTRGM(tmp, curend) ) { + if (CMPTRGM(tmp, curend)) + { curend++; - CPTRGM(curend,tmp); + CPTRGM(curend, tmp); tmp++; - } else + } + else tmp++; - + return curend + 1 - a; } -TRGM* -generate_trgm(char *str, int slen) { - TRGM* trg; - char *buf,*sptr,*bufptr; - trgm *tptr; - int state=WORDWAIT; - int wl,len; +TRGM * +generate_trgm(char *str, int slen) +{ + TRGM *trg; + char *buf, + *sptr, + *bufptr; + trgm *tptr; + int state = WORDWAIT; + int wl, + len; - trg = (TRGM*) palloc(TRGMHRDSIZE+sizeof(trgm) * (slen/2 + 1) * 3); + trg = (TRGM *) palloc(TRGMHRDSIZE + sizeof(trgm) * (slen / 2 + 1) * 3); trg->flag = ARRKEY; trg->len = TRGMHRDSIZE; - if ( slen+LPADDING+RPADDING<3 || slen == 0 ) + if (slen + LPADDING + RPADDING < 3 || slen == 0) return trg; tptr = GETARR(trg); - buf = palloc(sizeof(char) * (slen+4)); + buf = palloc(sizeof(char) * (slen + 4)); sptr = str; - if ( LPADDING > 0 ) { + if (LPADDING > 0) + { *buf = ' '; - if ( LPADDING > 1 ) - *(buf+1) = ' '; + if (LPADDING > 1) + *(buf + 1) = ' '; } - bufptr = buf+LPADDING; - while( sptr-str < slen ) { - if ( state == WORDWAIT ) { - if ( + bufptr = buf + LPADDING; + while (sptr - str < slen) + { + if (state == WORDWAIT) + { + if ( #ifdef KEEPONLYALNUM - isalnum((unsigned char)*sptr) + isalnum((unsigned char) *sptr) #else - !isspace( (unsigned char)*sptr ) + !isspace((unsigned char) *sptr) #endif - ) { - *bufptr = *sptr; /* start put word in buffer */ + ) + { + *bufptr = *sptr; /* start put word in buffer */ bufptr++; state = INWORD; - if ( sptr-str == slen-1 /* last char */ ) + if (sptr - str == slen - 1 /* last char */ ) goto gettrg; } - } else { + } + else + { if ( #ifdef KEEPONLYALNUM - !isalnum((unsigned char)*sptr) + !isalnum((unsigned char) *sptr) #else - isspace( (unsigned char)*sptr ) + isspace((unsigned char) *sptr) #endif - ) { -gettrg: + ) + { + gettrg: 
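/*
 * A standalone sketch of what generate_trgm() above boils down to for a
 * single word: pad it with blanks (two on the left and one on the right
 * are assumed here; the real padding depends on LPADDING/RPADDING) and
 * slide a three-character window over the result.  The conditional
 * lowercasing, sorting and de-duplication done by the real code are
 * omitted.
 */
#include <stdio.h>
#include <string.h>

static void
print_word_trigrams(const char *word)
{
	char		buf[256];
	size_t		len;
	size_t		i;

	/* "  word " -- blank padding so edge characters form trigrams too */
	snprintf(buf, sizeof(buf), "  %s ", word);
	len = strlen(buf);
	for (i = 0; i + 3 <= len; i++)
		printf("\"%.3s\"\n", buf + i);	/* one trigram per 3-char window */
}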
/* word in buffer, so count trigrams */ *bufptr = ' '; - *(bufptr+1) = ' '; - wl = bufptr - (buf+LPADDING) - 2 + LPADDING + RPADDING; - if ( wl<=0 ) { - bufptr = buf+LPADDING; + *(bufptr + 1) = ' '; + wl = bufptr - (buf + LPADDING) - 2 + LPADDING + RPADDING; + if (wl <= 0) + { + bufptr = buf + LPADDING; state = WORDWAIT; sptr++; continue; } #ifdef IGNORECASE - do { /* lower word */ - int wwl = bufptr-buf; - bufptr = buf+LPADDING; - while( bufptr-buf < wwl ) { - *bufptr = tolower( (unsigned char) *bufptr ); + do + { /* lower word */ + int wwl = bufptr - buf; + + bufptr = buf + LPADDING; + while (bufptr - buf < wwl) + { + *bufptr = tolower((unsigned char) *bufptr); bufptr++; } - } while(0); + } while (0); #endif bufptr = buf; /* set trigrams */ - while( bufptr-buf < wl ) { + while (bufptr - buf < wl) + { CPTRGM(tptr, bufptr); bufptr++; tptr++; } - bufptr = buf+LPADDING; + bufptr = buf + LPADDING; state = WORDWAIT; - } else { - *bufptr = *sptr; /* put in buffer */ + } + else + { + *bufptr = *sptr; /* put in buffer */ bufptr++; - if ( sptr-str == slen-1 ) + if (sptr - str == slen - 1) goto gettrg; } } @@ -141,13 +167,14 @@ gettrg: pfree(buf); - if ( (len=tptr-GETARR(trg)) == 0 ) + if ((len = tptr - GETARR(trg)) == 0) return trg; - if ( len>0 ) { - qsort( (void*)GETARR(trg), len, sizeof(trgm), comp_trgm ); - len = unique_array( GETARR(trg), len ); - } + if (len > 0) + { + qsort((void *) GETARR(trg), len, sizeof(trgm), comp_trgm); + len = unique_array(GETARR(trg), len); + } trg->len = CALCGTSIZE(ARRKEY, len); @@ -156,68 +183,78 @@ gettrg: PG_FUNCTION_INFO_V1(show_trgm); -Datum show_trgm(PG_FUNCTION_ARGS); +Datum show_trgm(PG_FUNCTION_ARGS); Datum -show_trgm(PG_FUNCTION_ARGS) { - text *in = PG_GETARG_TEXT_P(0); - TRGM *trg; - Datum *d; - ArrayType *a; - trgm *ptr; +show_trgm(PG_FUNCTION_ARGS) +{ + text *in = PG_GETARG_TEXT_P(0); + TRGM *trg; + Datum *d; + ArrayType *a; + trgm *ptr; trg = generate_trgm(VARDATA(in), VARSIZE(in) - VARHDRSZ); - d = (Datum*)palloc( sizeof(Datum)*(1+ARRNELEM(trg)) ); + d = (Datum *) palloc(sizeof(Datum) * (1 + ARRNELEM(trg))); ptr = GETARR(trg); - while( ptr-GETARR(trg) < ARRNELEM(trg) ) { - text *item=(text*)palloc(VARHDRSZ + 3); - VARATT_SIZEP(item) = VARHDRSZ+3; + while (ptr - GETARR(trg) < ARRNELEM(trg)) + { + text *item = (text *) palloc(VARHDRSZ + 3); + + VARATT_SIZEP(item) = VARHDRSZ + 3; CPTRGM(VARDATA(item), ptr); - d[ ptr-GETARR(trg) ] = PointerGetDatum(item); + d[ptr - GETARR(trg)] = PointerGetDatum(item); ptr++; } a = construct_array( - d, - ARRNELEM(trg), - TEXTOID, - -1, - false, - 'i' - ); + d, + ARRNELEM(trg), + TEXTOID, + -1, + false, + 'i' + ); ptr = GETARR(trg); - while( ptr-GETARR(trg) < ARRNELEM(trg) ) { - pfree(DatumGetPointer(d[ ptr-GETARR(trg) ])); + while (ptr - GETARR(trg) < ARRNELEM(trg)) + { + pfree(DatumGetPointer(d[ptr - GETARR(trg)])); ptr++; } - + pfree(d); pfree(trg); - PG_FREE_IF_COPY(in,0); + PG_FREE_IF_COPY(in, 0); PG_RETURN_POINTER(a); } float4 -cnt_sml(TRGM *trg1, TRGM *trg2) { - trgm *ptr1, *ptr2; - int count=0; - int len1, len2; - +cnt_sml(TRGM * trg1, TRGM * trg2) +{ + trgm *ptr1, + *ptr2; + int count = 0; + int len1, + len2; + ptr1 = GETARR(trg1); ptr2 = GETARR(trg2); - + len1 = ARRNELEM(trg1); len2 = ARRNELEM(trg2); - while( ptr1 - GETARR(trg1) < len1 && ptr2 - GETARR(trg2) < len2 ) { - int res = CMPTRGM(ptr1,ptr2); - if ( res < 0 ) { + while (ptr1 - GETARR(trg1) < len1 && ptr2 - GETARR(trg2) < len2) + { + int res = CMPTRGM(ptr1, ptr2); + + if (res < 0) ptr1++; - } else if ( res > 0 ) { + else if (res > 0) ptr2++; - } else { + 
else + { ptr1++; ptr2++; count++; @@ -225,45 +262,47 @@ cnt_sml(TRGM *trg1, TRGM *trg2) { } #ifdef DIVUNION - return ( ( ((float4)count) / ((float4)(len1+len2-count)) ) ); + return ((((float4) count) / ((float4) (len1 + len2 - count)))); #else - return ( ((float)count) / ((float)( (len1>len2) ? len1 : len2 )) ); + return (((float) count) / ((float) ((len1 > len2) ? len1 : len2))); #endif } PG_FUNCTION_INFO_V1(similarity); -Datum similarity(PG_FUNCTION_ARGS); +Datum similarity(PG_FUNCTION_ARGS); Datum -similarity(PG_FUNCTION_ARGS) { - text *in1 = PG_GETARG_TEXT_P(0); - text *in2 = PG_GETARG_TEXT_P(1); - TRGM *trg1, *trg2; - float4 res; +similarity(PG_FUNCTION_ARGS) +{ + text *in1 = PG_GETARG_TEXT_P(0); + text *in2 = PG_GETARG_TEXT_P(1); + TRGM *trg1, + *trg2; + float4 res; trg1 = generate_trgm(VARDATA(in1), VARSIZE(in1) - VARHDRSZ); trg2 = generate_trgm(VARDATA(in2), VARSIZE(in2) - VARHDRSZ); - res = cnt_sml(trg1,trg2); - + res = cnt_sml(trg1, trg2); + pfree(trg1); pfree(trg2); - PG_FREE_IF_COPY(in1,0); - PG_FREE_IF_COPY(in2,1); - + PG_FREE_IF_COPY(in1, 0); + PG_FREE_IF_COPY(in2, 1); + PG_RETURN_FLOAT4(res); } PG_FUNCTION_INFO_V1(similarity_op); -Datum similarity_op(PG_FUNCTION_ARGS); +Datum similarity_op(PG_FUNCTION_ARGS); Datum -similarity_op(PG_FUNCTION_ARGS) { - float4 res=DatumGetFloat4( DirectFunctionCall2( - similarity, - PG_GETARG_DATUM(0), - PG_GETARG_DATUM(1) - ) ); - PG_RETURN_BOOL( res >= trgm_limit ); +similarity_op(PG_FUNCTION_ARGS) +{ + float4 res = DatumGetFloat4(DirectFunctionCall2( + similarity, + PG_GETARG_DATUM(0), + PG_GETARG_DATUM(1) + )); + + PG_RETURN_BOOL(res >= trgm_limit); } - - diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c index 8fc8273cc8f..cf1880f242d 100644 --- a/contrib/pgbench/pgbench.c +++ b/contrib/pgbench/pgbench.c @@ -1,5 +1,5 @@ /* - * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.31 2004/06/14 11:00:12 ishii Exp $ + * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.32 2004/08/29 05:06:36 momjian Exp $ * * pgbench: a simple TPC-B like benchmark program for PostgreSQL * written by Tatsuo Ishii @@ -261,7 +261,7 @@ doOne(CState * state, int n, int debug, int ttype) */ if (use_log) { - double diff; + double diff; struct timeval now; gettimeofday(&now, 0); @@ -492,7 +492,7 @@ init(void) static char *DDLAFTERs[] = { "alter table branches add primary key (bid)", "alter table tellers add primary key (tid)", - "alter table accounts add primary key (aid)"}; + "alter table accounts add primary key (aid)"}; char sql[256]; diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index 98b4cc3d519..bfe37eb9ed6 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -1,5 +1,5 @@ /* - * $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.15 2004/05/08 19:09:24 tgl Exp $ + * $PostgreSQL: pgsql/contrib/pgstattuple/pgstattuple.c,v 1.16 2004/08/29 05:06:37 momjian Exp $ * * Copyright (c) 2001,2002 Tatsuo Ishii * @@ -129,7 +129,7 @@ pgstattuple_real(Relation rel) scan = heap_beginscan(rel, SnapshotAny, 0, NULL); - nblocks = scan->rs_nblocks; /* # blocks to be scanned */ + nblocks = scan->rs_nblocks; /* # blocks to be scanned */ /* scan the relation */ while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) @@ -189,9 +189,9 @@ pgstattuple_real(Relation rel) } /* - * Prepare a values array for constructing the tuple. This should be an - * array of C strings which will be processed later by the appropriate - * "in" functions. 
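/*
 * A minimal sketch of the similarity formula implemented by cnt_sml() in
 * the pg_trgm hunk above: the number of shared trigrams divided by the
 * larger of the two trigram counts; the DIVUNION variant divides by the
 * size of the union instead.  The counts are assumed to come from
 * already sorted, de-duplicated trigram arrays, as in the real code.
 */
static float
trgm_similarity(int shared, int len1, int len2)
{
	int			denom = (len1 > len2) ? len1 : len2;

	/* DIVUNION variant would use: denom = len1 + len2 - shared; */
	if (denom == 0)
		return 0.0f;
	return (float) shared / (float) denom;
}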
+ * Prepare a values array for constructing the tuple. This should be + * an array of C strings which will be processed later by the + * appropriate "in" functions. */ values = (char **) palloc(NCOLUMNS * sizeof(char *)); for (i = 0; i < NCOLUMNS; i++) diff --git a/contrib/rtree_gist/rtree_gist.c b/contrib/rtree_gist/rtree_gist.c index eee6c1e6b66..92714522d80 100644 --- a/contrib/rtree_gist/rtree_gist.c +++ b/contrib/rtree_gist/rtree_gist.c @@ -7,7 +7,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/contrib/rtree_gist/rtree_gist.c,v 1.9 2004/03/30 15:45:33 teodor Exp $ + * $PostgreSQL: pgsql/contrib/rtree_gist/rtree_gist.c,v 1.10 2004/08/29 05:06:37 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -98,7 +98,7 @@ gbox_consistent(PG_FUNCTION_ARGS) Datum gbox_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); int *sizep = (int *) PG_GETARG_POINTER(1); int numranges, i; @@ -186,7 +186,7 @@ compare_KB(const void *a, const void *b) Datum gbox_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber i; OffsetNumber *listL, diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index 39f006291ed..e86772ab2b8 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -240,7 +240,7 @@ gseg_union(GistEntryVector *entryvec, int *sizep) for (i = 1; i < numranges; i++) { out = gseg_binary_union(tmp, (SEG *) - DatumGetPointer(entryvec->vector[i].key), + DatumGetPointer(entryvec->vector[i].key), sizep); if (i > 1) pfree(tmp); @@ -755,8 +755,8 @@ seg_cmp(SEG * a, SEG * b) * a->lower == b->lower, so consider type of boundary. * * A '-' lower bound is < any other kind (this could only be relevant if - * -HUGE_VAL is used as a regular data value). A '<' lower bound is < any - * other kind except '-'. A '>' lower bound is > any other kind. + * -HUGE_VAL is used as a regular data value). A '<' lower bound is < + * any other kind except '-'. A '>' lower bound is > any other kind. */ if (a->l_ext != b->l_ext) { @@ -813,8 +813,8 @@ seg_cmp(SEG * a, SEG * b) * a->upper == b->upper, so consider type of boundary. * * A '-' upper bound is > any other kind (this could only be relevant if - * HUGE_VAL is used as a regular data value). A '<' upper bound is < any - * other kind. A '>' upper bound is > any other kind except '-'. + * HUGE_VAL is used as a regular data value). A '<' upper bound is < + * any other kind. A '>' upper bound is > any other kind except '-'. 
*/ if (a->u_ext != b->u_ext) { diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c index 02570a403d7..d20b101e7ab 100644 --- a/contrib/spi/timetravel.c +++ b/contrib/spi/timetravel.c @@ -309,7 +309,7 @@ timetravel(PG_FUNCTION_ARGS) void *pplan; Oid *ctypes; char sql[8192]; - char separ=' '; + char separ = ' '; /* allocate ctypes for preparation */ ctypes = (Oid *) palloc(natts * sizeof(Oid)); @@ -323,8 +323,8 @@ timetravel(PG_FUNCTION_ARGS) ctypes[i - 1] = SPI_gettypeid(tupdesc, i); if (!(tupdesc->attrs[i - 1]->attisdropped)) /* skip dropped columns */ { - snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "%c$%d", separ,i); - separ = ','; + snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), "%c$%d", separ, i); + separ = ','; } } snprintf(sql + strlen(sql), sizeof(sql) - strlen(sql), ")"); diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index be427219e2f..212be222cbd 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -875,7 +875,7 @@ get_crosstab_tuplestore(char *sql, /* no qualifying category tuples */ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("provided \"categories\" SQL must " \ + errmsg("provided \"categories\" SQL must " \ "return 1 column of at least one row"))); } diff --git a/contrib/tsearch/gistidx.c b/contrib/tsearch/gistidx.c index 5b00960e1cb..46d516904b7 100644 --- a/contrib/tsearch/gistidx.c +++ b/contrib/tsearch/gistidx.c @@ -326,10 +326,11 @@ unionkey(BITVECP sbase, GISTTYPE * add) Datum gtxtidx_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); int *size = (int *) PG_GETARG_POINTER(1); BITVEC base; - int4 i,len; + int4 i, + len; int4 flag = 0; GISTTYPE *result; @@ -512,7 +513,7 @@ comparecost(const void *a, const void *b) Datum gtxtidx_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber k, j; diff --git a/contrib/tsearch/query.c b/contrib/tsearch/query.c index c0e2cae9165..80e55569941 100644 --- a/contrib/tsearch/query.c +++ b/contrib/tsearch/query.c @@ -241,16 +241,20 @@ pushval_morph(QPRS_STATE * state, int typeval, char *strval, int lenval) lemm = lemmatize(token, &lenlemm, type); if (lemm) { - if ( lemm==token ) { - char *ptrs=token,*ptrd; - ptrd = lemm = palloc(lenlemm+1); - while(ptrs-token<lenlemm) { + if (lemm == token) + { + char *ptrs = token, + *ptrd; + + ptrd = lemm = palloc(lenlemm + 1); + while (ptrs - token < lenlemm) + { *ptrd = tolower((unsigned char) *ptrs); ptrs++; ptrd++; } - *ptrd='\0'; - } + *ptrd = '\0'; + } pushval_asis(state, VAL, lemm, lenlemm); pfree(lemm); } diff --git a/contrib/tsearch2/common.c b/contrib/tsearch2/common.c index b51c3e05833..4984c3d256b 100644 --- a/contrib/tsearch2/common.c +++ b/contrib/tsearch2/common.c @@ -21,7 +21,7 @@ #include "dict.h" -Oid TSNSP_FunctionOid = InvalidOid; +Oid TSNSP_FunctionOid = InvalidOid; text * @@ -121,44 +121,45 @@ text_cmp(text *a, text *b) } -char* -get_namespace(Oid funcoid) { - HeapTuple tuple; - Form_pg_proc proc; - Form_pg_namespace nsp; - Oid nspoid; - char *txt; - - tuple = SearchSysCache(PROCOID, ObjectIdGetDatum(funcoid), 0, 0, 0); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for proc oid %u", funcoid); - proc=(Form_pg_proc) GETSTRUCT(tuple); - nspoid = 
proc->pronamespace; - ReleaseSysCache(tuple); - - tuple = SearchSysCache(NAMESPACEOID, ObjectIdGetDatum(nspoid), 0, 0, 0); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for namespace oid %u", nspoid); - nsp = (Form_pg_namespace) GETSTRUCT(tuple); - txt = pstrdup( NameStr((nsp->nspname)) ); - ReleaseSysCache(tuple); - - return txt; +char * +get_namespace(Oid funcoid) +{ + HeapTuple tuple; + Form_pg_proc proc; + Form_pg_namespace nsp; + Oid nspoid; + char *txt; + + tuple = SearchSysCache(PROCOID, ObjectIdGetDatum(funcoid), 0, 0, 0); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for proc oid %u", funcoid); + proc = (Form_pg_proc) GETSTRUCT(tuple); + nspoid = proc->pronamespace; + ReleaseSysCache(tuple); + + tuple = SearchSysCache(NAMESPACEOID, ObjectIdGetDatum(nspoid), 0, 0, 0); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for namespace oid %u", nspoid); + nsp = (Form_pg_namespace) GETSTRUCT(tuple); + txt = pstrdup(NameStr((nsp->nspname))); + ReleaseSysCache(tuple); + + return txt; } Oid -get_oidnamespace(Oid funcoid) { - HeapTuple tuple; - Form_pg_proc proc; - Oid nspoid; - - tuple = SearchSysCache(PROCOID, ObjectIdGetDatum(funcoid), 0, 0, 0); - if (!HeapTupleIsValid(tuple)) - elog(ERROR, "cache lookup failed for proc oid %u", funcoid); - proc=(Form_pg_proc) GETSTRUCT(tuple); - nspoid = proc->pronamespace; - ReleaseSysCache(tuple); - - return nspoid; +get_oidnamespace(Oid funcoid) +{ + HeapTuple tuple; + Form_pg_proc proc; + Oid nspoid; + + tuple = SearchSysCache(PROCOID, ObjectIdGetDatum(funcoid), 0, 0, 0); + if (!HeapTupleIsValid(tuple)) + elog(ERROR, "cache lookup failed for proc oid %u", funcoid); + proc = (Form_pg_proc) GETSTRUCT(tuple); + nspoid = proc->pronamespace; + ReleaseSysCache(tuple); + + return nspoid; } - diff --git a/contrib/tsearch2/common.h b/contrib/tsearch2/common.h index 6720598f817..c84e841e15e 100644 --- a/contrib/tsearch2/common.h +++ b/contrib/tsearch2/common.h @@ -21,13 +21,14 @@ int text_cmp(text *a, text *b); void ts_error(int state, const char *format,...); -extern Oid TSNSP_FunctionOid; /* oid of called function, needed only for determ namespace, no more */ -char* get_namespace(Oid funcoid); -Oid get_oidnamespace(Oid funcoid); - -#define SET_FUNCOID() do { \ - if ( fcinfo->flinfo && fcinfo->flinfo->fn_oid != InvalidOid ) \ - TSNSP_FunctionOid = fcinfo->flinfo->fn_oid; \ +extern Oid TSNSP_FunctionOid; /* oid of called function, needed only for + * determ namespace, no more */ +char *get_namespace(Oid funcoid); +Oid get_oidnamespace(Oid funcoid); + +#define SET_FUNCOID() do { \ + if ( fcinfo->flinfo && fcinfo->flinfo->fn_oid != InvalidOid ) \ + TSNSP_FunctionOid = fcinfo->flinfo->fn_oid; \ } while(0) #endif diff --git a/contrib/tsearch2/dict.c b/contrib/tsearch2/dict.c index 7be406da9ed..357097681e5 100644 --- a/contrib/tsearch2/dict.c +++ b/contrib/tsearch2/dict.c @@ -26,18 +26,18 @@ init_dict(Oid id, DictInfo * dict) bool isnull; Datum pars[1]; int stat; - void *plan; - char buf[1024]; - char *nsp = get_namespace(TSNSP_FunctionOid); + void *plan; + char buf[1024]; + char *nsp = get_namespace(TSNSP_FunctionOid); arg[0] = OIDOID; pars[0] = ObjectIdGetDatum(id); memset(dict, 0, sizeof(DictInfo)); SPI_connect(); - sprintf(buf,"select dict_init, dict_initoption, dict_lexize from %s.pg_ts_dict where oid = $1", nsp); + sprintf(buf, "select dict_init, dict_initoption, dict_lexize from %s.pg_ts_dict where oid = $1", nsp); pfree(nsp); - plan= SPI_prepare(buf, 1, arg); + plan = SPI_prepare(buf, 1, arg); if 
(!plan) ts_error(ERROR, "SPI_prepare() failed"); @@ -142,8 +142,9 @@ name2id_dict(text *name) Datum pars[1]; int stat; Oid id = findSNMap_t(&(DList.name2id_map), name); - void *plan; - char buf[1024], *nsp; + void *plan; + char buf[1024], + *nsp; arg[0] = TEXTOID; pars[0] = PointerGetDatum(name); @@ -153,9 +154,9 @@ name2id_dict(text *name) nsp = get_namespace(TSNSP_FunctionOid); SPI_connect(); - sprintf(buf,"select oid from %s.pg_ts_dict where dict_name = $1", nsp); + sprintf(buf, "select oid from %s.pg_ts_dict where dict_name = $1", nsp); pfree(nsp); - plan= SPI_prepare(buf, 1, arg); + plan = SPI_prepare(buf, 1, arg); if (!plan) ts_error(ERROR, "SPI_prepare() failed"); @@ -245,7 +246,8 @@ lexize_byname(PG_FUNCTION_ARGS) { text *dictname = PG_GETARG_TEXT_P(0); Datum res; - SET_FUNCOID(); + + SET_FUNCOID(); res = DirectFunctionCall3( lexize, @@ -267,7 +269,7 @@ Datum set_curdict(PG_FUNCTION_ARGS); Datum set_curdict(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); finddict(PG_GETARG_OID(0)); currect_dictionary_id = PG_GETARG_OID(0); PG_RETURN_VOID(); @@ -279,7 +281,8 @@ Datum set_curdict_byname(PG_FUNCTION_ARGS) { text *dictname = PG_GETARG_TEXT_P(0); - SET_FUNCOID(); + + SET_FUNCOID(); DirectFunctionCall1( set_curdict, ObjectIdGetDatum(name2id_dict(dictname)) @@ -294,7 +297,8 @@ Datum lexize_bycurrent(PG_FUNCTION_ARGS) { Datum res; - SET_FUNCOID(); + + SET_FUNCOID(); if (currect_dictionary_id == 0) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), diff --git a/contrib/tsearch2/gistidx.c b/contrib/tsearch2/gistidx.c index 6f9475a0c79..f0a9d8828d9 100644 --- a/contrib/tsearch2/gistidx.c +++ b/contrib/tsearch2/gistidx.c @@ -123,8 +123,8 @@ gtsvector_compress(PG_FUNCTION_ARGS) if (entry->leafkey) { /* tsvector */ GISTTYPE *res; - tsvector *toastedval = (tsvector *) DatumGetPointer(entry->key); - tsvector *val = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); + tsvector *toastedval = (tsvector *) DatumGetPointer(entry->key); + tsvector *val = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); int4 len; int4 *arr; WordEntry *ptr = ARRPTR(val); @@ -277,10 +277,10 @@ gtsvector_consistent(PG_FUNCTION_ARGS) PG_RETURN_BOOL(true); PG_RETURN_BOOL(TS_execute( - GETQUERY(query), - (void *) GETSIGN(key), false, - checkcondition_bit - )); + GETQUERY(query), + (void *) GETSIGN(key), false, + checkcondition_bit + )); } else { /* only leaf pages */ @@ -289,10 +289,10 @@ gtsvector_consistent(PG_FUNCTION_ARGS) chkval.arrb = GETARR(key); chkval.arre = chkval.arrb + ARRNELEM(key); PG_RETURN_BOOL(TS_execute( - GETQUERY(query), - (void *) &chkval, true, - checkcondition_arr - )); + GETQUERY(query), + (void *) &chkval, true, + checkcondition_arr + )); } } @@ -326,10 +326,11 @@ unionkey(BITVECP sbase, GISTTYPE * add) Datum gtsvector_union(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); int *size = (int *) PG_GETARG_POINTER(1); BITVEC base; - int4 i,len; + int4 i, + len; int4 flag = 0; GISTTYPE *result; @@ -418,34 +419,39 @@ sizebitvec(BITVECP sign) i; LOOPBYTE( - size += SUMBIT(*(char *) sign); - sign = (BITVECP) (((char *) sign) + 1); + size += SUMBIT(*(char *) sign); + sign = (BITVECP) (((char *) sign) + 1); ); return size; } static int -hemdistsign(BITVECP a, BITVECP b) { - int i,dist=0; +hemdistsign(BITVECP a, BITVECP b) +{ + int i, + dist = 0; LOOPBIT( - if ( GETBIT(a,i) != GETBIT(b,i) ) + if (GETBIT(a, i) != GETBIT(b, i)) dist++; ); return dist; } static 
int -hemdist(GISTTYPE *a, GISTTYPE *b) { - if ( ISALLTRUE(a) ) { +hemdist(GISTTYPE * a, GISTTYPE * b) +{ + if (ISALLTRUE(a)) + { if (ISALLTRUE(b)) return 0; else - return SIGLENBIT-sizebitvec(GETSIGN(b)); - } else if (ISALLTRUE(b)) - return SIGLENBIT-sizebitvec(GETSIGN(a)); + return SIGLENBIT - sizebitvec(GETSIGN(b)); + } + else if (ISALLTRUE(b)) + return SIGLENBIT - sizebitvec(GETSIGN(a)); - return hemdistsign( GETSIGN(a), GETSIGN(b) ); + return hemdistsign(GETSIGN(a), GETSIGN(b)); } Datum @@ -460,17 +466,19 @@ gtsvector_penalty(PG_FUNCTION_ARGS) *penalty = 0.0; - if (ISARRKEY(newval)) { - BITVEC sign; + if (ISARRKEY(newval)) + { + BITVEC sign; + makesign(sign, newval); - if ( ISALLTRUE(origval) ) - *penalty=((float)(SIGLENBIT-sizebitvec(sign)))/(float)(SIGLENBIT+1); - else - *penalty=hemdistsign(sign,orig); - } else { - *penalty=hemdist(origval,newval); + if (ISALLTRUE(origval)) + *penalty = ((float) (SIGLENBIT - sizebitvec(sign))) / (float) (SIGLENBIT + 1); + else + *penalty = hemdistsign(sign, orig); } + else + *penalty = hemdist(origval, newval); PG_RETURN_POINTER(penalty); } @@ -510,22 +518,25 @@ comparecost(const void *a, const void *b) static int -hemdistcache(CACHESIGN *a, CACHESIGN *b) { - if ( a->allistrue ) { +hemdistcache(CACHESIGN * a, CACHESIGN * b) +{ + if (a->allistrue) + { if (b->allistrue) return 0; else - return SIGLENBIT-sizebitvec(b->sign); - } else if (b->allistrue) - return SIGLENBIT-sizebitvec(a->sign); + return SIGLENBIT - sizebitvec(b->sign); + } + else if (b->allistrue) + return SIGLENBIT - sizebitvec(a->sign); - return hemdistsign( a->sign, b->sign ); + return hemdistsign(a->sign, b->sign); } Datum gtsvector_picksplit(PG_FUNCTION_ARGS) { - GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); + GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0); GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1); OffsetNumber k, j; @@ -556,13 +567,16 @@ gtsvector_picksplit(PG_FUNCTION_ARGS) cache = (CACHESIGN *) palloc(sizeof(CACHESIGN) * (maxoff + 2)); fillcache(&cache[FirstOffsetNumber], GETENTRY(entryvec, FirstOffsetNumber)); - for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) { - for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) { + for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k)) + { + for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) + { if (k == FirstOffsetNumber) fillcache(&cache[j], GETENTRY(entryvec, j)); - size_waste=hemdistcache(&(cache[j]),&(cache[k])); - if (size_waste > waste) { + size_waste = hemdistcache(&(cache[j]), &(cache[k])); + if (size_waste > waste) + { waste = size_waste; seed_1 = k; seed_2 = j; @@ -575,101 +589,124 @@ gtsvector_picksplit(PG_FUNCTION_ARGS) right = v->spl_right; v->spl_nright = 0; - if (seed_1 == 0 || seed_2 == 0) { + if (seed_1 == 0 || seed_2 == 0) + { seed_1 = 1; seed_2 = 2; } /* form initial .. 
*/ - if (cache[seed_1].allistrue) { + if (cache[seed_1].allistrue) + { datum_l = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY | ALLISTRUE, 0)); datum_l->len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0); datum_l->flag = SIGNKEY | ALLISTRUE; - } else { + } + else + { datum_l = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY, 0)); datum_l->len = CALCGTSIZE(SIGNKEY, 0); datum_l->flag = SIGNKEY; memcpy((void *) GETSIGN(datum_l), (void *) cache[seed_1].sign, sizeof(BITVEC)); } - if (cache[seed_2].allistrue) { + if (cache[seed_2].allistrue) + { datum_r = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY | ALLISTRUE, 0)); datum_r->len = CALCGTSIZE(SIGNKEY | ALLISTRUE, 0); datum_r->flag = SIGNKEY | ALLISTRUE; - } else { + } + else + { datum_r = (GISTTYPE *) palloc(CALCGTSIZE(SIGNKEY, 0)); datum_r->len = CALCGTSIZE(SIGNKEY, 0); datum_r->flag = SIGNKEY; memcpy((void *) GETSIGN(datum_r), (void *) cache[seed_2].sign, sizeof(BITVEC)); } - union_l=GETSIGN(datum_l); - union_r=GETSIGN(datum_r); + union_l = GETSIGN(datum_l); + union_r = GETSIGN(datum_r); maxoff = OffsetNumberNext(maxoff); fillcache(&cache[maxoff], GETENTRY(entryvec, maxoff)); /* sort before ... */ costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff); - for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) { + for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) + { costvector[j - 1].pos = j; size_alpha = hemdistcache(&(cache[seed_1]), &(cache[j])); - size_beta = hemdistcache(&(cache[seed_2]), &(cache[j])); + size_beta = hemdistcache(&(cache[seed_2]), &(cache[j])); costvector[j - 1].cost = abs(size_alpha - size_beta); } qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost); - for (k = 0; k < maxoff; k++) { + for (k = 0; k < maxoff; k++) + { j = costvector[k].pos; - if (j == seed_1) { + if (j == seed_1) + { *left++ = j; v->spl_nleft++; continue; - } else if (j == seed_2) { + } + else if (j == seed_2) + { *right++ = j; v->spl_nright++; continue; } - if (ISALLTRUE(datum_l) || cache[j].allistrue) { - if ( ISALLTRUE(datum_l) && cache[j].allistrue ) - size_alpha=0; + if (ISALLTRUE(datum_l) || cache[j].allistrue) + { + if (ISALLTRUE(datum_l) && cache[j].allistrue) + size_alpha = 0; else - size_alpha = SIGLENBIT-sizebitvec( - ( cache[j].allistrue ) ? GETSIGN(datum_l) : GETSIGN(cache[j].sign) - ); - } else { - size_alpha=hemdistsign(cache[j].sign,GETSIGN(datum_l)); + size_alpha = SIGLENBIT - sizebitvec( + (cache[j].allistrue) ? GETSIGN(datum_l) : GETSIGN(cache[j].sign) + ); } + else + size_alpha = hemdistsign(cache[j].sign, GETSIGN(datum_l)); - if (ISALLTRUE(datum_r) || cache[j].allistrue) { - if ( ISALLTRUE(datum_r) && cache[j].allistrue ) - size_beta=0; + if (ISALLTRUE(datum_r) || cache[j].allistrue) + { + if (ISALLTRUE(datum_r) && cache[j].allistrue) + size_beta = 0; else - size_beta = SIGLENBIT-sizebitvec( - ( cache[j].allistrue ) ? GETSIGN(datum_r) : GETSIGN(cache[j].sign) - ); - } else { - size_beta=hemdistsign(cache[j].sign,GETSIGN(datum_r)); + size_beta = SIGLENBIT - sizebitvec( + (cache[j].allistrue) ? GETSIGN(datum_r) : GETSIGN(cache[j].sign) + ); } + else + size_beta = hemdistsign(cache[j].sign, GETSIGN(datum_r)); - if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.1)) { - if (ISALLTRUE(datum_l) || cache[j].allistrue) { - if (! 
ISALLTRUE(datum_l) ) + if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.1)) + { + if (ISALLTRUE(datum_l) || cache[j].allistrue) + { + if (!ISALLTRUE(datum_l)) MemSet((void *) GETSIGN(datum_l), 0xff, sizeof(BITVEC)); - } else { - ptr=cache[j].sign; + } + else + { + ptr = cache[j].sign; LOOPBYTE( - union_l[i] |= ptr[i]; + union_l[i] |= ptr[i]; ); } *left++ = j; v->spl_nleft++; - } else { - if (ISALLTRUE(datum_r) || cache[j].allistrue) { - if (! ISALLTRUE(datum_r) ) + } + else + { + if (ISALLTRUE(datum_r) || cache[j].allistrue) + { + if (!ISALLTRUE(datum_r)) MemSet((void *) GETSIGN(datum_r), 0xff, sizeof(BITVEC)); - } else { - ptr=cache[j].sign; + } + else + { + ptr = cache[j].sign; LOOPBYTE( - union_r[i] |= ptr[i]; + union_r[i] |= ptr[i]; ); } *right++ = j; diff --git a/contrib/tsearch2/ispell/regis.c b/contrib/tsearch2/ispell/regis.c index 052413788b0..996417b18a9 100644 --- a/contrib/tsearch2/ispell/regis.c +++ b/contrib/tsearch2/ispell/regis.c @@ -2,105 +2,132 @@ #include <stdlib.h> #include <string.h> #include <ctype.h> - + #include "regis.h" #include "common.h" int -RS_isRegis(const char *str) { - unsigned char *ptr=(unsigned char *)str; +RS_isRegis(const char *str) +{ + unsigned char *ptr = (unsigned char *) str; - while(ptr && *ptr) - if ( isalpha(*ptr) || *ptr=='[' || *ptr==']' || *ptr=='^') + while (ptr && *ptr) + if (isalpha(*ptr) || *ptr == '[' || *ptr == ']' || *ptr == '^') ptr++; else return 0; - return 1; + return 1; } -#define RS_IN_ONEOF 1 +#define RS_IN_ONEOF 1 #define RS_IN_ONEOF_IN 2 #define RS_IN_NONEOF 3 #define RS_IN_WAIT 4 -static RegisNode* -newRegisNode(RegisNode *prev, int len) { - RegisNode *ptr; - ptr = (RegisNode*)malloc(RNHDRSZ+len+1); +static RegisNode * +newRegisNode(RegisNode * prev, int len) +{ + RegisNode *ptr; + + ptr = (RegisNode *) malloc(RNHDRSZ + len + 1); if (!ptr) - ts_error(ERROR, "No memory"); - memset(ptr,0,RNHDRSZ+len+1); + ts_error(ERROR, "No memory"); + memset(ptr, 0, RNHDRSZ + len + 1); if (prev) - prev->next=ptr; + prev->next = ptr; return ptr; } int -RS_compile(Regis *r, int issuffix, const char *str) { - int i,len = strlen(str); - int state = RS_IN_WAIT; - RegisNode *ptr=NULL; - - memset(r,0,sizeof(Regis)); +RS_compile(Regis * r, int issuffix, const char *str) +{ + int i, + len = strlen(str); + int state = RS_IN_WAIT; + RegisNode *ptr = NULL; + + memset(r, 0, sizeof(Regis)); r->issuffix = (issuffix) ? 
1 : 0; - for(i=0;i<len;i++) { - unsigned char c = *( ( (unsigned char*)str ) + i ); - if ( state == RS_IN_WAIT ) { - if ( isalpha(c) ) { - if ( ptr ) - ptr = newRegisNode(ptr,len); + for (i = 0; i < len; i++) + { + unsigned char c = *(((unsigned char *) str) + i); + + if (state == RS_IN_WAIT) + { + if (isalpha(c)) + { + if (ptr) + ptr = newRegisNode(ptr, len); else - ptr = r->node = newRegisNode(NULL,len); - ptr->data[ 0 ] = c; + ptr = r->node = newRegisNode(NULL, len); + ptr->data[0] = c; ptr->type = RSF_ONEOF; - ptr->len=1; - } else if ( c=='[' ) { - if ( ptr ) - ptr = newRegisNode(ptr,len); + ptr->len = 1; + } + else if (c == '[') + { + if (ptr) + ptr = newRegisNode(ptr, len); else - ptr = r->node = newRegisNode(NULL,len); + ptr = r->node = newRegisNode(NULL, len); ptr->type = RSF_ONEOF; - state=RS_IN_ONEOF; - } else - ts_error(ERROR,"Error in regis: %s at pos %d\n", str, i+1); - } else if ( state == RS_IN_ONEOF ) { - if ( c=='^' ) { + state = RS_IN_ONEOF; + } + else + ts_error(ERROR, "Error in regis: %s at pos %d\n", str, i + 1); + } + else if (state == RS_IN_ONEOF) + { + if (c == '^') + { ptr->type = RSF_NONEOF; - state=RS_IN_NONEOF; - } else if ( isalpha(c) ) { - ptr->data[ 0 ] = c; - ptr->len=1; - state=RS_IN_ONEOF_IN; - } else - ts_error(ERROR,"Error in regis: %s at pos %d\n", str, i+1); - } else if ( state == RS_IN_ONEOF_IN || state == RS_IN_NONEOF ) { - if ( isalpha(c) ) { - ptr->data[ ptr->len ] = c; + state = RS_IN_NONEOF; + } + else if (isalpha(c)) + { + ptr->data[0] = c; + ptr->len = 1; + state = RS_IN_ONEOF_IN; + } + else + ts_error(ERROR, "Error in regis: %s at pos %d\n", str, i + 1); + } + else if (state == RS_IN_ONEOF_IN || state == RS_IN_NONEOF) + { + if (isalpha(c)) + { + ptr->data[ptr->len] = c; ptr->len++; - } else if ( c==']' ) { - state=RS_IN_WAIT; - } else - ts_error(ERROR,"Error in regis: %s at pos %d\n", str, i+1); - } else - ts_error(ERROR,"Internal error in RS_compile: %d\n", state); + } + else if (c == ']') + state = RS_IN_WAIT; + else + ts_error(ERROR, "Error in regis: %s at pos %d\n", str, i + 1); + } + else + ts_error(ERROR, "Internal error in RS_compile: %d\n", state); } ptr = r->node; - while(ptr) { + while (ptr) + { r->nchar++; - ptr=ptr->next; + ptr = ptr->next; } return 0; } -void -RS_free(Regis *r) { - RegisNode *ptr=r->node,*tmp; +void +RS_free(Regis * r) +{ + RegisNode *ptr = r->node, + *tmp; - while(ptr) { - tmp=ptr->next; + while (ptr) + { + tmp = ptr->next; free(ptr); ptr = tmp; } @@ -108,42 +135,49 @@ RS_free(Regis *r) { r->node = NULL; } -int -RS_execute(Regis *r, const char *str, int len) { - RegisNode *ptr=r->node; +int +RS_execute(Regis * r, const char *str, int len) +{ + RegisNode *ptr = r->node; unsigned char *c; - if (len<0) - len=strlen(str); + if (len < 0) + len = strlen(str); - if (len<r->nchar) + if (len < r->nchar) return 0; - if ( r->issuffix ) - c = ((unsigned char*)str) + len - r->nchar; + if (r->issuffix) + c = ((unsigned char *) str) + len - r->nchar; else - c = (unsigned char*)str; + c = (unsigned char *) str; - while(ptr) { - switch(ptr->type) { + while (ptr) + { + switch (ptr->type) + { case RSF_ONEOF: - if ( ptr->len==0 ) { - if ( *c != *(ptr->data) ) + if (ptr->len == 0) + { + if (*c != *(ptr->data)) return 0; - } else if ( strchr((char*)ptr->data, *c) == NULL ) + } + else if (strchr((char *) ptr->data, *c) == NULL) return 0; break; case RSF_NONEOF: - if ( ptr->len==0 ) { - if ( *c == *(ptr->data) ) + if (ptr->len == 0) + { + if (*c == *(ptr->data)) return 0; - } else if ( strchr((char*)ptr->data, *c) != NULL ) + } + else 
if (strchr((char *) ptr->data, *c) != NULL) return 0; break; default: - ts_error(ERROR,"RS_execute: Unknown type node: %d\n", ptr->type); + ts_error(ERROR, "RS_execute: Unknown type node: %d\n", ptr->type); } - ptr=ptr->next; + ptr = ptr->next; c++; } diff --git a/contrib/tsearch2/ispell/regis.h b/contrib/tsearch2/ispell/regis.h index 64a7e9d996c..fd03de45e98 100644 --- a/contrib/tsearch2/ispell/regis.h +++ b/contrib/tsearch2/ispell/regis.h @@ -1,34 +1,38 @@ #ifndef __REGIS_H__ #define __REGIS_H__ -#include "postgres.h" +#include "postgres.h" -typedef struct RegisNode { - uint32 - type:2, - len:16, - unused:14; +typedef struct RegisNode +{ + uint32 + type:2, + len:16, + unused:14; struct RegisNode *next; - unsigned char data[1]; -} RegisNode; + unsigned char data[1]; +} RegisNode; -#define RNHDRSZ (sizeof(uint32)+sizeof(void*)) +#define RNHDRSZ (sizeof(uint32)+sizeof(void*)) -#define RSF_ONEOF 1 -#define RSF_NONEOF 2 +#define RSF_ONEOF 1 +#define RSF_NONEOF 2 -typedef struct Regis { - RegisNode *node; - uint32 - issuffix:1, - nchar:16, - unused:15; -} Regis; +typedef struct Regis +{ + RegisNode *node; + uint32 + issuffix:1, + nchar:16, + unused:15; +} Regis; -int RS_isRegis(const char *str); +int RS_isRegis(const char *str); + +int RS_compile(Regis * r, int issuffix, const char *str); +void RS_free(Regis * r); -int RS_compile(Regis *r, int issuffix, const char *str); -void RS_free(Regis *r); /*���������� 1 ���� �������� */ -int RS_execute(Regis *r, const char *str, int len); +int RS_execute(Regis * r, const char *str, int len); + #endif diff --git a/contrib/tsearch2/ispell/spell.c b/contrib/tsearch2/ispell/spell.c index 3a3f19b1e8c..c5783236b63 100644 --- a/contrib/tsearch2/ispell/spell.c +++ b/contrib/tsearch2/ispell/spell.c @@ -40,13 +40,16 @@ strlower(char *str) } } -static char* -strnduplicate(char *s, int len) { - char *d=(char*)palloc( len + 1 ); - memcpy(d, s, len ); - d[len]='\0'; +static char * +strnduplicate(char *s, int len) +{ + char *d = (char *) palloc(len + 1); + + memcpy(d, s, len); + d[len] = '\0'; return d; } + /* backward string compaire for suffix tree operations */ static int strbcmp(const unsigned char *s1, const unsigned char *s2) @@ -188,33 +191,39 @@ NIImportDictionary(IspellDict * Conf, const char *filename) static int FindWord(IspellDict * Conf, const char *word, int affixflag, char compoundonly) { - SPNode *node = Conf->Dictionary; - SPNodeData *StopLow, *StopHigh, *StopMiddle; - uint8 *ptr =(uint8*)word; + SPNode *node = Conf->Dictionary; + SPNodeData *StopLow, + *StopHigh, + *StopMiddle; + uint8 *ptr = (uint8 *) word; - while( node && *ptr) { + while (node && *ptr) + { StopLow = node->data; - StopHigh = node->data+node->length; - while (StopLow < StopHigh) { + StopHigh = node->data + node->length; + while (StopLow < StopHigh) + { StopMiddle = StopLow + ((StopHigh - StopLow) >> 1); - if ( StopMiddle->val == *ptr ) { - if ( *(ptr+1)=='\0' && StopMiddle->isword ) { - if ( compoundonly && !StopMiddle->compoundallow ) + if (StopMiddle->val == *ptr) + { + if (*(ptr + 1) == '\0' && StopMiddle->isword) + { + if (compoundonly && !StopMiddle->compoundallow) return 0; - if ( (affixflag == 0) || (strchr(Conf->AffixData[StopMiddle->affix], affixflag) != NULL)) + if ((affixflag == 0) || (strchr(Conf->AffixData[StopMiddle->affix], affixflag) != NULL)) return 1; } - node=StopMiddle->node; + node = StopMiddle->node; ptr++; break; - } else if ( StopMiddle->val < *ptr ) { + } + else if (StopMiddle->val < *ptr) StopLow = StopMiddle + 1; - } else { + else StopHigh = StopMiddle; - 
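/*
 * A standalone sketch of the lookup step used by FindWord() above: each
 * dictionary-tree node keeps its children sorted by character value, so
 * the next character of the word is located by binary search.  The
 * struct below is a simplified stand-in for SPNodeData, not the real
 * layout.
 */
typedef struct DemoChild
{
	unsigned char val;			/* character labelling this child */
	void	   *node;			/* child subtree (opaque here) */
} DemoChild;

static DemoChild *
find_child(DemoChild *children, int nchildren, unsigned char c)
{
	DemoChild  *lo = children;
	DemoChild  *hi = children + nchildren;

	while (lo < hi)
	{
		DemoChild  *mid = lo + ((hi - lo) >> 1);

		if (mid->val == c)
			return mid;
		else if (mid->val < c)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;				/* character not present at this node */
}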
} } - if ( StopLow >= StopHigh ) - break; + if (StopLow >= StopHigh) + break; } return 0; } @@ -237,31 +246,36 @@ NIAddAffix(IspellDict * Conf, int flag, char flagflags, const char *mask, const MEMOUT(Conf->Affix); } - if ( strcmp(mask,".")==0 ) { - Conf->Affix[Conf->naffixes].issimple=1; - Conf->Affix[Conf->naffixes].isregis=0; - *( Conf->Affix[Conf->naffixes].mask )='\0'; - } else if ( RS_isRegis(mask) ) { - Conf->Affix[Conf->naffixes].issimple=0; - Conf->Affix[Conf->naffixes].isregis=1; - strcpy(Conf->Affix[Conf->naffixes].mask, mask); - } else { - Conf->Affix[Conf->naffixes].issimple=0; - Conf->Affix[Conf->naffixes].isregis=0; - if (type == FF_SUFFIX) - sprintf(Conf->Affix[Conf->naffixes].mask, "%s$", mask); - else - sprintf(Conf->Affix[Conf->naffixes].mask, "^%s", mask); - } - Conf->Affix[Conf->naffixes].compile = 1; - Conf->Affix[Conf->naffixes].flagflags = flagflags; - Conf->Affix[Conf->naffixes].flag = flag; - Conf->Affix[Conf->naffixes].type = type; - - strcpy(Conf->Affix[Conf->naffixes].find, find); - strcpy(Conf->Affix[Conf->naffixes].repl, repl); - Conf->Affix[Conf->naffixes].replen = strlen(repl); - Conf->naffixes++; + if (strcmp(mask, ".") == 0) + { + Conf->Affix[Conf->naffixes].issimple = 1; + Conf->Affix[Conf->naffixes].isregis = 0; + *(Conf->Affix[Conf->naffixes].mask) = '\0'; + } + else if (RS_isRegis(mask)) + { + Conf->Affix[Conf->naffixes].issimple = 0; + Conf->Affix[Conf->naffixes].isregis = 1; + strcpy(Conf->Affix[Conf->naffixes].mask, mask); + } + else + { + Conf->Affix[Conf->naffixes].issimple = 0; + Conf->Affix[Conf->naffixes].isregis = 0; + if (type == FF_SUFFIX) + sprintf(Conf->Affix[Conf->naffixes].mask, "%s$", mask); + else + sprintf(Conf->Affix[Conf->naffixes].mask, "^%s", mask); + } + Conf->Affix[Conf->naffixes].compile = 1; + Conf->Affix[Conf->naffixes].flagflags = flagflags; + Conf->Affix[Conf->naffixes].flag = flag; + Conf->Affix[Conf->naffixes].type = type; + + strcpy(Conf->Affix[Conf->naffixes].find, find); + strcpy(Conf->Affix[Conf->naffixes].repl, repl); + Conf->Affix[Conf->naffixes].replen = strlen(repl); + Conf->naffixes++; return (0); } @@ -304,46 +318,55 @@ NIImportAffixes(IspellDict * Conf, const char *filename) if (!(affix = fopen(filename, "r"))) return (1); - Conf->compoundcontrol='\t'; + Conf->compoundcontrol = '\t'; while (fgets(str, sizeof(str), affix)) { - if (STRNCASECMP(str, "compoundwords")==0) { - s=strchr(str, 'l'); - if ( s ) { - while( *s!=' ' ) s++; - while( *s==' ' ) s++; + if (STRNCASECMP(str, "compoundwords") == 0) + { + s = strchr(str, 'l'); + if (s) + { + while (*s != ' ') + s++; + while (*s == ' ') + s++; Conf->compoundcontrol = *s; - continue; + continue; } } - if (STRNCASECMP(str, "suffixes")==0) + if (STRNCASECMP(str, "suffixes") == 0) { suffixes = 1; prefixes = 0; continue; } - if (STRNCASECMP(str, "prefixes")==0) + if (STRNCASECMP(str, "prefixes") == 0) { suffixes = 0; prefixes = 1; continue; } - if (STRNCASECMP(str, "flag ")==0) + if (STRNCASECMP(str, "flag ") == 0) { s = str + 5; - flagflags=0; - while( *s==' ' ) s++; - if ( *s=='*' ) { - flagflags|=FF_CROSSPRODUCT; + flagflags = 0; + while (*s == ' ') + s++; + if (*s == '*') + { + flagflags |= FF_CROSSPRODUCT; s++; - } else if ( *s=='~' ) { - flagflags|=FF_COMPOUNDONLYAFX; + } + else if (*s == '~') + { + flagflags |= FF_COMPOUNDONLYAFX; s++; } - if ( *s=='\\' ) s++; - + if (*s == '\\') + s++; + flag = *s; continue; } @@ -387,84 +410,93 @@ NIImportAffixes(IspellDict * Conf, const char *filename) return (0); } -static int -MergeAffix(IspellDict *Conf, int a1, int a2) { - 
int naffix=0; - char **ptr=Conf->AffixData; +static int +MergeAffix(IspellDict * Conf, int a1, int a2) +{ + int naffix = 0; + char **ptr = Conf->AffixData; - while(*ptr) { + while (*ptr) + { naffix++; ptr++; } - - Conf->AffixData=(char**)realloc( Conf->AffixData, (naffix+2)*sizeof(char*) ); + + Conf->AffixData = (char **) realloc(Conf->AffixData, (naffix + 2) * sizeof(char *)); MEMOUT(Conf->AffixData); ptr = Conf->AffixData + naffix; - *ptr=malloc( strlen(Conf->AffixData[a1]) + strlen(Conf->AffixData[a2]) + 1 /* space */ + 1 /* \0 */ ); + *ptr = malloc(strlen(Conf->AffixData[a1]) + strlen(Conf->AffixData[a2]) + 1 /* space */ + 1 /* \0 */ ); MEMOUT(ptr); sprintf(*ptr, "%s %s", Conf->AffixData[a1], Conf->AffixData[a2]); ptr++; - *ptr='\0'; - return naffix; + *ptr = '\0'; + return naffix; } -static SPNode* -mkSPNode(IspellDict *Conf, int low, int high, int level) { - int i; - int nchar=0; - char lastchar='\0'; - SPNode *rs; +static SPNode * +mkSPNode(IspellDict * Conf, int low, int high, int level) +{ + int i; + int nchar = 0; + char lastchar = '\0'; + SPNode *rs; SPNodeData *data; - int lownew=low; + int lownew = low; - for(i=low; i<high; i++) - if ( Conf->Spell[i].p.d.len>level && lastchar!=Conf->Spell[i].word[level] ) { + for (i = low; i < high; i++) + if (Conf->Spell[i].p.d.len > level && lastchar != Conf->Spell[i].word[level]) + { nchar++; - lastchar=Conf->Spell[i].word[level]; + lastchar = Conf->Spell[i].word[level]; } if (!nchar) return NULL; - rs=(SPNode*)malloc(SPNHRDSZ+nchar*sizeof(SPNodeData)); + rs = (SPNode *) malloc(SPNHRDSZ + nchar * sizeof(SPNodeData)); MEMOUT(rs); - memset(rs,0,SPNHRDSZ+nchar*sizeof(SPNodeData)); + memset(rs, 0, SPNHRDSZ + nchar * sizeof(SPNodeData)); rs->length = nchar; - data=rs->data; - - lastchar='\0'; - for(i=low; i<high; i++) - if ( Conf->Spell[i].p.d.len>level ) { - if ( lastchar!=Conf->Spell[i].word[level] ) { - if ( lastchar ) { - data->node = mkSPNode(Conf, lownew, i, level+1); - lownew=i; + data = rs->data; + + lastchar = '\0'; + for (i = low; i < high; i++) + if (Conf->Spell[i].p.d.len > level) + { + if (lastchar != Conf->Spell[i].word[level]) + { + if (lastchar) + { + data->node = mkSPNode(Conf, lownew, i, level + 1); + lownew = i; data++; } - lastchar=Conf->Spell[i].word[level]; + lastchar = Conf->Spell[i].word[level]; } - data->val=((uint8*)(Conf->Spell[i].word))[level]; - if ( Conf->Spell[i].p.d.len == level+1 ) { - if ( data->isword && data->affix!=Conf->Spell[i].p.d.affix) { - /* - fprintf(stderr,"Word already exists: %s (affixes: '%s' and '%s')\n", - Conf->Spell[i].word, - Conf->AffixData[data->affix], - Conf->AffixData[Conf->Spell[i].p.d.affix] - ); - */ + data->val = ((uint8 *) (Conf->Spell[i].word))[level]; + if (Conf->Spell[i].p.d.len == level + 1) + { + if (data->isword && data->affix != Conf->Spell[i].p.d.affix) + { + /* + * fprintf(stderr,"Word already exists: %s (affixes: + * '%s' and '%s')\n", Conf->Spell[i].word, + * Conf->AffixData[data->affix], + * Conf->AffixData[Conf->Spell[i].p.d.affix] ); + */ /* MergeAffix called a few times */ data->affix = MergeAffix(Conf, data->affix, Conf->Spell[i].p.d.affix); - } else + } + else data->affix = Conf->Spell[i].p.d.affix; - data->isword=1; - if ( strchr( Conf->AffixData[ data->affix ], Conf->compoundcontrol ) ) - data->compoundallow=1; + data->isword = 1; + if (strchr(Conf->AffixData[data->affix], Conf->compoundcontrol)) + data->compoundallow = 1; } } - - data->node = mkSPNode(Conf, lownew, high, level+1); + + data->node = mkSPNode(Conf, lownew, high, level + 1); return rs; } @@ -475,132 
+507,147 @@ void NISortDictionary(IspellDict * Conf) { size_t i; - int naffix=3; - + int naffix = 3; + /* compress affixes */ qsort((void *) Conf->Spell, Conf->nspell, sizeof(SPELL), cmpspellaffix); for (i = 1; i < Conf->nspell; i++) - if ( strcmp(Conf->Spell[i].p.flag,Conf->Spell[i-1].p.flag) ) + if (strcmp(Conf->Spell[i].p.flag, Conf->Spell[i - 1].p.flag)) naffix++; - Conf->AffixData=(char**)malloc( naffix*sizeof(char*) ); + Conf->AffixData = (char **) malloc(naffix * sizeof(char *)); MEMOUT(Conf->AffixData); - memset(Conf->AffixData, 0, naffix*sizeof(char*)); - naffix=1; - Conf->AffixData[0]=strdup(""); + memset(Conf->AffixData, 0, naffix * sizeof(char *)); + naffix = 1; + Conf->AffixData[0] = strdup(""); MEMOUT(Conf->AffixData[0]); - Conf->AffixData[1]=strdup( Conf->Spell[0].p.flag ); + Conf->AffixData[1] = strdup(Conf->Spell[0].p.flag); MEMOUT(Conf->AffixData[1]); Conf->Spell[0].p.d.affix = 1; Conf->Spell[0].p.d.len = strlen(Conf->Spell[0].word); - for (i = 1; i < Conf->nspell; i++) { - if ( strcmp(Conf->Spell[i].p.flag, Conf->AffixData[naffix]) ) { + for (i = 1; i < Conf->nspell; i++) + { + if (strcmp(Conf->Spell[i].p.flag, Conf->AffixData[naffix])) + { naffix++; - Conf->AffixData[naffix] = strdup( Conf->Spell[i].p.flag ); + Conf->AffixData[naffix] = strdup(Conf->Spell[i].p.flag); MEMOUT(Conf->AffixData[naffix]); } Conf->Spell[i].p.d.affix = naffix; Conf->Spell[i].p.d.len = strlen(Conf->Spell[i].word); } - + qsort((void *) Conf->Spell, Conf->nspell, sizeof(SPELL), cmpspell); Conf->Dictionary = mkSPNode(Conf, 0, Conf->nspell, 0); - - for (i = 0; i < Conf->nspell; i++) - free( Conf->Spell[i].word ); - free( Conf->Spell ); - Conf->Spell=NULL; + + for (i = 0; i < Conf->nspell; i++) + free(Conf->Spell[i].word); + free(Conf->Spell); + Conf->Spell = NULL; } -static AffixNode* -mkANode(IspellDict *Conf, int low, int high, int level, int type) { - int i; - int nchar=0; - uint8 lastchar='\0'; - AffixNode *rs; +static AffixNode * +mkANode(IspellDict * Conf, int low, int high, int level, int type) +{ + int i; + int nchar = 0; + uint8 lastchar = '\0'; + AffixNode *rs; AffixNodeData *data; - int lownew=low; + int lownew = low; - for(i=low; i<high; i++) - if ( Conf->Affix[i].replen>level && lastchar!=GETCHAR( Conf->Affix + i, level, type ) ) { + for (i = low; i < high; i++) + if (Conf->Affix[i].replen > level && lastchar != GETCHAR(Conf->Affix + i, level, type)) + { nchar++; - lastchar=GETCHAR( Conf->Affix + i, level, type ); + lastchar = GETCHAR(Conf->Affix + i, level, type); } if (!nchar) return NULL; - rs=(AffixNode*)malloc(ANHRDSZ+nchar*sizeof(AffixNodeData)); + rs = (AffixNode *) malloc(ANHRDSZ + nchar * sizeof(AffixNodeData)); MEMOUT(rs); - memset(rs,0,ANHRDSZ+nchar*sizeof(AffixNodeData)); + memset(rs, 0, ANHRDSZ + nchar * sizeof(AffixNodeData)); rs->length = nchar; - data=rs->data; - - lastchar='\0'; - for(i=low; i<high; i++) - if ( Conf->Affix[i].replen>level ) { - if ( lastchar!=GETCHAR( Conf->Affix + i, level, type ) ) { - if ( lastchar ) { - data->node = mkANode(Conf, lownew, i, level+1, type); - lownew=i; + data = rs->data; + + lastchar = '\0'; + for (i = low; i < high; i++) + if (Conf->Affix[i].replen > level) + { + if (lastchar != GETCHAR(Conf->Affix + i, level, type)) + { + if (lastchar) + { + data->node = mkANode(Conf, lownew, i, level + 1, type); + lownew = i; data++; } - lastchar=GETCHAR( Conf->Affix + i, level, type ); + lastchar = GETCHAR(Conf->Affix + i, level, type); } - data->val=GETCHAR( Conf->Affix + i, level, type ); - if ( Conf->Affix[i].replen == level+1 ) { /* affix 
stopped */ - if ( !data->naff ) { - data->aff=(AFFIX**)malloc(sizeof(AFFIX*)*(high-i+1)); + data->val = GETCHAR(Conf->Affix + i, level, type); + if (Conf->Affix[i].replen == level + 1) + { /* affix stopped */ + if (!data->naff) + { + data->aff = (AFFIX **) malloc(sizeof(AFFIX *) * (high - i + 1)); MEMOUT(data->aff); } - data->aff[ data->naff ] = Conf->Affix + i; + data->aff[data->naff] = Conf->Affix + i; data->naff++; } } - - data->node = mkANode(Conf, lownew, high, level+1, type); + + data->node = mkANode(Conf, lownew, high, level + 1, type); return rs; } static void -mkVoidAffix(IspellDict * Conf, int issuffix, int startsuffix) { - int i,cnt=0; - int start = (issuffix) ? startsuffix : 0; - int end = (issuffix) ? Conf->naffixes : startsuffix; - AffixNode *Affix = (AffixNode*)malloc( ANHRDSZ + sizeof(AffixNodeData)); +mkVoidAffix(IspellDict * Conf, int issuffix, int startsuffix) +{ + int i, + cnt = 0; + int start = (issuffix) ? startsuffix : 0; + int end = (issuffix) ? Conf->naffixes : startsuffix; + AffixNode *Affix = (AffixNode *) malloc(ANHRDSZ + sizeof(AffixNodeData)); MEMOUT(Affix); - memset(Affix, 0, ANHRDSZ + sizeof(AffixNodeData) ); - Affix->length=1; - Affix->isvoid=1; + memset(Affix, 0, ANHRDSZ + sizeof(AffixNodeData)); + Affix->length = 1; + Affix->isvoid = 1; - if (issuffix) { - Affix->data->node=Conf->Suffix; - Conf->Suffix = Affix; - } else { - Affix->data->node=Conf->Prefix; - Conf->Prefix = Affix; - } + if (issuffix) + { + Affix->data->node = Conf->Suffix; + Conf->Suffix = Affix; + } + else + { + Affix->data->node = Conf->Prefix; + Conf->Prefix = Affix; + } - for(i=start;i<end;i++) - if (Conf->Affix[i].replen==0) - cnt++; + for (i = start; i < end; i++) + if (Conf->Affix[i].replen == 0) + cnt++; - if ( cnt==0 ) - return; + if (cnt == 0) + return; - Affix->data->aff = (AFFIX**)malloc( sizeof(AFFIX*) * cnt ); + Affix->data->aff = (AFFIX **) malloc(sizeof(AFFIX *) * cnt); MEMOUT(Affix->data->aff); - Affix->data->naff = (uint32)cnt; - - cnt=0; - for(i=start;i<end;i++) - if (Conf->Affix[i].replen==0) { - Affix->data->aff[cnt] = Conf->Affix + i; - cnt++; - } + Affix->data->naff = (uint32) cnt; + + cnt = 0; + for (i = start; i < end; i++) + if (Conf->Affix[i].replen == 0) + { + Affix->data->aff[cnt] = Conf->Affix + i; + cnt++; + } } void @@ -608,120 +655,149 @@ NISortAffixes(IspellDict * Conf) { AFFIX *Affix; size_t i; - CMPDAffix* ptr; - int firstsuffix=-1; + CMPDAffix *ptr; + int firstsuffix = -1; if (Conf->naffixes > 1) qsort((void *) Conf->Affix, Conf->naffixes, sizeof(AFFIX), cmpaffix); - Conf->CompoundAffix = ptr = (CMPDAffix*)malloc( sizeof(CMPDAffix) * Conf->naffixes ); + Conf->CompoundAffix = ptr = (CMPDAffix *) malloc(sizeof(CMPDAffix) * Conf->naffixes); MEMOUT(Conf->CompoundAffix); - ptr->affix=NULL; + ptr->affix = NULL; - for (i = 0; i < Conf->naffixes; i++) { + for (i = 0; i < Conf->naffixes; i++) + { Affix = &(((AFFIX *) Conf->Affix)[i]); - if ( Affix->type == FF_SUFFIX ) { - if ( firstsuffix<0 ) firstsuffix=i; - if ( Affix->flagflags & FF_COMPOUNDONLYAFX ) { - if ( !ptr->affix || strbncmp((ptr-1)->affix, Affix->repl, (ptr-1)->len) ) { + if (Affix->type == FF_SUFFIX) + { + if (firstsuffix < 0) + firstsuffix = i; + if (Affix->flagflags & FF_COMPOUNDONLYAFX) + { + if (!ptr->affix || strbncmp((ptr - 1)->affix, Affix->repl, (ptr - 1)->len)) + { /* leave only unique and minimals suffixes */ - ptr->affix=Affix->repl; - ptr->len=Affix->replen; + ptr->affix = Affix->repl; + ptr->len = Affix->replen; ptr++; } } } } ptr->affix = NULL; - Conf->CompoundAffix = 
(CMPDAffix*)realloc( Conf->CompoundAffix, sizeof(CMPDAffix) * (ptr-Conf->CompoundAffix+1) ); + Conf->CompoundAffix = (CMPDAffix *) realloc(Conf->CompoundAffix, sizeof(CMPDAffix) * (ptr - Conf->CompoundAffix + 1)); - Conf->Prefix = mkANode(Conf, 0, firstsuffix, 0, FF_PREFIX); + Conf->Prefix = mkANode(Conf, 0, firstsuffix, 0, FF_PREFIX); Conf->Suffix = mkANode(Conf, firstsuffix, Conf->naffixes, 0, FF_SUFFIX); - mkVoidAffix(Conf, 1, firstsuffix); - mkVoidAffix(Conf, 0, firstsuffix); + mkVoidAffix(Conf, 1, firstsuffix); + mkVoidAffix(Conf, 0, firstsuffix); } -static AffixNodeData* -FinfAffixes(AffixNode *node, const char *word, int wrdlen, int *level, int type) { - AffixNodeData *StopLow, *StopHigh, *StopMiddle; - uint8 symbol; - - if ( node->isvoid ) { /* search void affixes */ - if (node->data->naff) - return node->data; - node = node->data->node; - } +static AffixNodeData * +FinfAffixes(AffixNode * node, const char *word, int wrdlen, int *level, int type) +{ + AffixNodeData *StopLow, + *StopHigh, + *StopMiddle; + uint8 symbol; + + if (node->isvoid) + { /* search void affixes */ + if (node->data->naff) + return node->data; + node = node->data->node; + } - while( node && *level<wrdlen) { + while (node && *level < wrdlen) + { StopLow = node->data; - StopHigh = node->data+node->length; - while (StopLow < StopHigh) { + StopHigh = node->data + node->length; + while (StopLow < StopHigh) + { StopMiddle = StopLow + ((StopHigh - StopLow) >> 1); - symbol = GETWCHAR(word,wrdlen,*level,type); - if ( StopMiddle->val == symbol ) { + symbol = GETWCHAR(word, wrdlen, *level, type); + if (StopMiddle->val == symbol) + { (*level)++; - if ( StopMiddle->naff ) + if (StopMiddle->naff) return StopMiddle; - node=StopMiddle->node; + node = StopMiddle->node; break; - } else if ( StopMiddle->val < symbol ) { + } + else if (StopMiddle->val < symbol) StopLow = StopMiddle + 1; - } else { + else StopHigh = StopMiddle; - } } - if ( StopLow >= StopHigh ) - break; + if (StopLow >= StopHigh) + break; } return NULL; } static char * -CheckAffix(const char *word, size_t len, AFFIX * Affix, char flagflags, char *newword) { +CheckAffix(const char *word, size_t len, AFFIX * Affix, char flagflags, char *newword) +{ - if ( flagflags & FF_COMPOUNDONLYAFX ) { - if ( (Affix->flagflags & FF_COMPOUNDONLYAFX) == 0 ) + if (flagflags & FF_COMPOUNDONLYAFX) + { + if ((Affix->flagflags & FF_COMPOUNDONLYAFX) == 0) return NULL; - } else { - if ( Affix->flagflags & FF_COMPOUNDONLYAFX ) + } + else + { + if (Affix->flagflags & FF_COMPOUNDONLYAFX) return NULL; - } + } - if ( Affix->type==FF_SUFFIX ) { + if (Affix->type == FF_SUFFIX) + { strcpy(newword, word); strcpy(newword + len - Affix->replen, Affix->find); - } else { + } + else + { strcpy(newword, Affix->find); strcat(newword, word + Affix->replen); } - if ( Affix->issimple ) { - return newword; - } else if ( Affix->isregis ) { - if (Affix->compile) { - RS_compile(&(Affix->reg.regis), (Affix->type==FF_SUFFIX) ? 1 : 0, Affix->mask); - Affix->compile = 0; - } - if ( RS_execute(&(Affix->reg.regis), newword, -1) ) - return newword; - } else { - regmatch_t subs[2]; /* workaround for apache&linux */ + if (Affix->issimple) + return newword; + else if (Affix->isregis) + { + if (Affix->compile) + { + RS_compile(&(Affix->reg.regis), (Affix->type == FF_SUFFIX) ? 
1 : 0, Affix->mask); + Affix->compile = 0; + } + if (RS_execute(&(Affix->reg.regis), newword, -1)) + return newword; + } + else + { + regmatch_t subs[2]; /* workaround for apache&linux */ int err; pg_wchar *data; size_t data_len; - int dat_len; + int dat_len; + if (Affix->compile) { - int wmasklen,masklen = strlen(Affix->mask); - pg_wchar *mask; + int wmasklen, + masklen = strlen(Affix->mask); + pg_wchar *mask; + mask = (pg_wchar *) palloc((masklen + 1) * sizeof(pg_wchar)); - wmasklen = pg_mb2wchar_with_len( Affix->mask, mask, masklen); - + wmasklen = pg_mb2wchar_with_len(Affix->mask, mask, masklen); + err = pg_regcomp(&(Affix->reg.regex), mask, wmasklen, REG_EXTENDED | REG_ICASE | REG_NOSUB); pfree(mask); if (err) { - /* regerror(err, &(Affix->reg.regex), regerrstr, ERRSTRSIZE); */ + /* + * regerror(err, &(Affix->reg.regex), regerrstr, + * ERRSTRSIZE); + */ pg_regfree(&(Affix->reg.regex)); return (NULL); } @@ -733,9 +809,10 @@ CheckAffix(const char *word, size_t len, AFFIX * Affix, char flagflags, char *ne data = (pg_wchar *) palloc((dat_len + 1) * sizeof(pg_wchar)); data_len = pg_mb2wchar_with_len(newword, data, dat_len); - if (!(err = pg_regexec(&(Affix->reg.regex), data,dat_len,NULL, 1, subs, 0))) { - pfree(data); - return newword; + if (!(err = pg_regexec(&(Affix->reg.regex), data, dat_len, NULL, 1, subs, 0))) + { + pfree(data); + return newword; } pfree(data); } @@ -744,111 +821,143 @@ CheckAffix(const char *word, size_t len, AFFIX * Affix, char flagflags, char *ne } -static char ** -NormalizeSubWord(IspellDict * Conf, char *word, char flag) { - AffixNodeData *suffix=NULL, *prefix=NULL; - int slevel=0, plevel=0; - int wrdlen = strlen(word), swrdlen; +static char ** +NormalizeSubWord(IspellDict * Conf, char *word, char flag) +{ + AffixNodeData *suffix = NULL, + *prefix = NULL; + int slevel = 0, + plevel = 0; + int wrdlen = strlen(word), + swrdlen; char **forms; char **cur; char newword[2 * MAXNORMLEN] = ""; char pnewword[2 * MAXNORMLEN] = ""; - AffixNode *snode = Conf->Suffix, *pnode; - int i,j; + AffixNode *snode = Conf->Suffix, + *pnode; + int i, + j; - if (wrdlen > MAXNORMLEN) return NULL; - strlower(word); + if (wrdlen > MAXNORMLEN) + return NULL; + strlower(word); cur = forms = (char **) palloc(MAX_NORM * sizeof(char *)); *cur = NULL; /* Check that the word itself is normal form */ - if (FindWord(Conf, word, 0, flag & FF_COMPOUNDWORD)) { + if (FindWord(Conf, word, 0, flag & FF_COMPOUNDWORD)) + { *cur = pstrdup(word); cur++; *cur = NULL; } - /* Find all other NORMAL forms of the 'word' (check only prefix)*/ - pnode=Conf->Prefix; - plevel=0; - while(pnode) { - prefix=FinfAffixes(pnode, word, wrdlen, &plevel,FF_PREFIX); - if (!prefix) break; - for(j=0;j<prefix->naff;j++) { - if ( CheckAffix(word,wrdlen,prefix->aff[j], flag, newword) ) { + /* Find all other NORMAL forms of the 'word' (check only prefix) */ + pnode = Conf->Prefix; + plevel = 0; + while (pnode) + { + prefix = FinfAffixes(pnode, word, wrdlen, &plevel, FF_PREFIX); + if (!prefix) + break; + for (j = 0; j < prefix->naff; j++) + { + if (CheckAffix(word, wrdlen, prefix->aff[j], flag, newword)) + { /* prefix success */ - if ( FindWord(Conf, newword, prefix->aff[j]->flag, flag&FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM-1) ) { + if (FindWord(Conf, newword, prefix->aff[j]->flag, flag & FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM - 1)) + { /* word search success */ *cur = pstrdup(newword); cur++; - *cur=NULL; + *cur = NULL; } } } pnode = prefix->node; } - - /* Find all other NORMAL forms of the 'word' (check suffix and then 
prefix)*/ - while( snode ) { + + /* + * Find all other NORMAL forms of the 'word' (check suffix and then + * prefix) + */ + while (snode) + { /* find possible suffix */ suffix = FinfAffixes(snode, word, wrdlen, &slevel, FF_SUFFIX); - if (!suffix) break; + if (!suffix) + break; /* foreach suffix check affix */ - for(i=0;i<suffix->naff;i++) { - if ( CheckAffix(word, wrdlen, suffix->aff[i], flag, newword) ) { + for (i = 0; i < suffix->naff; i++) + { + if (CheckAffix(word, wrdlen, suffix->aff[i], flag, newword)) + { /* suffix success */ - if ( FindWord(Conf, newword, suffix->aff[i]->flag, flag&FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM-1) ) { + if (FindWord(Conf, newword, suffix->aff[i]->flag, flag & FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM - 1)) + { /* word search success */ *cur = pstrdup(newword); cur++; - *cur=NULL; + *cur = NULL; } /* now we will look changed word with prefixes */ - pnode=Conf->Prefix; - plevel=0; - swrdlen=strlen(newword); - while(pnode) { - prefix=FinfAffixes(pnode, newword, swrdlen, &plevel,FF_PREFIX); - if (!prefix) break; - for(j=0;j<prefix->naff;j++) { - if ( CheckAffix(newword,swrdlen,prefix->aff[j], flag, pnewword) ) { + pnode = Conf->Prefix; + plevel = 0; + swrdlen = strlen(newword); + while (pnode) + { + prefix = FinfAffixes(pnode, newword, swrdlen, &plevel, FF_PREFIX); + if (!prefix) + break; + for (j = 0; j < prefix->naff; j++) + { + if (CheckAffix(newword, swrdlen, prefix->aff[j], flag, pnewword)) + { /* prefix success */ - int ff=( prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT ) ? - 0 : prefix->aff[j]->flag; - if ( FindWord(Conf, pnewword, ff, flag&FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM-1) ) { + int ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ? + 0 : prefix->aff[j]->flag; + + if (FindWord(Conf, pnewword, ff, flag & FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM - 1)) + { /* word search success */ *cur = pstrdup(pnewword); cur++; - *cur=NULL; + *cur = NULL; } } } pnode = prefix->node; - } + } } } - snode=suffix->node; + snode = suffix->node; } - if (cur == forms) { + if (cur == forms) + { pfree(forms); return (NULL); } return (forms); } -typedef struct SplitVar { - int nstem; - char **stem; - struct SplitVar *next; -} SplitVar; +typedef struct SplitVar +{ + int nstem; + char **stem; + struct SplitVar *next; +} SplitVar; -static int -CheckCompoundAffixes(CMPDAffix **ptr, char *word, int len) { - while( (*ptr)->affix ) { - if ( len > (*ptr)->len && strncmp((*ptr)->affix, word, (*ptr)->len)==0 ) { +static int +CheckCompoundAffixes(CMPDAffix ** ptr, char *word, int len) +{ + while ((*ptr)->affix) + { + if (len > (*ptr)->len && strncmp((*ptr)->affix, word, (*ptr)->len) == 0) + { len = (*ptr)->len; (*ptr)++; return len; @@ -858,245 +967,290 @@ CheckCompoundAffixes(CMPDAffix **ptr, char *word, int len) { return 0; } -static SplitVar* -CopyVar(SplitVar *s, int makedup) { - SplitVar *v = (SplitVar*)palloc(sizeof(SplitVar)); +static SplitVar * +CopyVar(SplitVar * s, int makedup) +{ + SplitVar *v = (SplitVar *) palloc(sizeof(SplitVar)); + + v->stem = (char **) palloc(sizeof(char *) * (MAX_NORM)); + v->next = NULL; + if (s) + { + int i; - v->stem=(char**)palloc( sizeof(char*) * (MAX_NORM) ); - v->next=NULL; - if ( s ) { - int i; v->nstem = s->nstem; - for(i=0;i<s->nstem;i++) - v->stem[i] = (makedup) ? pstrdup( s->stem[i] ) : s->stem[i]; - } else { - v->nstem=0; + for (i = 0; i < s->nstem; i++) + v->stem[i] = (makedup) ? 
pstrdup(s->stem[i]) : s->stem[i]; } + else + v->nstem = 0; return v; } -static SplitVar* -SplitToVariants( IspellDict * Conf, SPNode *snode, SplitVar * orig, char *word, int wordlen, int startpos, int minpos ) { - SplitVar *var=NULL; - SPNodeData *StopLow, *StopHigh, *StopMiddle = NULL; - SPNode *node = (snode) ? snode : Conf->Dictionary; - int level=(snode) ? minpos : startpos; /* recursive minpos==level*/ - int lenaff; - CMPDAffix *caff; - char *notprobed; +static SplitVar * +SplitToVariants(IspellDict * Conf, SPNode * snode, SplitVar * orig, char *word, int wordlen, int startpos, int minpos) +{ + SplitVar *var = NULL; + SPNodeData *StopLow, + *StopHigh, + *StopMiddle = NULL; + SPNode *node = (snode) ? snode : Conf->Dictionary; + int level = (snode) ? minpos : startpos; /* recursive + * minpos==level */ + int lenaff; + CMPDAffix *caff; + char *notprobed; notprobed = (char *) palloc(wordlen); - memset(notprobed,1,wordlen); - var = CopyVar(orig,1); + memset(notprobed, 1, wordlen); + var = CopyVar(orig, 1); - while( node && level<wordlen) { + while (node && level < wordlen) + { StopLow = node->data; - StopHigh = node->data+node->length; - while (StopLow < StopHigh) { + StopHigh = node->data + node->length; + while (StopLow < StopHigh) + { StopMiddle = StopLow + ((StopHigh - StopLow) >> 1); - if ( StopMiddle->val == ((uint8*)(word))[level] ) { + if (StopMiddle->val == ((uint8 *) (word))[level]) break; - } else if ( StopMiddle->val < ((uint8*)(word))[level] ) { + else if (StopMiddle->val < ((uint8 *) (word))[level]) StopLow = StopMiddle + 1; - } else { + else StopHigh = StopMiddle; - } } - if ( StopLow >= StopHigh ) + if (StopLow >= StopHigh) break; /* find word with epenthetic */ caff = Conf->CompoundAffix; - while ( level>startpos && (lenaff=CheckCompoundAffixes( &caff, word + level, wordlen - level ))>0 ) { - /* there is one of compound suffixes, so check word for existings */ - char buf[MAXNORMLEN]; - char **subres; - - lenaff=level-startpos+lenaff; - - if ( !notprobed[startpos+lenaff-1] ) + while (level > startpos && (lenaff = CheckCompoundAffixes(&caff, word + level, wordlen - level)) > 0) + { + /* + * there is one of compound suffixes, so check word for + * existings + */ + char buf[MAXNORMLEN]; + char **subres; + + lenaff = level - startpos + lenaff; + + if (!notprobed[startpos + lenaff - 1]) continue; - - if ( level+lenaff-1 <= minpos ) + + if (level + lenaff - 1 <= minpos) continue; - memcpy(buf, word+startpos, lenaff); - buf[lenaff]='\0'; + memcpy(buf, word + startpos, lenaff); + buf[lenaff] = '\0'; subres = NormalizeSubWord(Conf, buf, FF_COMPOUNDWORD | FF_COMPOUNDONLYAFX); - if ( subres ) { + if (subres) + { /* Yes, it was a word from dictionary */ - SplitVar *new=CopyVar(var,0); - SplitVar *ptr=var; - char **sptr=subres; - - notprobed[startpos+lenaff-1]=0; - - while(*sptr) { - new->stem[ new->nstem ] = *sptr; + SplitVar *new = CopyVar(var, 0); + SplitVar *ptr = var; + char **sptr = subres; + + notprobed[startpos + lenaff - 1] = 0; + + while (*sptr) + { + new->stem[new->nstem] = *sptr; new->nstem++; sptr++; } pfree(subres); - while( ptr->next ) + while (ptr->next) ptr = ptr->next; - ptr->next = SplitToVariants(Conf, NULL, new, word, wordlen, startpos+lenaff, startpos+lenaff); - + ptr->next = SplitToVariants(Conf, NULL, new, word, wordlen, startpos + lenaff, startpos + lenaff); + pfree(new->stem); pfree(new); } } /* find infinitive */ - if ( StopMiddle->isword && StopMiddle->compoundallow && notprobed[level] ) { - /* ok, we found full compoundallowed word*/ - if ( level>minpos ) { 
+ if (StopMiddle->isword && StopMiddle->compoundallow && notprobed[level]) + { + /* ok, we found full compoundallowed word */ + if (level > minpos) + { /* and its length more than minimal */ - if ( wordlen==level+1 ) { + if (wordlen == level + 1) + { /* well, it was last word */ - var->stem[ var->nstem ] = strnduplicate(word + startpos, wordlen - startpos); + var->stem[var->nstem] = strnduplicate(word + startpos, wordlen - startpos); var->nstem++; pfree(notprobed); return var; - } else { + } + else + { /* then we will search more big word at the same point */ - SplitVar *ptr=var; - while( ptr->next ) + SplitVar *ptr = var; + + while (ptr->next) ptr = ptr->next; - ptr->next=SplitToVariants(Conf, node, var, word, wordlen, startpos, level); + ptr->next = SplitToVariants(Conf, node, var, word, wordlen, startpos, level); /* we can find next word */ level++; - var->stem[ var->nstem ] = strnduplicate(word + startpos, level - startpos); + var->stem[var->nstem] = strnduplicate(word + startpos, level - startpos); var->nstem++; node = Conf->Dictionary; - startpos=level; + startpos = level; continue; } } } level++; - node=StopMiddle->node; + node = StopMiddle->node; } - var->stem[ var->nstem ] = strnduplicate(word + startpos, wordlen - startpos); + var->stem[var->nstem] = strnduplicate(word + startpos, wordlen - startpos); var->nstem++; pfree(notprobed); return var; -} - -char ** -NINormalizeWord(IspellDict * Conf, char *word) { - char **res= NormalizeSubWord(Conf, word, 0); - - if ( Conf->compoundcontrol != '\t' ) { - int wordlen=strlen(word); - SplitVar *ptr, *var = SplitToVariants(Conf,NULL,NULL, word, wordlen, 0, -1); - char **cur=res; - int i; - - while(var) { - if ( var->nstem > 1 ) { - char **subres = NormalizeSubWord(Conf, var->stem[ var->nstem-1 ], FF_COMPOUNDWORD); - if ( subres ) { - char **ptr=subres; - - if ( cur ) { - while(*cur) +} + +char ** +NINormalizeWord(IspellDict * Conf, char *word) +{ + char **res = NormalizeSubWord(Conf, word, 0); + + if (Conf->compoundcontrol != '\t') + { + int wordlen = strlen(word); + SplitVar *ptr, + *var = SplitToVariants(Conf, NULL, NULL, word, wordlen, 0, -1); + char **cur = res; + int i; + + while (var) + { + if (var->nstem > 1) + { + char **subres = NormalizeSubWord(Conf, var->stem[var->nstem - 1], FF_COMPOUNDWORD); + + if (subres) + { + char **ptr = subres; + + if (cur) + { + while (*cur) cur++; - } else { - res=cur=(char **) palloc(MAX_NORM * sizeof(char *)); } - - for(i=0;i<var->nstem-1;i++) { - *cur=var->stem[ i ]; + else + res = cur = (char **) palloc(MAX_NORM * sizeof(char *)); + + for (i = 0; i < var->nstem - 1; i++) + { + *cur = var->stem[i]; cur++; } - while(*ptr) { - *cur=*ptr; - cur++; ptr++; + while (*ptr) + { + *cur = *ptr; + cur++; + ptr++; } - *cur=NULL; + *cur = NULL; pfree(subres); - var->stem[ 0 ] = NULL; + var->stem[0] = NULL; } } - - for(i=0;i<var->nstem && var->stem[ i ];i++) - pfree( var->stem[i] ); + + for (i = 0; i < var->nstem && var->stem[i]; i++) + pfree(var->stem[i]); ptr = var->next; pfree(var->stem); - pfree(var); - var=ptr; + pfree(var); + var = ptr; } } return res; } -static void freeSPNode(SPNode *node) { +static void +freeSPNode(SPNode * node) +{ SPNodeData *data; - if (!node) return; - data=node->data; - while( node->length ) { + if (!node) + return; + data = node->data; + while (node->length) + { freeSPNode(data->node); data++; node->length--; } free(node); } - -static void freeANode(AffixNode *node) { + +static void +freeANode(AffixNode * node) +{ AffixNodeData *data; - if (!node) return; - data=node->data; - while( 
node->length ) { + if (!node) + return; + data = node->data; + while (node->length) + { freeANode(data->node); if (data->naff) - free(data->aff); + free(data->aff); data++; node->length--; } free(node); } - + void NIFree(IspellDict * Conf) { int i; AFFIX *Affix = (AFFIX *) Conf->Affix; - char** aff = Conf->AffixData; + char **aff = Conf->AffixData; - if ( aff ) { - while(*aff) { + if (aff) + { + while (*aff) + { free(*aff); aff++; } free(Conf->AffixData); } - + for (i = 0; i < Conf->naffixes; i++) { - if (Affix[i].compile == 0) { - if ( Affix[i].isregis ) - RS_free(&(Affix[i].reg.regis)); - else + if (Affix[i].compile == 0) + { + if (Affix[i].isregis) + RS_free(&(Affix[i].reg.regis)); + else pg_regfree(&(Affix[i].reg.regex)); } } - if (Conf->Spell) { + if (Conf->Spell) + { for (i = 0; i < Conf->nspell; i++) free(Conf->Spell[i].word); free(Conf->Spell); } - if (Conf->Affix) free(Conf->Affix); - if ( Conf->CompoundAffix ) free(Conf->CompoundAffix); + if (Conf->Affix) + free(Conf->Affix); + if (Conf->CompoundAffix) + free(Conf->CompoundAffix); freeSPNode(Conf->Dictionary); freeANode(Conf->Suffix); freeANode(Conf->Prefix); diff --git a/contrib/tsearch2/ispell/spell.h b/contrib/tsearch2/ispell/spell.h index 1e22a0d1bcd..44f1e7be08f 100644 --- a/contrib/tsearch2/ispell/spell.h +++ b/contrib/tsearch2/ispell/spell.h @@ -10,19 +10,21 @@ struct SPNode; -typedef struct { - uint32 - val:8, - isword:1, - compoundallow:1, - affix:22; - struct SPNode *node; -} SPNodeData; - -typedef struct SPNode { - uint32 length; - SPNodeData data[1]; -} SPNode; +typedef struct +{ + uint32 + val:8, + isword:1, + compoundallow:1, + affix:22; + struct SPNode *node; +} SPNodeData; + +typedef struct SPNode +{ + uint32 length; + SPNodeData data[1]; +} SPNode; #define SPNHRDSZ (sizeof(uint32)) @@ -30,81 +32,87 @@ typedef struct SPNode { typedef struct spell_struct { char *word; - union { + union + { char flag[16]; - struct { - int affix; - int len; - } d; - } p; + struct + { + int affix; + int len; + } d; + } p; } SPELL; typedef struct aff_struct { - uint32 - flag:8, - type:2, - compile:1, - flagflags:3, - issimple:1, - isregis:1, - unused:1, - replen:16; - char mask[32]; - char find[16]; - char repl[16]; - union { - regex_t regex; - Regis regis; - } reg; + uint32 + flag:8, + type:2, + compile:1, + flagflags:3, + issimple:1, + isregis:1, + unused:1, + replen:16; + char mask[32]; + char find[16]; + char repl[16]; + union + { + regex_t regex; + Regis regis; + } reg; } AFFIX; -#define FF_CROSSPRODUCT 0x01 -#define FF_COMPOUNDWORD 0x02 -#define FF_COMPOUNDONLYAFX 0x04 -#define FF_SUFFIX 2 -#define FF_PREFIX 1 +#define FF_CROSSPRODUCT 0x01 +#define FF_COMPOUNDWORD 0x02 +#define FF_COMPOUNDONLYAFX 0x04 +#define FF_SUFFIX 2 +#define FF_PREFIX 1 struct AffixNode; -typedef struct { +typedef struct +{ uint32 - val:8, - naff:24; - AFFIX **aff; + val:8, + naff:24; + AFFIX **aff; struct AffixNode *node; -} AffixNodeData; +} AffixNodeData; -typedef struct AffixNode { - uint32 isvoid:1, - length:31; - AffixNodeData data[1]; -} AffixNode; +typedef struct AffixNode +{ + uint32 isvoid:1, + length:31; + AffixNodeData data[1]; +} AffixNode; -#define ANHRDSZ (sizeof(uint32)) +#define ANHRDSZ (sizeof(uint32)) -typedef struct { - char *affix; - int len; -} CMPDAffix; +typedef struct +{ + char *affix; + int len; +} CMPDAffix; typedef struct { int maffixes; int naffixes; AFFIX *Affix; - char compoundcontrol; + char compoundcontrol; int nspell; int mspell; SPELL *Spell; - AffixNode *Suffix; - AffixNode *Prefix; + AffixNode *Suffix; + AffixNode 
*Prefix; - SPNode *Dictionary; - char **AffixData; - CMPDAffix *CompoundAffix; + SPNode *Dictionary; + char **AffixData; + CMPDAffix *CompoundAffix; } IspellDict; diff --git a/contrib/tsearch2/query.c b/contrib/tsearch2/query.c index 81343b0c462..eb931030ca7 100644 --- a/contrib/tsearch2/query.c +++ b/contrib/tsearch2/query.c @@ -469,7 +469,7 @@ TS_execute(ITEM * curitem, void *checkval, bool calcnot, bool (*chkcond) (void * Datum rexectsq(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); return DirectFunctionCall2( exectsq, PG_GETARG_DATUM(1), @@ -484,7 +484,8 @@ exectsq(PG_FUNCTION_ARGS) QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1))); CHKVAL chkval; bool result; - SET_FUNCOID(); + + SET_FUNCOID(); if (!val->size || !query->size) { PG_FREE_IF_COPY(val, 0); @@ -639,7 +640,7 @@ static QUERYTYPE * Datum tsquery_in(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); PG_RETURN_POINTER(queryin((char *) PG_GETARG_POINTER(0), pushval_asis, 0)); } @@ -865,7 +866,8 @@ to_tsquery(PG_FUNCTION_ARGS) QUERYTYPE *query; ITEM *res; int4 len; - SET_FUNCOID(); + + SET_FUNCOID(); str = text2char(in); PG_FREE_IF_COPY(in, 1); @@ -888,10 +890,11 @@ to_tsquery_name(PG_FUNCTION_ARGS) { text *name = PG_GETARG_TEXT_P(0); Datum res; - SET_FUNCOID(); + + SET_FUNCOID(); res = DirectFunctionCall2(to_tsquery, - Int32GetDatum(name2id_cfg(name)), - PG_GETARG_DATUM(1)); + Int32GetDatum(name2id_cfg(name)), + PG_GETARG_DATUM(1)); PG_FREE_IF_COPY(name, 0); PG_RETURN_DATUM(res); diff --git a/contrib/tsearch2/snmap.c b/contrib/tsearch2/snmap.c index d41fb45b0dc..c152b1ac238 100644 --- a/contrib/tsearch2/snmap.c +++ b/contrib/tsearch2/snmap.c @@ -13,11 +13,11 @@ static int compareSNMapEntry(const void *a, const void *b) { - if ( ((SNMapEntry *) a)->nsp < ((SNMapEntry *) b)->nsp ) + if (((SNMapEntry *) a)->nsp < ((SNMapEntry *) b)->nsp) return -1; - else if ( ((SNMapEntry *) a)->nsp > ((SNMapEntry *) b)->nsp ) + else if (((SNMapEntry *) a)->nsp > ((SNMapEntry *) b)->nsp) return 1; - else + else return strcmp(((SNMapEntry *) a)->key, ((SNMapEntry *) b)->key); } diff --git a/contrib/tsearch2/ts_cfg.c b/contrib/tsearch2/ts_cfg.c index 4e0a0bb9043..afebb113199 100644 --- a/contrib/tsearch2/ts_cfg.c +++ b/contrib/tsearch2/ts_cfg.c @@ -38,10 +38,10 @@ init_cfg(Oid id, TSCfgInfo * cfg) j; text *ptr; text *prsname = NULL; - char *nsp=get_namespace(TSNSP_FunctionOid); - char buf[1024]; + char *nsp = get_namespace(TSNSP_FunctionOid); + char buf[1024]; MemoryContext oldcontext; - void *plan; + void *plan; arg[0] = OIDOID; arg[1] = OIDOID; @@ -52,7 +52,7 @@ init_cfg(Oid id, TSCfgInfo * cfg) SPI_connect(); sprintf(buf, "select prs_name from %s.pg_ts_cfg where oid = $1", nsp); - plan= SPI_prepare(buf, 1, arg); + plan = SPI_prepare(buf, 1, arg); if (!plan) ts_error(ERROR, "SPI_prepare() failed"); @@ -77,7 +77,7 @@ init_cfg(Oid id, TSCfgInfo * cfg) arg[0] = TEXTOID; sprintf(buf, "select lt.tokid, map.dict_name from %s.pg_ts_cfgmap as map, %s.pg_ts_cfg as cfg, %s.token_type( $1 ) as lt where lt.alias = map.tok_alias and map.ts_name = cfg.ts_name and cfg.oid= $2 order by lt.tokid desc;", nsp, nsp, nsp); - plan= SPI_prepare(buf, 2, arg); + plan = SPI_prepare(buf, 2, arg); if (!plan) ts_error(ERROR, "SPI_prepare() failed"); @@ -118,7 +118,7 @@ init_cfg(Oid id, TSCfgInfo * cfg) cfg->map[lexid].len = ARRNELEMS(a); cfg->map[lexid].dict_id = (Datum *) malloc(sizeof(Datum) * cfg->map[lexid].len); if (!cfg->map[lexid].dict_id) - ts_error(ERROR, "No memory"); + ts_error(ERROR, "No memory"); 
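/*
 * Context sketch (not part of the patch): the reindented spell.c code above
 * repeatedly uses the same StopLow/StopHigh/StopMiddle binary search over a
 * node's sorted data[] array (see FinfAffixes() and SplitToVariants()).  The
 * standalone program below shows that search pattern in isolation; the names
 * Entry, find_child and the sample data are illustrative only and do not
 * exist in the tsearch2 sources.
 */
#include <stdio.h>

typedef struct
{
    unsigned char val;          /* byte label of the child, kept sorted */
    int         payload;        /* stands in for the node/affix pointer */
} Entry;

static const Entry *
find_child(const Entry *data, int length, unsigned char symbol)
{
    const Entry *StopLow = data;
    const Entry *StopHigh = data + length;

    while (StopLow < StopHigh)
    {
        const Entry *StopMiddle = StopLow + ((StopHigh - StopLow) >> 1);

        if (StopMiddle->val == symbol)
            return StopMiddle;
        else if (StopMiddle->val < symbol)
            StopLow = StopMiddle + 1;
        else
            StopHigh = StopMiddle;
    }
    return NULL;                /* symbol not present in this node */
}

int
main(void)
{
    Entry       node[] = {{'a', 1}, {'c', 2}, {'t', 3}};
    const Entry *hit = find_child(node, 3, 'c');

    printf("%d\n", hit ? hit->payload : -1);    /* prints 2 */
    return 0;
}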
memset(cfg->map[lexid].dict_id, 0, sizeof(Datum) * cfg->map[lexid].len); ptr = (text *) ARR_DATA_PTR(a); @@ -235,9 +235,9 @@ name2id_cfg(text *name) Datum pars[1]; int stat; Oid id = findSNMap_t(&(CList.name2id_map), name); - void *plan; - char *nsp; - char buf[1024]; + void *plan; + char *nsp; + char buf[1024]; arg[0] = TEXTOID; pars[0] = PointerGetDatum(name); @@ -245,10 +245,10 @@ name2id_cfg(text *name) if (id) return id; - nsp=get_namespace(TSNSP_FunctionOid); + nsp = get_namespace(TSNSP_FunctionOid); SPI_connect(); - sprintf(buf, "select oid from %s.pg_ts_cfg where ts_name = $1", nsp); - plan= SPI_prepare(buf, 1, arg); + sprintf(buf, "select oid from %s.pg_ts_cfg where ts_name = $1", nsp); + plan = SPI_prepare(buf, 1, arg); if (!plan) /* internal error */ elog(ERROR, "SPI_prepare() failed"); @@ -301,13 +301,14 @@ parsetext_v2(TSCfgInfo * cfg, PRSTEXT * prs, char *buf, int4 buflen) PointerGetDatum(&lenlemm)))) != 0) { - if (lenlemm >= MAXSTRLEN) { + if (lenlemm >= MAXSTRLEN) + { #ifdef IGNORE_LONGLEXEME ereport(NOTICE, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("word is too long"))); continue; -#else +#else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("word is too long"))); @@ -435,13 +436,14 @@ hlparsetext(TSCfgInfo * cfg, HLPRSTEXT * prs, QUERYTYPE * query, char *buf, int4 PointerGetDatum(&lenlemm)))) != 0) { - if (lenlemm >= MAXSTRLEN) { + if (lenlemm >= MAXSTRLEN) + { #ifdef IGNORE_LONGLEXEME ereport(NOTICE, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("word is too long"))); continue; -#else +#else ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("word is too long"))); @@ -532,9 +534,8 @@ genhl(HLPRSTEXT * prs) ptr += prs->stopsellen; } } - } else - - if (!wrd->repeated) + } + else if (!wrd->repeated) pfree(wrd->word); wrd++; @@ -552,16 +553,16 @@ get_currcfg(void) Datum pars[1]; bool isnull; int stat; - char buf[1024]; - char *nsp; - void *plan; + char buf[1024]; + char *nsp; + void *plan; if (current_cfg_id > 0) return current_cfg_id; - nsp=get_namespace(TSNSP_FunctionOid); + nsp = get_namespace(TSNSP_FunctionOid); SPI_connect(); - sprintf(buf, "select oid from %s.pg_ts_cfg where locale = $1 ", nsp); + sprintf(buf, "select oid from %s.pg_ts_cfg where locale = $1 ", nsp); pfree(nsp); plan = SPI_prepare(buf, 1, arg); if (!plan) @@ -593,7 +594,7 @@ Datum set_curcfg(PG_FUNCTION_ARGS); Datum set_curcfg(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); findcfg(PG_GETARG_OID(0)); current_cfg_id = PG_GETARG_OID(0); PG_RETURN_VOID(); @@ -605,7 +606,8 @@ Datum set_curcfg_byname(PG_FUNCTION_ARGS) { text *name = PG_GETARG_TEXT_P(0); - SET_FUNCOID(); + + SET_FUNCOID(); DirectFunctionCall1( set_curcfg, ObjectIdGetDatum(name2id_cfg(name)) @@ -619,7 +621,7 @@ Datum show_curcfg(PG_FUNCTION_ARGS); Datum show_curcfg(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); PG_RETURN_OID(get_currcfg()); } @@ -628,8 +630,7 @@ Datum reset_tsearch(PG_FUNCTION_ARGS); Datum reset_tsearch(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); ts_error(NOTICE, "TSearch cache cleaned"); PG_RETURN_VOID(); } - diff --git a/contrib/tsearch2/ts_stat.c b/contrib/tsearch2/ts_stat.c index 7995406f43f..2e6a98197e3 100644 --- a/contrib/tsearch2/ts_stat.c +++ b/contrib/tsearch2/ts_stat.c @@ -15,7 +15,7 @@ Datum tsstat_in(PG_FUNCTION_ARGS) { tsstat *stat = palloc(STATHDRSIZE); - + stat->len = STATHDRSIZE; stat->size = 0; stat->weight = 0; @@ -34,12 +34,14 @@ tsstat_out(PG_FUNCTION_ARGS) } static int -check_weight(tsvector *txt, WordEntry *wptr, int8 weight) { - int len = POSDATALEN(txt, wptr); - int num=0; - 
WordEntryPos *ptr = POSDATAPTR(txt, wptr); +check_weight(tsvector * txt, WordEntry * wptr, int8 weight) +{ + int len = POSDATALEN(txt, wptr); + int num = 0; + WordEntryPos *ptr = POSDATAPTR(txt, wptr); - while (len--) { + while (len--) + { if (weight & (1 << ptr->weight)) num++; ptr++; @@ -123,9 +125,9 @@ formstat(tsstat * stat, tsvector * txt, WordEntry ** entry, uint32 len) } nptr = STATPTR(newstat) + (StopLow - STATPTR(stat)); memcpy(STATPTR(newstat), STATPTR(stat), sizeof(StatEntry) * (StopLow - STATPTR(stat))); - if ( (*ptr)->haspos ) { - nptr->nentry = ( stat->weight ) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr); - } else + if ((*ptr)->haspos) + nptr->nentry = (stat->weight) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr); + else nptr->nentry = 1; nptr->ndoc = 1; nptr->len = (*ptr)->len; @@ -144,9 +146,9 @@ formstat(tsstat * stat, tsvector * txt, WordEntry ** entry, uint32 len) } else { - if ( (*ptr)->haspos ) { - nptr->nentry = ( stat->weight ) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr); - } else + if ((*ptr)->haspos) + nptr->nentry = (stat->weight) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr); + else nptr->nentry = 1; nptr->ndoc = 1; nptr->len = (*ptr)->len; @@ -162,9 +164,9 @@ formstat(tsstat * stat, tsvector * txt, WordEntry ** entry, uint32 len) while (ptr - entry < len) { - if ( (*ptr)->haspos ) { - nptr->nentry = ( stat->weight ) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr); - } else + if ((*ptr)->haspos) + nptr->nentry = (stat->weight) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr); + else nptr->nentry = 1; nptr->ndoc = 1; nptr->len = (*ptr)->len; @@ -192,7 +194,7 @@ ts_accum(PG_FUNCTION_ARGS) cur = 0; StatEntry *sptr; WordEntry *wptr; - int n=0; + int n = 0; if (stat == NULL || PG_ARGISNULL(0)) { /* Init in first */ @@ -222,10 +224,13 @@ ts_accum(PG_FUNCTION_ARGS) sptr++; else if (cmp == 0) { - if ( stat->weight == 0 ) { + if (stat->weight == 0) + { sptr->ndoc++; sptr->nentry += (wptr->haspos) ? POSDATALEN(txt, wptr) : 1; - } else if ( wptr->haspos && (n=check_weight(txt, wptr, stat->weight))!=0 ) { + } + else if (wptr->haspos && (n = check_weight(txt, wptr, stat->weight)) != 0) + { sptr->ndoc++; sptr->nentry += n; } @@ -234,7 +239,8 @@ ts_accum(PG_FUNCTION_ARGS) } else { - if ( stat->weight == 0 || check_weight(txt, wptr, stat->weight)!=0 ) { + if (stat->weight == 0 || check_weight(txt, wptr, stat->weight) != 0) + { if (cur == len) newentry = SEI_realloc(newentry, &len); newentry[cur] = wptr; @@ -246,7 +252,8 @@ ts_accum(PG_FUNCTION_ARGS) while (wptr - ARRPTR(txt) < txt->size) { - if ( stat->weight == 0 || check_weight(txt, wptr, stat->weight)!=0 ) { + if (stat->weight == 0 || check_weight(txt, wptr, stat->weight) != 0) + { if (cur == len) newentry = SEI_realloc(newentry, &len); newentry[cur] = wptr; @@ -269,10 +276,13 @@ ts_accum(PG_FUNCTION_ARGS) cmp = compareStatWord(sptr, wptr, stat, txt); if (cmp == 0) { - if ( stat->weight == 0 ) { + if (stat->weight == 0) + { sptr->ndoc++; sptr->nentry += (wptr->haspos) ? 
POSDATALEN(txt, wptr) : 1; - } else if ( wptr->haspos && (n=check_weight(txt, wptr, stat->weight))!=0 ) { + } + else if (wptr->haspos && (n = check_weight(txt, wptr, stat->weight)) != 0) + { sptr->ndoc++; sptr->nentry += n; } @@ -286,7 +296,8 @@ ts_accum(PG_FUNCTION_ARGS) if (StopLow >= StopHigh) { /* not found */ - if ( stat->weight == 0 || check_weight(txt, wptr, stat->weight)!=0 ) { + if (stat->weight == 0 || check_weight(txt, wptr, stat->weight) != 0) + { if (cur == len) newentry = SEI_realloc(newentry, &len); newentry[cur] = wptr; @@ -454,11 +465,15 @@ ts_stat_sql(text *txt, text *ws) stat->size = 0; stat->weight = 0; - if ( ws ) { - char *buf; + if (ws) + { + char *buf; + buf = VARDATA(ws); - while( buf - VARDATA(ws) < VARSIZE(ws) - VARHDRSZ ) { - switch (tolower(*buf)) { + while (buf - VARDATA(ws) < VARSIZE(ws) - VARHDRSZ) + { + switch (tolower(*buf)) + { case 'a': stat->weight |= 1 << 3; break; @@ -521,13 +536,14 @@ ts_stat(PG_FUNCTION_ARGS) { tsstat *stat; text *txt = PG_GETARG_TEXT_P(0); - text *ws = (PG_NARGS() > 1) ? PG_GETARG_TEXT_P(1) : NULL; + text *ws = (PG_NARGS() > 1) ? PG_GETARG_TEXT_P(1) : NULL; funcctx = SRF_FIRSTCALL_INIT(); SPI_connect(); - stat = ts_stat_sql(txt,ws); + stat = ts_stat_sql(txt, ws); PG_FREE_IF_COPY(txt, 0); - if (PG_NARGS() > 1 ) PG_FREE_IF_COPY(ws, 1); + if (PG_NARGS() > 1) + PG_FREE_IF_COPY(ws, 1); ts_setup_firstcall(funcctx, stat); SPI_finish(); } diff --git a/contrib/tsearch2/tsvector.c b/contrib/tsearch2/tsvector.c index a39a40fb366..9f26dec670f 100644 --- a/contrib/tsearch2/tsvector.c +++ b/contrib/tsearch2/tsvector.c @@ -404,7 +404,8 @@ tsvector_in(PG_FUNCTION_ARGS) *cur; int4 i, buflen = 256; - SET_FUNCOID(); + + SET_FUNCOID(); state.prsbuf = buf; state.len = 32; state.word = (char *) palloc(state.len); @@ -453,7 +454,7 @@ tsvector_in(PG_FUNCTION_ARGS) if (len > 0) len = uniqueentry(arr, len, tmpbuf, &buflen); else - buflen=0; + buflen = 0; totallen = CALCDATASIZE(len, buflen); in = (tsvector *) palloc(totallen); memset(in, 0, totallen); @@ -638,7 +639,8 @@ uniqueWORD(TSWORD * a, int4 l) res->alen *= 2; res->pos.apos = (uint16 *) repalloc(res->pos.apos, sizeof(uint16) * res->alen); } - if ( res->pos.apos[0]==0 || res->pos.apos[res->pos.apos[0]] != LIMITPOS(ptr->pos.pos) ) { + if (res->pos.apos[0] == 0 || res->pos.apos[res->pos.apos[0]] != LIMITPOS(ptr->pos.pos)) + { res->pos.apos[res->pos.apos[0] + 1] = LIMITPOS(ptr->pos.pos); res->pos.apos[0]++; } @@ -725,7 +727,7 @@ to_tsvector(PG_FUNCTION_ARGS) tsvector *out = NULL; TSCfgInfo *cfg; - SET_FUNCOID(); + SET_FUNCOID(); cfg = findcfg(PG_GETARG_INT32(0)); prs.lenwords = 32; @@ -753,13 +755,14 @@ to_tsvector_name(PG_FUNCTION_ARGS) { text *cfg = PG_GETARG_TEXT_P(0); Datum res; - SET_FUNCOID(); + + SET_FUNCOID(); res = DirectFunctionCall3( - to_tsvector, - Int32GetDatum(name2id_cfg(cfg)), - PG_GETARG_DATUM(1), - (Datum) 0 - ); + to_tsvector, + Int32GetDatum(name2id_cfg(cfg)), + PG_GETARG_DATUM(1), + (Datum) 0 + ); PG_FREE_IF_COPY(cfg, 0); PG_RETURN_DATUM(res); @@ -769,13 +772,14 @@ Datum to_tsvector_current(PG_FUNCTION_ARGS) { Datum res; - SET_FUNCOID(); + + SET_FUNCOID(); res = DirectFunctionCall3( - to_tsvector, - Int32GetDatum(get_currcfg()), - PG_GETARG_DATUM(0), - (Datum) 0 - ); + to_tsvector, + Int32GetDatum(get_currcfg()), + PG_GETARG_DATUM(0), + (Datum) 0 + ); PG_RETURN_DATUM(res); } @@ -823,7 +827,7 @@ tsearch2(PG_FUNCTION_ARGS) Oid funcoid = InvalidOid; TSCfgInfo *cfg; - SET_FUNCOID(); + SET_FUNCOID(); cfg = findcfg(get_currcfg()); if (!CALLED_AS_TRIGGER(fcinfo)) @@ -947,26 +951,30 @@ 
tsearch2(PG_FUNCTION_ARGS) } static int -silly_cmp_tsvector(const tsvector *a, const tsvector *b) { - if ( a->len < b->len ) +silly_cmp_tsvector(const tsvector * a, const tsvector * b) +{ + if (a->len < b->len) return -1; - else if ( a->len > b->len ) + else if (a->len > b->len) return 1; - else if ( a->size < b->size ) + else if (a->size < b->size) return -1; - else if ( a->size > b->size ) + else if (a->size > b->size) return 1; - else { - unsigned char *aptr=(unsigned char *)(a->data) + DATAHDRSIZE; - unsigned char *bptr=(unsigned char *)(b->data) + DATAHDRSIZE; - - while( aptr - ( (unsigned char *)(a->data) ) < a->len ) { - if ( *aptr != *bptr ) - return ( *aptr < *bptr ) ? -1 : 1; - aptr++; bptr++; - } + else + { + unsigned char *aptr = (unsigned char *) (a->data) + DATAHDRSIZE; + unsigned char *bptr = (unsigned char *) (b->data) + DATAHDRSIZE; + + while (aptr - ((unsigned char *) (a->data)) < a->len) + { + if (*aptr != *bptr) + return (*aptr < *bptr) ? -1 : 1; + aptr++; + bptr++; + } } - return 0; + return 0; } PG_FUNCTION_INFO_V1(tsvector_cmp); @@ -976,60 +984,66 @@ PG_FUNCTION_INFO_V1(tsvector_eq); PG_FUNCTION_INFO_V1(tsvector_ne); PG_FUNCTION_INFO_V1(tsvector_ge); PG_FUNCTION_INFO_V1(tsvector_gt); -Datum tsvector_cmp(PG_FUNCTION_ARGS); -Datum tsvector_lt(PG_FUNCTION_ARGS); -Datum tsvector_le(PG_FUNCTION_ARGS); -Datum tsvector_eq(PG_FUNCTION_ARGS); -Datum tsvector_ne(PG_FUNCTION_ARGS); -Datum tsvector_ge(PG_FUNCTION_ARGS); -Datum tsvector_gt(PG_FUNCTION_ARGS); - -#define RUNCMP \ -tsvector *a = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));\ -tsvector *b = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));\ +Datum tsvector_cmp(PG_FUNCTION_ARGS); +Datum tsvector_lt(PG_FUNCTION_ARGS); +Datum tsvector_le(PG_FUNCTION_ARGS); +Datum tsvector_eq(PG_FUNCTION_ARGS); +Datum tsvector_ne(PG_FUNCTION_ARGS); +Datum tsvector_ge(PG_FUNCTION_ARGS); +Datum tsvector_gt(PG_FUNCTION_ARGS); + +#define RUNCMP \ +tsvector *a = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));\ +tsvector *b = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));\ int res = silly_cmp_tsvector(a,b); \ PG_FREE_IF_COPY(a,0); \ PG_FREE_IF_COPY(b,1); \ Datum -tsvector_cmp(PG_FUNCTION_ARGS) { +tsvector_cmp(PG_FUNCTION_ARGS) +{ RUNCMP PG_RETURN_INT32(res); } Datum -tsvector_lt(PG_FUNCTION_ARGS) { +tsvector_lt(PG_FUNCTION_ARGS) +{ RUNCMP PG_RETURN_BOOL((res < 0) ? true : false); } Datum -tsvector_le(PG_FUNCTION_ARGS) { +tsvector_le(PG_FUNCTION_ARGS) +{ RUNCMP PG_RETURN_BOOL((res <= 0) ? true : false); } Datum -tsvector_eq(PG_FUNCTION_ARGS) { +tsvector_eq(PG_FUNCTION_ARGS) +{ RUNCMP PG_RETURN_BOOL((res == 0) ? true : false); } Datum -tsvector_ge(PG_FUNCTION_ARGS) { +tsvector_ge(PG_FUNCTION_ARGS) +{ RUNCMP PG_RETURN_BOOL((res >= 0) ? true : false); } - + Datum -tsvector_gt(PG_FUNCTION_ARGS) { +tsvector_gt(PG_FUNCTION_ARGS) +{ RUNCMP PG_RETURN_BOOL((res > 0) ? true : false); -} - +} + Datum -tsvector_ne(PG_FUNCTION_ARGS) { - RUNCMP +tsvector_ne(PG_FUNCTION_ARGS) +{ + RUNCMP PG_RETURN_BOOL((res != 0) ? 
true : false); } - diff --git a/contrib/tsearch2/wordparser/parser.h b/contrib/tsearch2/wordparser/parser.h index 7fb2d0a2868..3f0e0cd6359 100644 --- a/contrib/tsearch2/wordparser/parser.h +++ b/contrib/tsearch2/wordparser/parser.h @@ -1,8 +1,8 @@ #ifndef __PARSER_H__ #define __PARSER_H__ -extern char *token; -extern int tokenlen; +extern char *token; +extern int tokenlen; int tsearch2_yylex(void); void tsearch2_start_parse_str(char *, int); void tsearch2_end_parse(void); diff --git a/contrib/tsearch2/wparser.c b/contrib/tsearch2/wparser.c index eba2cd6eb58..554c2684b9b 100644 --- a/contrib/tsearch2/wparser.c +++ b/contrib/tsearch2/wparser.c @@ -30,18 +30,19 @@ init_prs(Oid id, WParserInfo * prs) bool isnull; Datum pars[1]; int stat; - void *plan; - char buf[1024], *nsp; + void *plan; + char buf[1024], + *nsp; arg[0] = OIDOID; pars[0] = ObjectIdGetDatum(id); memset(prs, 0, sizeof(WParserInfo)); SPI_connect(); - nsp=get_namespace(TSNSP_FunctionOid); + nsp = get_namespace(TSNSP_FunctionOid); sprintf(buf, "select prs_start, prs_nexttoken, prs_end, prs_lextype, prs_headline from %s.pg_ts_parser where oid = $1", nsp); pfree(nsp); - plan= SPI_prepare(buf, 1, arg); + plan = SPI_prepare(buf, 1, arg); if (!plan) ts_error(ERROR, "SPI_prepare() failed"); @@ -140,8 +141,9 @@ name2id_prs(text *name) Datum pars[1]; int stat; Oid id = findSNMap_t(&(PList.name2id_map), name); - char buf[1024], *nsp; - void *plan; + char buf[1024], + *nsp; + void *plan; arg[0] = TEXTOID; pars[0] = PointerGetDatum(name); @@ -153,7 +155,7 @@ name2id_prs(text *name) nsp = get_namespace(TSNSP_FunctionOid); sprintf(buf, "select oid from %s.pg_ts_parser where prs_name = $1", nsp); pfree(nsp); - plan= SPI_prepare(buf, 1, arg); + plan = SPI_prepare(buf, 1, arg); if (!plan) ts_error(ERROR, "SPI_prepare() failed"); @@ -242,7 +244,8 @@ token_type(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; Datum result; - SET_FUNCOID(); + + SET_FUNCOID(); if (SRF_IS_FIRSTCALL()) { funcctx = SRF_FIRSTCALL_INIT(); @@ -263,7 +266,8 @@ token_type_byname(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; Datum result; - SET_FUNCOID(); + + SET_FUNCOID(); if (SRF_IS_FIRSTCALL()) { text *name = PG_GETARG_TEXT_P(0); @@ -287,7 +291,8 @@ token_type_current(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; Datum result; - SET_FUNCOID(); + + SET_FUNCOID(); if (SRF_IS_FIRSTCALL()) { funcctx = SRF_FIRSTCALL_INIT(); @@ -309,7 +314,7 @@ Datum set_curprs(PG_FUNCTION_ARGS); Datum set_curprs(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); findprs(PG_GETARG_OID(0)); current_parser_id = PG_GETARG_OID(0); PG_RETURN_VOID(); @@ -321,7 +326,8 @@ Datum set_curprs_byname(PG_FUNCTION_ARGS) { text *name = PG_GETARG_TEXT_P(0); - SET_FUNCOID(); + + SET_FUNCOID(); DirectFunctionCall1( set_curprs, ObjectIdGetDatum(name2id_prs(name)) @@ -444,7 +450,8 @@ parse(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; Datum result; - SET_FUNCOID(); + + SET_FUNCOID(); if (SRF_IS_FIRSTCALL()) { text *txt = PG_GETARG_TEXT_P(1); @@ -468,7 +475,8 @@ parse_byname(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; Datum result; - SET_FUNCOID(); + + SET_FUNCOID(); if (SRF_IS_FIRSTCALL()) { text *name = PG_GETARG_TEXT_P(0); @@ -495,7 +503,8 @@ parse_current(PG_FUNCTION_ARGS) { FuncCallContext *funcctx; Datum result; - SET_FUNCOID(); + + SET_FUNCOID(); if (SRF_IS_FIRSTCALL()) { text *txt = PG_GETARG_TEXT_P(0); @@ -527,7 +536,7 @@ headline(PG_FUNCTION_ARGS) TSCfgInfo *cfg; WParserInfo *prsobj; - SET_FUNCOID(); + SET_FUNCOID(); cfg = findcfg(PG_GETARG_OID(0)); prsobj = findprs(cfg->prs_id); @@ -566,14 +575,15 @@ 
headline_byname(PG_FUNCTION_ARGS) text *cfg = PG_GETARG_TEXT_P(0); Datum out; - SET_FUNCOID(); + + SET_FUNCOID(); out = DirectFunctionCall4( - headline, - ObjectIdGetDatum(name2id_cfg(cfg)), - PG_GETARG_DATUM(1), - PG_GETARG_DATUM(2), + headline, + ObjectIdGetDatum(name2id_cfg(cfg)), + PG_GETARG_DATUM(1), + PG_GETARG_DATUM(2), (PG_NARGS() > 3) ? PG_GETARG_DATUM(3) : PointerGetDatum(NULL) - ); + ); PG_FREE_IF_COPY(cfg, 0); PG_RETURN_DATUM(out); @@ -584,7 +594,7 @@ Datum headline_current(PG_FUNCTION_ARGS); Datum headline_current(PG_FUNCTION_ARGS) { - SET_FUNCOID(); + SET_FUNCOID(); PG_RETURN_DATUM(DirectFunctionCall4( headline, ObjectIdGetDatum(get_currcfg()), diff --git a/contrib/tsearch2/wparser_def.c b/contrib/tsearch2/wparser_def.c index 035e5f2495d..21b41eef8f6 100644 --- a/contrib/tsearch2/wparser_def.c +++ b/contrib/tsearch2/wparser_def.c @@ -192,12 +192,13 @@ prsd_headline(PG_FUNCTION_ARGS) int bestb = -1, beste = -1; int bestlen = -1; - int pose = 0, posb, + int pose = 0, + posb, poslen, curlen; int i; - int highlight=0; + int highlight = 0; /* config */ prs->startsel = NULL; @@ -224,13 +225,13 @@ prsd_headline(PG_FUNCTION_ARGS) prs->stopsel = pstrdup(mptr->value); else if (pg_strcasecmp(mptr->key, "HighlightAll") == 0) highlight = ( - pg_strcasecmp(mptr->value, "1")==0 || - pg_strcasecmp(mptr->value, "on")==0 || - pg_strcasecmp(mptr->value, "true")==0 || - pg_strcasecmp(mptr->value, "t")==0 || - pg_strcasecmp(mptr->value, "y")==0 || - pg_strcasecmp(mptr->value, "yes")==0 ) ? - 1 : 0; + pg_strcasecmp(mptr->value, "1") == 0 || + pg_strcasecmp(mptr->value, "on") == 0 || + pg_strcasecmp(mptr->value, "true") == 0 || + pg_strcasecmp(mptr->value, "t") == 0 || + pg_strcasecmp(mptr->value, "y") == 0 || + pg_strcasecmp(mptr->value, "yes") == 0) ? + 1 : 0; pfree(mptr->key); pfree(mptr->value); @@ -239,23 +240,25 @@ prsd_headline(PG_FUNCTION_ARGS) } pfree(map); - if (highlight==0) { + if (highlight == 0) + { if (min_words >= max_words) ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("MinWords should be less than MaxWords"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("MinWords should be less than MaxWords"))); if (min_words <= 0) ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("MinWords should be positive"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("MinWords should be positive"))); if (shortword < 0) ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("ShortWord should be >= 0"))); + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("ShortWord should be >= 0"))); } } - if (highlight==0) { + if (highlight == 0) + { while (hlCover(prs, query, &p, &q)) { /* find cover len in words */ @@ -269,17 +272,17 @@ prsd_headline(PG_FUNCTION_ARGS) poslen++; pose = i; } - + if (poslen < bestlen && !(NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword)) { /* best already finded, so try one more cover */ p++; continue; } - - posb=p; + + posb = p; if (curlen < max_words) - { /* find good end */ + { /* find good end */ for (i = i - 1; i < prs->curwords && curlen < max_words; i++) { if (i != q) @@ -295,8 +298,11 @@ prsd_headline(PG_FUNCTION_ARGS) if (curlen >= min_words) break; } - if ( curlen < min_words && i>=prs->curwords ) { /* got end of text and our cover is shoter than min_words */ - for(i=p; i>= 0; i--) { + if (curlen < min_words && i >= prs->curwords) + { /* got end of text and our cover is shoter + * than min_words */ + for (i = p; i >= 0; i--) + { if (!NONWORDTOKEN(prs->words[i].type)) curlen++; 
if (prs->words[i].item && !prs->words[i].repeated) @@ -306,11 +312,11 @@ prsd_headline(PG_FUNCTION_ARGS) if (curlen >= min_words) break; } - posb=(i>=0) ? i : 0; + posb = (i >= 0) ? i : 0; } } else - { /* shorter cover :((( */ + { /* shorter cover :((( */ for (; curlen > min_words; i--) { if (!NONWORDTOKEN(prs->words[i].type)) @@ -323,7 +329,7 @@ prsd_headline(PG_FUNCTION_ARGS) break; } } - + if (bestlen < 0 || (poslen > bestlen && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword)) || (bestlen >= 0 && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword) && (NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword))) @@ -332,7 +338,7 @@ prsd_headline(PG_FUNCTION_ARGS) beste = pose; bestlen = poslen; } - + p++; } @@ -348,19 +354,24 @@ prsd_headline(PG_FUNCTION_ARGS) bestb = 0; beste = pose; } - } else { - bestb=0; - beste=prs->curwords-1; + } + else + { + bestb = 0; + beste = prs->curwords - 1; } for (i = bestb; i <= beste; i++) { if (prs->words[i].item) prs->words[i].selected = 1; - if ( highlight==0 ) { + if (highlight == 0) + { if (HLIDIGNORE(prs->words[i].type)) prs->words[i].replace = 1; - } else { + } + else + { if (HTMLHLIDIGNORE(prs->words[i].type)) prs->words[i].replace = 1; } diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index 84792ca2484..67ae99fbfbb 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -22,34 +22,34 @@ static void *pgxml_palloc(size_t size); static void *pgxml_repalloc(void *ptr, size_t size); static void pgxml_pfree(void *ptr); static char *pgxml_pstrdup(const char *string); -static void pgxml_errorHandler (void * ctxt, const char *msg, ...); +static void pgxml_errorHandler(void *ctxt, const char *msg,...); -void elog_error(int level, char *explain, int force); -void pgxml_parser_init(void); +void elog_error(int level, char *explain, int force); +void pgxml_parser_init(void); static xmlChar *pgxmlNodeSetToText(xmlNodeSetPtr nodeset, xmlChar * toptagname, xmlChar * septagname, xmlChar * plainsep); -text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *toptag, - xmlChar *septag, xmlChar *plainsep); +text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar * toptag, + xmlChar * septag, xmlChar * plainsep); -xmlChar *pgxml_texttoxmlchar(text *textstring); +xmlChar *pgxml_texttoxmlchar(text *textstring); -static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar* xpath); +static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar * xpath); Datum xml_valid(PG_FUNCTION_ARGS); -Datum xpath_nodeset(PG_FUNCTION_ARGS); +Datum xpath_nodeset(PG_FUNCTION_ARGS); Datum xpath_string(PG_FUNCTION_ARGS); Datum xpath_number(PG_FUNCTION_ARGS); -Datum xpath_bool(PG_FUNCTION_ARGS); -Datum xpath_list(PG_FUNCTION_ARGS); -Datum xpath_table(PG_FUNCTION_ARGS); +Datum xpath_bool(PG_FUNCTION_ARGS); +Datum xpath_list(PG_FUNCTION_ARGS); +Datum xpath_table(PG_FUNCTION_ARGS); /* Global variables */ -char *errbuf; /* per line error buffer */ -char *pgxml_errorMsg = NULL; /* overall error message */ +char *errbuf; /* per line error buffer */ +char *pgxml_errorMsg = NULL; /* overall error message */ /* Convenience macros */ @@ -93,47 +93,47 @@ pgxml_pstrdup(const char *string) */ static void -pgxml_errorHandler (void * ctxt, const char *msg, ...) +pgxml_errorHandler(void *ctxt, const char *msg,...) 
{ - va_list args; - - va_start(args, msg); - vsnprintf(errbuf, ERRBUF_SIZE, msg, args); - va_end(args); - /* Now copy the argument across */ - if (pgxml_errorMsg == NULL) - { - pgxml_errorMsg = pstrdup(errbuf); - } -else - { - int32 xsize = strlen(pgxml_errorMsg); - pgxml_errorMsg = repalloc(pgxml_errorMsg, - (size_t) (xsize + strlen(errbuf) + 1)); - strncpy(&pgxml_errorMsg[xsize-1],errbuf,strlen(errbuf)); - pgxml_errorMsg[xsize+strlen(errbuf)-1]='\0'; - - } - memset(errbuf,0,ERRBUF_SIZE); + va_list args; + + va_start(args, msg); + vsnprintf(errbuf, ERRBUF_SIZE, msg, args); + va_end(args); + /* Now copy the argument across */ + if (pgxml_errorMsg == NULL) + pgxml_errorMsg = pstrdup(errbuf); + else + { + int32 xsize = strlen(pgxml_errorMsg); + + pgxml_errorMsg = repalloc(pgxml_errorMsg, + (size_t) (xsize + strlen(errbuf) + 1)); + strncpy(&pgxml_errorMsg[xsize - 1], errbuf, strlen(errbuf)); + pgxml_errorMsg[xsize + strlen(errbuf) - 1] = '\0'; + + } + memset(errbuf, 0, ERRBUF_SIZE); } /* This function reports the current message at the level specified */ -void elog_error(int level, char *explain, int force) +void +elog_error(int level, char *explain, int force) { - if (force || (pgxml_errorMsg != NULL)) - { - if (pgxml_errorMsg == NULL) + if (force || (pgxml_errorMsg != NULL)) { - ereport(level,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), - errmsg(explain))); - } - else - { - ereport(level,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), - errmsg("%s:%s",explain,pgxml_errorMsg))); - pfree(pgxml_errorMsg); + if (pgxml_errorMsg == NULL) + { + ereport(level, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg(explain))); + } + else + { + ereport(level, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s:%s", explain, pgxml_errorMsg))); + pfree(pgxml_errorMsg); + } } - } } void @@ -155,7 +155,7 @@ pgxml_parser_init() pgxml_errorMsg = NULL; errbuf = palloc(200); - memset(errbuf,0,200); + memset(errbuf, 0, 200); } @@ -191,7 +191,7 @@ static xmlChar pgxmlNodeSetToText(xmlNodeSetPtr nodeset, xmlChar * toptagname, xmlChar * septagname, - xmlChar * plainsep) + xmlChar * plainsep) { /* Function translates a nodeset into a text representation */ @@ -201,9 +201,12 @@ pgxmlNodeSetToText(xmlNodeSetPtr nodeset, */ /* each representation is surrounded by <tagname> ... </tagname> */ - /* plainsep is an ordinary (not tag) seperator - if used, then - * nodes are cast to string as output method */ - + + /* + * plainsep is an ordinary (not tag) seperator - if used, then nodes + * are cast to string as output method + */ + xmlBufferPtr buf; xmlChar *result; @@ -222,35 +225,37 @@ pgxmlNodeSetToText(xmlNodeSetPtr nodeset, for (i = 0; i < nodeset->nodeNr; i++) { - if (plainsep != NULL) { - xmlBufferWriteCHAR(buf, - xmlXPathCastNodeToString(nodeset->nodeTab[i])); - - /* If this isn't the last entry, write the plain sep. */ - if (i < (nodeset->nodeNr)-1) { - xmlBufferWriteChar(buf, plainsep); - } - } else { - - - if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + if (plainsep != NULL) { - xmlBufferWriteChar(buf, "<"); - xmlBufferWriteCHAR(buf, septagname); - xmlBufferWriteChar(buf, ">"); - } - xmlNodeDump(buf, - nodeset->nodeTab[i]->doc, - nodeset->nodeTab[i], - 1, 0); + xmlBufferWriteCHAR(buf, + xmlXPathCastNodeToString(nodeset->nodeTab[i])); - if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + /* If this isn't the last entry, write the plain sep. 
*/ + if (i < (nodeset->nodeNr) - 1) + xmlBufferWriteChar(buf, plainsep); + } + else { - xmlBufferWriteChar(buf, "</"); - xmlBufferWriteCHAR(buf, septagname); - xmlBufferWriteChar(buf, ">"); + + + if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + { + xmlBufferWriteChar(buf, "<"); + xmlBufferWriteCHAR(buf, septagname); + xmlBufferWriteChar(buf, ">"); + } + xmlNodeDump(buf, + nodeset->nodeTab[i]->doc, + nodeset->nodeTab[i], + 1, 0); + + if ((septagname != NULL) && (xmlStrlen(septagname) > 0)) + { + xmlBufferWriteChar(buf, "</"); + xmlBufferWriteCHAR(buf, septagname); + xmlBufferWriteChar(buf, ">"); + } } - } } } @@ -294,11 +299,13 @@ PG_FUNCTION_INFO_V1(xpath_nodeset); Datum xpath_nodeset(PG_FUNCTION_ARGS) { - xmlChar *xpath, *toptag, *septag; - int32 pathsize; - text - *xpathsupp, - *xpres; + xmlChar *xpath, + *toptag, + *septag; + int32 pathsize; + text + *xpathsupp, + *xpres; /* PG_GETARG_TEXT_P(0) is document buffer */ xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ @@ -311,32 +318,31 @@ xpath_nodeset(PG_FUNCTION_ARGS) xpath = pgxml_texttoxmlchar(xpathsupp); xpres = pgxml_result_to_text( - pgxml_xpath(PG_GETARG_TEXT_P(0),xpath), - toptag,septag,NULL); + pgxml_xpath(PG_GETARG_TEXT_P(0), xpath), + toptag, septag, NULL); /* xmlCleanupParser(); done by result_to_text routine */ pfree((void *) xpath); - if (xpres == NULL) - { - PG_RETURN_NULL(); - } + if (xpres == NULL) + PG_RETURN_NULL(); PG_RETURN_TEXT_P(xpres); } -// The following function is almost identical, but returns the elements in -// a list. +/* The following function is almost identical, but returns the elements in */ +/* a list. */ PG_FUNCTION_INFO_V1(xpath_list); Datum xpath_list(PG_FUNCTION_ARGS) { - xmlChar *xpath, *plainsep; - int32 pathsize; - text - *xpathsupp, - *xpres; + xmlChar *xpath, + *plainsep; + int32 pathsize; + text + *xpathsupp, + *xpres; /* PG_GETARG_TEXT_P(0) is document buffer */ xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ @@ -348,16 +354,14 @@ xpath_list(PG_FUNCTION_ARGS) xpath = pgxml_texttoxmlchar(xpathsupp); xpres = pgxml_result_to_text( - pgxml_xpath(PG_GETARG_TEXT_P(0),xpath), - NULL,NULL,plainsep); + pgxml_xpath(PG_GETARG_TEXT_P(0), xpath), + NULL, NULL, plainsep); /* xmlCleanupParser(); done by result_to_text routine */ pfree((void *) xpath); - if (xpres == NULL) - { - PG_RETURN_NULL(); - } + if (xpres == NULL) + PG_RETURN_NULL(); PG_RETURN_TEXT_P(xpres); } @@ -367,38 +371,38 @@ PG_FUNCTION_INFO_V1(xpath_string); Datum xpath_string(PG_FUNCTION_ARGS) { - xmlChar *xpath; - int32 pathsize; - text - *xpathsupp, - *xpres; + xmlChar *xpath; + int32 pathsize; + text + *xpathsupp, + *xpres; /* PG_GETARG_TEXT_P(0) is document buffer */ xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */ pathsize = VARSIZE(xpathsupp) - VARHDRSZ; - /* We encapsulate the supplied path with "string()" - * = 8 chars + 1 for NUL at end */ + /* + * We encapsulate the supplied path with "string()" = 8 chars + 1 for + * NUL at end + */ /* We could try casting to string using the libxml function? 
*/ - xpath =(xmlChar *) palloc(pathsize + 9); - memcpy((char *) (xpath+7), VARDATA(xpathsupp), pathsize); - strncpy((char *) xpath, "string(",7); - xpath[pathsize+7] = ')'; - xpath[pathsize+8] = '\0'; + xpath = (xmlChar *) palloc(pathsize + 9); + memcpy((char *) (xpath + 7), VARDATA(xpathsupp), pathsize); + strncpy((char *) xpath, "string(", 7); + xpath[pathsize + 7] = ')'; + xpath[pathsize + 8] = '\0'; xpres = pgxml_result_to_text( - pgxml_xpath(PG_GETARG_TEXT_P(0),xpath), - NULL,NULL,NULL); + pgxml_xpath(PG_GETARG_TEXT_P(0), xpath), + NULL, NULL, NULL); xmlCleanupParser(); pfree((void *) xpath); - if (xpres == NULL) - { - PG_RETURN_NULL(); - } + if (xpres == NULL) + PG_RETURN_NULL(); PG_RETURN_TEXT_P(xpres); } @@ -408,12 +412,12 @@ PG_FUNCTION_INFO_V1(xpath_number); Datum xpath_number(PG_FUNCTION_ARGS) { - xmlChar *xpath; - int32 pathsize; - text - *xpathsupp; - - float4 fRes; + xmlChar *xpath; + int32 pathsize; + text + *xpathsupp; + + float4 fRes; xmlXPathObjectPtr res; @@ -424,21 +428,19 @@ xpath_number(PG_FUNCTION_ARGS) xpath = pgxml_texttoxmlchar(xpathsupp); - res = pgxml_xpath(PG_GETARG_TEXT_P(0),xpath); + res = pgxml_xpath(PG_GETARG_TEXT_P(0), xpath); pfree((void *) xpath); if (res == NULL) - { - xmlCleanupParser(); - PG_RETURN_NULL(); - } + { + xmlCleanupParser(); + PG_RETURN_NULL(); + } fRes = xmlXPathCastToNumber(res); xmlCleanupParser(); if (xmlXPathIsNaN(fRes)) - { - PG_RETURN_NULL(); - } + PG_RETURN_NULL(); PG_RETURN_FLOAT4(fRes); @@ -450,12 +452,12 @@ PG_FUNCTION_INFO_V1(xpath_bool); Datum xpath_bool(PG_FUNCTION_ARGS) { - xmlChar *xpath; - int32 pathsize; - text - *xpathsupp; - - int bRes; + xmlChar *xpath; + int32 pathsize; + text + *xpathsupp; + + int bRes; xmlXPathObjectPtr res; @@ -466,14 +468,14 @@ xpath_bool(PG_FUNCTION_ARGS) xpath = pgxml_texttoxmlchar(xpathsupp); - res = pgxml_xpath(PG_GETARG_TEXT_P(0),xpath); + res = pgxml_xpath(PG_GETARG_TEXT_P(0), xpath); pfree((void *) xpath); if (res == NULL) - { - xmlCleanupParser(); - PG_RETURN_BOOL(false); - } + { + xmlCleanupParser(); + PG_RETURN_BOOL(false); + } bRes = xmlXPathCastToBoolean(res); xmlCleanupParser(); @@ -486,8 +488,8 @@ xpath_bool(PG_FUNCTION_ARGS) /* Core function to evaluate XPath query */ xmlXPathObjectPtr - pgxml_xpath(text *document, xmlChar *xpath) - { +pgxml_xpath(text *document, xmlChar * xpath) +{ xmlDocPtr doctree; xmlXPathContextPtr ctxt; @@ -497,14 +499,14 @@ xmlXPathObjectPtr int32 docsize; - + docsize = VARSIZE(document) - VARHDRSZ; pgxml_parser_init(); doctree = xmlParseMemory((char *) VARDATA(document), docsize); if (doctree == NULL) - { /* not well-formed */ + { /* not well-formed */ return NULL; } @@ -518,9 +520,9 @@ xmlXPathObjectPtr { xmlCleanupParser(); xmlFreeDoc(doctree); - elog_error(ERROR,"XPath Syntax Error",1); + elog_error(ERROR, "XPath Syntax Error", 1); - return NULL; + return NULL; } /* Now evaluate the path expression. 
*/ @@ -529,36 +531,35 @@ xmlXPathObjectPtr if (res == NULL) { - xmlXPathFreeContext(ctxt); - // xmlCleanupParser(); + xmlXPathFreeContext(ctxt); + /* xmlCleanupParser(); */ xmlFreeDoc(doctree); return NULL; } /* xmlFreeDoc(doctree); */ return res; - } +} -text -*pgxml_result_to_text(xmlXPathObjectPtr res, - xmlChar *toptag, - xmlChar *septag, - xmlChar *plainsep) +text + * +pgxml_result_to_text(xmlXPathObjectPtr res, + xmlChar * toptag, + xmlChar * septag, + xmlChar * plainsep) { - xmlChar *xpresstr; - int32 ressize; - text *xpres; - - if (res == NULL) - { - return NULL; - } + xmlChar *xpresstr; + int32 ressize; + text *xpres; + + if (res == NULL) + return NULL; switch (res->type) { case XPATH_NODESET: xpresstr = pgxmlNodeSetToText(res->nodesetval, - toptag, - septag, plainsep); + toptag, + septag, plainsep); break; case XPATH_STRING: @@ -583,7 +584,7 @@ text xmlFree(xpresstr); - elog_error(ERROR,"XPath error",0); + elog_error(ERROR, "XPath error", 0); return xpres; @@ -595,299 +596,314 @@ text PG_FUNCTION_INFO_V1(xpath_table); -Datum xpath_table(PG_FUNCTION_ARGS) +Datum +xpath_table(PG_FUNCTION_ARGS) { /* SPI (input tuple) support */ - SPITupleTable *tuptable; - HeapTuple spi_tuple; - TupleDesc spi_tupdesc; + SPITupleTable *tuptable; + HeapTuple spi_tuple; + TupleDesc spi_tupdesc; /* Output tuple (tuplestore) support */ - Tuplestorestate *tupstore = NULL; - TupleDesc ret_tupdesc; - HeapTuple ret_tuple; - - ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; - AttInMetadata *attinmeta; - MemoryContext per_query_ctx; - MemoryContext oldcontext; - -/* Function parameters */ - char *pkeyfield = GET_STR(PG_GETARG_TEXT_P(0)); - char *xmlfield = GET_STR(PG_GETARG_TEXT_P(1)); - char *relname = GET_STR(PG_GETARG_TEXT_P(2)); - char *xpathset = GET_STR(PG_GETARG_TEXT_P(3)); - char *condition = GET_STR(PG_GETARG_TEXT_P(4)); - - char **values; - xmlChar **xpaths; - xmlChar *pos; - xmlChar *pathsep= "|"; - - int numpaths; - int ret; - int proc; - int i; - int j; - int rownr; /* For issuing multiple rows from one original document */ - int had_values; /* To determine end of nodeset results */ - - StringInfo querysql; + Tuplestorestate *tupstore = NULL; + TupleDesc ret_tupdesc; + HeapTuple ret_tuple; + + ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; + AttInMetadata *attinmeta; + MemoryContext per_query_ctx; + MemoryContext oldcontext; + +/* Function parameters */ + char *pkeyfield = GET_STR(PG_GETARG_TEXT_P(0)); + char *xmlfield = GET_STR(PG_GETARG_TEXT_P(1)); + char *relname = GET_STR(PG_GETARG_TEXT_P(2)); + char *xpathset = GET_STR(PG_GETARG_TEXT_P(3)); + char *condition = GET_STR(PG_GETARG_TEXT_P(4)); + + char **values; + xmlChar **xpaths; + xmlChar *pos; + xmlChar *pathsep = "|"; + + int numpaths; + int ret; + int proc; + int i; + int j; + int rownr; /* For issuing multiple rows from one + * original document */ + int had_values; /* To determine end of nodeset results */ + + StringInfo querysql; /* We only have a valid tuple description in table function mode */ - if (rsinfo->expectedDesc == NULL) { - ereport(ERROR,(errcode(ERRCODE_SYNTAX_ERROR), - errmsg("xpath_table must be called as a table function"))); - } - -/* The tuplestore must exist in a higher context than + if (rsinfo->expectedDesc == NULL) + { + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("xpath_table must be called as a table function"))); + } + +/* The tuplestore must exist in a higher context than * this function call (per_query_ctx is used) */ - per_query_ctx = 
rsinfo->econtext->ecxt_per_query_memory; - oldcontext = MemoryContextSwitchTo(per_query_ctx); + per_query_ctx = rsinfo->econtext->ecxt_per_query_memory; + oldcontext = MemoryContextSwitchTo(per_query_ctx); /* Create the tuplestore - work_mem is the max in-memory size before a * file is created on disk to hold it. */ - tupstore = tuplestore_begin_heap(true, false, work_mem); + tupstore = tuplestore_begin_heap(true, false, work_mem); - MemoryContextSwitchTo(oldcontext); + MemoryContextSwitchTo(oldcontext); - /* get the requested return tuple description */ - ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); + /* get the requested return tuple description */ + ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); - /* At the moment we assume that the returned attributes make sense - * for the XPath specififed (i.e. we trust the caller). - * It's not fatal if they get it wrong - the input function for the - * column type will raise an error if the path result can't be converted - * into the correct binary representation. - */ + /* + * At the moment we assume that the returned attributes make sense for + * the XPath specififed (i.e. we trust the caller). It's not fatal if + * they get it wrong - the input function for the column type will + * raise an error if the path result can't be converted into the + * correct binary representation. + */ - attinmeta = TupleDescGetAttInMetadata(ret_tupdesc); + attinmeta = TupleDescGetAttInMetadata(ret_tupdesc); - /* We want to materialise because it means that we don't have to - * carry libxml2 parser state between invocations of this function - */ + /* + * We want to materialise because it means that we don't have to carry + * libxml2 parser state between invocations of this function + */ - /* check to see if caller supports us returning a tuplestore */ - if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize)) - ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("xpath_table requires Materialize mode, but it is not " - "allowed in this context"))); + /* check to see if caller supports us returning a tuplestore */ + if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize)) + ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("xpath_table requires Materialize mode, but it is not " + "allowed in this context"))); - // Set return mode and allocate value space. - rsinfo->returnMode = SFRM_Materialize; - rsinfo->setDesc = ret_tupdesc; - - values = (char **) palloc(ret_tupdesc->natts * sizeof(char *)); + /* Set return mode and allocate value space. */ + rsinfo->returnMode = SFRM_Materialize; + rsinfo->setDesc = ret_tupdesc; - xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *)); + values = (char **) palloc(ret_tupdesc->natts * sizeof(char *)); - /* Split XPaths. xpathset is a writable CString. */ + xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *)); - /* Note that we stop splitting once we've done all needed for tupdesc */ + /* Split XPaths. xpathset is a writable CString. 
*/ - numpaths=0; - pos = xpathset; - do { - xpaths[numpaths] = pos; - pos = strstr(pos,pathsep); - if (pos != NULL) { - *pos = '\0'; - pos++; - } - numpaths++; - } while ((pos != NULL) && (numpaths < (ret_tupdesc->natts - 1) )); + /* Note that we stop splitting once we've done all needed for tupdesc */ - /* Now build query */ + numpaths = 0; + pos = xpathset; + do + { + xpaths[numpaths] = pos; + pos = strstr(pos, pathsep); + if (pos != NULL) + { + *pos = '\0'; + pos++; + } + numpaths++; + } while ((pos != NULL) && (numpaths < (ret_tupdesc->natts - 1))); + + /* Now build query */ - querysql = makeStringInfo(); + querysql = makeStringInfo(); - /* Build initial sql statement */ - appendStringInfo(querysql, "SELECT %s, %s FROM %s WHERE %s", - pkeyfield, - xmlfield, - relname, - condition - ); + /* Build initial sql statement */ + appendStringInfo(querysql, "SELECT %s, %s FROM %s WHERE %s", + pkeyfield, + xmlfield, + relname, + condition + ); - if ((ret = SPI_connect()) < 0) { - elog(ERROR, "xpath_table: SPI_connect returned %d", ret); - } + if ((ret = SPI_connect()) < 0) + elog(ERROR, "xpath_table: SPI_connect returned %d", ret); - if ((ret = SPI_exec(querysql->data,0)) != SPI_OK_SELECT) { - elog(ERROR,"xpath_table: SPI execution failed for query %s",querysql->data); - } + if ((ret = SPI_exec(querysql->data, 0)) != SPI_OK_SELECT) + elog(ERROR, "xpath_table: SPI execution failed for query %s", querysql->data); - proc= SPI_processed; - /* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */ - tuptable = SPI_tuptable; - spi_tupdesc = tuptable->tupdesc; + proc = SPI_processed; + /* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */ + tuptable = SPI_tuptable; + spi_tupdesc = tuptable->tupdesc; /* Switch out of SPI context */ - MemoryContextSwitchTo(oldcontext); + MemoryContextSwitchTo(oldcontext); /* Check that SPI returned correct result. If you put a comma into one of * the function parameters, this will catch it when the SPI query returns - * e.g. 3 columns. + * e.g. 3 columns. */ - if (spi_tupdesc->natts != 2) { - ereport(ERROR,(errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("Expression returning multiple columns is not valid in parameter list"), - errdetail("Expected two columns in SPI result, got %d",spi_tupdesc->natts))); - } + if (spi_tupdesc->natts != 2) + { + ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("Expression returning multiple columns is not valid in parameter list"), + errdetail("Expected two columns in SPI result, got %d", spi_tupdesc->natts))); + } /* Setup the parser. Beware that this must happen in the same context as the * cleanup - which means that any error from here on must do cleanup to * ensure that the entity table doesn't get freed by being out of context. */ - pgxml_parser_init(); - - /* For each row i.e. document returned from SPI */ - for (i=0; i < proc; i++) { - char *pkey; - char *xmldoc; - - xmlDocPtr doctree; - xmlXPathContextPtr ctxt; - xmlXPathObjectPtr res; - xmlChar *resstr; - - - xmlXPathCompExprPtr comppath; - - /* Extract the row data as C Strings */ - - spi_tuple = tuptable->vals[i]; - pkey = SPI_getvalue(spi_tuple, spi_tupdesc,1); - xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc,2); - - - /* Clear the values array, so that not-well-formed documents - * return NULL in all columns. - */ - - /* Note that this also means that spare columns will be NULL. 
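The split loop in the xpath_table hunk above chops the '|'-separated path list apart in place, overwriting each separator with a NUL and remembering where each piece starts, and stops once it has one path per output column. A hedged standalone version of that idiom (hypothetical names and cap, not the contrib code):

#include <stdio.h>
#include <string.h>

#define MAXCOLS 16

/*
 * Split 'set' in place on '|', storing up to 'maxpaths' piece pointers.
 * Returns the number of pieces found; 'set' must be writable.
 */
static int
split_paths(char *set, char *paths[], int maxpaths)
{
    int     n = 0;
    char   *pos = set;

    do
    {
        paths[n] = pos;
        pos = strstr(pos, "|");
        if (pos != NULL)
        {
            *pos = '\0';
            pos++;
        }
        n++;
    } while (pos != NULL && n < maxpaths);

    return n;
}

int
main(void)
{
    char    buf[] = "/doc/a/text()|/doc/b/text()|/doc/c/@id";
    char   *paths[MAXCOLS];
    int     n = split_paths(buf, paths, MAXCOLS);
    int     i;

    for (i = 0; i < n; i++)
        printf("path %d: %s\n", i, paths[i]);
    return 0;
}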
*/ - for (j=0; j < ret_tupdesc->natts; j++) { - values[j]= NULL; - } - - /* Insert primary key */ - values[0]=pkey; - - /* Parse the document */ - doctree = xmlParseMemory(xmldoc, strlen(xmldoc)); - - if (doctree == NULL) - { /* not well-formed, so output all-NULL tuple */ - - ret_tuple = BuildTupleFromCStrings(attinmeta, values); - oldcontext = MemoryContextSwitchTo(per_query_ctx); - tuplestore_puttuple(tupstore, ret_tuple); - MemoryContextSwitchTo(oldcontext); - heap_freetuple(ret_tuple); - } - else - { - /* New loop here - we have to deal with nodeset results */ - rownr=0; - - do { - /* Now evaluate the set of xpaths. */ - had_values=0; - for (j=0; j < numpaths; j++) { - - ctxt = xmlXPathNewContext(doctree); - ctxt->node = xmlDocGetRootElement(doctree); - xmlSetGenericErrorFunc(ctxt, pgxml_errorHandler); - - /* compile the path */ - comppath = xmlXPathCompile(xpaths[j]); - if (comppath == NULL) - { - xmlCleanupParser(); - xmlFreeDoc(doctree); - - elog_error(ERROR,"XPath Syntax Error",1); - - PG_RETURN_NULL(); /* Keep compiler happy */ - } - - /* Now evaluate the path expression. */ - res = xmlXPathCompiledEval(comppath, ctxt); - xmlXPathFreeCompExpr(comppath); - - if (res != NULL) - { - switch (res->type) - { - case XPATH_NODESET: - /* We see if this nodeset has enough nodes */ - if ((res->nodesetval != NULL) && (rownr < res->nodesetval->nodeNr)) { - resstr = - xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]); - had_values=1; - } else { - resstr = NULL; - } - - break; - - case XPATH_STRING: - resstr = xmlStrdup(res->stringval); - break; - - default: - elog(NOTICE, "Unsupported XQuery result: %d", res->type); - resstr = xmlStrdup("<unsupported/>"); - } - - - // Insert this into the appropriate column in the result tuple. - values[j+1] = resstr; - } - xmlXPathFreeContext(ctxt); - } - // Now add the tuple to the output, if there is one. - if (had_values) { - ret_tuple = BuildTupleFromCStrings(attinmeta, values); - oldcontext = MemoryContextSwitchTo(per_query_ctx); - tuplestore_puttuple(tupstore, ret_tuple); - MemoryContextSwitchTo(oldcontext); - heap_freetuple(ret_tuple); - } - - rownr++; - - } while (had_values); - - } - - xmlFreeDoc(doctree); - - pfree(pkey); - pfree(xmldoc); - } - - xmlCleanupParser(); + pgxml_parser_init(); + + /* For each row i.e. document returned from SPI */ + for (i = 0; i < proc; i++) + { + char *pkey; + char *xmldoc; + + xmlDocPtr doctree; + xmlXPathContextPtr ctxt; + xmlXPathObjectPtr res; + xmlChar *resstr; + + + xmlXPathCompExprPtr comppath; + + /* Extract the row data as C Strings */ + + spi_tuple = tuptable->vals[i]; + pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1); + xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2); + + + /* + * Clear the values array, so that not-well-formed documents + * return NULL in all columns. + */ + + /* Note that this also means that spare columns will be NULL. */ + for (j = 0; j < ret_tupdesc->natts; j++) + values[j] = NULL; + + /* Insert primary key */ + values[0] = pkey; + + /* Parse the document */ + doctree = xmlParseMemory(xmldoc, strlen(xmldoc)); + + if (doctree == NULL) + { /* not well-formed, so output all-NULL + * tuple */ + + ret_tuple = BuildTupleFromCStrings(attinmeta, values); + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tuplestore_puttuple(tupstore, ret_tuple); + MemoryContextSwitchTo(oldcontext); + heap_freetuple(ret_tuple); + } + else + { + /* New loop here - we have to deal with nodeset results */ + rownr = 0; + + do + { + /* Now evaluate the set of xpaths. 
*/ + had_values = 0; + for (j = 0; j < numpaths; j++) + { + + ctxt = xmlXPathNewContext(doctree); + ctxt->node = xmlDocGetRootElement(doctree); + xmlSetGenericErrorFunc(ctxt, pgxml_errorHandler); + + /* compile the path */ + comppath = xmlXPathCompile(xpaths[j]); + if (comppath == NULL) + { + xmlCleanupParser(); + xmlFreeDoc(doctree); + + elog_error(ERROR, "XPath Syntax Error", 1); + + PG_RETURN_NULL(); /* Keep compiler happy */ + } + + /* Now evaluate the path expression. */ + res = xmlXPathCompiledEval(comppath, ctxt); + xmlXPathFreeCompExpr(comppath); + + if (res != NULL) + { + switch (res->type) + { + case XPATH_NODESET: + /* We see if this nodeset has enough nodes */ + if ((res->nodesetval != NULL) && (rownr < res->nodesetval->nodeNr)) + { + resstr = + xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]); + had_values = 1; + } + else + resstr = NULL; + + break; + + case XPATH_STRING: + resstr = xmlStrdup(res->stringval); + break; + + default: + elog(NOTICE, "Unsupported XQuery result: %d", res->type); + resstr = xmlStrdup("<unsupported/>"); + } + + + /* + * Insert this into the appropriate column in the + * result tuple. + */ + values[j + 1] = resstr; + } + xmlXPathFreeContext(ctxt); + } + /* Now add the tuple to the output, if there is one. */ + if (had_values) + { + ret_tuple = BuildTupleFromCStrings(attinmeta, values); + oldcontext = MemoryContextSwitchTo(per_query_ctx); + tuplestore_puttuple(tupstore, ret_tuple); + MemoryContextSwitchTo(oldcontext); + heap_freetuple(ret_tuple); + } + + rownr++; + + } while (had_values); + + } + + xmlFreeDoc(doctree); + + pfree(pkey); + pfree(xmldoc); + } + + xmlCleanupParser(); /* Needed to flag completeness in 7.3.1. 7.4 defines it as a no-op. */ - tuplestore_donestoring(tupstore); - - SPI_finish(); - - rsinfo->setResult=tupstore; - - /* - * SFRM_Materialize mode expects us to return a NULL Datum. The actual - * tuples are in our tuplestore and passed back through - * rsinfo->setResult. rsinfo->setDesc is set to the tuple description - * that we actually used to build our tuples with, so the caller can - * verify we did what it was expecting. - */ - return (Datum) 0; - + tuplestore_donestoring(tupstore); + + SPI_finish(); + + rsinfo->setResult = tupstore; + + /* + * SFRM_Materialize mode expects us to return a NULL Datum. The actual + * tuples are in our tuplestore and passed back through + * rsinfo->setResult. rsinfo->setDesc is set to the tuple description + * that we actually used to build our tuples with, so the caller can + * verify we did what it was expecting. 
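The rownr/had_values loop above expands nodeset results into multiple output rows: row N takes the Nth node of each path's result, and the loop ends once no path produced a value for that row. A toy standalone illustration of that row-expansion pattern (made-up data, no libxml2):

#include <stdio.h>

int
main(void)
{
    /* Pretend two XPaths returned nodesets of different lengths. */
    const char *col1[] = {"a1", "a2", "a3"};
    const char *col2[] = {"b1", "b2"};
    int         len1 = 3;
    int         len2 = 2;
    int         rownr = 0;
    int         had_values;

    do
    {
        const char *v1 = (rownr < len1) ? col1[rownr] : NULL;
        const char *v2 = (rownr < len2) ? col2[rownr] : NULL;

        had_values = (v1 != NULL || v2 != NULL);
        if (had_values)
            printf("row %d: %s | %s\n", rownr,
                   v1 ? v1 : "NULL", v2 ? v2 : "NULL");
        rownr++;
    } while (had_values);

    return 0;
}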
+ */ + return (Datum) 0; + } diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c index 64f97366226..d0b348958d5 100644 --- a/contrib/xml2/xslt_proc.c +++ b/contrib/xml2/xslt_proc.c @@ -32,153 +32,150 @@ extern xmlChar *pgxml_texttoxmlchar(text *textstring); /* local defs */ static void parse_params(const char **params, text *paramstr); -Datum xslt_process(PG_FUNCTION_ARGS); +Datum xslt_process(PG_FUNCTION_ARGS); #define MAXPARAMS 20 PG_FUNCTION_INFO_V1(xslt_process); -Datum xslt_process(PG_FUNCTION_ARGS) { - - - const char *params[MAXPARAMS + 1]; /* +1 for the terminator */ - xsltStylesheetPtr stylesheet = NULL; - xmlDocPtr doctree; - xmlDocPtr restree; - xmlDocPtr ssdoc = NULL; - xmlChar *resstr; - int resstat; - int reslen; - - text *doct = PG_GETARG_TEXT_P(0); - text *ssheet = PG_GETARG_TEXT_P(1); - text *paramstr; - text *tres; - - - if (fcinfo->nargs == 3) - { - paramstr = PG_GETARG_TEXT_P(2); - parse_params(params,paramstr); - } - else /* No parameters */ - { - params[0] = NULL; - } - - /* Setup parser */ - pgxml_parser_init(); - - /* Check to see if document is a file or a literal */ - - if (VARDATA(doct)[0] == '<') - { - doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct)-VARHDRSZ); - } - else - { - doctree = xmlParseFile(GET_STR(doct)); - } - - if (doctree == NULL) - { - xmlCleanupParser(); - elog_error(ERROR,"Error parsing XML document",0); - - PG_RETURN_NULL(); - } - - /* Same for stylesheet */ - if (VARDATA(ssheet)[0] == '<') - { - ssdoc = xmlParseMemory((char *) VARDATA(ssheet), - VARSIZE(ssheet)-VARHDRSZ); - if (ssdoc == NULL) +Datum +xslt_process(PG_FUNCTION_ARGS) +{ + + + const char *params[MAXPARAMS + 1]; /* +1 for the terminator */ + xsltStylesheetPtr stylesheet = NULL; + xmlDocPtr doctree; + xmlDocPtr restree; + xmlDocPtr ssdoc = NULL; + xmlChar *resstr; + int resstat; + int reslen; + + text *doct = PG_GETARG_TEXT_P(0); + text *ssheet = PG_GETARG_TEXT_P(1); + text *paramstr; + text *tres; + + + if (fcinfo->nargs == 3) + { + paramstr = PG_GETARG_TEXT_P(2); + parse_params(params, paramstr); + } + else +/* No parameters */ + params[0] = NULL; + + /* Setup parser */ + pgxml_parser_init(); + + /* Check to see if document is a file or a literal */ + + if (VARDATA(doct)[0] == '<') + doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ); + else + doctree = xmlParseFile(GET_STR(doct)); + + if (doctree == NULL) + { + xmlCleanupParser(); + elog_error(ERROR, "Error parsing XML document", 0); + + PG_RETURN_NULL(); + } + + /* Same for stylesheet */ + if (VARDATA(ssheet)[0] == '<') { - xmlFreeDoc(doctree); - xmlCleanupParser(); - elog_error(ERROR,"Error parsing stylesheet as XML document",0); - PG_RETURN_NULL(); + ssdoc = xmlParseMemory((char *) VARDATA(ssheet), + VARSIZE(ssheet) - VARHDRSZ); + if (ssdoc == NULL) + { + xmlFreeDoc(doctree); + xmlCleanupParser(); + elog_error(ERROR, "Error parsing stylesheet as XML document", 0); + PG_RETURN_NULL(); + } + + stylesheet = xsltParseStylesheetDoc(ssdoc); } + else + stylesheet = xsltParseStylesheetFile(GET_STR(ssheet)); - stylesheet = xsltParseStylesheetDoc(ssdoc); - } - else - { - stylesheet = xsltParseStylesheetFile(GET_STR(ssheet)); - } - - - if (stylesheet == NULL) - { - xmlFreeDoc(doctree); - xsltCleanupGlobals(); - xmlCleanupParser(); - elog_error(ERROR,"Failed to parse stylesheet",0); - PG_RETURN_NULL(); - } - - restree = xsltApplyStylesheet(stylesheet, doctree, params); - resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet); - - xsltFreeStylesheet(stylesheet); - 
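xslt_process above hands its parameters to xsltApplyStylesheet in the usual libxslt form: alternating name/value C strings terminated by a NULL pointer, which parse_params (later in this hunk) builds by splitting a 'name=value,name=value' text in place. A minimal illustration of that layout (made-up values; libxslt evaluates the values as XPath expressions, so literal strings are normally passed with an extra layer of quotes):

#include <stdio.h>

int
main(void)
{
    /*
     * libxslt-style parameter list: alternating name/value strings,
     * terminated by a NULL pointer.
     */
    const char *params[] = {
        "title", "'Quarterly Report'",
        "lang", "'en'",
        NULL
    };
    int         i;

    for (i = 0; params[i] != NULL; i += 2)
        printf("%s = %s\n", params[i], params[i + 1]);
    return 0;
}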
xmlFreeDoc(restree); - xmlFreeDoc(doctree); - - xsltCleanupGlobals(); - xmlCleanupParser(); - - if (resstat < 0) { - PG_RETURN_NULL(); - } - - tres = palloc(reslen + VARHDRSZ); - memcpy(VARDATA(tres),resstr,reslen); - VARATT_SIZEP(tres) = reslen + VARHDRSZ; - - PG_RETURN_TEXT_P(tres); + + if (stylesheet == NULL) + { + xmlFreeDoc(doctree); + xsltCleanupGlobals(); + xmlCleanupParser(); + elog_error(ERROR, "Failed to parse stylesheet", 0); + PG_RETURN_NULL(); + } + + restree = xsltApplyStylesheet(stylesheet, doctree, params); + resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet); + + xsltFreeStylesheet(stylesheet); + xmlFreeDoc(restree); + xmlFreeDoc(doctree); + + xsltCleanupGlobals(); + xmlCleanupParser(); + + if (resstat < 0) + PG_RETURN_NULL(); + + tres = palloc(reslen + VARHDRSZ); + memcpy(VARDATA(tres), resstr, reslen); + VARATT_SIZEP(tres) = reslen + VARHDRSZ; + + PG_RETURN_TEXT_P(tres); } -void parse_params(const char **params, text *paramstr) +void +parse_params(const char **params, text *paramstr) { - char *pos; - char *pstr; - - int i; - char *nvsep="="; - char *itsep=","; - - pstr = GET_STR(paramstr); - - pos=pstr; - - for (i=0; i < MAXPARAMS; i++) - { - params[i] = pos; - pos = strstr(pos,nvsep); - if (pos != NULL) { - *pos = '\0'; - pos++; - } else { - params[i]=NULL; - break; - } - /* Value */ - i++; - params[i]=pos; - pos = strstr(pos,itsep); - if (pos != NULL) { - *pos = '\0'; - pos++; - } else { - break; - } - - } - if (i < MAXPARAMS) - { - params[i+1]=NULL; - } + char *pos; + char *pstr; + + int i; + char *nvsep = "="; + char *itsep = ","; + + pstr = GET_STR(paramstr); + + pos = pstr; + + for (i = 0; i < MAXPARAMS; i++) + { + params[i] = pos; + pos = strstr(pos, nvsep); + if (pos != NULL) + { + *pos = '\0'; + pos++; + } + else + { + params[i] = NULL; + break; + } + /* Value */ + i++; + params[i] = pos; + pos = strstr(pos, itsep); + if (pos != NULL) + { + *pos = '\0'; + pos++; + } + else + break; + + } + if (i < MAXPARAMS) + params[i + 1] = NULL; } diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c index 97aa50855f1..c4bbd5923fa 100644 --- a/src/backend/access/common/heaptuple.c +++ b/src/backend/access/common/heaptuple.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.93 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.94 2004/08/29 05:06:39 momjian Exp $ * * NOTES * The old interface functions have been converted to macros @@ -468,17 +468,19 @@ heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull) break; /* - * If the attribute number is 0, then we are supposed to return - * the entire tuple as a row-type Datum. (Using zero for this - * purpose is unclean since it risks confusion with "invalid attr" - * result codes, but it's not worth changing now.) + * If the attribute number is 0, then we are supposed to + * return the entire tuple as a row-type Datum. (Using zero + * for this purpose is unclean since it risks confusion with + * "invalid attr" result codes, but it's not worth changing + * now.) * - * We have to make a copy of the tuple so we can safely insert the - * Datum overhead fields, which are not set in on-disk tuples. + * We have to make a copy of the tuple so we can safely insert + * the Datum overhead fields, which are not set in on-disk + * tuples. 
*/ case InvalidAttrNumber: { - HeapTupleHeader dtup; + HeapTupleHeader dtup; dtup = (HeapTupleHeader) palloc(tup->t_len); memcpy((char *) dtup, (char *) tup->t_data, tup->t_len); @@ -555,7 +557,7 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest) * construct a tuple from the given values[] and nulls[] arrays * * Null attributes are indicated by a 'n' in the appropriate byte - * of nulls[]. Non-null attributes are indicated by a ' ' (space). + * of nulls[]. Non-null attributes are indicated by a ' ' (space). * ---------------- */ HeapTuple @@ -580,7 +582,7 @@ heap_formtuple(TupleDesc tupleDescriptor, /* * Check for nulls and embedded tuples; expand any toasted attributes - * in embedded tuples. This preserves the invariant that toasting can + * in embedded tuples. This preserves the invariant that toasting can * only go one level deep. * * We can skip calling toast_flatten_tuple_attribute() if the attribute @@ -620,7 +622,7 @@ heap_formtuple(TupleDesc tupleDescriptor, len += ComputeDataSize(tupleDescriptor, values, nulls); /* - * Allocate and zero the space needed. Note that the tuple body and + * Allocate and zero the space needed. Note that the tuple body and * HeapTupleData management structure are allocated in one chunk. */ tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len); @@ -683,9 +685,9 @@ heap_modifytuple(HeapTuple tuple, * allocate and fill values and nulls arrays from either the tuple or * the repl information, as appropriate. * - * NOTE: it's debatable whether to use heap_deformtuple() here or - * just heap_getattr() only the non-replaced colums. The latter could - * win if there are many replaced columns and few non-replaced ones. + * NOTE: it's debatable whether to use heap_deformtuple() here or just + * heap_getattr() only the non-replaced colums. The latter could win + * if there are many replaced columns and few non-replaced ones. * However, heap_deformtuple costs only O(N) while the heap_getattr * way would cost O(N^2) if there are many non-replaced columns, so it * seems better to err on the side of linear cost. @@ -763,10 +765,11 @@ heap_deformtuple(HeapTuple tuple, bool slow = false; /* can we use/set attcacheoff? */ natts = tup->t_natts; + /* - * In inheritance situations, it is possible that the given tuple actually - * has more fields than the caller is expecting. Don't run off the end - * of the caller's arrays. + * In inheritance situations, it is possible that the given tuple + * actually has more fields than the caller is expecting. Don't run + * off the end of the caller's arrays. 
*/ natts = Min(natts, tdesc_natts); @@ -787,9 +790,7 @@ heap_deformtuple(HeapTuple tuple, nulls[attnum] = ' '; if (!slow && att[attnum]->attcacheoff >= 0) - { off = att[attnum]->attcacheoff; - } else { off = att_align(off, att[attnum]->attalign); @@ -807,8 +808,8 @@ heap_deformtuple(HeapTuple tuple, } /* - * If tuple doesn't have all the atts indicated by tupleDesc, read - * the rest as null + * If tuple doesn't have all the atts indicated by tupleDesc, read the + * rest as null */ for (; attnum < tdesc_natts; attnum++) { diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c index d6191a2cfe4..d305734c3e7 100644 --- a/src/backend/access/common/indextuple.c +++ b/src/backend/access/common/indextuple.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.70 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.71 2004/08/29 05:06:39 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor, if ((size & INDEX_SIZE_MASK) != size) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("index row requires %lu bytes, maximum size is %lu", - (unsigned long) size, - (unsigned long) INDEX_SIZE_MASK))); + errmsg("index row requires %lu bytes, maximum size is %lu", + (unsigned long) size, + (unsigned long) INDEX_SIZE_MASK))); infomask |= size; diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c index 98dc37a76ea..4477a65bb2d 100644 --- a/src/backend/access/common/printtup.c +++ b/src/backend/access/common/printtup.c @@ -9,7 +9,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.84 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.85 2004/08/29 05:06:39 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -356,7 +356,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self) outputstr = DatumGetCString(FunctionCall3(&thisState->finfo, attr, - ObjectIdGetDatum(thisState->typioparam), + ObjectIdGetDatum(thisState->typioparam), Int32GetDatum(typeinfo->attrs[i]->atttypmod))); pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false); pfree(outputstr); @@ -368,7 +368,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self) outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo, attr, - ObjectIdGetDatum(thisState->typioparam))); + ObjectIdGetDatum(thisState->typioparam))); /* We assume the result will not have been toasted */ pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4); pq_sendbytes(&buf, VARDATA(outputbytes), @@ -458,7 +458,7 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self) outputstr = DatumGetCString(FunctionCall3(&thisState->finfo, attr, - ObjectIdGetDatum(thisState->typioparam), + ObjectIdGetDatum(thisState->typioparam), Int32GetDatum(typeinfo->attrs[i]->atttypmod))); pq_sendcountedtext(&buf, outputstr, strlen(outputstr), true); pfree(outputstr); @@ -579,7 +579,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self) value = DatumGetCString(OidFunctionCall3(typoutput, attr, - ObjectIdGetDatum(typioparam), + ObjectIdGetDatum(typioparam), Int32GetDatum(typeinfo->attrs[i]->atttypmod))); printatt((unsigned) i + 1, typeinfo->attrs[i], value); @@ -672,7 
+672,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self) outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo, attr, - ObjectIdGetDatum(thisState->typioparam))); + ObjectIdGetDatum(thisState->typioparam))); /* We assume the result will not have been toasted */ pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4); pq_sendbytes(&buf, VARDATA(outputbytes), diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c index 66403a1b6b6..ed932d35ab6 100644 --- a/src/backend/access/common/tupdesc.c +++ b/src/backend/access/common/tupdesc.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.105 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.106 2004/08/29 05:06:39 momjian Exp $ * * NOTES * some of the executor utility code such as "ExecTypeFromTL" should be @@ -52,8 +52,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid) /* * Allocate enough memory for the tuple descriptor, and zero the - * attrs[] array since TupleDescInitEntry assumes that the array - * is filled with NULL pointers. + * attrs[] array since TupleDescInitEntry assumes that the array is + * filled with NULL pointers. */ desc = (TupleDesc) palloc(sizeof(struct tupleDesc)); @@ -420,8 +420,8 @@ TupleDescInitEntry(TupleDesc desc, /* * Note: attributeName can be NULL, because the planner doesn't always - * fill in valid resname values in targetlists, particularly for resjunk - * attributes. + * fill in valid resname values in targetlists, particularly for + * resjunk attributes. */ if (attributeName != NULL) namestrcpy(&(att->attname), attributeName); @@ -464,7 +464,7 @@ TupleDescInitEntry(TupleDesc desc, * Given a relation schema (list of ColumnDef nodes), build a TupleDesc. * * Note: the default assumption is no OIDs; caller may modify the returned - * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in + * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in * later on. 
*/ TupleDesc diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c index d3fde5fbc56..75e88002772 100644 --- a/src/backend/access/gist/gist.c +++ b/src/backend/access/gist/gist.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.110 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.111 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -667,7 +667,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate) Datum attr[INDEX_MAX_KEYS]; bool whatfree[INDEX_MAX_KEYS]; char isnull[INDEX_MAX_KEYS]; - GistEntryVector *evec; + GistEntryVector *evec; Datum datum; int datumsize, i, @@ -715,8 +715,8 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate) { evec->n = 2; gistentryinit(evec->vector[1], - evec->vector[0].key, r, NULL, - (OffsetNumber) 0, evec->vector[0].bytes, FALSE); + evec->vector[0].key, r, NULL, + (OffsetNumber) 0, evec->vector[0].bytes, FALSE); } else @@ -763,7 +763,7 @@ gistunion(Relation r, IndexTuple *itvec, int len, GISTSTATE *giststate) static IndexTuple gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate) { - GistEntryVector *evec; + GistEntryVector *evec; Datum datum; int datumsize; bool result, @@ -879,7 +879,7 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV int len, *attrsize; OffsetNumber *entries; - GistEntryVector *evec; + GistEntryVector *evec; Datum datum; int datumsize; int reallen; @@ -940,8 +940,8 @@ gistunionsubkey(Relation r, GISTSTATE *giststate, IndexTuple *itvec, GIST_SPLITV else { /* - * evec->vector[0].bytes may be not - * defined, so form union with itself + * evec->vector[0].bytes may be not defined, so form union + * with itself */ if (reallen == 1) { @@ -1056,7 +1056,7 @@ gistadjsubkey(Relation r, *ev1p; float lpenalty, rpenalty; - GistEntryVector *evec; + GistEntryVector *evec; int datumsize; bool isnull[INDEX_MAX_KEYS]; int i, @@ -1222,7 +1222,7 @@ gistSplit(Relation r, rbknum; GISTPageOpaque opaque; GIST_SPLITVEC v; - GistEntryVector *entryvec; + GistEntryVector *entryvec; bool *decompvec; int i, j, diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c index e3a267ba67a..3580f1f3e60 100644 --- a/src/backend/access/gist/gistget.c +++ b/src/backend/access/gist/gistget.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.41 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.42 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -250,9 +250,10 @@ gistindex_keytest(IndexTuple tuple, FALSE, isNull); /* - * Call the Consistent function to evaluate the test. The arguments - * are the index datum (as a GISTENTRY*), the comparison datum, and - * the comparison operator's strategy number and subtype from pg_amop. + * Call the Consistent function to evaluate the test. The + * arguments are the index datum (as a GISTENTRY*), the comparison + * datum, and the comparison operator's strategy number and + * subtype from pg_amop. * * (Presently there's no need to pass the subtype since it'll always * be zero, but might as well pass it for possible future use.) 
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c index 02af1ef53ec..822b97e8e92 100644 --- a/src/backend/access/gist/gistscan.c +++ b/src/backend/access/gist/gistscan.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.54 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -115,9 +115,7 @@ gistrescan(PG_FUNCTION_ARGS) * the sk_subtype field. */ for (i = 0; i < s->numberOfKeys; i++) - { s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno - 1]; - } } PG_RETURN_VOID(); @@ -266,9 +264,9 @@ ReleaseResources_gist(void) GISTScanList next; /* - * Note: this should be a no-op during normal query shutdown. - * However, in an abort situation ExecutorEnd is not called and so - * there may be open index scans to clean up. + * Note: this should be a no-op during normal query shutdown. However, + * in an abort situation ExecutorEnd is not called and so there may be + * open index scans to clean up. */ prev = NULL; diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c index db00490e582..40b05720fb2 100644 --- a/src/backend/access/hash/hash.c +++ b/src/backend/access/hash/hash.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.72 2004/08/29 04:12:17 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.73 2004/08/29 05:06:40 momjian Exp $ * * NOTES * This file contains only the public interface routines. @@ -210,8 +210,8 @@ hashgettuple(PG_FUNCTION_ARGS) bool res; /* - * We hold pin but not lock on current buffer while outside the hash AM. - * Reacquire the read lock here. + * We hold pin but not lock on current buffer while outside the hash + * AM. Reacquire the read lock here. */ if (BufferIsValid(so->hashso_curbuf)) _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ); @@ -470,7 +470,7 @@ hashbulkdelete(PG_FUNCTION_ARGS) /* * Read the metapage to fetch original bucket and tuple counts. Also, * we keep a copy of the last-seen metapage so that we can use its - * hashm_spares[] values to compute bucket page addresses. This is a + * hashm_spares[] values to compute bucket page addresses. This is a * bit hokey but perfectly safe, since the interesting entries in the * spares array cannot change under us; and it beats rereading the * metapage for each bucket. @@ -532,7 +532,7 @@ loop_top: ItemPointer htup; hitem = (HashItem) PageGetItem(page, - PageGetItemId(page, offno)); + PageGetItemId(page, offno)); htup = &(hitem->hash_itup.t_tid); if (callback(htup, callback_state)) { @@ -595,8 +595,8 @@ loop_top: orig_ntuples == metap->hashm_ntuples) { /* - * No one has split or inserted anything since start of scan, - * so believe our count as gospel. + * No one has split or inserted anything since start of scan, so + * believe our count as gospel. */ metap->hashm_ntuples = num_index_tuples; } @@ -604,7 +604,7 @@ loop_top: { /* * Otherwise, our count is untrustworthy since we may have - * double-scanned tuples in split buckets. Proceed by + * double-scanned tuples in split buckets. Proceed by * dead-reckoning. 
*/ if (metap->hashm_ntuples > tuples_removed) diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c index b1c303f8d07..91ae559e3a6 100644 --- a/src/backend/access/hash/hashinsert.c +++ b/src/backend/access/hash/hashinsert.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.33 2004/08/29 04:12:18 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.34 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -20,7 +20,7 @@ static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, - Size itemsize, HashItem hitem); + Size itemsize, HashItem hitem); /* @@ -81,7 +81,7 @@ _hash_doinsert(Relation rel, HashItem hitem) /* * Check whether the item can fit on a hash page at all. (Eventually, - * we ought to try to apply TOAST methods if not.) Note that at this + * we ought to try to apply TOAST methods if not.) Note that at this * point, itemsz doesn't include the ItemId. */ if (itemsz > HashMaxItemSize((Page) metap)) @@ -105,7 +105,8 @@ _hash_doinsert(Relation rel, HashItem hitem) _hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK); /* - * Acquire share lock on target bucket; then we can release split lock. + * Acquire share lock on target bucket; then we can release split + * lock. */ _hash_getlock(rel, blkno, HASH_SHARE); @@ -124,7 +125,7 @@ _hash_doinsert(Relation rel, HashItem hitem) /* * no space on this page; check for an overflow page */ - BlockNumber nextblkno = pageopaque->hasho_nextblkno; + BlockNumber nextblkno = pageopaque->hasho_nextblkno; if (BlockNumberIsValid(nextblkno)) { @@ -169,8 +170,8 @@ _hash_doinsert(Relation rel, HashItem hitem) _hash_droplock(rel, blkno, HASH_SHARE); /* - * Write-lock the metapage so we can increment the tuple count. - * After incrementing it, check to see if it's time for a split. + * Write-lock the metapage so we can increment the tuple count. After + * incrementing it, check to see if it's time for a split. */ _hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE); diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c index 740f119bc7a..c02da93dc1e 100644 --- a/src/backend/access/hash/hashovfl.c +++ b/src/backend/access/hash/hashovfl.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.43 2004/08/29 04:12:18 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.44 2004/08/29 05:06:40 momjian Exp $ * * NOTES * Overflow pages look like ordinary relation pages. @@ -41,11 +41,11 @@ bitno_to_blkno(HashMetaPage metap, uint32 ovflbitnum) for (i = 1; i < splitnum && ovflbitnum > metap->hashm_spares[i]; i++) - /* loop */ ; + /* loop */ ; /* - * Convert to absolute page number by adding the number of bucket pages - * that exist before this split point. + * Convert to absolute page number by adding the number of bucket + * pages that exist before this split point. */ return (BlockNumber) ((1 << i) + ovflbitnum); } @@ -79,7 +79,7 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * * Add an overflow page to the bucket whose last page is pointed to by 'buf'. * - * On entry, the caller must hold a pin but no lock on 'buf'. The pin is + * On entry, the caller must hold a pin but no lock on 'buf'. The pin is * dropped before exiting (we assume the caller is not interested in 'buf' * anymore). The returned overflow page will be pinned and write-locked; * it is guaranteed to be empty. 
@@ -88,12 +88,12 @@ blkno_to_bitno(HashMetaPage metap, BlockNumber ovflblkno) * That buffer is returned in the same state. * * The caller must hold at least share lock on the bucket, to ensure that - * no one else tries to compact the bucket meanwhile. This guarantees that + * no one else tries to compact the bucket meanwhile. This guarantees that * 'buf' won't stop being part of the bucket while it's unlocked. * * NB: since this could be executed concurrently by multiple processes, * one should not assume that the returned overflow page will be the - * immediate successor of the originally passed 'buf'. Additional overflow + * immediate successor of the originally passed 'buf'. Additional overflow * pages might have been added to the bucket chain in between. */ Buffer @@ -197,7 +197,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf) /* outer loop iterates once per bitmap page */ for (;;) { - BlockNumber mapblkno; + BlockNumber mapblkno; Page mappage; uint32 last_inpage; @@ -274,9 +274,9 @@ _hash_getovflpage(Relation rel, Buffer metabuf) blkno = bitno_to_blkno(metap, bit); /* - * Adjust hashm_firstfree to avoid redundant searches. But don't - * risk changing it if someone moved it while we were searching - * bitmap pages. + * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * changing it if someone moved it while we were searching bitmap + * pages. */ if (metap->hashm_firstfree == orig_firstfree) metap->hashm_firstfree = bit + 1; @@ -304,9 +304,9 @@ found: blkno = bitno_to_blkno(metap, bit); /* - * Adjust hashm_firstfree to avoid redundant searches. But don't - * risk changing it if someone moved it while we were searching - * bitmap pages. + * Adjust hashm_firstfree to avoid redundant searches. But don't risk + * changing it if someone moved it while we were searching bitmap + * pages. */ if (metap->hashm_firstfree == orig_firstfree) { @@ -381,7 +381,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf) Bucket bucket; /* Get information from the doomed page */ - ovflblkno = BufferGetBlockNumber(ovflbuf); + ovflblkno = BufferGetBlockNumber(ovflbuf); ovflpage = BufferGetPage(ovflbuf); _hash_checkpage(rel, ovflpage, LH_OVERFLOW_PAGE); ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); @@ -396,7 +396,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf) /* * Fix up the bucket chain. this is a doubly-linked list, so we must * fix up the bucket chain members behind and ahead of the overflow - * page being deleted. No concurrency issues since we hold exclusive + * page being deleted. No concurrency issues since we hold exclusive * lock on the entire bucket. */ if (BlockNumberIsValid(prevblkno)) @@ -488,7 +488,8 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno) /* * It is okay to write-lock the new bitmap page while holding metapage - * write lock, because no one else could be contending for the new page. + * write lock, because no one else could be contending for the new + * page. * * There is some loss of concurrency in possibly doing I/O for the new * page while holding the metapage lock, but this path is taken so @@ -654,8 +655,8 @@ _hash_squeezebucket(Relation rel, /* * delete the tuple from the "read" page. PageIndexTupleDelete - * repacks the ItemId array, so 'roffnum' will be "advanced" to - * the "next" ItemId. + * repacks the ItemId array, so 'roffnum' will be "advanced" + * to the "next" ItemId. 
*/ PageIndexTupleDelete(rpage, roffnum); } @@ -667,8 +668,9 @@ _hash_squeezebucket(Relation rel, * Tricky point here: if our read and write pages are adjacent in the * bucket chain, our write lock on wbuf will conflict with * _hash_freeovflpage's attempt to update the sibling links of the - * removed page. However, in that case we are done anyway, so we can - * simply drop the write lock before calling _hash_freeovflpage. + * removed page. However, in that case we are done anyway, so we + * can simply drop the write lock before calling + * _hash_freeovflpage. */ if (PageIsEmpty(rpage)) { diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 787bb9bf621..d3088f50cef 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.45 2004/08/29 04:12:18 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.46 2004/08/29 05:06:40 momjian Exp $ * * NOTES * Postgres hash pages look like ordinary relation pages. The opaque @@ -35,11 +35,11 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, - Bucket obucket, Bucket nbucket, - BlockNumber start_oblkno, - BlockNumber start_nblkno, - uint32 maxbucket, - uint32 highmask, uint32 lowmask); + Bucket obucket, Bucket nbucket, + BlockNumber start_oblkno, + BlockNumber start_nblkno, + uint32 maxbucket, + uint32 highmask, uint32 lowmask); /* @@ -47,7 +47,7 @@ static void _hash_splitbucket(Relation rel, Buffer metabuf, * of the locking rules). However, we can skip taking lmgr locks when the * index is local to the current backend (ie, either temp or new in the * current transaction). No one else can see it, so there's no reason to - * take locks. We still take buffer-level locks, but not lmgr locks. + * take locks. We still take buffer-level locks, but not lmgr locks. */ #define USELOCKING(rel) (!RELATION_IS_LOCAL(rel)) @@ -239,13 +239,13 @@ _hash_metapinit(Relation rel) RelationGetRelationName(rel)); /* - * Determine the target fill factor (tuples per bucket) for this index. - * The idea is to make the fill factor correspond to pages about 3/4ths - * full. We can compute it exactly if the index datatype is fixed-width, - * but for var-width there's some guessing involved. + * Determine the target fill factor (tuples per bucket) for this + * index. The idea is to make the fill factor correspond to pages + * about 3/4ths full. We can compute it exactly if the index datatype + * is fixed-width, but for var-width there's some guessing involved. */ data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid, - RelationGetDescr(rel)->attrs[0]->atttypmod); + RelationGetDescr(rel)->attrs[0]->atttypmod); item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) + sizeof(ItemIdData); /* include the line pointer */ ffactor = (BLCKSZ * 3 / 4) / item_width; @@ -288,8 +288,9 @@ _hash_metapinit(Relation rel) metap->hashm_procid = index_getprocid(rel, 1, HASHPROC); /* - * We initialize the index with two buckets, 0 and 1, occupying physical - * blocks 1 and 2. The first freespace bitmap page is in block 3. + * We initialize the index with two buckets, 0 and 1, occupying + * physical blocks 1 and 2. The first freespace bitmap page is in + * block 3. 
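The fill-factor computation in the _hash_metapinit hunk above aims for bucket pages about three-quarters full. A rough worked example with assumed sizes (illustrative only, not the constants of any particular build):

#include <stdio.h>

int
main(void)
{
    /* Assumed, illustrative sizes only */
    const int   blcksz = 8192;      /* BLCKSZ */
    const int   item_hdr = 8;       /* MAXALIGN(sizeof(HashItemData)), assumed */
    const int   data_width = 8;     /* MAXALIGN(4) for an int4 key, assuming 8-byte alignment */
    const int   itemid = 4;         /* sizeof(ItemIdData), the line pointer */

    int         item_width = item_hdr + data_width + itemid;   /* 20 */
    int         ffactor = (blcksz * 3 / 4) / item_width;       /* 307 tuples per bucket */

    printf("item_width = %d, ffactor = %d\n", item_width, ffactor);
    return 0;
}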
*/ metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */ metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */ @@ -297,7 +298,7 @@ _hash_metapinit(Relation rel) MemSet((char *) metap->hashm_spares, 0, sizeof(metap->hashm_spares)); MemSet((char *) metap->hashm_mapp, 0, sizeof(metap->hashm_mapp)); - metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */ + metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */ metap->hashm_ovflpoint = 1; metap->hashm_firstfree = 0; @@ -319,8 +320,8 @@ _hash_metapinit(Relation rel) } /* - * Initialize first bitmap page. Can't do this until we - * create the first two buckets, else smgr will complain. + * Initialize first bitmap page. Can't do this until we create the + * first two buckets, else smgr will complain. */ _hash_initbitmap(rel, metap, 3); @@ -362,17 +363,18 @@ _hash_expandtable(Relation rel, Buffer metabuf) uint32 lowmask; /* - * Obtain the page-zero lock to assert the right to begin a split - * (see README). + * Obtain the page-zero lock to assert the right to begin a split (see + * README). * * Note: deadlock should be impossible here. Our own backend could only - * be holding bucket sharelocks due to stopped indexscans; those will not - * block other holders of the page-zero lock, who are only interested in - * acquiring bucket sharelocks themselves. Exclusive bucket locks are - * only taken here and in hashbulkdelete, and neither of these operations - * needs any additional locks to complete. (If, due to some flaw in this - * reasoning, we manage to deadlock anyway, it's okay to error out; the - * index will be left in a consistent state.) + * be holding bucket sharelocks due to stopped indexscans; those will + * not block other holders of the page-zero lock, who are only + * interested in acquiring bucket sharelocks themselves. Exclusive + * bucket locks are only taken here and in hashbulkdelete, and neither + * of these operations needs any additional locks to complete. (If, + * due to some flaw in this reasoning, we manage to deadlock anyway, + * it's okay to error out; the index will be left in a consistent + * state.) */ _hash_getlock(rel, 0, HASH_EXCLUSIVE); @@ -383,8 +385,8 @@ _hash_expandtable(Relation rel, Buffer metabuf) _hash_checkpage(rel, (Page) metap, LH_META_PAGE); /* - * Check to see if split is still needed; someone else might have already - * done one while we waited for the lock. + * Check to see if split is still needed; someone else might have + * already done one while we waited for the lock. * * Make sure this stays in sync with_hash_doinsert() */ @@ -394,16 +396,16 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Determine which bucket is to be split, and attempt to lock the old - * bucket. If we can't get the lock, give up. + * bucket. If we can't get the lock, give up. * * The lock protects us against other backends, but not against our own * backend. Must check for active scans separately. * - * Ideally we would lock the new bucket too before proceeding, but if - * we are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping + * Ideally we would lock the new bucket too before proceeding, but if we + * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping * isn't correct yet. For simplicity we update the metapage first and - * then lock. This should be okay because no one else should be trying - * to lock the new bucket yet... + * then lock. This should be okay because no one else should be + * trying to lock the new bucket yet... 
*/ new_bucket = metap->hashm_maxbucket + 1; old_bucket = (new_bucket & metap->hashm_lowmask); @@ -417,7 +419,8 @@ _hash_expandtable(Relation rel, Buffer metabuf) goto fail; /* - * Okay to proceed with split. Update the metapage bucket mapping info. + * Okay to proceed with split. Update the metapage bucket mapping + * info. */ metap->hashm_maxbucket = new_bucket; @@ -431,11 +434,11 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * If the split point is increasing (hashm_maxbucket's log base 2 * increases), we need to adjust the hashm_spares[] array and - * hashm_ovflpoint so that future overflow pages will be created beyond - * this new batch of bucket pages. + * hashm_ovflpoint so that future overflow pages will be created + * beyond this new batch of bucket pages. * - * XXX should initialize new bucket pages to prevent out-of-order - * page creation? Don't wanna do it right here though. + * XXX should initialize new bucket pages to prevent out-of-order page + * creation? Don't wanna do it right here though. */ spare_ndx = _hash_log2(metap->hashm_maxbucket + 1); if (spare_ndx > metap->hashm_ovflpoint) @@ -456,9 +459,10 @@ _hash_expandtable(Relation rel, Buffer metabuf) /* * Copy bucket mapping info now; this saves re-accessing the meta page * inside _hash_splitbucket's inner loop. Note that once we drop the - * split lock, other splits could begin, so these values might be out of - * date before _hash_splitbucket finishes. That's okay, since all it - * needs is to tell which of these two buckets to map hashkeys into. + * split lock, other splits could begin, so these values might be out + * of date before _hash_splitbucket finishes. That's okay, since all + * it needs is to tell which of these two buckets to map hashkeys + * into. */ maxbucket = metap->hashm_maxbucket; highmask = metap->hashm_highmask; @@ -539,8 +543,8 @@ _hash_splitbucket(Relation rel, /* * It should be okay to simultaneously write-lock pages from each - * bucket, since no one else can be trying to acquire buffer lock - * on pages of either bucket. + * bucket, since no one else can be trying to acquire buffer lock on + * pages of either bucket. */ oblkno = start_oblkno; nblkno = start_nblkno; @@ -562,9 +566,9 @@ _hash_splitbucket(Relation rel, nopaque->hasho_filler = HASHO_FILL; /* - * Partition the tuples in the old bucket between the old bucket and the - * new bucket, advancing along the old bucket's overflow bucket chain - * and adding overflow pages to the new bucket as needed. + * Partition the tuples in the old bucket between the old bucket and + * the new bucket, advancing along the old bucket's overflow bucket + * chain and adding overflow pages to the new bucket as needed. */ ooffnum = FirstOffsetNumber; omaxoffnum = PageGetMaxOffsetNumber(opage); @@ -582,9 +586,10 @@ _hash_splitbucket(Relation rel, oblkno = oopaque->hasho_nextblkno; if (!BlockNumberIsValid(oblkno)) break; + /* - * we ran out of tuples on this particular page, but we - * have more overflow pages; advance to next page. + * we ran out of tuples on this particular page, but we have + * more overflow pages; advance to next page. */ _hash_wrtbuf(rel, obuf); @@ -600,8 +605,8 @@ _hash_splitbucket(Relation rel, /* * Re-hash the tuple to determine which bucket it now belongs in. 
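The split in the hunk above follows the usual linear-hashing scheme: the new bucket number is maxbucket + 1, the bucket it splits off from is that number under the low mask, and tuples are then re-hashed under the high mask, falling back to the low mask for buckets that do not exist yet. A hedged toy illustration of that mapping (hypothetical helper, mirroring the masks described in the hunk rather than backend code):

#include <stdio.h>

/*
 * Map a hash value to a bucket, linear-hashing style: take the hash under
 * the larger (high) mask; if that bucket does not exist yet, fall back to
 * the smaller (low) mask.
 */
static unsigned int
key_to_bucket(unsigned int hash, unsigned int maxbucket,
              unsigned int highmask, unsigned int lowmask)
{
    unsigned int bucket = hash & highmask;

    if (bucket > maxbucket)
        bucket = bucket & lowmask;
    return bucket;
}

int
main(void)
{
    /* Before the split: buckets 0..5 exist, lowmask 3, highmask 7. */
    unsigned int maxbucket = 5;
    unsigned int lowmask = 3;
    unsigned int highmask = 7;

    /* The split creates bucket 6 and splits old bucket 6 & lowmask = 2. */
    unsigned int new_bucket = maxbucket + 1;
    unsigned int old_bucket = new_bucket & lowmask;

    printf("split bucket %u into %u and %u\n", old_bucket, old_bucket, new_bucket);

    /* After the split, a key hashing to 14 (14 & 7 = 6) lands in the new bucket. */
    printf("hash 14 -> bucket %u\n",
           key_to_bucket(14, new_bucket, highmask, lowmask));
    return 0;
}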
* - * It is annoying to call the hash function while holding locks, - * but releasing and relocking the page for each tuple is unappealing + * It is annoying to call the hash function while holding locks, but + * releasing and relocking the page for each tuple is unappealing * too. */ hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum)); @@ -666,10 +671,11 @@ _hash_splitbucket(Relation rel, } /* - * We're at the end of the old bucket chain, so we're done partitioning - * the tuples. Before quitting, call _hash_squeezebucket to ensure the - * tuples remaining in the old bucket (including the overflow pages) are - * packed as tightly as possible. The new bucket is already tight. + * We're at the end of the old bucket chain, so we're done + * partitioning the tuples. Before quitting, call _hash_squeezebucket + * to ensure the tuples remaining in the old bucket (including the + * overflow pages) are packed as tightly as possible. The new bucket + * is already tight. */ _hash_wrtbuf(rel, obuf); _hash_wrtbuf(rel, nbuf); diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c index 2fc24dd9e12..16d2a77d49b 100644 --- a/src/backend/access/hash/hashscan.c +++ b/src/backend/access/hash/hashscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.36 2004/08/29 04:12:18 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.37 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -44,9 +44,9 @@ ReleaseResources_hash(void) HashScanList next; /* - * Note: this should be a no-op during normal query shutdown. - * However, in an abort situation ExecutorEnd is not called and so - * there may be open index scans to clean up. + * Note: this should be a no-op during normal query shutdown. However, + * in an abort situation ExecutorEnd is not called and so there may be + * open index scans to clean up. */ prev = NULL; diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c index 76ad5d31849..daaff4adc50 100644 --- a/src/backend/access/hash/hashsearch.c +++ b/src/backend/access/hash/hashsearch.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.36 2004/08/29 04:12:18 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.37 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -137,12 +137,13 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) * We do not support hash scans with no index qualification, because * we would have to read the whole index rather than just one bucket. * That creates a whole raft of problems, since we haven't got a - * practical way to lock all the buckets against splits or compactions. + * practical way to lock all the buckets against splits or + * compactions. */ if (scan->numberOfKeys < 1) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("hash indexes do not support whole-index scans"))); + errmsg("hash indexes do not support whole-index scans"))); /* * If the constant in the index qual is NULL, assume it cannot match @@ -182,7 +183,8 @@ _hash_first(IndexScanDesc scan, ScanDirection dir) _hash_relbuf(rel, metabuf); /* - * Acquire share lock on target bucket; then we can release split lock. + * Acquire share lock on target bucket; then we can release split + * lock. 
*/ _hash_getlock(rel, blkno, HASH_SHARE); @@ -287,9 +289,8 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) while (offnum > maxoff) { /* - * either this page is empty - * (maxoff == InvalidOffsetNumber) - * or we ran off the end. + * either this page is empty (maxoff == + * InvalidOffsetNumber) or we ran off the end. */ _hash_readnext(rel, &buf, &page, &opaque); if (BufferIsValid(buf)) @@ -315,15 +316,12 @@ _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) while (offnum < FirstOffsetNumber) { /* - * either this page is empty - * (offnum == InvalidOffsetNumber) - * or we ran off the end. + * either this page is empty (offnum == + * InvalidOffsetNumber) or we ran off the end. */ _hash_readprev(rel, &buf, &page, &opaque); if (BufferIsValid(buf)) - { maxoff = offnum = PageGetMaxOffsetNumber(page); - } else { /* end of bucket */ diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c index 3fb04e77d86..bf9999dc92b 100644 --- a/src/backend/access/hash/hashutil.c +++ b/src/backend/access/hash/hashutil.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.39 2004/08/29 04:12:18 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.40 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -113,6 +113,7 @@ void _hash_checkpage(Relation rel, Page page, int flags) { Assert(page); + /* * When checking the metapage, always verify magic number and version. */ diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 4f965eb2bf5..6dd0c357fbb 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.172 2004/08/29 04:12:20 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.173 2004/08/29 05:06:40 momjian Exp $ * * * INTERFACE ROUTINES @@ -75,9 +75,9 @@ initscan(HeapScanDesc scan, ScanKey key) /* * Determine the number of blocks we have to scan. * - * It is sufficient to do this once at scan start, since any tuples - * added while the scan is in progress will be invisible to my - * transaction anyway... + * It is sufficient to do this once at scan start, since any tuples added + * while the scan is in progress will be invisible to my transaction + * anyway... */ scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd); @@ -1141,12 +1141,13 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid) tup->t_data->t_infomask |= HEAP_XMAX_INVALID; HeapTupleHeaderSetXmin(tup->t_data, GetCurrentTransactionId()); HeapTupleHeaderSetCmin(tup->t_data, cid); - HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */ + HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */ tup->t_tableOid = relation->rd_id; /* * If the new tuple is too big for storage or contains already toasted - * out-of-line attributes from some other relation, invoke the toaster. + * out-of-line attributes from some other relation, invoke the + * toaster. 
*/ if (HeapTupleHasExternal(tup) || (MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD)) @@ -1273,7 +1274,7 @@ simple_heap_insert(Relation relation, HeapTuple tup) */ int heap_delete(Relation relation, ItemPointer tid, - ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait) + ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait) { ItemId lp; HeapTupleData tp; @@ -1404,9 +1405,9 @@ l1: /* * If the tuple has toasted out-of-line attributes, we need to delete - * those items too. We have to do this before WriteBuffer because we need - * to look at the contents of the tuple, but it's OK to release the - * context lock on the buffer first. + * those items too. We have to do this before WriteBuffer because we + * need to look at the contents of the tuple, but it's OK to release + * the context lock on the buffer first. */ if (HeapTupleHasExternal(&tp)) heap_tuple_toast_attrs(relation, NULL, &tp); @@ -1443,7 +1444,7 @@ simple_heap_delete(Relation relation, ItemPointer tid) result = heap_delete(relation, tid, &ctid, GetCurrentCommandId(), SnapshotAny, - true /* wait for commit */); + true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: @@ -1490,7 +1491,7 @@ simple_heap_delete(Relation relation, ItemPointer tid) */ int heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, - ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait) + ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait) { ItemId lp; HeapTupleData oldtup; @@ -1804,7 +1805,7 @@ simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup) result = heap_update(relation, otid, tup, &ctid, GetCurrentCommandId(), SnapshotAny, - true /* wait for commit */); + true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: @@ -2198,8 +2199,8 @@ heap_xlog_newpage(bool redo, XLogRecPtr lsn, XLogRecord *record) Page page; /* - * Note: the NEWPAGE log record is used for both heaps and indexes, - * so do not do anything that assumes we are touching a heap. + * Note: the NEWPAGE log record is used for both heaps and indexes, so + * do not do anything that assumes we are touching a heap. */ if (!redo || (record->xl_info & XLR_BKP_BLOCK_1)) @@ -2668,7 +2669,7 @@ static void out_target(char *buf, xl_heaptid *target) { sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u", - target->node.spcNode, target->node.dbNode, target->node.relNode, + target->node.spcNode, target->node.dbNode, target->node.relNode, ItemPointerGetBlockNumber(&(target->tid)), ItemPointerGetOffsetNumber(&(target->tid))); } diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c index 1a3b4ef896a..fe389991415 100644 --- a/src/backend/access/heap/tuptoaster.c +++ b/src/backend/access/heap/tuptoaster.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.44 2004/08/29 04:12:20 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.45 2004/08/29 05:06:40 momjian Exp $ * * * INTERFACE ROUTINES @@ -288,13 +288,13 @@ toast_delete(Relation rel, HeapTuple oldtup) /* * Get the tuple descriptor and break down the tuple into fields. * - * NOTE: it's debatable whether to use heap_deformtuple() here or - * just heap_getattr() only the varlena columns. The latter could - * win if there are few varlena columns and many non-varlena ones. 
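
The tuptoaster comment being reflowed around this point weighs deforming the whole tuple once against fetching each varlena column individually. A small, self-contained illustration of why the per-column route degrades to quadratic work once earlier columns have variable width (all names and widths below are made up for the example):

#include <stdio.h>

#define NATTS 8

/* Pretend on-disk widths; with variable-width columns the offset of
 * attribute i is only known by walking over attributes 0..i-1. */
static const int att_width[NATTS] = {4, 17, 8, 33, 4, 25, 8, 12};

static int  walk_steps = 0;     /* earlier attributes stepped over */

/* Locate attribute i from scratch, the way an isolated fetch must. */
static int
attr_offset(int i)
{
    int         off = 0;
    int         j;

    for (j = 0; j < i; j++)
    {
        off += att_width[j];
        walk_steps++;
    }
    return off;
}

int
main(void)
{
    int         i;
    int         off;

    /* Fetch every attribute independently: 0 + 1 + ... + (N-1) steps. */
    for (i = 0; i < NATTS; i++)
        (void) attr_offset(i);
    printf("independent fetches walked %d attribute headers\n", walk_steps);

    /* One deforming pass touches each attribute exactly once: N steps. */
    walk_steps = 0;
    off = 0;
    for (i = 0; i < NATTS; i++)
    {
        off += att_width[i];
        walk_steps++;
    }
    printf("single deforming pass walked %d headers (end offset %d)\n",
           walk_steps, off);
    return 0;
}
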
- * However, heap_deformtuple costs only O(N) while the heap_getattr - * way would cost O(N^2) if there are many varlena columns, so it - * seems better to err on the side of linear cost. (We won't even - * be here unless there's at least one varlena column, by the way.) + * NOTE: it's debatable whether to use heap_deformtuple() here or just + * heap_getattr() only the varlena columns. The latter could win if + * there are few varlena columns and many non-varlena ones. However, + * heap_deformtuple costs only O(N) while the heap_getattr way would + * cost O(N^2) if there are many varlena columns, so it seems better + * to err on the side of linear cost. (We won't even be here unless + * there's at least one varlena column, by the way.) */ tupleDesc = rel->rd_att; att = tupleDesc->attrs; @@ -311,7 +311,7 @@ toast_delete(Relation rel, HeapTuple oldtup) { if (att[i]->attlen == -1) { - Datum value = toast_values[i]; + Datum value = toast_values[i]; if (toast_nulls[i] != 'n' && VARATT_IS_EXTERNAL(value)) toast_delete_datum(rel, value); @@ -791,7 +791,7 @@ toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup) * * If a Datum is of composite type, "flatten" it to contain no toasted fields. * This must be invoked on any potentially-composite field that is to be - * inserted into a tuple. Doing this preserves the invariant that toasting + * inserted into a tuple. Doing this preserves the invariant that toasting * goes only one level deep in a tuple. * ---------- */ @@ -1105,7 +1105,7 @@ toast_delete_datum(Relation rel, Datum value) ScanKeyInit(&toastkey, (AttrNumber) 1, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); + ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); /* * Find the chunks by index @@ -1176,7 +1176,7 @@ toast_fetch_datum(varattrib *attr) ScanKeyInit(&toastkey, (AttrNumber) 1, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); + ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); /* * Read the chunks by index @@ -1330,7 +1330,7 @@ toast_fetch_datum_slice(varattrib *attr, int32 sliceoffset, int32 length) ScanKeyInit(&toastkey[0], (AttrNumber) 1, BTEqualStrategyNumber, F_OIDEQ, - ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); + ObjectIdGetDatum(attr->va_content.va_external.va_valueid)); /* * Use equality condition for one chunk, a range condition otherwise: diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c index 71266a5bfb0..815e207fb2f 100644 --- a/src/backend/access/nbtree/nbtinsert.c +++ b/src/backend/access/nbtree/nbtinsert.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.115 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.116 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -200,26 +200,26 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel, * We can skip items that are marked killed. * * Formerly, we applied _bt_isequal() before checking the kill - * flag, so as to fall out of the item loop as soon as possible. - * However, in the presence of heavy update activity an index - * may contain many killed items with the same key; running - * _bt_isequal() on each killed item gets expensive. 
Furthermore - * it is likely that the non-killed version of each key appears - * first, so that we didn't actually get to exit any sooner anyway. - * So now we just advance over killed items as quickly as we can. - * We only apply _bt_isequal() when we get to a non-killed item or - * the end of the page. + * flag, so as to fall out of the item loop as soon as + * possible. However, in the presence of heavy update activity + * an index may contain many killed items with the same key; + * running _bt_isequal() on each killed item gets expensive. + * Furthermore it is likely that the non-killed version of + * each key appears first, so that we didn't actually get to + * exit any sooner anyway. So now we just advance over killed + * items as quickly as we can. We only apply _bt_isequal() + * when we get to a non-killed item or the end of the page. */ if (!ItemIdDeleted(curitemid)) { /* - * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's - * how we handling NULLs - and so we must not use _bt_compare - * in real comparison, but only for ordering/finding items on - * pages. - vadim 03/24/97 + * _bt_compare returns 0 for (1,NULL) and (1,NULL) - + * this's how we handling NULLs - and so we must not use + * _bt_compare in real comparison, but only for + * ordering/finding items on pages. - vadim 03/24/97 */ if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey)) - break; /* we're past all the equal tuples */ + break; /* we're past all the equal tuples */ /* okay, we gotta fetch the heap tuple ... */ cbti = (BTItem) PageGetItem(page, curitemid); diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c index 7175fd5b2c3..fcdb45d952f 100644 --- a/src/backend/access/nbtree/nbtpage.c +++ b/src/backend/access/nbtree/nbtpage.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.79 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.80 2004/08/29 05:06:40 momjian Exp $ * * NOTES * Postgres btree pages look like ordinary relation pages. The opaque @@ -276,8 +276,8 @@ _bt_getroot(Relation rel, int access) rootlevel = metad->btm_fastlevel; /* - * We are done with the metapage; arrange to release it via - * first _bt_relandgetbuf call + * We are done with the metapage; arrange to release it via first + * _bt_relandgetbuf call */ rootbuf = metabuf; @@ -368,8 +368,8 @@ _bt_gettrueroot(Relation rel) rootlevel = metad->btm_level; /* - * We are done with the metapage; arrange to release it via - * first _bt_relandgetbuf call + * We are done with the metapage; arrange to release it via first + * _bt_relandgetbuf call */ rootbuf = metabuf; @@ -433,21 +433,22 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access) * page could have been re-used between the time the last VACUUM * scanned it and the time the VACUUM made its FSM updates.) * - * In fact, it's worse than that: we can't even assume that it's - * safe to take a lock on the reported page. If somebody else - * has a lock on it, or even worse our own caller does, we could + * In fact, it's worse than that: we can't even assume that it's safe + * to take a lock on the reported page. If somebody else has a + * lock on it, or even worse our own caller does, we could * deadlock. (The own-caller scenario is actually not improbable. * Consider an index on a serial or timestamp column. 
Nearly all * splits will be at the rightmost page, so it's entirely likely - * that _bt_split will call us while holding a lock on the page most - * recently acquired from FSM. A VACUUM running concurrently with - * the previous split could well have placed that page back in FSM.) + * that _bt_split will call us while holding a lock on the page + * most recently acquired from FSM. A VACUUM running concurrently + * with the previous split could well have placed that page back + * in FSM.) * * To get around that, we ask for only a conditional lock on the - * reported page. If we fail, then someone else is using the page, - * and we may reasonably assume it's not free. (If we happen to be - * wrong, the worst consequence is the page will be lost to use till - * the next VACUUM, which is no big problem.) + * reported page. If we fail, then someone else is using the + * page, and we may reasonably assume it's not free. (If we + * happen to be wrong, the worst consequence is the page will be + * lost to use till the next VACUUM, which is no big problem.) */ for (;;) { diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c index 15dc433d112..f75fde4c9f4 100644 --- a/src/backend/access/nbtree/nbtsearch.c +++ b/src/backend/access/nbtree/nbtsearch.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.88 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.89 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -155,15 +155,16 @@ _bt_moveright(Relation rel, opaque = (BTPageOpaque) PageGetSpecialPointer(page); /* - * When nextkey = false (normal case): if the scan key that brought us to - * this page is > the high key stored on the page, then the page has split - * and we need to move right. (If the scan key is equal to the high key, - * we might or might not need to move right; have to scan the page first - * anyway.) + * When nextkey = false (normal case): if the scan key that brought us + * to this page is > the high key stored on the page, then the page + * has split and we need to move right. (If the scan key is equal to + * the high key, we might or might not need to move right; have to + * scan the page first anyway.) * * When nextkey = true: move right if the scan key is >= page's high key. * - * The page could even have split more than once, so scan as far as needed. + * The page could even have split more than once, so scan as far as + * needed. * * We also have to move right if we followed a link that brought us to a * dead page. @@ -253,13 +254,11 @@ _bt_binsrch(Relation rel, * Binary search to find the first key on the page >= scan key, or * first key > scankey when nextkey is true. * - * For nextkey=false (cmpval=1), the loop invariant is: all slots - * before 'low' are < scan key, all slots at or after 'high' - * are >= scan key. + * For nextkey=false (cmpval=1), the loop invariant is: all slots before + * 'low' are < scan key, all slots at or after 'high' are >= scan key. * - * For nextkey=true (cmpval=0), the loop invariant is: all slots - * before 'low' are <= scan key, all slots at or after 'high' - * are > scan key. + * For nextkey=true (cmpval=0), the loop invariant is: all slots before + * 'low' are <= scan key, all slots at or after 'high' are > scan key. * * We can fall out when high == low. 
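
The _bt_binsrch comment above states the loop invariant in terms of 'low', 'high' and a nextkey flag. Below is a standalone sketch over a plain sorted int array (my own simplified stand-in, not the real page/ScanKey machinery) that keeps the same invariant and returns the first slot >= the probe, or the first slot > it when nextkey is set.

#include <stdio.h>

/*
 * Return the first index whose key is >= probe (nextkey == 0) or > probe
 * (nextkey == 1).  Invariant: every slot before 'low' compares before the
 * target position, every slot at or after 'high' compares at or after it.
 * The answer can be nkeys, i.e. "last slot + 1".
 */
static int
binsrch_first(const int *keys, int nkeys, int probe, int nextkey)
{
    int         low = 0;
    int         high = nkeys;

    while (high > low)
    {
        int         mid = low + (high - low) / 2;
        int         key_is_before;

        if (nextkey)
            key_is_before = (keys[mid] <= probe);   /* want first > probe */
        else
            key_is_before = (keys[mid] < probe);    /* want first >= probe */

        if (key_is_before)
            low = mid + 1;
        else
            high = mid;
    }
    return low;                 /* == high here */
}

int
main(void)
{
    int         keys[] = {2, 4, 4, 7, 9};

    printf("first >= 4 at %d, first > 4 at %d, first >= 10 at %d\n",
           binsrch_first(keys, 5, 4, 0),
           binsrch_first(keys, 5, 4, 1),
           binsrch_first(keys, 5, 10, 0));
    return 0;
}
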
*/ @@ -285,15 +284,15 @@ _bt_binsrch(Relation rel, * At this point we have high == low, but be careful: they could point * past the last slot on the page. * - * On a leaf page, we always return the first key >= scan key (resp. - * > scan key), which could be the last slot + 1. + * On a leaf page, we always return the first key >= scan key (resp. > + * scan key), which could be the last slot + 1. */ if (P_ISLEAF(opaque)) return low; /* - * On a non-leaf page, return the last key < scan key (resp. <= scan key). - * There must be one if _bt_compare() is playing by the rules. + * On a non-leaf page, return the last key < scan key (resp. <= scan + * key). There must be one if _bt_compare() is playing by the rules. */ Assert(low > P_FIRSTDATAKEY(opaque)); @@ -382,10 +381,10 @@ _bt_compare(Relation rel, { /* * The sk_func needs to be passed the index value as left arg - * and the sk_argument as right arg (they might be of different - * types). Since it is convenient for callers to think of - * _bt_compare as comparing the scankey to the index item, - * we have to flip the sign of the comparison result. + * and the sk_argument as right arg (they might be of + * different types). Since it is convenient for callers to + * think of _bt_compare as comparing the scankey to the index + * item, we have to flip the sign of the comparison result. * * Note: curious-looking coding is to avoid overflow if * comparison function returns INT_MIN. There is no risk of @@ -497,7 +496,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) bool goback; bool continuescan; ScanKey scankeys; - ScanKey *startKeys = NULL; + ScanKey *startKeys = NULL; int keysCount = 0; int i; StrategyNumber strat_total; @@ -521,7 +520,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) * We want to identify the keys that can be used as starting boundaries; * these are =, >, or >= keys for a forward scan or =, <, <= keys for * a backwards scan. We can use keys for multiple attributes so long as - * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept + * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept * a > or < boundary or find an attribute with no boundary (which can be * thought of as the same as "> -infinity"), we can't use keys for any * attributes to its right, because it would break our simplistic notion @@ -554,13 +553,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur; startKeys = (ScanKey *) palloc(so->numberOfKeys * sizeof(ScanKey)); + /* - * chosen is the so-far-chosen key for the current attribute, if any. - * We don't cast the decision in stone until we reach keys for the - * next attribute. + * chosen is the so-far-chosen key for the current attribute, if + * any. We don't cast the decision in stone until we reach keys + * for the next attribute. */ curattr = 1; chosen = NULL; + /* * Loop iterates from 0 to numberOfKeys inclusive; we use the last * pass to handle after-last-key processing. Actual exit from the @@ -578,8 +579,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) if (chosen == NULL) break; startKeys[keysCount++] = chosen; + /* - * Adjust strat_total, and quit if we have stored a > or < key. + * Adjust strat_total, and quit if we have stored a > or < + * key. */ strat = chosen->sk_strategy; if (strat != BTEqualStrategyNumber) @@ -589,11 +592,13 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) strat == BTLessStrategyNumber) break; } + /* * Done if that was the last attribute. 
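
One hunk above reflows the note that _bt_compare's "curious-looking coding" exists to avoid overflow when a comparison support function returns INT_MIN. The point generalizes: negating INT_MIN is undefined in C, so a sign flip should be written as an explicit mapping rather than "-result". A minimal standalone sketch:

#include <limits.h>
#include <stdio.h>

/*
 * Flip the sense of a three-way comparison result without computing
 * "-cmp", which is undefined behaviour when cmp == INT_MIN.
 */
static int
flip_comparison(int cmp)
{
    if (cmp < 0)
        return 1;
    if (cmp > 0)
        return -1;
    return 0;
}

int
main(void)
{
    printf("flip(INT_MIN) = %d, flip(42) = %d, flip(0) = %d\n",
           flip_comparison(INT_MIN), flip_comparison(42), flip_comparison(0));
    return 0;
}
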
*/ if (i >= so->numberOfKeys) break; + /* * Reset for next attr, which should be in sequence. */ @@ -646,8 +651,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) ScanKey cur = startKeys[i]; /* - * _bt_preprocess_keys disallows it, but it's place to add some code - * later + * _bt_preprocess_keys disallows it, but it's place to add some + * code later */ if (cur->sk_flags & SK_ISNULL) { @@ -656,10 +661,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) elog(ERROR, "btree doesn't support is(not)null, yet"); return false; } + /* * If scankey operator is of default subtype, we can use the - * cached comparison procedure; otherwise gotta look it up in - * the catalogs. + * cached comparison procedure; otherwise gotta look it up in the + * catalogs. */ if (cur->sk_subtype == InvalidOid) { @@ -695,43 +701,46 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) /* * Examine the selected initial-positioning strategy to determine - * exactly where we need to start the scan, and set flag variables - * to control the code below. + * exactly where we need to start the scan, and set flag variables to + * control the code below. * - * If nextkey = false, _bt_search and _bt_binsrch will locate the - * first item >= scan key. If nextkey = true, they will locate the - * first item > scan key. + * If nextkey = false, _bt_search and _bt_binsrch will locate the first + * item >= scan key. If nextkey = true, they will locate the first + * item > scan key. * - * If goback = true, we will then step back one item, while if - * goback = false, we will start the scan on the located item. + * If goback = true, we will then step back one item, while if goback = + * false, we will start the scan on the located item. * * it's yet other place to add some code later for is(not)null ... */ switch (strat_total) { case BTLessStrategyNumber: + /* - * Find first item >= scankey, then back up one to arrive at last - * item < scankey. (Note: this positioning strategy is only used - * for a backward scan, so that is always the correct starting - * position.) + * Find first item >= scankey, then back up one to arrive at + * last item < scankey. (Note: this positioning strategy is + * only used for a backward scan, so that is always the + * correct starting position.) */ nextkey = false; goback = true; break; case BTLessEqualStrategyNumber: + /* - * Find first item > scankey, then back up one to arrive at last - * item <= scankey. (Note: this positioning strategy is only used - * for a backward scan, so that is always the correct starting - * position.) + * Find first item > scankey, then back up one to arrive at + * last item <= scankey. (Note: this positioning strategy is + * only used for a backward scan, so that is always the + * correct starting position.) */ nextkey = true; goback = true; break; case BTEqualStrategyNumber: + /* * If a backward scan was specified, need to start with last * equal item not first one. @@ -739,8 +748,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) if (ScanDirectionIsBackward(dir)) { /* - * This is the same as the <= strategy. We will check - * at the end whether the found item is actually =. + * This is the same as the <= strategy. We will check at + * the end whether the found item is actually =. */ nextkey = true; goback = true; @@ -748,8 +757,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) else { /* - * This is the same as the >= strategy. We will check - * at the end whether the found item is actually =. + * This is the same as the >= strategy. 
We will check at + * the end whether the found item is actually =. */ nextkey = false; goback = false; @@ -757,18 +766,20 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) break; case BTGreaterEqualStrategyNumber: + /* - * Find first item >= scankey. (This is only used for - * forward scans.) + * Find first item >= scankey. (This is only used for forward + * scans.) */ nextkey = false; goback = false; break; case BTGreaterStrategyNumber: + /* - * Find first item > scankey. (This is only used for - * forward scans.) + * Find first item > scankey. (This is only used for forward + * scans.) */ nextkey = true; goback = false; @@ -814,23 +825,23 @@ _bt_first(IndexScanDesc scan, ScanDirection dir) pfree(scankeys); /* - * If nextkey = false, we are positioned at the first item >= scan key, - * or possibly at the end of a page on which all the existing items are - * less than the scan key and we know that everything on later pages - * is greater than or equal to scan key. + * If nextkey = false, we are positioned at the first item >= scan + * key, or possibly at the end of a page on which all the existing + * items are less than the scan key and we know that everything on + * later pages is greater than or equal to scan key. * - * If nextkey = true, we are positioned at the first item > scan key, - * or possibly at the end of a page on which all the existing items are + * If nextkey = true, we are positioned at the first item > scan key, or + * possibly at the end of a page on which all the existing items are * less than or equal to the scan key and we know that everything on * later pages is greater than scan key. * * The actually desired starting point is either this item or the prior - * one, or in the end-of-page case it's the first item on the next page - * or the last item on this page. We apply _bt_step if needed to get to - * the right place. + * one, or in the end-of-page case it's the first item on the next + * page or the last item on this page. We apply _bt_step if needed to + * get to the right place. * - * If _bt_step fails (meaning we fell off the end of the index in - * one direction or the other), then there are no matches so we just + * If _bt_step fails (meaning we fell off the end of the index in one + * direction or the other), then there are no matches so we just * return false. */ if (goback) @@ -1292,7 +1303,8 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir) itup = &(btitem->bti_itup); /* - * Okay, we are on the first or last tuple. Does it pass all the quals? + * Okay, we are on the first or last tuple. Does it pass all the + * quals? */ if (_bt_checkkeys(scan, itup, dir, &continuescan)) { diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c index ac251a4ee4e..98cdccb3bac 100644 --- a/src/backend/access/nbtree/nbtsort.c +++ b/src/backend/access/nbtree/nbtsort.c @@ -41,11 +41,11 @@ * * Since the index will never be used unless it is completely built, * from a crash-recovery point of view there is no need to WAL-log the - * steps of the build. After completing the index build, we can just sync + * steps of the build. After completing the index build, we can just sync * the whole file to disk using smgrimmedsync() before exiting this module. * This can be seen to be sufficient for crash recovery by considering that * it's effectively equivalent to what would happen if a CHECKPOINT occurred - * just after the index build. However, it is clearly not sufficient if the + * just after the index build. 
However, it is clearly not sufficient if the * DBA is using the WAL log for PITR or replication purposes, since another * machine would not be able to reconstruct the index from WAL. Therefore, * we log the completed index pages to WAL if and only if WAL archiving is @@ -56,7 +56,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.87 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.88 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -98,7 +98,7 @@ struct BTSpool typedef struct BTPageState { Page btps_page; /* workspace for page building */ - BlockNumber btps_blkno; /* block # to write this page at */ + BlockNumber btps_blkno; /* block # to write this page at */ BTItem btps_minkey; /* copy of minimum key (first item) on * page */ OffsetNumber btps_lastoff; /* last item offset loaded */ @@ -114,10 +114,10 @@ typedef struct BTPageState typedef struct BTWriteState { Relation index; - bool btws_use_wal; /* dump pages to WAL? */ - BlockNumber btws_pages_alloced; /* # pages allocated */ - BlockNumber btws_pages_written; /* # pages written out */ - Page btws_zeropage; /* workspace for filling zeroes */ + bool btws_use_wal; /* dump pages to WAL? */ + BlockNumber btws_pages_alloced; /* # pages allocated */ + BlockNumber btws_pages_written; /* # pages written out */ + Page btws_zeropage; /* workspace for filling zeroes */ } BTWriteState; @@ -136,7 +136,7 @@ static void _bt_sortaddtup(Page page, Size itemsize, static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti); static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state); static void _bt_load(BTWriteState *wstate, - BTSpool *btspool, BTSpool *btspool2); + BTSpool *btspool, BTSpool *btspool2); /* @@ -157,12 +157,12 @@ _bt_spoolinit(Relation index, bool isunique, bool isdead) btspool->isunique = isunique; /* - * We size the sort area as maintenance_work_mem rather than work_mem to - * speed index creation. This should be OK since a single backend can't - * run multiple index creations in parallel. Note that creation of a - * unique index actually requires two BTSpool objects. We expect that the - * second one (for dead tuples) won't get very full, so we give it only - * work_mem. + * We size the sort area as maintenance_work_mem rather than work_mem + * to speed index creation. This should be OK since a single backend + * can't run multiple index creations in parallel. Note that creation + * of a unique index actually requires two BTSpool objects. We expect + * that the second one (for dead tuples) won't get very full, so we + * give it only work_mem. */ btKbytes = isdead ? work_mem : maintenance_work_mem; btspool->sortstate = tuplesort_begin_index(index, isunique, @@ -205,7 +205,7 @@ _bt_spool(BTItem btitem, BTSpool *btspool) void _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) { - BTWriteState wstate; + BTWriteState wstate; #ifdef BTREE_BUILD_STATS if (log_btree_build_stats) @@ -220,6 +220,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) tuplesort_performsort(btspool2->sortstate); wstate.index = btspool->index; + /* * We need to log index creation in WAL iff WAL archiving is enabled * AND it's not a temp index. 
@@ -229,7 +230,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) /* reserve the metapage */ wstate.btws_pages_alloced = BTREE_METAPAGE + 1; wstate.btws_pages_written = 0; - wstate.btws_zeropage = NULL; /* until needed */ + wstate.btws_zeropage = NULL; /* until needed */ _bt_load(&wstate, btspool, btspool2); } @@ -246,7 +247,7 @@ _bt_leafbuild(BTSpool *btspool, BTSpool *btspool2) static Page _bt_blnewpage(uint32 level) { - Page page; + Page page; BTPageOpaque opaque; page = (Page) palloc(BLCKSZ); @@ -313,8 +314,8 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) * If we have to write pages nonsequentially, fill in the space with * zeroes until we come back and overwrite. This is not logically * necessary on standard Unix filesystems (unwritten space will read - * as zeroes anyway), but it should help to avoid fragmentation. - * The dummy pages aren't WAL-logged though. + * as zeroes anyway), but it should help to avoid fragmentation. The + * dummy pages aren't WAL-logged though. */ while (blkno > wstate->btws_pages_written) { @@ -326,9 +327,9 @@ _bt_blwritepage(BTWriteState *wstate, Page page, BlockNumber blkno) } /* - * Now write the page. We say isTemp = true even if it's not a - * temp index, because there's no need for smgr to schedule an fsync - * for this write; we'll do it ourselves before ending the build. + * Now write the page. We say isTemp = true even if it's not a temp + * index, because there's no need for smgr to schedule an fsync for + * this write; we'll do it ourselves before ending the build. */ smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true); @@ -468,7 +469,7 @@ static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) { Page npage; - BlockNumber nblkno; + BlockNumber nblkno; OffsetNumber last_off; Size pgspc; Size btisz; @@ -506,7 +507,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) * already. Finish off the page and write it out. */ Page opage = npage; - BlockNumber oblkno = nblkno; + BlockNumber oblkno = nblkno; ItemId ii; ItemId hii; BTItem obti; @@ -539,8 +540,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); /* - * Link the old page into its parent, using its minimum key. If - * we don't have a parent, we have to create one; this adds a new + * Link the old page into its parent, using its minimum key. If we + * don't have a parent, we have to create one; this adds a new * btree level. */ if (state->btps_next == NULL) @@ -572,8 +573,8 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti) } /* - * Write out the old page. We never need to touch it again, - * so we can free the opage workspace too. + * Write out the old page. We never need to touch it again, so we + * can free the opage workspace too. */ _bt_blwritepage(wstate, opage, oblkno); @@ -613,7 +614,7 @@ static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) { BTPageState *s; - BlockNumber rootblkno = P_NONE; + BlockNumber rootblkno = P_NONE; uint32 rootlevel = 0; Page metapage; @@ -663,9 +664,9 @@ _bt_uppershutdown(BTWriteState *wstate, BTPageState *state) /* * As the last step in the process, construct the metapage and make it - * point to the new root (unless we had no data at all, in which case it's - * set to point to "P_NONE"). This changes the index to the "valid" - * state by filling in a valid magic number in the metapage. 
+ * point to the new root (unless we had no data at all, in which case + * it's set to point to "P_NONE"). This changes the index to the + * "valid" state by filling in a valid magic number in the metapage. */ metapage = (Page) palloc(BLCKSZ); _bt_initmetapage(metapage, rootblkno, rootlevel); @@ -744,7 +745,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) compare = DatumGetInt32(FunctionCall2(&entry->sk_func, attrDatum1, - attrDatum2)); + attrDatum2)); if (compare > 0) { load1 = false; @@ -768,7 +769,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) if (should_free) pfree((void *) bti); bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, - true, &should_free); + true, &should_free); } else { @@ -776,7 +777,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) if (should_free2) pfree((void *) bti2); bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate, - true, &should_free2); + true, &should_free2); } } _bt_freeskey(indexScanKey); @@ -785,7 +786,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) { /* merge is unnecessary */ while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate, - true, &should_free)) != NULL) + true, &should_free)) != NULL) { /* When we see first tuple, create first index page */ if (state == NULL) @@ -802,18 +803,18 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2) /* * If the index isn't temp, we must fsync it down to disk before it's - * safe to commit the transaction. (For a temp index we don't care + * safe to commit the transaction. (For a temp index we don't care * since the index will be uninteresting after a crash anyway.) * - * It's obvious that we must do this when not WAL-logging the build. - * It's less obvious that we have to do it even if we did WAL-log the - * index pages. The reason is that since we're building outside - * shared buffers, a CHECKPOINT occurring during the build has no way - * to flush the previously written data to disk (indeed it won't know - * the index even exists). A crash later on would replay WAL from the + * It's obvious that we must do this when not WAL-logging the build. It's + * less obvious that we have to do it even if we did WAL-log the index + * pages. The reason is that since we're building outside shared + * buffers, a CHECKPOINT occurring during the build has no way to + * flush the previously written data to disk (indeed it won't know the + * index even exists). A crash later on would replay WAL from the * checkpoint, therefore it wouldn't replay our earlier WAL entries. - * If we do not fsync those pages here, they might still not be on disk - * when the crash occurs. + * If we do not fsync those pages here, they might still not be on + * disk when the crash occurs. 
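
The comment just above argues that pages written outside shared buffers must be fsync'd by the build itself before commit, because a checkpoint never sees them. A self-contained POSIX sketch of that ordering: write a few zero pages through the ordinary file API, then force them to disk before reporting success (the file name and page count are arbitrary for the example):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define PAGE_SIZE 8192

int
main(void)
{
    char        page[PAGE_SIZE];
    int         fd;
    int         blkno;

    memset(page, 0, sizeof(page));
    fd = open("sketch_index_file", O_RDWR | O_CREAT | O_TRUNC, 0600);
    if (fd < 0)
    {
        perror("open");
        return EXIT_FAILURE;
    }

    for (blkno = 0; blkno < 4; blkno++)
    {
        if (write(fd, page, sizeof(page)) != (ssize_t) sizeof(page))
        {
            perror("write");
            return EXIT_FAILURE;
        }
    }

    /* Without this, a crash after "commit" could leave the contents only
     * in the kernel's page cache, never on disk. */
    if (fsync(fd) != 0)
    {
        perror("fsync");
        return EXIT_FAILURE;
    }
    close(fd);
    printf("4 pages written and fsync'd\n");
    return 0;
}
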
*/ if (!wstate->index->rd_istemp) smgrimmedsync(wstate->index->rd_smgr); diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c index 425e3b057e7..bd640f6f8b5 100644 --- a/src/backend/access/nbtree/nbtutils.c +++ b/src/backend/access/nbtree/nbtutils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.59 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.60 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -48,8 +48,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup) bool null; /* - * We can use the cached (default) support procs since no cross-type - * comparison can be needed. + * We can use the cached (default) support procs since no + * cross-type comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); arg = index_getattr(itup, i + 1, itupdesc, &null); @@ -68,7 +68,7 @@ _bt_mkscankey(Relation rel, IndexTuple itup) /* * _bt_mkscankey_nodata * Build a scan key that contains comparator routines appropriate to - * the key datatypes, but no comparison data. The comparison data + * the key datatypes, but no comparison data. The comparison data * ultimately used must match the key datatypes. * * The result cannot be used with _bt_compare(). Currently this @@ -93,8 +93,8 @@ _bt_mkscankey_nodata(Relation rel) FmgrInfo *procinfo; /* - * We can use the cached (default) support procs since no cross-type - * comparison can be needed. + * We can use the cached (default) support procs since no + * cross-type comparison can be needed. */ procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC); ScanKeyEntryInitializeWithInfo(&skey[i], @@ -163,12 +163,12 @@ _bt_formitem(IndexTuple itup) * _bt_preprocess_keys() -- Preprocess scan keys * * The caller-supplied keys (in scan->keyData[]) are copied to - * so->keyData[] with possible transformation. scan->numberOfKeys is + * so->keyData[] with possible transformation. scan->numberOfKeys is * the number of input keys, so->numberOfKeys gets the number of output * keys (possibly less, never greater). * * The primary purpose of this routine is to discover how many scan keys - * must be satisfied to continue the scan. It also attempts to eliminate + * must be satisfied to continue the scan. It also attempts to eliminate * redundant keys and detect contradictory keys. At present, redundant and * contradictory keys can only be detected for same-data-type comparisons, * but that's the usual case so it seems worth doing. @@ -198,7 +198,7 @@ _bt_formitem(IndexTuple itup) * or one or two boundary-condition keys for each attr.) However, we can * only detect redundant keys when the right-hand datatypes are all equal * to the index datatype, because we do not know suitable operators for - * comparing right-hand values of two different datatypes. (In theory + * comparing right-hand values of two different datatypes. (In theory * we could handle comparison of a RHS of the index datatype with a RHS of * another type, but that seems too much pain for too little gain.) So, * keys whose operator has a nondefault subtype (ie, its RHS is not of the @@ -285,9 +285,9 @@ _bt_preprocess_keys(IndexScanDesc scan) * * xform[i] points to the currently best scan key of strategy type i+1, * if any is found with a default operator subtype; it is NULL if we - * haven't yet found such a key for this attr. 
Scan keys of nondefault - * subtypes are transferred to the output with no processing except for - * noting if they are of "=" type. + * haven't yet found such a key for this attr. Scan keys of + * nondefault subtypes are transferred to the output with no + * processing except for noting if they are of "=" type. */ attno = 1; memset(xform, 0, sizeof(xform)); @@ -361,7 +361,7 @@ _bt_preprocess_keys(IndexScanDesc scan) /* * If no "=" for this key, we're done with required keys */ - if (! hasOtherTypeEqual) + if (!hasOtherTypeEqual) allEqualSoFar = false; } @@ -369,8 +369,8 @@ _bt_preprocess_keys(IndexScanDesc scan) if (xform[BTLessStrategyNumber - 1] && xform[BTLessEqualStrategyNumber - 1]) { - ScanKey lt = xform[BTLessStrategyNumber - 1]; - ScanKey le = xform[BTLessEqualStrategyNumber - 1]; + ScanKey lt = xform[BTLessStrategyNumber - 1]; + ScanKey le = xform[BTLessEqualStrategyNumber - 1]; test = FunctionCall2(&le->sk_func, lt->sk_argument, @@ -385,8 +385,8 @@ _bt_preprocess_keys(IndexScanDesc scan) if (xform[BTGreaterStrategyNumber - 1] && xform[BTGreaterEqualStrategyNumber - 1]) { - ScanKey gt = xform[BTGreaterStrategyNumber - 1]; - ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1]; + ScanKey gt = xform[BTGreaterStrategyNumber - 1]; + ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1]; test = FunctionCall2(&ge->sk_func, gt->sk_argument, @@ -545,21 +545,23 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple, { /* * Tuple fails this qual. If it's a required qual, then we - * may be able to conclude no further tuples will pass, either. - * We have to look at the scan direction and the qual type. + * may be able to conclude no further tuples will pass, + * either. We have to look at the scan direction and the qual + * type. * * Note: the only case in which we would keep going after failing - * a required qual is if there are partially-redundant quals that - * _bt_preprocess_keys() was unable to eliminate. For example, - * given "x > 4 AND x > 10" where both are cross-type comparisons - * and so not removable, we might start the scan at the x = 4 - * boundary point. The "x > 10" condition will fail until we - * pass x = 10, but we must not stop the scan on its account. + * a required qual is if there are partially-redundant quals + * that _bt_preprocess_keys() was unable to eliminate. For + * example, given "x > 4 AND x > 10" where both are cross-type + * comparisons and so not removable, we might start the scan + * at the x = 4 boundary point. The "x > 10" condition will + * fail until we pass x = 10, but we must not stop the scan on + * its account. * - * Note: because we stop the scan as soon as any required equality - * qual fails, it is critical that equality quals be used for the - * initial positioning in _bt_first() when they are available. - * See comments in _bt_first(). + * Note: because we stop the scan as soon as any required + * equality qual fails, it is critical that equality quals be + * used for the initial positioning in _bt_first() when they + * are available. See comments in _bt_first(). 
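
The _bt_preprocess_keys hunks here keep only one of a "<" and a "<=" key on the same attribute (and likewise for ">" and ">="). Over plain integers the rule reduces to a one-line comparison; a hedged sketch with made-up types:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    int         bound;          /* right-hand constant */
    bool        strict;         /* true for "<", false for "<=" */
} UpperQual;

/*
 * Keep the tighter of "x < a" and "x <= b": the strict key wins whenever
 * a <= b (then "x < a" implies "x <= b"), otherwise the "<=" key wins.
 */
static UpperQual
tighter_upper(UpperQual lt, UpperQual le)
{
    return (lt.bound <= le.bound) ? lt : le;
}

int
main(void)
{
    UpperQual   lt = {5, true};     /* x < 5  */
    UpperQual   le = {7, false};    /* x <= 7 */
    UpperQual   keep = tighter_upper(lt, le);

    printf("keep: x %s %d\n", keep.strict ? "<" : "<=", keep.bound);
    return 0;
}

The real code cannot assume integers: it asks the "<=" operator's support function whether a <= b, which is why the surrounding comments say redundancy is only detected when both constants are of the index's own datatype.
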
*/ if (ikey < so->numberOfRequiredKeys) { diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c index edb1ff8d9bb..03f09e3fa2c 100644 --- a/src/backend/access/nbtree/nbtxlog.c +++ b/src/backend/access/nbtree/nbtxlog.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.17 2004/08/29 04:12:21 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.18 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -770,7 +770,7 @@ static void out_target(char *buf, xl_btreetid *target) { sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u", - target->node.spcNode, target->node.dbNode, target->node.relNode, + target->node.spcNode, target->node.dbNode, target->node.relNode, ItemPointerGetBlockNumber(&(target->tid)), ItemPointerGetOffsetNumber(&(target->tid))); } diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c index 1765cef28d7..4ec3a5da631 100644 --- a/src/backend/access/rtree/rtscan.c +++ b/src/backend/access/rtree/rtscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.54 2004/08/29 04:12:22 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -123,7 +123,7 @@ rtrescan(PG_FUNCTION_ARGS) Oid int_oper; RegProcedure int_proc; - opclass = s->indexRelation->rd_index->indclass[attno-1]; + opclass = s->indexRelation->rd_index->indclass[attno - 1]; int_strategy = RTMapToInternalOperator(s->keyData[i].sk_strategy); int_oper = get_opclass_member(opclass, s->keyData[i].sk_subtype, @@ -280,14 +280,14 @@ rtdropscan(IndexScanDesc s) void ReleaseResources_rtree(void) { - RTScanList l; - RTScanList prev; - RTScanList next; + RTScanList l; + RTScanList prev; + RTScanList next; /* - * Note: this should be a no-op during normal query shutdown. - * However, in an abort situation ExecutorEnd is not called and so - * there may be open index scans to clean up. + * Note: this should be a no-op during normal query shutdown. However, + * in an abort situation ExecutorEnd is not called and so there may be + * open index scans to clean up. 
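
The ReleaseResources_rtree comment above (the same wording as the hash version earlier) says the scan list is normally empty here but may not be after an abort. A simplified standalone sketch of that cleanup walk over a singly linked list; the real routine also keeps a 'prev' pointer so it can unlink entries selectively, which this version skips:

#include <stdio.h>
#include <stdlib.h>

typedef struct ScanNode
{
    int         scan_id;
    struct ScanNode *next;
} ScanNode;

/*
 * Normally a no-op (the list is empty); after an error there may be
 * leftover open scans, and each one is released here.
 */
static void
release_all_scans(ScanNode **head)
{
    ScanNode   *l = *head;

    while (l != NULL)
    {
        ScanNode   *next = l->next;

        printf("cleaning up leftover scan %d\n", l->scan_id);
        free(l);
        l = next;
    }
    *head = NULL;
}

int
main(void)
{
    ScanNode   *head = NULL;
    int         i;

    for (i = 0; i < 3; i++)
    {
        ScanNode   *n = (ScanNode *) malloc(sizeof(ScanNode));

        n->scan_id = i;
        n->next = head;
        head = n;
    }
    release_all_scans(&head);
    return 0;
}
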
*/ prev = NULL; diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c index b26807f9afb..fb490e4137a 100644 --- a/src/backend/access/transam/clog.c +++ b/src/backend/access/transam/clog.c @@ -24,7 +24,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.24 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.25 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,6 +62,7 @@ * Link to shared-memory data structures for CLOG control */ static SlruCtlData ClogCtlData; + #define ClogCtl (&ClogCtlData) diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c index c695013ed17..c87b38a7922 100644 --- a/src/backend/access/transam/slru.c +++ b/src/backend/access/transam/slru.c @@ -48,7 +48,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.20 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.21 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -79,7 +79,7 @@ * segment and page numbers in SimpleLruTruncate (see PagePrecedes()). * * Note: this file currently assumes that segment file names will be four - * hex digits. This sets a lower bound on the segment size (64K transactions + * hex digits. This sets a lower bound on the segment size (64K transactions * for 32-bit TransactionIds). */ #define SLRU_PAGES_PER_SEGMENT 32 @@ -96,9 +96,9 @@ */ typedef struct SlruFlushData { - int num_files; /* # files actually open */ - int fd[NUM_SLRU_BUFFERS]; /* their FD's */ - int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */ + int num_files; /* # files actually open */ + int fd[NUM_SLRU_BUFFERS]; /* their FD's */ + int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */ } SlruFlushData; /* @@ -132,7 +132,7 @@ static int slru_errno; static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno); static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, - SlruFlush fdata); + SlruFlush fdata); static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid); static int SlruSelectLRUPage(SlruCtl ctl, int pageno); @@ -385,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata) /* If we failed, and we're in a flush, better close the files */ if (!ok && fdata) { - int i; + int i; for (i = 0; i < fdata->num_files; i++) close(fdata->fd[i]); @@ -511,7 +511,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) */ if (fdata) { - int i; + int i; for (i = 0; i < fdata->num_files; i++) { @@ -527,16 +527,17 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata) { /* * If the file doesn't already exist, we should create it. It is - * possible for this to need to happen when writing a page that's not - * first in its segment; we assume the OS can cope with that. - * (Note: it might seem that it'd be okay to create files only when - * SimpleLruZeroPage is called for the first page of a segment. 
- * However, if after a crash and restart the REDO logic elects to - * replay the log from a checkpoint before the latest one, then it's - * possible that we will get commands to set transaction status of - * transactions that have already been truncated from the commit log. - * Easiest way to deal with that is to accept references to - * nonexistent files here and in SlruPhysicalReadPage.) + * possible for this to need to happen when writing a page that's + * not first in its segment; we assume the OS can cope with that. + * (Note: it might seem that it'd be okay to create files only + * when SimpleLruZeroPage is called for the first page of a + * segment. However, if after a crash and restart the REDO logic + * elects to replay the log from a checkpoint before the latest + * one, then it's possible that we will get commands to set + * transaction status of transactions that have already been + * truncated from the commit log. Easiest way to deal with that is + * to accept references to nonexistent files here and in + * SlruPhysicalReadPage.) */ SlruFileName(ctl, path, segno); fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR); @@ -648,36 +649,36 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid) ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not seek in file \"%s\" to offset %u: %m", - path, offset))); + errdetail("could not seek in file \"%s\" to offset %u: %m", + path, offset))); break; case SLRU_READ_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not read from file \"%s\" at offset %u: %m", - path, offset))); + errdetail("could not read from file \"%s\" at offset %u: %m", + path, offset))); break; case SLRU_WRITE_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not write to file \"%s\" at offset %u: %m", - path, offset))); + errdetail("could not write to file \"%s\" at offset %u: %m", + path, offset))); break; case SLRU_FSYNC_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not fsync file \"%s\": %m", - path))); + errdetail("could not fsync file \"%s\": %m", + path))); break; case SLRU_CLOSE_FAILED: ereport(ERROR, (errcode_for_file_access(), errmsg("could not access status of transaction %u", xid), - errdetail("could not close file \"%s\": %m", - path))); + errdetail("could not close file \"%s\": %m", + path))); break; default: /* can't get here, we trust */ @@ -841,8 +842,8 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage) /* * Scan shared memory and remove any pages preceding the cutoff page, * to ensure we won't rewrite them later. (Since this is normally - * called in or just after a checkpoint, any dirty pages should - * have been flushed already ... we're just being extra careful here.) + * called in or just after a checkpoint, any dirty pages should have + * been flushed already ... we're just being extra careful here.) 
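
SimpleLruTruncate's comment above says the in-memory buffers are scanned and any page preceding the cutoff is dropped so it cannot be written back later. Below is a reduced sketch over a flat slot array; note that the real code compares page numbers through a PagePrecedes callback because transaction IDs wrap around, whereas the plain "<" here assumes they do not.

#include <stdbool.h>
#include <stdio.h>

#define N_SLOTS 8

typedef struct
{
    bool        valid;
    int         pageno;
} BufferSlotSketch;

/*
 * Forget every buffered page that precedes the truncation cutoff, so a
 * later write-back cannot recreate a segment we are about to remove.
 */
static void
discard_before_cutoff(BufferSlotSketch *slots, int nslots, int cutoff_page)
{
    int         i;

    for (i = 0; i < nslots; i++)
    {
        if (slots[i].valid && slots[i].pageno < cutoff_page)
        {
            printf("dropping buffered page %d\n", slots[i].pageno);
            slots[i].valid = false;
        }
    }
}

int
main(void)
{
    BufferSlotSketch slots[N_SLOTS] = {
        {true, 3}, {true, 17}, {false, 0}, {true, 9},
        {true, 21}, {false, 0}, {true, 2}, {true, 30},
    };

    discard_before_cutoff(slots, N_SLOTS, 10);
    return 0;
}
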
*/ LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE); @@ -952,8 +953,11 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions) errno = 0; } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 7976de2300c..93a586148be 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -5,7 +5,7 @@ * * The pg_subtrans manager is a pg_clog-like manager that stores the parent * transaction Id for each transaction. It is a fundamental part of the - * nested transactions implementation. A main transaction has a parent + * nested transactions implementation. A main transaction has a parent * of InvalidTransactionId, and each subtransaction has its immediate parent. * The tree can easily be walked from child to parent, but not in the * opposite direction. @@ -22,7 +22,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.4 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.5 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -57,6 +57,7 @@ * Link to shared-memory data structures for SUBTRANS control */ static SlruCtlData SubTransCtlData; + #define SubTransCtl (&SubTransCtlData) @@ -101,7 +102,7 @@ SubTransGetParent(TransactionId xid) int entryno = TransactionIdToEntry(xid); int slotno; TransactionId *ptr; - TransactionId parent; + TransactionId parent; /* Can't ask about stuff that might not be around anymore */ Assert(TransactionIdFollowsOrEquals(xid, RecentXmin)); @@ -139,7 +140,7 @@ TransactionId SubTransGetTopmostTransaction(TransactionId xid) { TransactionId parentXid = xid, - previousXid = xid; + previousXid = xid; /* Can't ask about stuff that might not be around anymore */ Assert(TransactionIdFollowsOrEquals(xid, RecentXmin)); @@ -185,7 +186,7 @@ SUBTRANSShmemInit(void) * must have been called already.) * * Note: it's not really necessary to create the initial segment now, - * since slru.c would create it on first write anyway. But we may as well + * since slru.c would create it on first write anyway. But we may as well * do it to be sure the directory is set up correctly. */ void @@ -229,10 +230,11 @@ StartupSUBTRANS(void) int startPage; /* - * Since we don't expect pg_subtrans to be valid across crashes, - * we initialize the currently-active page to zeroes during startup. + * Since we don't expect pg_subtrans to be valid across crashes, we + * initialize the currently-active page to zeroes during startup. * Whenever we advance into a new page, ExtendSUBTRANS will likewise - * zero the new page without regard to whatever was previously on disk. + * zero the new page without regard to whatever was previously on + * disk. */ LWLockAcquire(SubtransControlLock, LW_EXCLUSIVE); @@ -251,8 +253,8 @@ ShutdownSUBTRANS(void) /* * Flush dirty SUBTRANS pages to disk * - * This is not actually necessary from a correctness point of view. - * We do it merely as a debugging aid. + * This is not actually necessary from a correctness point of view. 
We do + * it merely as a debugging aid. */ SimpleLruFlush(SubTransCtl, false); } @@ -266,8 +268,8 @@ CheckPointSUBTRANS(void) /* * Flush dirty SUBTRANS pages to disk * - * This is not actually necessary from a correctness point of view. - * We do it merely to improve the odds that writing of dirty pages is done + * This is not actually necessary from a correctness point of view. We do + * it merely to improve the odds that writing of dirty pages is done * by the checkpoint process and not by backends. */ SimpleLruFlush(SubTransCtl, true); diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c index fd5a1619a77..f82168be5b7 100644 --- a/src/backend/access/transam/transam.c +++ b/src/backend/access/transam/transam.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.60 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.61 2004/08/29 05:06:40 momjian Exp $ * * NOTES * This file contains the high level access-method interface to the @@ -126,7 +126,7 @@ TransactionLogUpdate(TransactionId transactionId, /* trans id to update */ static void TransactionLogMultiUpdate(int nxids, TransactionId *xids, XidStatus status) { - int i; + int i; Assert(nxids != 0); @@ -199,9 +199,10 @@ TransactionIdDidCommit(TransactionId transactionId) return true; /* - * If it's marked subcommitted, we have to check the parent recursively. - * However, if it's older than RecentXmin, we can't look at pg_subtrans; - * instead assume that the parent crashed without cleaning up its children. + * If it's marked subcommitted, we have to check the parent + * recursively. However, if it's older than RecentXmin, we can't look + * at pg_subtrans; instead assume that the parent crashed without + * cleaning up its children. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) { @@ -214,7 +215,7 @@ TransactionIdDidCommit(TransactionId transactionId) return TransactionIdDidCommit(parentXid); } - /* + /* * It's not committed. */ return false; @@ -247,9 +248,10 @@ TransactionIdDidAbort(TransactionId transactionId) return true; /* - * If it's marked subcommitted, we have to check the parent recursively. - * However, if it's older than RecentXmin, we can't look at pg_subtrans; - * instead assume that the parent crashed without cleaning up its children. + * If it's marked subcommitted, we have to check the parent + * recursively. However, if it's older than RecentXmin, we can't look + * at pg_subtrans; instead assume that the parent crashed without + * cleaning up its children. */ if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED) { diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c index cfd78937428..84926ac415a 100644 --- a/src/backend/access/transam/varsup.c +++ b/src/backend/access/transam/varsup.c @@ -6,7 +6,7 @@ * Copyright (c) 2000-2004, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.58 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.59 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -47,9 +47,9 @@ GetNewTransactionId(bool isSubXact) xid = ShmemVariableCache->nextXid; /* - * If we are allocating the first XID of a new page of the commit - * log, zero out that commit-log page before returning. 
We must do - * this while holding XidGenLock, else another xact could acquire and + * If we are allocating the first XID of a new page of the commit log, + * zero out that commit-log page before returning. We must do this + * while holding XidGenLock, else another xact could acquire and * commit a later XID before we zero the page. Fortunately, a page of * the commit log holds 32K or more transactions, so we don't have to * do this very often. @@ -61,17 +61,18 @@ GetNewTransactionId(bool isSubXact) /* * Now advance the nextXid counter. This must not happen until after - * we have successfully completed ExtendCLOG() --- if that routine fails, - * we want the next incoming transaction to try it again. We cannot - * assign more XIDs until there is CLOG space for them. + * we have successfully completed ExtendCLOG() --- if that routine + * fails, we want the next incoming transaction to try it again. We + * cannot assign more XIDs until there is CLOG space for them. */ TransactionIdAdvance(ShmemVariableCache->nextXid); /* - * We must store the new XID into the shared PGPROC array before releasing - * XidGenLock. This ensures that when GetSnapshotData calls + * We must store the new XID into the shared PGPROC array before + * releasing XidGenLock. This ensures that when GetSnapshotData calls * ReadNewTransactionId, all active XIDs before the returned value of - * nextXid are already present in PGPROC. Else we have a race condition. + * nextXid are already present in PGPROC. Else we have a race + * condition. * * XXX by storing xid into MyProc without acquiring SInvalLock, we are * relying on fetch/store of an xid to be atomic, else other backends @@ -86,19 +87,19 @@ GetNewTransactionId(bool isSubXact) * * A solution to the atomic-store problem would be to give each PGPROC * its own spinlock used only for fetching/storing that PGPROC's xid - * and related fields. (SInvalLock would then mean primarily that + * and related fields. (SInvalLock would then mean primarily that * PGPROCs couldn't be added/removed while holding the lock.) * * If there's no room to fit a subtransaction XID into PGPROC, set the * cache-overflowed flag instead. This forces readers to look in - * pg_subtrans to map subtransaction XIDs up to top-level XIDs. - * There is a race-condition window, in that the new XID will not - * appear as running until its parent link has been placed into - * pg_subtrans. However, that will happen before anyone could possibly - * have a reason to inquire about the status of the XID, so it seems - * OK. (Snapshots taken during this window *will* include the parent - * XID, so they will deliver the correct answer later on when someone - * does have a reason to inquire.) + * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There + * is a race-condition window, in that the new XID will not appear as + * running until its parent link has been placed into pg_subtrans. + * However, that will happen before anyone could possibly have a + * reason to inquire about the status of the XID, so it seems OK. + * (Snapshots taken during this window *will* include the parent XID, + * so they will deliver the correct answer later on when someone does + * have a reason to inquire.) 
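
The GetNewTransactionId comment ending here describes the per-backend cache of subtransaction XIDs and its overflow flag. A standalone sketch of just that bookkeeping, with a deliberately tiny cache (the size and names are mine, not the server's):

#include <stdbool.h>
#include <stdio.h>

#define SUBXID_CACHE_SIZE 4     /* deliberately tiny for the example */

typedef struct
{
    int         nxids;
    bool        overflowed;
    unsigned int xids[SUBXID_CACHE_SIZE];
} SubxidCacheSketch;

/*
 * Remember a newly assigned subtransaction XID if there is room; otherwise
 * mark the cache overflowed, which tells readers that the cache is no
 * longer a complete list and the on-disk parent map must be consulted.
 */
static void
remember_subxid(SubxidCacheSketch *cache, unsigned int xid)
{
    if (cache->nxids < SUBXID_CACHE_SIZE)
        cache->xids[cache->nxids++] = xid;
    else
        cache->overflowed = true;
}

int
main(void)
{
    SubxidCacheSketch cache = {0, false, {0}};
    unsigned int xid;

    for (xid = 1001; xid <= 1006; xid++)
        remember_subxid(&cache, xid);

    printf("cached %d subxids, overflowed = %s\n",
           cache.nxids, cache.overflowed ? "true" : "false");
    return 0;
}
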
*/ if (MyProc != NULL) { @@ -112,9 +113,7 @@ GetNewTransactionId(bool isSubXact) MyProc->subxids.nxids++; } else - { MyProc->subxids.overflowed = true; - } } } diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index fd5d6b51682..3bb38e4227f 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -10,7 +10,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.182 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.183 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -89,19 +89,20 @@ typedef enum TBlockState */ typedef struct TransactionStateData { - TransactionId transactionIdData; /* my XID */ - char *name; /* savepoint name, if any */ - int savepointLevel; /* savepoint level */ - CommandId commandId; /* current CID */ - TransState state; /* low-level state */ - TBlockState blockState; /* high-level state */ - int nestingLevel; /* nest depth */ - MemoryContext curTransactionContext; /* my xact-lifetime context */ - ResourceOwner curTransactionOwner; /* my query resources */ - List *childXids; /* subcommitted child XIDs */ - AclId currentUser; /* subxact start current_user */ - bool prevXactReadOnly; /* entry-time xact r/o state */ - struct TransactionStateData *parent; /* back link to parent */ + TransactionId transactionIdData; /* my XID */ + char *name; /* savepoint name, if any */ + int savepointLevel; /* savepoint level */ + CommandId commandId; /* current CID */ + TransState state; /* low-level state */ + TBlockState blockState; /* high-level state */ + int nestingLevel; /* nest depth */ + MemoryContext curTransactionContext; /* my xact-lifetime + * context */ + ResourceOwner curTransactionOwner; /* my query resources */ + List *childXids; /* subcommitted child XIDs */ + AclId currentUser; /* subxact start current_user */ + bool prevXactReadOnly; /* entry-time xact r/o state */ + struct TransactionStateData *parent; /* back link to parent */ } TransactionStateData; typedef TransactionStateData *TransactionState; @@ -180,8 +181,8 @@ static TransactionState CurrentTransactionState = &TopTransactionStateData; * This does not change as we enter and exit subtransactions, so we don't * keep it inside the TransactionState stack. */ -static AbsoluteTime xactStartTime; /* integer part */ -static int xactStartTimeUsec; /* microsecond part */ +static AbsoluteTime xactStartTime; /* integer part */ +static int xactStartTimeUsec; /* microsecond part */ /* @@ -261,7 +262,7 @@ IsAbortedTransactionBlockState(void) { TransactionState s = CurrentTransactionState; - if (s->blockState == TBLOCK_ABORT || + if (s->blockState == TBLOCK_ABORT || s->blockState == TBLOCK_SUBABORT) return true; @@ -362,15 +363,15 @@ TransactionIdIsCurrentTransactionId(TransactionId xid) } /* - * We will return true for the Xid of the current subtransaction, - * any of its subcommitted children, any of its parents, or any of - * their previously subcommitted children. However, a transaction - * being aborted is no longer "current", even though it may still - * have an entry on the state stack. + * We will return true for the Xid of the current subtransaction, any + * of its subcommitted children, any of its parents, or any of their + * previously subcommitted children. However, a transaction being + * aborted is no longer "current", even though it may still have an + * entry on the state stack. 
*/ for (s = CurrentTransactionState; s != NULL; s = s->parent) { - ListCell *cell; + ListCell *cell; if (s->state == TRANS_ABORT) continue; @@ -502,15 +503,16 @@ AtSubStart_Memory(void) Assert(CurTransactionContext != NULL); /* - * Create a CurTransactionContext, which will be used to hold data that - * survives subtransaction commit but disappears on subtransaction abort. - * We make it a child of the immediate parent's CurTransactionContext. + * Create a CurTransactionContext, which will be used to hold data + * that survives subtransaction commit but disappears on + * subtransaction abort. We make it a child of the immediate parent's + * CurTransactionContext. */ CurTransactionContext = AllocSetContextCreate(CurTransactionContext, "CurTransactionContext", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); s->curTransactionContext = CurTransactionContext; /* Make the CurTransactionContext active. */ @@ -528,8 +530,8 @@ AtSubStart_ResourceOwner(void) Assert(s->parent != NULL); /* - * Create a resource owner for the subtransaction. We make it a - * child of the immediate parent's resource owner. + * Create a resource owner for the subtransaction. We make it a child + * of the immediate parent's resource owner. */ s->curTransactionOwner = ResourceOwnerCreate(s->parent->curTransactionOwner, @@ -560,10 +562,11 @@ RecordTransactionCommit(void) nchildren = xactGetCommittedChildren(&children); /* - * If we made neither any XLOG entries nor any temp-rel updates, - * and have no files to be deleted, we can omit recording the transaction + * If we made neither any XLOG entries nor any temp-rel updates, and + * have no files to be deleted, we can omit recording the transaction * commit at all. (This test includes the effects of subtransactions, - * so the presence of committed subxacts need not alone force a write.) + * so the presence of committed subxacts need not alone force a + * write.) */ if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate || nrels > 0) { @@ -577,17 +580,18 @@ RecordTransactionCommit(void) START_CRIT_SECTION(); /* - * If our transaction made any transaction-controlled XLOG entries, - * we need to lock out checkpoint start between writing our XLOG - * record and updating pg_clog. Otherwise it is possible for the - * checkpoint to set REDO after the XLOG record but fail to flush the - * pg_clog update to disk, leading to loss of the transaction commit - * if we crash a little later. Slightly klugy fix for problem - * discovered 2004-08-10. + * If our transaction made any transaction-controlled XLOG + * entries, we need to lock out checkpoint start between writing + * our XLOG record and updating pg_clog. Otherwise it is possible + * for the checkpoint to set REDO after the XLOG record but fail + * to flush the pg_clog update to disk, leading to loss of the + * transaction commit if we crash a little later. Slightly klugy + * fix for problem discovered 2004-08-10. * * (If it made no transaction-controlled XLOG entries, its XID - * appears nowhere in permanent storage, so no one else will ever care - * if it committed; so it doesn't matter if we lose the commit flag.) + * appears nowhere in permanent storage, so no one else will ever + * care if it committed; so it doesn't matter if we lose the + * commit flag.) * * Note we only need a shared lock. 
*/ @@ -798,21 +802,21 @@ static void RecordSubTransactionCommit(void) { /* - * We do not log the subcommit in XLOG; it doesn't matter until - * the top-level transaction commits. + * We do not log the subcommit in XLOG; it doesn't matter until the + * top-level transaction commits. * * We must mark the subtransaction subcommitted in clog if its XID * appears either in permanent rels or in local temporary rels. We - * test this by seeing if we made transaction-controlled entries - * *OR* local-rel tuple updates. (The test here actually covers the - * entire transaction tree so far, so it may mark subtransactions that - * don't really need it, but it's probably not worth being tenser. - * Note that if a prior subtransaction dirtied these variables, then + * test this by seeing if we made transaction-controlled entries *OR* + * local-rel tuple updates. (The test here actually covers the entire + * transaction tree so far, so it may mark subtransactions that don't + * really need it, but it's probably not worth being tenser. Note that + * if a prior subtransaction dirtied these variables, then * RecordTransactionCommit will have to do the full pushup anyway...) */ if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate) { - TransactionId xid = GetCurrentTransactionId(); + TransactionId xid = GetCurrentTransactionId(); /* XXX does this really need to be a critical section? */ START_CRIT_SECTION(); @@ -837,8 +841,8 @@ RecordTransactionAbort(void) { int nrels; RelFileNode *rptr; - int nchildren; - TransactionId *children; + int nchildren; + TransactionId *children; /* Get data needed for abort record */ nrels = smgrGetPendingDeletes(false, &rptr); @@ -846,13 +850,13 @@ RecordTransactionAbort(void) /* * If we made neither any transaction-controlled XLOG entries nor any - * temp-rel updates, and are not going to delete any files, we can omit - * recording the transaction abort at all. No one will ever care that - * it aborted. (These tests cover our whole transaction tree.) + * temp-rel updates, and are not going to delete any files, we can + * omit recording the transaction abort at all. No one will ever care + * that it aborted. (These tests cover our whole transaction tree.) */ if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate || nrels > 0) { - TransactionId xid = GetCurrentTransactionId(); + TransactionId xid = GetCurrentTransactionId(); /* * Catch the scenario where we aborted partway through @@ -867,13 +871,13 @@ RecordTransactionAbort(void) * We only need to log the abort in XLOG if the transaction made * any transaction-controlled XLOG entries or will delete files. * (If it made no transaction-controlled XLOG entries, its XID - * appears nowhere in permanent storage, so no one else will ever care - * if it committed.) + * appears nowhere in permanent storage, so no one else will ever + * care if it committed.) * * We do not flush XLOG to disk unless deleting files, since the - * default assumption after a crash would be that we aborted, anyway. - * For the same reason, we don't need to worry about interlocking - * against checkpoint start. + * default assumption after a crash would be that we aborted, + * anyway. For the same reason, we don't need to worry about + * interlocking against checkpoint start. 
*/ if (MyLastRecPtr.xrecoff != 0 || nrels > 0) { @@ -990,9 +994,9 @@ RecordSubTransactionAbort(void) { int nrels; RelFileNode *rptr; - TransactionId xid = GetCurrentTransactionId(); - int nchildren; - TransactionId *children; + TransactionId xid = GetCurrentTransactionId(); + int nchildren; + TransactionId *children; /* Get data needed for abort record */ nrels = smgrGetPendingDeletes(false, &rptr); @@ -1000,10 +1004,10 @@ RecordSubTransactionAbort(void) /* * If we made neither any transaction-controlled XLOG entries nor any - * temp-rel updates, and are not going to delete any files, we can omit - * recording the transaction abort at all. No one will ever care that - * it aborted. (These tests cover our whole transaction tree, and - * therefore may mark subxacts that don't really need it, but it's + * temp-rel updates, and are not going to delete any files, we can + * omit recording the transaction abort at all. No one will ever care + * that it aborted. (These tests cover our whole transaction tree, + * and therefore may mark subxacts that don't really need it, but it's * probably not worth being tenser.) * * In this case we needn't worry about marking subcommitted children as @@ -1021,9 +1025,9 @@ RecordSubTransactionAbort(void) if (MyLastRecPtr.xrecoff != 0 || nrels > 0) { XLogRecData rdata[3]; - int lastrdata = 0; + int lastrdata = 0; xl_xact_abort xlrec; - XLogRecPtr recptr; + XLogRecPtr recptr; xlrec.xtime = time(NULL); xlrec.nrels = nrels; @@ -1071,8 +1075,8 @@ RecordSubTransactionAbort(void) /* * We can immediately remove failed XIDs from PGPROC's cache of * running child XIDs. It's easiest to do it here while we have the - * child XID array at hand, even though in the main-transaction - * case the equivalent work happens just after return from + * child XID array at hand, even though in the main-transaction case + * the equivalent work happens just after return from * RecordTransactionAbort. */ XidCacheRemoveRunningXids(xid, nchildren, children); @@ -1169,7 +1173,8 @@ StartTransaction(void) s->state = TRANS_START; /* - * Make sure we've freed any old snapshot, and reset xact state variables + * Make sure we've freed any old snapshot, and reset xact state + * variables */ FreeXactSnapshot(); XactIsoLevel = DefaultXactIsoLevel; @@ -1323,9 +1328,9 @@ CommitTransaction(void) * want to release locks at the point where any backend waiting for us * will see our transaction as being fully cleaned up. * - * Resources that can be associated with individual queries are - * handled by the ResourceOwner mechanism. The other calls here - * are for backend-wide state. + * Resources that can be associated with individual queries are handled + * by the ResourceOwner mechanism. The other calls here are for + * backend-wide state. */ smgrDoPendingDeletes(true); @@ -1342,7 +1347,8 @@ CommitTransaction(void) * after relcache references are dropped (see comments for * AtEOXact_RelationCache), but before locks are released (if anyone * is waiting for lock on a relation we've modified, we want them to - * know about the catalog change before they start using the relation). + * know about the catalog change before they start using the + * relation). */ AtEOXact_Inval(true); @@ -1428,11 +1434,12 @@ AbortTransaction(void) /* * Reset user id which might have been changed transiently. We cannot - * use s->currentUser, but must get the session userid from miscinit.c. + * use s->currentUser, but must get the session userid from + * miscinit.c. 
* * (Note: it is not necessary to restore session authorization here * because that can only be changed via GUC, and GUC will take care of - * rolling it back if need be. However, an error within a SECURITY + * rolling it back if need be. However, an error within a SECURITY * DEFINER function could send control here with the wrong current * userid.) */ @@ -1443,7 +1450,7 @@ AbortTransaction(void) */ DeferredTriggerAbortXact(); AtAbort_Portals(); - AtEOXact_LargeObject(false); /* 'false' means it's abort */ + AtEOXact_LargeObject(false); /* 'false' means it's abort */ AtAbort_Notify(); AtEOXact_UpdatePasswordFile(false); @@ -1523,7 +1530,7 @@ CleanupTransaction(void) */ AtCleanup_Portals(); /* now safe to release portal memory */ - CurrentResourceOwner = NULL; /* and resource owner */ + CurrentResourceOwner = NULL; /* and resource owner */ ResourceOwnerDelete(TopTransactionResourceOwner); s->curTransactionOwner = NULL; CurTransactionResourceOwner = NULL; @@ -1561,9 +1568,10 @@ StartTransactionCommand(void) break; /* - * This is the case when we are somewhere in a transaction block - * and about to start a new command. For now we do nothing - * but someday we may do command-local resource initialization. + * This is the case when we are somewhere in a transaction + * block and about to start a new command. For now we do + * nothing but someday we may do command-local resource + * initialization. */ case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: @@ -1616,8 +1624,8 @@ CommitTransactionCommand(void) /* * This shouldn't happen, because it means the previous * StartTransactionCommand didn't set the STARTED state - * appropriately, or we didn't manage previous pending - * abort states. + * appropriately, or we didn't manage previous pending abort + * states. */ case TBLOCK_DEFAULT: case TBLOCK_SUBABORT_PENDING: @@ -1689,19 +1697,21 @@ CommitTransactionCommand(void) break; /* - * Ditto, but in a subtransaction. AbortOutOfAnyTransaction + * Ditto, but in a subtransaction. AbortOutOfAnyTransaction * will do the dirty work. */ case TBLOCK_SUBENDABORT_ALL: AbortOutOfAnyTransaction(); - s = CurrentTransactionState; /* changed by AbortOutOfAnyTransaction */ + s = CurrentTransactionState; /* changed by + * AbortOutOfAnyTransaction + * */ /* AbortOutOfAnyTransaction sets the blockState */ break; /* * We were just issued a SAVEPOINT inside a transaction block. - * Start a subtransaction. (DefineSavepoint already - * did PushTransaction, so as to have someplace to put the + * Start a subtransaction. (DefineSavepoint already did + * PushTransaction, so as to have someplace to put the * SUBBEGIN state.) */ case TBLOCK_SUBBEGIN: @@ -1720,14 +1730,15 @@ CommitTransactionCommand(void) * We were issued a RELEASE command, so we end the current * subtransaction and return to the parent transaction. * - * Since RELEASE can exit multiple levels of subtransaction, - * we must loop here until we get out of all SUBEND'ed levels. + * Since RELEASE can exit multiple levels of subtransaction, we + * must loop here until we get out of all SUBEND'ed levels. */ case TBLOCK_SUBEND: - do { + do + { CommitSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ } while (s->blockState == TBLOCK_SUBEND); break; @@ -1738,25 +1749,26 @@ CommitTransactionCommand(void) break; /* - * The current subtransaction is ending. Do the equivalent - * of a ROLLBACK TO followed by a RELEASE command. + * The current subtransaction is ending. 
Do the equivalent of + * a ROLLBACK TO followed by a RELEASE command. */ case TBLOCK_SUBENDABORT_RELEASE: CleanupAbortedSubTransactions(false); break; /* - * The current subtransaction is ending due to a ROLLBACK - * TO command, so close all savepoints up to the target - * level. When finished, recreate the savepoint. + * The current subtransaction is ending due to a ROLLBACK TO + * command, so close all savepoints up to the target level. + * When finished, recreate the savepoint. */ case TBLOCK_SUBENDABORT: { - char *name = CleanupAbortedSubTransactions(true); + char *name = CleanupAbortedSubTransactions(true); Assert(PointerIsValid(name)); DefineSavepoint(name); - s = CurrentTransactionState; /* changed by DefineSavepoint */ + s = CurrentTransactionState; /* changed by + * DefineSavepoint */ pfree(name); /* This is the same as TBLOCK_SUBBEGIN case */ @@ -1780,8 +1792,8 @@ static char * CleanupAbortedSubTransactions(bool returnName) { TransactionState s = CurrentTransactionState; - char *name = NULL; - + char *name = NULL; + AssertState(PointerIsValid(s->parent)); Assert(s->parent->blockState == TBLOCK_SUBINPROGRESS || s->parent->blockState == TBLOCK_INPROGRESS || @@ -1798,7 +1810,7 @@ CleanupAbortedSubTransactions(bool returnName) CleanupSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ while (s->blockState == TBLOCK_SUBABORT_PENDING) { @@ -1827,9 +1839,9 @@ AbortCurrentTransaction(void) switch (s->blockState) { - /* - * we aren't in a transaction, so we do nothing. - */ + /* + * we aren't in a transaction, so we do nothing. + */ case TBLOCK_DEFAULT: break; @@ -1856,10 +1868,10 @@ AbortCurrentTransaction(void) break; /* - * This is the case when we are somewhere in a transaction block - * and we've gotten a failure, so we abort the transaction and - * set up the persistent ABORT state. We will stay in ABORT - * until we get an "END TRANSACTION". + * This is the case when we are somewhere in a transaction + * block and we've gotten a failure, so we abort the + * transaction and set up the persistent ABORT state. We will + * stay in ABORT until we get an "END TRANSACTION". */ case TBLOCK_INPROGRESS: AbortTransaction(); @@ -1900,8 +1912,8 @@ AbortCurrentTransaction(void) break; /* - * If we are just starting a subtransaction, put it - * in aborted state. + * If we are just starting a subtransaction, put it in aborted + * state. */ case TBLOCK_SUBBEGIN: StartAbortedSubTransaction(); @@ -1914,8 +1926,8 @@ AbortCurrentTransaction(void) break; /* - * If we are aborting an ending transaction, - * we have to abort the parent transaction too. + * If we are aborting an ending transaction, we have to abort + * the parent transaction too. */ case TBLOCK_SUBEND: case TBLOCK_SUBABORT_PENDING: @@ -1924,7 +1936,7 @@ AbortCurrentTransaction(void) PopTransaction(); s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState != TBLOCK_SUBEND && - s->blockState != TBLOCK_SUBENDABORT); + s->blockState != TBLOCK_SUBENDABORT); AbortCurrentTransaction(); break; @@ -1937,13 +1949,13 @@ AbortCurrentTransaction(void) PopTransaction(); s = CurrentTransactionState; /* changed by pop */ Assert(s->blockState != TBLOCK_SUBEND && - s->blockState != TBLOCK_SUBENDABORT); + s->blockState != TBLOCK_SUBENDABORT); AbortCurrentTransaction(); break; /* - * We are already aborting the whole transaction tree. - * Do nothing, CommitTransactionCommand will call + * We are already aborting the whole transaction tree. 
Do + * nothing, CommitTransactionCommand will call * AbortOutOfAnyTransaction and set things straight. */ case TBLOCK_SUBENDABORT_ALL: @@ -2068,8 +2080,8 @@ bool IsInTransactionChain(void *stmtNode) { /* - * Return true on same conditions that would make PreventTransactionChain - * error out + * Return true on same conditions that would make + * PreventTransactionChain error out */ if (IsTransactionBlock()) return true; @@ -2097,8 +2109,8 @@ IsInTransactionChain(void *stmtNode) * (mainly because it's easier to control the order that way, where needed). * * At transaction end, the callback occurs post-commit or post-abort, so the - * callback functions can only do noncritical cleanup. At subtransaction - * start, the callback is called when the subtransaction has finished + * callback functions can only do noncritical cleanup. At subtransaction + * start, the callback is called when the subtransaction has finished * initializing. */ void @@ -2141,9 +2153,7 @@ CallXactCallbacks(XactEvent event, TransactionId parentXid) XactCallbackItem *item; for (item = Xact_callbacks; item; item = item->next) - { (*item->callback) (event, parentXid, item->arg); - } } @@ -2164,8 +2174,8 @@ BeginTransactionBlock(void) switch (s->blockState) { /* - * We are not inside a transaction block, so allow one - * to begin. + * We are not inside a transaction block, so allow one to + * begin. */ case TBLOCK_STARTED: s->blockState = TBLOCK_BEGIN; @@ -2180,7 +2190,7 @@ BeginTransactionBlock(void) case TBLOCK_SUBABORT: ereport(WARNING, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("there is already a transaction in progress"))); + errmsg("there is already a transaction in progress"))); break; /* These cases are invalid. Reject them altogether. */ @@ -2215,12 +2225,13 @@ EndTransactionBlock(void) switch (s->blockState) { - /* - * We are in a transaction block which should commit when we - * get to the upcoming CommitTransactionCommand() so we set the - * state to "END". CommitTransactionCommand() will recognize this - * and commit the transaction and return us to the default state. - */ + /* + * We are in a transaction block which should commit when we + * get to the upcoming CommitTransactionCommand() so we set + * the state to "END". CommitTransactionCommand() will + * recognize this and commit the transaction and return us to + * the default state. + */ case TBLOCK_INPROGRESS: case TBLOCK_SUBINPROGRESS: s->blockState = TBLOCK_END; @@ -2229,30 +2240,31 @@ EndTransactionBlock(void) /* * We are in a transaction block which aborted. Since the - * AbortTransaction() was already done, we need only - * change to the special "END ABORT" state. The upcoming - * CommitTransactionCommand() will recognise this and then put us - * back in the default state. + * AbortTransaction() was already done, we need only change to + * the special "END ABORT" state. The upcoming + * CommitTransactionCommand() will recognise this and then put + * us back in the default state. */ case TBLOCK_ABORT: s->blockState = TBLOCK_ENDABORT; break; /* - * Here we are inside an aborted subtransaction. Go to the "abort - * the whole tree" state so that CommitTransactionCommand() calls - * AbortOutOfAnyTransaction. + * Here we are inside an aborted subtransaction. Go to the + * "abort the whole tree" state so that + * CommitTransactionCommand() calls AbortOutOfAnyTransaction. */ case TBLOCK_SUBABORT: s->blockState = TBLOCK_SUBENDABORT_ALL; break; case TBLOCK_STARTED: + /* - * here, the user issued COMMIT when not inside a - * transaction. 
Issue a WARNING and go to abort state. The - * upcoming call to CommitTransactionCommand() will then put us - * back into the default state. + * here, the user issued COMMIT when not inside a transaction. + * Issue a WARNING and go to abort state. The upcoming call + * to CommitTransactionCommand() will then put us back into + * the default state. */ ereport(WARNING, (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION), @@ -2303,11 +2315,10 @@ UserAbortTransactionBlock(void) break; /* - * We are inside a failed subtransaction and we got an - * abort command from the user. Abort processing is already - * done, so go to the "abort all" state and - * CommitTransactionCommand will call AbortOutOfAnyTransaction - * to set things straight. + * We are inside a failed subtransaction and we got an abort + * command from the user. Abort processing is already done, + * so go to the "abort all" state and CommitTransactionCommand + * will call AbortOutOfAnyTransaction to set things straight. */ case TBLOCK_SUBABORT: s->blockState = TBLOCK_SUBENDABORT_ALL; @@ -2325,7 +2336,7 @@ UserAbortTransactionBlock(void) break; /* - * We are inside a subtransaction. Abort the current + * We are inside a subtransaction. Abort the current * subtransaction and go to the "abort all" state, so * CommitTransactionCommand will call AbortOutOfAnyTransaction * to set things straight. @@ -2373,7 +2384,7 @@ UserAbortTransactionBlock(void) void DefineSavepoint(char *name) { - TransactionState s = CurrentTransactionState; + TransactionState s = CurrentTransactionState; switch (s->blockState) { @@ -2381,11 +2392,12 @@ DefineSavepoint(char *name) case TBLOCK_SUBINPROGRESS: /* Normal subtransaction start */ PushTransaction(); - s = CurrentTransactionState; /* changed by push */ + s = CurrentTransactionState; /* changed by push */ + /* * Note that we are allocating the savepoint name in the - * parent transaction's CurTransactionContext, since we - * don't yet have a transaction context for the new guy. + * parent transaction's CurTransactionContext, since we don't + * yet have a transaction context for the new guy. */ s->name = MemoryContextStrdup(CurTransactionContext, name); s->blockState = TBLOCK_SUBBEGIN; @@ -2413,16 +2425,16 @@ DefineSavepoint(char *name) /* * ReleaseSavepoint - * This executes a RELEASE command. + * This executes a RELEASE command. */ void ReleaseSavepoint(List *options) { - TransactionState s = CurrentTransactionState; + TransactionState s = CurrentTransactionState; TransactionState target, - xact; - ListCell *cell; - char *name = NULL; + xact; + ListCell *cell; + char *name = NULL; /* * Check valid block state transaction status. @@ -2437,8 +2449,8 @@ ReleaseSavepoint(List *options) break; /* - * We are in a non-aborted subtransaction. This is - * the only valid case. + * We are in a non-aborted subtransaction. This is the only + * valid case. */ case TBLOCK_SUBINPROGRESS: break; @@ -2461,9 +2473,9 @@ ReleaseSavepoint(List *options) break; } - foreach (cell, options) + foreach(cell, options) { - DefElem *elem = lfirst(cell); + DefElem *elem = lfirst(cell); if (strcmp(elem->defname, "savepoint_name") == 0) name = strVal(elem->arg); @@ -2490,8 +2502,8 @@ ReleaseSavepoint(List *options) /* * Mark "commit pending" all subtransactions up to the target - * subtransaction. The actual commits will happen when control - * gets to CommitTransactionCommand. + * subtransaction. The actual commits will happen when control gets + * to CommitTransactionCommand. 
*/ xact = CurrentTransactionState; for (;;) @@ -2507,23 +2519,23 @@ ReleaseSavepoint(List *options) /* * RollbackToSavepoint - * This executes a ROLLBACK TO <savepoint> command. + * This executes a ROLLBACK TO <savepoint> command. */ void RollbackToSavepoint(List *options) { TransactionState s = CurrentTransactionState; TransactionState target, - xact; - ListCell *cell; - char *name = NULL; + xact; + ListCell *cell; + char *name = NULL; switch (s->blockState) { - /* - * We can't rollback to a savepoint if there is no saveopint - * defined. - */ + /* + * We can't rollback to a savepoint if there is no saveopint + * defined. + */ case TBLOCK_ABORT: case TBLOCK_INPROGRESS: ereport(ERROR, @@ -2536,9 +2548,10 @@ RollbackToSavepoint(List *options) */ case TBLOCK_SUBABORT: case TBLOCK_SUBINPROGRESS: + /* - * Have to do AbortSubTransaction, but first check - * if this is the right subtransaction + * Have to do AbortSubTransaction, but first check if this is + * the right subtransaction */ break; @@ -2559,9 +2572,9 @@ RollbackToSavepoint(List *options) break; } - foreach (cell, options) + foreach(cell, options) { - DefElem *elem = lfirst(cell); + DefElem *elem = lfirst(cell); if (strcmp(elem->defname, "savepoint_name") == 0) name = strVal(elem->arg); @@ -2597,7 +2610,7 @@ RollbackToSavepoint(List *options) /* * Mark "abort pending" all subtransactions up to the target - * subtransaction. (Except the current subtransaction!) + * subtransaction. (Except the current subtransaction!) */ xact = CurrentTransactionState; @@ -2623,7 +2636,7 @@ RollbackToSavepoint(List *options) void BeginInternalSubTransaction(char *name) { - TransactionState s = CurrentTransactionState; + TransactionState s = CurrentTransactionState; switch (s->blockState) { @@ -2632,11 +2645,12 @@ BeginInternalSubTransaction(char *name) case TBLOCK_SUBINPROGRESS: /* Normal subtransaction start */ PushTransaction(); - s = CurrentTransactionState; /* changed by push */ + s = CurrentTransactionState; /* changed by push */ + /* * Note that we are allocating the savepoint name in the - * parent transaction's CurTransactionContext, since we - * don't yet have a transaction context for the new guy. + * parent transaction's CurTransactionContext, since we don't + * yet have a transaction context for the new guy. */ if (name) s->name = MemoryContextStrdup(CurTransactionContext, name); @@ -2698,7 +2712,7 @@ RollbackAndReleaseCurrentSubTransaction(void) switch (s->blockState) { - /* Must be in a subtransaction */ + /* Must be in a subtransaction */ case TBLOCK_SUBABORT: case TBLOCK_SUBINPROGRESS: break; @@ -2748,7 +2762,8 @@ AbortOutOfAnyTransaction(void) /* * Get out of any transaction or nested transaction */ - do { + do + { switch (s->blockState) { case TBLOCK_DEFAULT: @@ -2770,21 +2785,26 @@ AbortOutOfAnyTransaction(void) s->blockState = TBLOCK_DEFAULT; break; case TBLOCK_SUBBEGIN: + /* - * We didn't get as far as starting the subxact, so there's - * nothing to abort. Just pop back to parent. + * We didn't get as far as starting the subxact, so + * there's nothing to abort. Just pop back to parent. 
*/ PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ break; case TBLOCK_SUBINPROGRESS: case TBLOCK_SUBEND: case TBLOCK_SUBABORT_PENDING: - /* In a subtransaction, so clean it up and abort parent too */ + + /* + * In a subtransaction, so clean it up and abort parent + * too + */ AbortSubTransaction(); CleanupSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ break; case TBLOCK_SUBABORT: case TBLOCK_SUBENDABORT_ALL: @@ -2793,7 +2813,7 @@ AbortOutOfAnyTransaction(void) /* As above, but AbortSubTransaction already done */ CleanupSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ break; } } while (s->blockState != TBLOCK_DEFAULT); @@ -2819,7 +2839,7 @@ CommitTransactionToLevel(int level) { CommitSubTransaction(); PopTransaction(); - s = CurrentTransactionState; /* changed by pop */ + s = CurrentTransactionState; /* changed by pop */ Assert(s->state == TRANS_INPROGRESS); } } @@ -2840,7 +2860,7 @@ IsTransactionBlock(void) /* * IsTransactionOrTransactionBlock --- are we within either a transaction - * or a transaction block? (The backend is only really "idle" when this + * or a transaction block? (The backend is only really "idle" when this * returns false.) * * This should match up with IsTransactionBlock and IsTransactionState. @@ -2928,9 +2948,10 @@ StartSubTransaction(void) /* * Generate a new Xid and record it in pg_subtrans. NB: we must make - * the subtrans entry BEFORE the Xid appears anywhere in shared storage, - * such as in the lock table; because until it's made the Xid may not - * appear to be "running" to other backends. See GetNewTransactionId. + * the subtrans entry BEFORE the Xid appears anywhere in shared + * storage, such as in the lock table; because until it's made the Xid + * may not appear to be "running" to other backends. See + * GetNewTransactionId. */ s->transactionIdData = GetNewTransactionId(true); @@ -2943,7 +2964,7 @@ StartSubTransaction(void) */ s->currentUser = GetUserId(); s->prevXactReadOnly = XactReadOnly; - + /* * Initialize other subsystems for new subtransaction */ @@ -2954,7 +2975,7 @@ StartSubTransaction(void) s->state = TRANS_INPROGRESS; /* - * Call start-of-subxact callbacks + * Call start-of-subxact callbacks */ CallXactCallbacks(XACT_EVENT_START_SUB, s->parent->transactionIdData); @@ -3020,9 +3041,9 @@ CommitSubTransaction(void) s->parent->transactionIdData); /* - * We need to restore the upper transaction's read-only state, - * in case the upper is read-write while the child is read-only; - * GUC will incorrectly think it should leave the child state in place. + * We need to restore the upper transaction's read-only state, in case + * the upper is read-write while the child is read-only; GUC will + * incorrectly think it should leave the child state in place. */ XactReadOnly = s->prevXactReadOnly; @@ -3117,14 +3138,16 @@ AbortSubTransaction(void) /* * Reset user id which might have been changed transiently. Here we * want to restore to the userid that was current at subxact entry. - * (As in AbortTransaction, we need not worry about the session userid.) + * (As in AbortTransaction, we need not worry about the session + * userid.) 
* * Must do this after AtEOXact_GUC to handle the case where we entered * the subxact inside a SECURITY DEFINER function (hence current and * session userids were different) and then session auth was changed - * inside the subxact. GUC will reset both current and session userids - * to the entry-time session userid. This is right in every other - * scenario so it seems simplest to let GUC do that and fix it here. + * inside the subxact. GUC will reset both current and session + * userids to the entry-time session userid. This is right in every + * other scenario so it seems simplest to let GUC do that and fix it + * here. */ SetUserId(s->currentUser); @@ -3168,11 +3191,11 @@ CleanupSubTransaction(void) * StartAbortedSubTransaction * * This function is used to start a subtransaction and put it immediately - * into aborted state. The end result should be equivalent to + * into aborted state. The end result should be equivalent to * StartSubTransaction immediately followed by AbortSubTransaction. * The reason we don't implement it just that way is that many of the backend * modules aren't designed to handle starting a subtransaction when not - * inside a valid transaction. Rather than making them all capable of + * inside a valid transaction. Rather than making them all capable of * doing that, we just omit the paired start and abort calls in this path. */ static void @@ -3195,9 +3218,10 @@ StartAbortedSubTransaction(void) /* Make sure currentUser is reasonably valid */ Assert(s->parent != NULL); s->currentUser = s->parent->currentUser; - + /* - * Initialize only what has to be there for CleanupSubTransaction to work. + * Initialize only what has to be there for CleanupSubTransaction to + * work. */ AtSubStart_Memory(); AtSubStart_ResourceOwner(); @@ -3219,8 +3243,8 @@ StartAbortedSubTransaction(void) static void PushTransaction(void) { - TransactionState p = CurrentTransactionState; - TransactionState s; + TransactionState p = CurrentTransactionState; + TransactionState s; /* * We keep subtransaction state nodes in TopTransactionContext. @@ -3315,7 +3339,7 @@ ShowTransactionStateRec(TransactionState s) /* use ereport to suppress computation if msg will not be printed */ ereport(DEBUG2, (errmsg_internal("name: %s; blockState: %13s; state: %7s, xid/cid: %u/%02u, nestlvl: %d, children: %s", - PointerIsValid(s->name) ? s->name : "unnamed", + PointerIsValid(s->name) ? s->name : "unnamed", BlockStateAsString(s->blockState), TransStateAsString(s->state), (unsigned int) s->transactionIdData, @@ -3393,7 +3417,7 @@ TransStateAsString(TransState state) /* * xactGetCommittedChildren * - * Gets the list of committed children of the current transaction. The return + * Gets the list of committed children of the current transaction. The return * value is the number of child transactions. *children is set to point to a * palloc'd array of TransactionIds. If there are no subxacts, *children is * set to NULL. 
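The header comment above spells out the xactGetCommittedChildren() contract: the return value is the number of committed child transactions, and *children is set to a palloc'd array of TransactionIds, or to NULL when there are none. The contract is easier to see in isolation than inside the patch, so here is a rough standalone toy of the same out-parameter pattern; everything in it (ToyXid, toy_get_committed_children, malloc standing in for palloc) is illustrative and not backend code.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int ToyXid;    /* stands in for TransactionId */

/* Return the child count; set *children to a malloc'd array, or to NULL. */
static int
toy_get_committed_children(ToyXid **children)
{
    static const ToyXid committed[] = {1001, 1002, 1005};
    int         n = (int) (sizeof(committed) / sizeof(committed[0]));
    ToyXid     *result;
    int         i;

    if (n == 0)
    {
        *children = NULL;
        return 0;
    }
    result = malloc(n * sizeof(ToyXid));
    for (i = 0; i < n; i++)
        result[i] = committed[i];
    *children = result;
    return n;
}

int
main(void)
{
    ToyXid     *children;
    int         nchildren = toy_get_committed_children(&children);
    int         i;

    /* children may be NULL, but then nchildren is 0 and the loop is a no-op */
    for (i = 0; i < nchildren; i++)
        printf("committed child xid: %u\n", children[i]);
    free(children);
    return 0;
}

Callers in the patch, such as RecordTransactionCommit() and RecordTransactionAbort(), follow the same shape: fetch the count and array once, then pass both on to the commit/abort XLOG record and the clog update.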
@@ -3401,10 +3425,10 @@ TransStateAsString(TransState state) int xactGetCommittedChildren(TransactionId **ptr) { - TransactionState s = CurrentTransactionState; - int nchildren; - TransactionId *children; - ListCell *p; + TransactionState s = CurrentTransactionState; + int nchildren; + TransactionId *children; + ListCell *p; nchildren = list_length(s->childXids); if (nchildren == 0) @@ -3438,12 +3462,12 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record) if (info == XLOG_XACT_COMMIT) { xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record); - int i; + int i; TransactionIdCommit(record->xl_xid); /* Mark committed subtransactions as committed */ TransactionIdCommitTree(xlrec->nsubxacts, - (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); + (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); /* Make sure files supposed to be dropped are dropped */ for (i = 0; i < xlrec->nrels; i++) { @@ -3454,12 +3478,12 @@ xact_redo(XLogRecPtr lsn, XLogRecord *record) else if (info == XLOG_XACT_ABORT) { xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record); - int i; + int i; TransactionIdAbort(record->xl_xid); /* mark subtransactions as aborted */ TransactionIdAbortTree(xlrec->nsubxacts, - (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); + (TransactionId *) &(xlrec->xnodes[xlrec->nrels])); /* Make sure files supposed to be dropped are dropped */ for (i = 0; i < xlrec->nrels; i++) { @@ -3486,7 +3510,7 @@ void xact_desc(char *buf, uint8 xl_info, char *rec) { uint8 info = xl_info & ~XLR_INFO_MASK; - int i; + int i; if (info == XLOG_XACT_COMMIT) { @@ -3502,6 +3526,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) for (i = 0; i < xlrec->nrels; i++) { RelFileNode rnode = xlrec->xnodes[i]; + sprintf(buf + strlen(buf), " %u/%u/%u", rnode.spcNode, rnode.dbNode, rnode.relNode); } @@ -3509,7 +3534,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) if (xlrec->nsubxacts > 0) { TransactionId *xacts = (TransactionId *) - &xlrec->xnodes[xlrec->nrels]; + &xlrec->xnodes[xlrec->nrels]; sprintf(buf + strlen(buf), "; subxacts:"); for (i = 0; i < xlrec->nsubxacts; i++) @@ -3530,6 +3555,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) for (i = 0; i < xlrec->nrels; i++) { RelFileNode rnode = xlrec->xnodes[i]; + sprintf(buf + strlen(buf), " %u/%u/%u", rnode.spcNode, rnode.dbNode, rnode.relNode); } @@ -3537,7 +3563,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) if (xlrec->nsubxacts > 0) { TransactionId *xacts = (TransactionId *) - &xlrec->xnodes[xlrec->nrels]; + &xlrec->xnodes[xlrec->nrels]; sprintf(buf + strlen(buf), "; subxacts:"); for (i = 0; i < xlrec->nsubxacts; i++) @@ -3549,7 +3575,7 @@ xact_desc(char *buf, uint8 xl_info, char *rec) } void -XactPushRollback(void (*func) (void *), void *data) + XactPushRollback(void (*func) (void *), void *data) { #ifdef XLOG_II if (_RollbackFunc != NULL) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 309f17a83fc..e65c109f665 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.165 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.166 2004/08/29 05:06:40 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -128,26 +128,28 @@ TimeLineID ThisTimeLineID = 0; /* Are we doing recovery from 
XLOG? */ bool InRecovery = false; + /* Are we recovering using offline XLOG archives? */ -static bool InArchiveRecovery = false; +static bool InArchiveRecovery = false; + /* Was the last xlog file restored from archive, or local? */ -static bool restoredFromArchive = false; +static bool restoredFromArchive = false; /* options taken from recovery.conf */ static char *recoveryRestoreCommand = NULL; static bool recoveryTarget = false; static bool recoveryTargetExact = false; static bool recoveryTargetInclusive = true; -static TransactionId recoveryTargetXid; -static time_t recoveryTargetTime; +static TransactionId recoveryTargetXid; +static time_t recoveryTargetTime; /* if recoveryStopsHere returns true, it saves actual stop xid/time here */ -static TransactionId recoveryStopXid; -static time_t recoveryStopTime; -static bool recoveryStopAfter; +static TransactionId recoveryStopXid; +static time_t recoveryStopTime; +static bool recoveryStopAfter; /* constraint set by read_backup_label */ -static XLogRecPtr recoveryMinXlogOffset = { 0, 0 }; +static XLogRecPtr recoveryMinXlogOffset = {0, 0}; /* * During normal operation, the only timeline we care about is ThisTimeLineID. @@ -161,7 +163,7 @@ static XLogRecPtr recoveryMinXlogOffset = { 0, 0 }; * * expectedTLIs: an integer list of recoveryTargetTLI and the TLIs of * its known parents, newest first (so recoveryTargetTLI is always the - * first list member). Only these TLIs are expected to be seen in the WAL + * first list member). Only these TLIs are expected to be seen in the WAL * segments we read, and indeed only these TLIs will be considered as * candidate WAL files to open at all. * @@ -171,9 +173,9 @@ static XLogRecPtr recoveryMinXlogOffset = { 0, 0 }; * file was created.) During a sequential scan we do not allow this value * to decrease. 
*/ -static TimeLineID recoveryTargetTLI; -static List *expectedTLIs; -static TimeLineID curFileTLI; +static TimeLineID recoveryTargetTLI; +static List *expectedTLIs; +static TimeLineID curFileTLI; /* * MyLastRecPtr points to the start of the last XLOG record inserted by the @@ -373,7 +375,7 @@ static ControlFileData *ControlFile = NULL; /* File path names */ -char XLogDir[MAXPGPATH]; +char XLogDir[MAXPGPATH]; static char ControlFilePath[MAXPGPATH]; /* @@ -422,7 +424,7 @@ static bool XLogArchiveIsDone(const char *xlog); static void XLogArchiveCleanup(const char *xlog); static void readRecoveryCommandFile(void); static void exitArchiveRecovery(TimeLineID endTLI, - uint32 endLogId, uint32 endLogSeg); + uint32 endLogId, uint32 endLogSeg); static bool recoveryStopsHere(XLogRecord *record, bool *includeThis); static bool AdvanceXLInsertBuffer(void); @@ -435,7 +437,7 @@ static bool InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath, static int XLogFileOpen(uint32 log, uint32 seg); static int XLogFileRead(uint32 log, uint32 seg, int emode); static bool RestoreArchivedFile(char *path, const char *xlogfname, - const char *recovername, off_t expectedSize); + const char *recovername, off_t expectedSize); static void PreallocXlogFiles(XLogRecPtr endptr); static void MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr); static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer); @@ -447,12 +449,13 @@ static List *readTimeLineHistory(TimeLineID targetTLI); static bool existsTimeLineHistory(TimeLineID probeTLI); static TimeLineID findNewestTimeLine(TimeLineID startTLI); static void writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, - TimeLineID endTLI, - uint32 endLogId, uint32 endLogSeg); + TimeLineID endTLI, + uint32 endLogId, uint32 endLogSeg); static void WriteControlFile(void); static void ReadControlFile(void); static char *str_time(time_t tnow); static void issue_xlog_fsync(void); + #ifdef WAL_DEBUG static void xlog_outrec(char *buf, XLogRecord *record); #endif @@ -514,7 +517,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata) if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID) { RecPtr.xlogid = 0; - RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt record */ + RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt + * record */ return (RecPtr); } @@ -724,7 +728,8 @@ begin:; /* * If there isn't enough space on the current XLOG page for a record - * header, advance to the next page (leaving the unused space as zeroes). + * header, advance to the next page (leaving the unused space as + * zeroes). */ updrqst = false; freespace = INSERT_FREESPACE(Insert); @@ -895,19 +900,21 @@ static void XLogArchiveNotify(const char *xlog) { char archiveStatusPath[MAXPGPATH]; - FILE *fd; + FILE *fd; /* insert an otherwise empty file called <XLOG>.ready */ StatusFilePath(archiveStatusPath, xlog, ".ready"); fd = AllocateFile(archiveStatusPath, "w"); - if (fd == NULL) { + if (fd == NULL) + { ereport(LOG, (errcode_for_file_access(), errmsg("could not create archive status file \"%s\": %m", archiveStatusPath))); return; } - if (FreeFile(fd)) { + if (FreeFile(fd)) + { ereport(LOG, (errcode_for_file_access(), errmsg("could not write archive status file \"%s\": %m", @@ -935,7 +942,7 @@ XLogArchiveNotifySeg(uint32 log, uint32 seg) /* * XLogArchiveIsDone * - * Checks for a ".done" archive notification file. This is called when we + * Checks for a ".done" archive notification file. This is called when we * are ready to delete or recycle an old XLOG segment file. 
If it is okay * to delete it then return true. * @@ -958,7 +965,7 @@ XLogArchiveIsDone(const char *xlog) /* check for .ready --- this means archiver is still busy with it */ StatusFilePath(archiveStatusPath, xlog, ".ready"); if (stat(archiveStatusPath, &stat_buf) == 0) - return false; + return false; /* Race condition --- maybe archiver just finished, so recheck */ StatusFilePath(archiveStatusPath, xlog, ".done"); @@ -978,7 +985,7 @@ XLogArchiveIsDone(const char *xlog) static void XLogArchiveCleanup(const char *xlog) { - char archiveStatusPath[MAXPGPATH]; + char archiveStatusPath[MAXPGPATH]; /* Remove the .done file */ StatusFilePath(archiveStatusPath, xlog, ".done"); @@ -1267,8 +1274,8 @@ XLogWrite(XLogwrtRqst WriteRqst) issue_xlog_fsync(); LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */ - if (XLogArchivingActive()) - XLogArchiveNotifySeg(openLogId, openLogSeg); + if (XLogArchivingActive()) + XLogArchiveNotifySeg(openLogId, openLogSeg); } if (ispartialpage) @@ -1552,7 +1559,7 @@ XLogFileInit(uint32 log, uint32 seg, ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } @@ -1591,8 +1598,8 @@ XLogFileInit(uint32 log, uint32 seg, if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return (fd); } @@ -1606,7 +1613,7 @@ XLogFileInit(uint32 log, uint32 seg, * a different timeline) * * Currently this is only used during recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ static void @@ -1660,7 +1667,7 @@ XLogFileCopy(uint32 log, uint32 seg, errmsg("could not read file \"%s\": %m", path))); else ereport(PANIC, - (errmsg("insufficient data in file \"%s\"", path))); + (errmsg("insufficient data in file \"%s\"", path))); } errno = 0; if ((int) write(fd, buffer, sizeof(buffer)) != (int) sizeof(buffer)) @@ -1677,7 +1684,7 @@ XLogFileCopy(uint32 log, uint32 seg, ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } @@ -1805,8 +1812,8 @@ XLogFileOpen(uint32 log, uint32 seg) if (fd < 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return fd; } @@ -1823,11 +1830,11 @@ XLogFileRead(uint32 log, uint32 seg, int emode) int fd; /* - * Loop looking for a suitable timeline ID: we might need to - * read any of the timelines listed in expectedTLIs. + * Loop looking for a suitable timeline ID: we might need to read any + * of the timelines listed in expectedTLIs. * - * We expect curFileTLI on entry to be the TLI of the preceding file - * in sequence, or 0 if there was no predecessor. We do not allow + * We expect curFileTLI on entry to be the TLI of the preceding file in + * sequence, or 0 if there was no predecessor. We do not allow * curFileTLI to go backwards; this prevents us from picking up the * wrong file when a parent timeline extends to higher segment numbers * than the child we want to read. 
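Stepping back from the hunks above: XLogArchiveNotify() drops an empty <segment>.ready status file, and XLogArchiveIsDone() refuses to treat a segment as recyclable until a matching <segment>.done exists, rechecking .done once to close the race where archiving finishes between the two checks. A rough standalone sketch of that test, assuming an external archiver is what turns .ready into .done, and using an illustrative pg_xlog/archive_status layout rather than the backend's StatusFilePath() machinery:

#include <stdio.h>
#include <sys/stat.h>

/* Build "pg_xlog/archive_status/<segment><suffix>" (illustrative layout). */
static void
status_file_path(char *dst, size_t len, const char *seg, const char *suffix)
{
    snprintf(dst, len, "pg_xlog/archive_status/%s%s", seg, suffix);
}

/* Return 1 if it looks safe to delete or recycle the segment. */
static int
segment_archive_is_done(const char *seg)
{
    char        path[1024];
    struct stat st;

    status_file_path(path, sizeof(path), seg, ".done");
    if (stat(path, &st) == 0)
        return 1;               /* already archived */

    status_file_path(path, sizeof(path), seg, ".ready");
    if (stat(path, &st) == 0)
        return 0;               /* archiver is still busy with it */

    /* maybe the archiver finished between the two checks, so recheck */
    status_file_path(path, sizeof(path), seg, ".done");
    return stat(path, &st) == 0;
}

int
main(void)
{
    const char *seg = "000000010000000000000003";   /* example segment name */

    printf("recycle %s: %s\n", seg,
           segment_archive_is_done(seg) ? "yes" : "not yet");
    return 0;
}

MoveOfflineLogs() further down uses exactly this test, when archiving is active, to decide between recycling an old segment and leaving it for the archiver.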
@@ -1868,8 +1875,8 @@ XLogFileRead(uint32 log, uint32 seg, int emode) errno = ENOENT; ereport(emode, (errcode_for_file_access(), - errmsg("could not open file \"%s\" (log file %u, segment %u): %m", - path, log, seg))); + errmsg("could not open file \"%s\" (log file %u, segment %u): %m", + path, log, seg))); return -1; } @@ -1891,36 +1898,37 @@ static bool RestoreArchivedFile(char *path, const char *xlogfname, const char *recovername, off_t expectedSize) { - char xlogpath[MAXPGPATH]; - char xlogRestoreCmd[MAXPGPATH]; - char *dp; - char *endp; + char xlogpath[MAXPGPATH]; + char xlogRestoreCmd[MAXPGPATH]; + char *dp; + char *endp; const char *sp; - int rc; + int rc; struct stat stat_buf; /* * When doing archive recovery, we always prefer an archived log file * even if a file of the same name exists in XLogDir. The reason is - * that the file in XLogDir could be an old, un-filled or partly-filled - * version that was copied and restored as part of backing up $PGDATA. + * that the file in XLogDir could be an old, un-filled or + * partly-filled version that was copied and restored as part of + * backing up $PGDATA. * - * We could try to optimize this slightly by checking the local - * copy lastchange timestamp against the archived copy, - * but we have no API to do this, nor can we guarantee that the - * lastchange timestamp was preserved correctly when we copied - * to archive. Our aim is robustness, so we elect not to do this. + * We could try to optimize this slightly by checking the local copy + * lastchange timestamp against the archived copy, but we have no API + * to do this, nor can we guarantee that the lastchange timestamp was + * preserved correctly when we copied to archive. Our aim is + * robustness, so we elect not to do this. * - * If we cannot obtain the log file from the archive, however, we - * will try to use the XLogDir file if it exists. This is so that - * we can make use of log segments that weren't yet transferred to - * the archive. + * If we cannot obtain the log file from the archive, however, we will + * try to use the XLogDir file if it exists. This is so that we can + * make use of log segments that weren't yet transferred to the + * archive. * * Notice that we don't actually overwrite any files when we copy back * from archive because the recoveryRestoreCommand may inadvertently - * restore inappropriate xlogs, or they may be corrupt, so we may - * wish to fallback to the segments remaining in current XLogDir later. - * The copy-from-archive filename is always the same, ensuring that we + * restore inappropriate xlogs, or they may be corrupt, so we may wish + * to fallback to the segments remaining in current XLogDir later. The + * copy-from-archive filename is always the same, ensuring that we * don't run out of disk space on long recoveries. 
*/ snprintf(xlogpath, MAXPGPATH, "%s/%s", XLogDir, recovername); @@ -1961,14 +1969,14 @@ RestoreArchivedFile(char *path, const char *xlogfname, case 'p': /* %p: full path of target file */ sp++; - StrNCpy(dp, xlogpath, endp-dp); + StrNCpy(dp, xlogpath, endp - dp); make_native_path(dp); dp += strlen(dp); break; case 'f': /* %f: filename of desired file */ sp++; - StrNCpy(dp, xlogfname, endp-dp); + StrNCpy(dp, xlogfname, endp - dp); dp += strlen(dp); break; case '%': @@ -1993,7 +2001,7 @@ RestoreArchivedFile(char *path, const char *xlogfname, *dp = '\0'; ereport(DEBUG3, - (errmsg_internal("executing restore command \"%s\"", + (errmsg_internal("executing restore command \"%s\"", xlogRestoreCmd))); /* @@ -2006,9 +2014,9 @@ RestoreArchivedFile(char *path, const char *xlogfname, * command apparently succeeded, but let's make sure the file is * really there now and has the correct size. * - * XXX I made wrong-size a fatal error to ensure the DBA would - * notice it, but is that too strong? We could try to plow ahead - * with a local copy of the file ... but the problem is that there + * XXX I made wrong-size a fatal error to ensure the DBA would notice + * it, but is that too strong? We could try to plow ahead with a + * local copy of the file ... but the problem is that there * probably isn't one, and we'd incorrectly conclude we've reached * the end of WAL and we're done recovering ... */ @@ -2041,23 +2049,23 @@ RestoreArchivedFile(char *path, const char *xlogfname, } /* - * remember, we rollforward UNTIL the restore fails - * so failure here is just part of the process... - * that makes it difficult to determine whether the restore - * failed because there isn't an archive to restore, or - * because the administrator has specified the restore + * remember, we rollforward UNTIL the restore fails so failure here is + * just part of the process... that makes it difficult to determine + * whether the restore failed because there isn't an archive to + * restore, or because the administrator has specified the restore * program incorrectly. We have to assume the former. */ ereport(DEBUG1, - (errmsg("could not restore \"%s\" from archive: return code %d", - xlogfname, rc))); + (errmsg("could not restore \"%s\" from archive: return code %d", + xlogfname, rc))); /* - * if an archived file is not available, there might still be a version - * of this file in XLogDir, so return that as the filename to open. + * if an archived file is not available, there might still be a + * version of this file in XLogDir, so return that as the filename to + * open. * - * In many recovery scenarios we expect this to fail also, but - * if so that just means we've reached the end of WAL. + * In many recovery scenarios we expect this to fail also, but if so that + * just means we've reached the end of WAL. */ snprintf(path, MAXPGPATH, "%s/%s", XLogDir, xlogfname); return false; @@ -2118,24 +2126,24 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr) { /* * We ignore the timeline part of the XLOG segment identifiers in - * deciding whether a segment is still needed. This ensures that + * deciding whether a segment is still needed. This ensures that * we won't prematurely remove a segment from a parent timeline. * We could probably be a little more proactive about removing * segments of non-parent timelines, but that would be a whole lot * more complicated. * - * We use the alphanumeric sorting property of the filenames to decide - * which ones are earlier than the lastoff segment. 
+ * We use the alphanumeric sorting property of the filenames to + * decide which ones are earlier than the lastoff segment. */ if (strlen(xlde->d_name) == 24 && strspn(xlde->d_name, "0123456789ABCDEF") == 24 && strcmp(xlde->d_name + 8, lastoff + 8) <= 0) { - bool recycle; + bool recycle; if (XLogArchivingActive()) recycle = XLogArchiveIsDone(xlde->d_name); - else + else recycle = true; if (recycle) @@ -2160,8 +2168,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr) { /* No need for any more future segments... */ ereport(LOG, - (errmsg("removing transaction log file \"%s\"", - xlde->d_name))); + (errmsg("removing transaction log file \"%s\"", + xlde->d_name))); unlink(path); } @@ -2171,8 +2179,11 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr) errno = 0; } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif @@ -2263,8 +2274,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode) if (!EQ_CRC64(record->xl_crc, crc)) { ereport(emode, - (errmsg("incorrect resource manager data checksum in record at %X/%X", - recptr.xlogid, recptr.xrecoff))); + (errmsg("incorrect resource manager data checksum in record at %X/%X", + recptr.xlogid, recptr.xrecoff))); return (false); } @@ -2286,8 +2297,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode) if (!EQ_CRC64(cbuf, crc)) { ereport(emode, - (errmsg("incorrect checksum of backup block %d in record at %X/%X", - i + 1, recptr.xlogid, recptr.xrecoff))); + (errmsg("incorrect checksum of backup block %d in record at %X/%X", + i + 1, recptr.xlogid, recptr.xrecoff))); return (false); } blk += sizeof(BkpBlock) + BLCKSZ; @@ -2361,12 +2372,13 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer) ereport(PANIC, (errmsg("invalid record offset at %X/%X", RecPtr->xlogid, RecPtr->xrecoff))); + /* * Since we are going to a random position in WAL, forget any - * prior state about what timeline we were in, and allow it - * to be any timeline in expectedTLIs. We also set a flag to - * allow curFileTLI to go backwards (but we can't reset that - * variable right here, since we might not change files at all). + * prior state about what timeline we were in, and allow it to be + * any timeline in expectedTLIs. We also set a flag to allow + * curFileTLI to go backwards (but we can't reset that variable + * right here, since we might not change files at all). */ lastPageTLI = 0; /* see comment in ValidXLOGHeader */ randAccess = true; /* allow curFileTLI to go backwards too */ @@ -2418,9 +2430,9 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer) if (targetRecOff == 0) { /* - * Can only get here in the continuing-from-prev-page case, because - * XRecOffIsValid eliminated the zero-page-offset case otherwise. - * Need to skip over the new page's header. + * Can only get here in the continuing-from-prev-page case, + * because XRecOffIsValid eliminated the zero-page-offset case + * otherwise. Need to skip over the new page's header. 
*/ tmpRecPtr.xrecoff += pageHeaderSize; targetRecOff = pageHeaderSize; @@ -2631,15 +2643,15 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) ControlFile->system_identifier); ereport(emode, (errmsg("WAL file is from different system"), - errdetail("WAL file SYSID is %s, pg_control SYSID is %s", - fhdrident_str, sysident_str))); + errdetail("WAL file SYSID is %s, pg_control SYSID is %s", + fhdrident_str, sysident_str))); return false; } if (longhdr->xlp_seg_size != XLogSegSize) { ereport(emode, (errmsg("WAL file is from different system"), - errdetail("Incorrect XLOG_SEG_SIZE in page header."))); + errdetail("Incorrect XLOG_SEG_SIZE in page header."))); return false; } } @@ -2671,9 +2683,9 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) * immediate parent's TLI, we should never see TLI go backwards across * successive pages of a consistent WAL sequence. * - * Of course this check should only be applied when advancing sequentially - * across pages; therefore ReadRecord resets lastPageTLI to zero when - * going to a random page. + * Of course this check should only be applied when advancing + * sequentially across pages; therefore ReadRecord resets lastPageTLI + * to zero when going to a random page. */ if (hdr->xlp_tli < lastPageTLI) { @@ -2691,7 +2703,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode) * Try to read a timeline's history file. * * If successful, return the list of component TLIs (the given TLI followed by - * its ancestor TLIs). If we can't find the history file, assume that the + * its ancestor TLIs). If we can't find the history file, assume that the * timeline has no parents, and return a list of just the specified timeline * ID. */ @@ -2702,7 +2714,7 @@ readTimeLineHistory(TimeLineID targetTLI) char path[MAXPGPATH]; char histfname[MAXFNAMELEN]; char fline[MAXPGPATH]; - FILE *fd; + FILE *fd; if (InArchiveRecovery) { @@ -2712,7 +2724,7 @@ readTimeLineHistory(TimeLineID targetTLI) else TLHistoryFilePath(path, targetTLI); - fd = AllocateFile(path, "r"); + fd = AllocateFile(path, "r"); if (fd == NULL) { if (errno != ENOENT) @@ -2725,15 +2737,15 @@ readTimeLineHistory(TimeLineID targetTLI) result = NIL; - /* - * Parse the file... - */ - while (fgets(fline, MAXPGPATH, fd) != NULL) + /* + * Parse the file... 
+ */ + while (fgets(fline, MAXPGPATH, fd) != NULL) { /* skip leading whitespace and check for # comment */ - char *ptr; - char *endptr; - TimeLineID tli; + char *ptr; + char *endptr; + TimeLineID tli; for (ptr = fline; *ptr; ptr++) { @@ -2754,7 +2766,7 @@ readTimeLineHistory(TimeLineID targetTLI) tli <= (TimeLineID) linitial_int(result)) ereport(FATAL, (errmsg("invalid data in history file: %s", fline), - errhint("Timeline IDs must be in increasing sequence."))); + errhint("Timeline IDs must be in increasing sequence."))); /* Build list with newest item first */ result = lcons_int((int) tli, result); @@ -2768,7 +2780,7 @@ readTimeLineHistory(TimeLineID targetTLI) targetTLI <= (TimeLineID) linitial_int(result)) ereport(FATAL, (errmsg("invalid data in history file \"%s\"", path), - errhint("Timeline IDs must be less than child timeline's ID."))); + errhint("Timeline IDs must be less than child timeline's ID."))); result = lcons_int((int) targetTLI, result); @@ -2787,7 +2799,7 @@ existsTimeLineHistory(TimeLineID probeTLI) { char path[MAXPGPATH]; char histfname[MAXFNAMELEN]; - FILE *fd; + FILE *fd; if (InArchiveRecovery) { @@ -2827,12 +2839,12 @@ findNewestTimeLine(TimeLineID startTLI) TimeLineID probeTLI; /* - * The algorithm is just to probe for the existence of timeline history - * files. XXX is it useful to allow gaps in the sequence? + * The algorithm is just to probe for the existence of timeline + * history files. XXX is it useful to allow gaps in the sequence? */ newestTLI = startTLI; - for (probeTLI = startTLI + 1; ; probeTLI++) + for (probeTLI = startTLI + 1;; probeTLI++) { if (existsTimeLineHistory(probeTLI)) { @@ -2856,7 +2868,7 @@ findNewestTimeLine(TimeLineID startTLI) * endTLI et al: ID of the last used WAL file, for annotation purposes * * Currently this is only used during recovery, and so there are no locking - * considerations. But we should be just as tense as XLogFileInit to avoid + * considerations. But we should be just as tense as XLogFileInit to avoid * emplacing a bogus file. */ static void @@ -2872,7 +2884,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, int fd; int nbytes; - Assert(newTLI > parentTLI); /* else bad selection of newTLI */ + Assert(newTLI > parentTLI); /* else bad selection of newTLI */ /* * Write into a temp file name. @@ -2932,12 +2944,16 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, * space */ unlink(tmppath); - /* if write didn't set errno, assume problem is no disk space */ + + /* + * if write didn't set errno, assume problem is no disk + * space + */ errno = save_errno ? save_errno : ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write to file \"%s\": %m", tmppath))); + errmsg("could not write to file \"%s\": %m", tmppath))); } } close(srcfd); @@ -2946,8 +2962,8 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, /* * Append one line with the details of this timeline split. * - * If we did have a parent file, insert an extra newline just in case - * the parent file failed to end with one. + * If we did have a parent file, insert an extra newline just in case the + * parent file failed to end with one. 
*/ XLogFileName(xlogfname, endTLI, endLogId, endLogSeg); @@ -2967,8 +2983,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, int save_errno = errno; /* - * If we fail to make the file, delete it to release disk - * space + * If we fail to make the file, delete it to release disk space */ unlink(tmppath); /* if write didn't set errno, assume problem is no disk space */ @@ -3215,7 +3230,7 @@ ReadControlFile(void) ereport(FATAL, (errmsg("database files are incompatible with server"), errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d," - " but the server was compiled with XLOG_SEG_SIZE %d.", + " but the server was compiled with XLOG_SEG_SIZE %d.", ControlFile->xlog_seg_size, XLOG_SEG_SIZE), errhint("It looks like you need to recompile or initdb."))); if (ControlFile->nameDataLen != NAMEDATALEN) @@ -3336,7 +3351,8 @@ XLOGShmemSize(void) void XLOGShmemInit(void) { - bool foundXLog, foundCFile; + bool foundXLog, + foundCFile; /* this must agree with space requested by XLOGShmemSize() */ if (XLOGbuffers < MinXLOGbuffers) @@ -3414,16 +3430,17 @@ BootStrapXLOG(void) crc64 crc; /* - * Select a hopefully-unique system identifier code for this installation. - * We use the result of gettimeofday(), including the fractional seconds - * field, as being about as unique as we can easily get. (Think not to - * use random(), since it hasn't been seeded and there's no portable way - * to seed it other than the system clock value...) The upper half of the - * uint64 value is just the tv_sec part, while the lower half is the XOR - * of tv_sec and tv_usec. This is to ensure that we don't lose uniqueness - * unnecessarily if "uint64" is really only 32 bits wide. A person - * knowing this encoding can determine the initialization time of the - * installation, which could perhaps be useful sometimes. + * Select a hopefully-unique system identifier code for this + * installation. We use the result of gettimeofday(), including the + * fractional seconds field, as being about as unique as we can easily + * get. (Think not to use random(), since it hasn't been seeded and + * there's no portable way to seed it other than the system clock + * value...) The upper half of the uint64 value is just the tv_sec + * part, while the lower half is the XOR of tv_sec and tv_usec. This + * is to ensure that we don't lose uniqueness unnecessarily if + * "uint64" is really only 32 bits wide. A person knowing this + * encoding can determine the initialization time of the installation, + * which could perhaps be useful sometimes. 
*/ gettimeofday(&tv, NULL); sysidentifier = ((uint64) tv.tv_sec) << 32; @@ -3492,18 +3509,18 @@ BootStrapXLOG(void) errno = ENOSPC; ereport(PANIC, (errcode_for_file_access(), - errmsg("could not write bootstrap transaction log file: %m"))); + errmsg("could not write bootstrap transaction log file: %m"))); } if (pg_fsync(openLogFile) != 0) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not fsync bootstrap transaction log file: %m"))); + errmsg("could not fsync bootstrap transaction log file: %m"))); if (close(openLogFile)) ereport(PANIC, (errcode_for_file_access(), - errmsg("could not close bootstrap transaction log file: %m"))); + errmsg("could not close bootstrap transaction log file: %m"))); openLogFile = -1; @@ -3550,37 +3567,37 @@ str_time(time_t tnow) static void readRecoveryCommandFile(void) { - char recoveryCommandFile[MAXPGPATH]; - FILE *fd; - char cmdline[MAXPGPATH]; - TimeLineID rtli = 0; - bool rtliGiven = false; - bool syntaxError = false; - - snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); - fd = AllocateFile(recoveryCommandFile, "r"); + char recoveryCommandFile[MAXPGPATH]; + FILE *fd; + char cmdline[MAXPGPATH]; + TimeLineID rtli = 0; + bool rtliGiven = false; + bool syntaxError = false; + + snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); + fd = AllocateFile(recoveryCommandFile, "r"); if (fd == NULL) { if (errno == ENOENT) return; /* not there, so no archive recovery */ ereport(FATAL, - (errcode_for_file_access(), + (errcode_for_file_access(), errmsg("could not open recovery command file \"%s\": %m", recoveryCommandFile))); } ereport(LOG, - (errmsg("starting archive recovery"))); + (errmsg("starting archive recovery"))); - /* - * Parse the file... - */ - while (fgets(cmdline, MAXPGPATH, fd) != NULL) + /* + * Parse the file... 
+ */ + while (fgets(cmdline, MAXPGPATH, fd) != NULL) { /* skip leading whitespace and check for # comment */ - char *ptr; - char *tok1; - char *tok2; + char *ptr; + char *tok1; + char *tok2; for (ptr = cmdline; *ptr; ptr++) { @@ -3591,13 +3608,13 @@ readRecoveryCommandFile(void) continue; /* identify the quoted parameter value */ - tok1 = strtok(ptr, "'"); + tok1 = strtok(ptr, "'"); if (!tok1) { syntaxError = true; break; } - tok2 = strtok(NULL, "'"); + tok2 = strtok(NULL, "'"); if (!tok2) { syntaxError = true; @@ -3611,13 +3628,15 @@ readRecoveryCommandFile(void) break; } - if (strcmp(tok1,"restore_command") == 0) { + if (strcmp(tok1, "restore_command") == 0) + { recoveryRestoreCommand = pstrdup(tok2); ereport(LOG, (errmsg("restore_command = \"%s\"", recoveryRestoreCommand))); } - else if (strcmp(tok1,"recovery_target_timeline") == 0) { + else if (strcmp(tok1, "recovery_target_timeline") == 0) + { rtliGiven = true; if (strcmp(tok2, "latest") == 0) rtli = 0; @@ -3637,7 +3656,8 @@ readRecoveryCommandFile(void) ereport(LOG, (errmsg("recovery_target_timeline = latest"))); } - else if (strcmp(tok1,"recovery_target_xid") == 0) { + else if (strcmp(tok1, "recovery_target_xid") == 0) + { errno = 0; recoveryTargetXid = (TransactionId) strtoul(tok2, NULL, 0); if (errno == EINVAL || errno == ERANGE) @@ -3650,7 +3670,8 @@ readRecoveryCommandFile(void) recoveryTarget = true; recoveryTargetExact = true; } - else if (strcmp(tok1,"recovery_target_time") == 0) { + else if (strcmp(tok1, "recovery_target_time") == 0) + { /* * if recovery_target_xid specified, then this overrides * recovery_target_time @@ -3659,20 +3680,22 @@ readRecoveryCommandFile(void) continue; recoveryTarget = true; recoveryTargetExact = false; + /* - * Convert the time string given by the user to the time_t format. - * We use type abstime's input converter because we know abstime - * has the same representation as time_t. + * Convert the time string given by the user to the time_t + * format. We use type abstime's input converter because we + * know abstime has the same representation as time_t. */ recoveryTargetTime = (time_t) DatumGetAbsoluteTime(DirectFunctionCall1(abstimein, - CStringGetDatum(tok2))); + CStringGetDatum(tok2))); ereport(LOG, (errmsg("recovery_target_time = %s", - DatumGetCString(DirectFunctionCall1(abstimeout, - AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime)))))); + DatumGetCString(DirectFunctionCall1(abstimeout, + AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime)))))); } - else if (strcmp(tok1,"recovery_target_inclusive") == 0) { + else if (strcmp(tok1, "recovery_target_inclusive") == 0) + { /* * does nothing if a recovery_target is not also set */ @@ -3694,11 +3717,11 @@ readRecoveryCommandFile(void) FreeFile(fd); - if (syntaxError) - ereport(FATAL, + if (syntaxError) + ereport(FATAL, (errmsg("syntax error in recovery command file: %s", cmdline), - errhint("Lines should have the format parameter = 'value'."))); + errhint("Lines should have the format parameter = 'value'."))); /* Check that required parameters were supplied */ if (recoveryRestoreCommand == NULL) @@ -3710,10 +3733,10 @@ readRecoveryCommandFile(void) InArchiveRecovery = true; /* - * If user specified recovery_target_timeline, validate it or compute the - * "latest" value. We can't do this until after we've gotten the restore - * command and set InArchiveRecovery, because we need to fetch timeline - * history files from the archive. + * If user specified recovery_target_timeline, validate it or compute + * the "latest" value. 
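As a side note to the parsing loop above: each non-comment line of recovery.conf is expected to look like parameter = 'value', and the two strtok calls peel the value out of the single quotes before the strcmp tests pick out the recognized parameter names. Below is a standalone sketch of that shape with a made-up line; the real code additionally isolates the bare parameter name before those strcmp calls and reports syntax errors through ereport.

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* a recovery.conf-style line; the value is just an example */
	char		cmdline[] = "restore_command = 'cp /mnt/server/archive/%f %p'";
	char	   *tok1;
	char	   *tok2;

	tok1 = strtok(cmdline, "'");	/* text before the first quote */
	tok2 = strtok(NULL, "'");		/* the quoted parameter value */

	if (tok1 == NULL || tok2 == NULL)
	{
		fprintf(stderr, "syntax error: lines should have the format parameter = 'value'\n");
		return 1;
	}

	/* cut tok1 back to the bare parameter name */
	tok1[strcspn(tok1, " \t=")] = '\0';

	printf("parameter: %s\n", tok1);
	printf("value:     '%s'\n", tok2);
	return 0;
}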
We can't do this until after we've gotten the + * restore command and set InArchiveRecovery, because we need to fetch + * timeline history files from the archive. */ if (rtliGiven) { @@ -3722,8 +3745,8 @@ readRecoveryCommandFile(void) /* Timeline 1 does not have a history file, all else should */ if (rtli != 1 && !existsTimeLineHistory(rtli)) ereport(FATAL, - (errmsg("recovery_target_timeline %u does not exist", - rtli))); + (errmsg("recovery_target_timeline %u does not exist", + rtli))); recoveryTargetTLI = rtli; } else @@ -3740,10 +3763,10 @@ readRecoveryCommandFile(void) static void exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) { - char recoveryPath[MAXPGPATH]; - char xlogpath[MAXPGPATH]; - char recoveryCommandFile[MAXPGPATH]; - char recoveryCommandDone[MAXPGPATH]; + char recoveryPath[MAXPGPATH]; + char xlogpath[MAXPGPATH]; + char recoveryCommandFile[MAXPGPATH]; + char recoveryCommandDone[MAXPGPATH]; /* * We are no longer in archive recovery state. @@ -3751,9 +3774,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) InArchiveRecovery = false; /* - * We should have the ending log segment currently open. Verify, - * and then close it (to avoid problems on Windows with trying to - * rename or delete an open file). + * We should have the ending log segment currently open. Verify, and + * then close it (to avoid problems on Windows with trying to rename + * or delete an open file). */ Assert(readFile >= 0); Assert(readId == endLogId); @@ -3763,17 +3786,17 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) readFile = -1; /* - * If the segment was fetched from archival storage, we want to replace - * the existing xlog segment (if any) with the archival version. This - * is because whatever is in XLogDir is very possibly older than what - * we have from the archives, since it could have come from restoring - * a PGDATA backup. In any case, the archival version certainly is - * more descriptive of what our current database state is, because that - * is what we replayed from. + * If the segment was fetched from archival storage, we want to + * replace the existing xlog segment (if any) with the archival + * version. This is because whatever is in XLogDir is very possibly + * older than what we have from the archives, since it could have come + * from restoring a PGDATA backup. In any case, the archival version + * certainly is more descriptive of what our current database state + * is, because that is what we replayed from. * * Note that if we are establishing a new timeline, ThisTimeLineID is - * already set to the new value, and so we will create a new file instead - * of overwriting any existing file. + * already set to the new value, and so we will create a new file + * instead of overwriting any existing file. */ snprintf(recoveryPath, MAXPGPATH, "%s/RECOVERYXLOG", XLogDir); XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg); @@ -3798,6 +3821,7 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) * RECOVERYXLOG laying about, get rid of it. */ unlink(recoveryPath); /* ignore any error */ + /* * If we are establishing a new timeline, we have to copy data * from the last WAL segment of the old timeline to create a @@ -3809,22 +3833,22 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg) } /* - * Let's just make real sure there are not .ready or .done flags posted - * for the new segment. 
+ * Let's just make real sure there are not .ready or .done flags + * posted for the new segment. */ XLogFileName(xlogpath, ThisTimeLineID, endLogId, endLogSeg); XLogArchiveCleanup(xlogpath); /* Get rid of any remaining recovered timeline-history file, too */ snprintf(recoveryPath, MAXPGPATH, "%s/RECOVERYHISTORY", XLogDir); - unlink(recoveryPath); /* ignore any error */ + unlink(recoveryPath); /* ignore any error */ /* - * Rename the config file out of the way, so that we don't accidentally - * re-enter archive recovery mode in a subsequent crash. + * Rename the config file out of the way, so that we don't + * accidentally re-enter archive recovery mode in a subsequent crash. */ - snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); - snprintf(recoveryCommandDone, MAXPGPATH, "%s/recovery.done", DataDir); + snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir); + snprintf(recoveryCommandDone, MAXPGPATH, "%s/recovery.done", DataDir); unlink(recoveryCommandDone); if (rename(recoveryCommandFile, recoveryCommandDone) != 0) ereport(FATAL, @@ -3849,8 +3873,8 @@ static bool recoveryStopsHere(XLogRecord *record, bool *includeThis) { bool stopsHere; - uint8 record_info; - time_t recordXtime; + uint8 record_info; + time_t recordXtime; /* Do we have a PITR target at all? */ if (!recoveryTarget) @@ -3862,14 +3886,14 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) record_info = record->xl_info & ~XLR_INFO_MASK; if (record_info == XLOG_XACT_COMMIT) { - xl_xact_commit *recordXactCommitData; + xl_xact_commit *recordXactCommitData; recordXactCommitData = (xl_xact_commit *) XLogRecGetData(record); recordXtime = recordXactCommitData->xtime; } else if (record_info == XLOG_XACT_ABORT) { - xl_xact_abort *recordXactAbortData; + xl_xact_abort *recordXactAbortData; recordXactAbortData = (xl_xact_abort *) XLogRecGetData(record); recordXtime = recordXactAbortData->xtime; @@ -3880,14 +3904,13 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryTargetExact) { /* - * there can be only one transaction end record - * with this exact transactionid + * there can be only one transaction end record with this exact + * transactionid * - * when testing for an xid, we MUST test for - * equality only, since transactions are numbered - * in the order they start, not the order they - * complete. A higher numbered xid will complete - * before you about 50% of the time... + * when testing for an xid, we MUST test for equality only, since + * transactions are numbered in the order they start, not the + * order they complete. A higher numbered xid will complete before + * you about 50% of the time... 
*/ stopsHere = (record->xl_xid == recoveryTargetXid); if (stopsHere) @@ -3896,11 +3919,9 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) else { /* - * there can be many transactions that - * share the same commit time, so - * we stop after the last one, if we are - * inclusive, or stop at the first one - * if we are exclusive + * there can be many transactions that share the same commit time, + * so we stop after the last one, if we are inclusive, or stop at + * the first one if we are exclusive */ if (recoveryTargetInclusive) stopsHere = (recordXtime > recoveryTargetTime); @@ -3921,22 +3942,22 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis) if (recoveryStopAfter) ereport(LOG, (errmsg("recovery stopping after commit of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); else ereport(LOG, (errmsg("recovery stopping before commit of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); } else { if (recoveryStopAfter) ereport(LOG, (errmsg("recovery stopping after abort of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); else ereport(LOG, (errmsg("recovery stopping before abort of transaction %u, time %s", - recoveryStopXid, str_time(recoveryStopTime)))); + recoveryStopXid, str_time(recoveryStopTime)))); } } @@ -4009,14 +4030,14 @@ StartupXLOG(void) #endif /* - * Initialize on the assumption we want to recover to the same timeline - * that's active according to pg_control. + * Initialize on the assumption we want to recover to the same + * timeline that's active according to pg_control. */ recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID; /* - * Check for recovery control file, and if so set up state for - * offline recovery + * Check for recovery control file, and if so set up state for offline + * recovery */ readRecoveryCommandFile(); @@ -4029,7 +4050,7 @@ StartupXLOG(void) * timeline. */ if (!list_member_int(expectedTLIs, - (int) ControlFile->checkPointCopy.ThisTimeLineID)) + (int) ControlFile->checkPointCopy.ThisTimeLineID)) ereport(FATAL, (errmsg("requested timeline %u is not a child of database system timeline %u", recoveryTargetTLI, @@ -4038,29 +4059,30 @@ StartupXLOG(void) if (read_backup_label(&checkPointLoc)) { /* - * When a backup_label file is present, we want to roll forward from - * the checkpoint it identifies, rather than using pg_control. + * When a backup_label file is present, we want to roll forward + * from the checkpoint it identifies, rather than using + * pg_control. */ record = ReadCheckpointRecord(checkPointLoc, 0, buffer); if (record != NULL) { ereport(LOG, (errmsg("checkpoint record is at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); + checkPointLoc.xlogid, checkPointLoc.xrecoff))); InRecovery = true; /* force recovery even if SHUTDOWNED */ } else { ereport(PANIC, - (errmsg("could not locate required checkpoint record"), - errhint("If you are not restoring from a backup, try removing $PGDATA/backup_label."))); + (errmsg("could not locate required checkpoint record"), + errhint("If you are not restoring from a backup, try removing $PGDATA/backup_label."))); } } else { /* - * Get the last valid checkpoint record. If the latest one according - * to pg_control is broken, try the next-to-last one. + * Get the last valid checkpoint record. 
If the latest one + * according to pg_control is broken, try the next-to-last one. */ checkPointLoc = ControlFile->checkPoint; record = ReadCheckpointRecord(checkPointLoc, 1, buffer); @@ -4068,7 +4090,7 @@ StartupXLOG(void) { ereport(LOG, (errmsg("checkpoint record is at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); + checkPointLoc.xlogid, checkPointLoc.xrecoff))); } else { @@ -4077,13 +4099,14 @@ StartupXLOG(void) if (record != NULL) { ereport(LOG, - (errmsg("using previous checkpoint record at %X/%X", - checkPointLoc.xlogid, checkPointLoc.xrecoff))); - InRecovery = true; /* force recovery even if SHUTDOWNED */ + (errmsg("using previous checkpoint record at %X/%X", + checkPointLoc.xlogid, checkPointLoc.xrecoff))); + InRecovery = true; /* force recovery even if + * SHUTDOWNED */ } else ereport(PANIC, - (errmsg("could not locate a valid checkpoint record"))); + (errmsg("could not locate a valid checkpoint record"))); } } @@ -4108,9 +4131,9 @@ StartupXLOG(void) ShmemVariableCache->oidCount = 0; /* - * We must replay WAL entries using the same TimeLineID they were created - * under, so temporarily adopt the TLI indicated by the checkpoint (see - * also xlog_redo()). + * We must replay WAL entries using the same TimeLineID they were + * created under, so temporarily adopt the TLI indicated by the + * checkpoint (see also xlog_redo()). */ ThisTimeLineID = checkPoint.ThisTimeLineID; @@ -4123,8 +4146,8 @@ StartupXLOG(void) checkPoint.undo = RecPtr; /* - * Check whether we need to force recovery from WAL. If it appears - * to have been a clean shutdown and we did not have a recovery.conf + * Check whether we need to force recovery from WAL. If it appears to + * have been a clean shutdown and we did not have a recovery.conf * file, then assume no recovery needed. */ if (XLByteLT(checkPoint.undo, RecPtr) || @@ -4219,7 +4242,7 @@ StartupXLOG(void) */ if (recoveryStopsHere(record, &recoveryApply)) { - needNewTimeLine = true; /* see below */ + needNewTimeLine = true; /* see below */ recoveryContinue = false; if (!recoveryApply) break; @@ -4242,6 +4265,7 @@ StartupXLOG(void) record = ReadRecord(NULL, LOG, buffer); } while (record != NULL && recoveryContinue); + /* * end of main redo apply loop */ @@ -4276,7 +4300,8 @@ StartupXLOG(void) if (needNewTimeLine) /* stopped because of stop request */ ereport(FATAL, (errmsg("requested recovery stop point is before end time of backup dump"))); - else /* ran off end of WAL */ + else +/* ran off end of WAL */ ereport(FATAL, (errmsg("WAL ends before end time of backup dump"))); } @@ -4284,10 +4309,10 @@ StartupXLOG(void) /* * Consider whether we need to assign a new timeline ID. * - * If we stopped short of the end of WAL during recovery, then we - * are generating a new timeline and must assign it a unique new ID. - * Otherwise, we can just extend the timeline we were in when we - * ran out of WAL. + * If we stopped short of the end of WAL during recovery, then we are + * generating a new timeline and must assign it a unique new ID. + * Otherwise, we can just extend the timeline we were in when we ran + * out of WAL. */ if (needNewTimeLine) { @@ -4302,8 +4327,8 @@ StartupXLOG(void) XLogCtl->ThisTimeLineID = ThisTimeLineID; /* - * We are now done reading the old WAL. Turn off archive fetching - * if it was active, and make a writable copy of the last WAL segment. + * We are now done reading the old WAL. Turn off archive fetching if + * it was active, and make a writable copy of the last WAL segment. 
* (Note that we also have a copy of the last block of the old WAL in * readBuf; we will use that below.) */ @@ -4361,7 +4386,7 @@ StartupXLOG(void) * XLogWrite()). * * Note: it might seem we should do AdvanceXLInsertBuffer() here, but - * this is sufficient. The first actual attempt to insert a log + * this is sufficient. The first actual attempt to insert a log * record will advance the insert state. */ XLogCtl->Write.curridx = NextBufIdx(0); @@ -4434,8 +4459,8 @@ StartupXLOG(void) XLogCloseRelationCache(); /* - * Now that we've checkpointed the recovery, it's safe to - * flush old backup_label, if present. + * Now that we've checkpointed the recovery, it's safe to flush + * old backup_label, if present. */ remove_backup_label(); } @@ -4504,7 +4529,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, break; default: ereport(LOG, - (errmsg("invalid checkpoint link in backup_label file"))); + (errmsg("invalid checkpoint link in backup_label file"))); break; } return NULL; @@ -4557,7 +4582,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, { case 1: ereport(LOG, - (errmsg("invalid xl_info in primary checkpoint record"))); + (errmsg("invalid xl_info in primary checkpoint record"))); break; case 2: ereport(LOG, @@ -4576,7 +4601,7 @@ ReadCheckpointRecord(XLogRecPtr RecPtr, { case 1: ereport(LOG, - (errmsg("invalid length of primary checkpoint record"))); + (errmsg("invalid length of primary checkpoint record"))); break; case 2: ereport(LOG, @@ -4791,8 +4816,8 @@ CreateCheckPoint(bool shutdown, bool force) * so there's a risk of deadlock. Need to find a better solution. See * pgsql-hackers discussion of 17-Dec-01. * - * XXX actually, the whole UNDO code is dead code and unlikely to ever - * be revived, so the lack of a good solution here is not troubling. + * XXX actually, the whole UNDO code is dead code and unlikely to ever be + * revived, so the lack of a good solution here is not troubling. */ #ifdef NOT_USED checkPoint.undo = GetUndoRecPtr(); @@ -4919,11 +4944,11 @@ CreateCheckPoint(bool shutdown, bool force) PreallocXlogFiles(recptr); /* - * Truncate pg_subtrans if possible. We can throw away all data before - * the oldest XMIN of any running transaction. No future transaction will - * attempt to reference any pg_subtrans entry older than that (see Asserts - * in subtrans.c). During recovery, though, we mustn't do this because - * StartupSUBTRANS hasn't been called yet. + * Truncate pg_subtrans if possible. We can throw away all data + * before the oldest XMIN of any running transaction. No future + * transaction will attempt to reference any pg_subtrans entry older + * than that (see Asserts in subtrans.c). During recovery, though, we + * mustn't do this because StartupSUBTRANS hasn't been called yet. 
*/ if (!InRecovery) TruncateSUBTRANS(GetOldestXmin(true)); @@ -4974,8 +4999,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) ShmemVariableCache->nextXid = checkPoint.nextXid; ShmemVariableCache->nextOid = checkPoint.nextOid; ShmemVariableCache->oidCount = 0; + /* - * TLI may change in a shutdown checkpoint, but it shouldn't decrease + * TLI may change in a shutdown checkpoint, but it shouldn't + * decrease */ if (checkPoint.ThisTimeLineID != ThisTimeLineID) { @@ -4984,7 +5011,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record) (int) checkPoint.ThisTimeLineID)) ereport(PANIC, (errmsg("unexpected timeline ID %u (after %u) in checkpoint record", - checkPoint.ThisTimeLineID, ThisTimeLineID))); + checkPoint.ThisTimeLineID, ThisTimeLineID))); /* Following WAL records should be run with new TLI */ ThisTimeLineID = checkPoint.ThisTimeLineID; } @@ -5071,8 +5098,7 @@ xlog_outrec(char *buf, XLogRecord *record) sprintf(buf + strlen(buf), ": %s", RmgrTable[record->xl_rmid].rm_name); } - -#endif /* WAL_DEBUG */ +#endif /* WAL_DEBUG */ /* @@ -5200,7 +5226,7 @@ pg_start_backup(PG_FUNCTION_ARGS) char *backupidstr; XLogRecPtr checkpointloc; XLogRecPtr startpoint; - time_t stamp_time; + time_t stamp_time; char strfbuf[128]; char labelfilepath[MAXPGPATH]; char xlogfilename[MAXFNAMELEN]; @@ -5209,24 +5235,26 @@ pg_start_backup(PG_FUNCTION_ARGS) struct stat stat_buf; FILE *fp; - if (!superuser()) + if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to run a backup")))); backupidstr = DatumGetCString(DirectFunctionCall1(textout, - PointerGetDatum(backupid))); + PointerGetDatum(backupid))); + /* - * Force a CHECKPOINT. This is not strictly necessary, but it seems - * like a good idea to minimize the amount of past WAL needed to use the - * backup. Also, this guarantees that two successive backup runs - * will have different checkpoint positions and hence different history - * file names, even if nothing happened in between. + * Force a CHECKPOINT. This is not strictly necessary, but it seems + * like a good idea to minimize the amount of past WAL needed to use + * the backup. Also, this guarantees that two successive backup runs + * will have different checkpoint positions and hence different + * history file names, even if nothing happened in between. */ RequestCheckpoint(true); + /* * Now we need to fetch the checkpoint record location, and also its - * REDO pointer. The oldest point in WAL that would be needed to restore - * starting from the checkpoint is precisely the REDO pointer. + * REDO pointer. The oldest point in WAL that would be needed to + * restore starting from the checkpoint is precisely the REDO pointer. */ LWLockAcquire(ControlFileLock, LW_EXCLUSIVE); checkpointloc = ControlFile->checkPoint; @@ -5235,18 +5263,21 @@ pg_start_backup(PG_FUNCTION_ARGS) XLByteToSeg(startpoint, _logId, _logSeg); XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg); + /* - * We deliberately use strftime/localtime not the src/timezone functions, - * so that backup labels will consistently be recorded in the same - * timezone regardless of TimeZone setting. This matches elog.c's - * practice. + * We deliberately use strftime/localtime not the src/timezone + * functions, so that backup labels will consistently be recorded in + * the same timezone regardless of TimeZone setting. This matches + * elog.c's practice. 
*/ stamp_time = time(NULL); strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&stamp_time)); + /* - * Check for existing backup label --- implies a backup is already running + * Check for existing backup label --- implies a backup is already + * running */ snprintf(labelfilepath, MAXPGPATH, "%s/backup_label", DataDir); if (stat(labelfilepath, &stat_buf) != 0) @@ -5263,6 +5294,7 @@ pg_start_backup(PG_FUNCTION_ARGS) errmsg("a backup is already in progress"), errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.", labelfilepath))); + /* * Okay, write the file */ @@ -5283,13 +5315,14 @@ pg_start_backup(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", labelfilepath))); + /* * We're done. As a convenience, return the starting WAL offset. */ snprintf(xlogfilename, sizeof(xlogfilename), "%X/%X", startpoint.xlogid, startpoint.xrecoff); result = DatumGetTextP(DirectFunctionCall1(textin, - CStringGetDatum(xlogfilename))); + CStringGetDatum(xlogfilename))); PG_RETURN_TEXT_P(result); } @@ -5308,7 +5341,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) XLogCtlInsert *Insert = &XLogCtl->Insert; XLogRecPtr startpoint; XLogRecPtr stoppoint; - time_t stamp_time; + time_t stamp_time; char strfbuf[128]; char labelfilepath[MAXPGPATH]; char histfilepath[MAXPGPATH]; @@ -5321,10 +5354,11 @@ pg_stop_backup(PG_FUNCTION_ARGS) char ch; int ich; - if (!superuser()) + if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), (errmsg("must be superuser to run a backup")))); + /* * Get the current end-of-WAL position; it will be unsafe to use this * dump to restore to a point in advance of this time. @@ -5335,16 +5369,18 @@ pg_stop_backup(PG_FUNCTION_ARGS) XLByteToSeg(stoppoint, _logId, _logSeg); XLogFileName(stopxlogfilename, ThisTimeLineID, _logId, _logSeg); + /* - * We deliberately use strftime/localtime not the src/timezone functions, - * so that backup labels will consistently be recorded in the same - * timezone regardless of TimeZone setting. This matches elog.c's - * practice. + * We deliberately use strftime/localtime not the src/timezone + * functions, so that backup labels will consistently be recorded in + * the same timezone regardless of TimeZone setting. This matches + * elog.c's practice. */ stamp_time = time(NULL); strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", localtime(&stamp_time)); + /* * Open the existing label file */ @@ -5361,9 +5397,11 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("a backup is not in progress"))); } + /* * Read and parse the START WAL LOCATION line (this code is pretty - * crude, but we are not expecting any variability in the file format). + * crude, but we are not expecting any variability in the file + * format). 
*/ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %24s)%c", &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, @@ -5371,6 +5409,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("invalid data in file \"%s\"", labelfilepath))); + /* * Write the backup history file */ @@ -5396,6 +5435,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not write file \"%s\": %m", histfilepath))); + /* * Close and remove the backup label file */ @@ -5409,6 +5449,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) (errcode_for_file_access(), errmsg("could not remove file \"%s\": %m", labelfilepath))); + /* * Notify archiver that history file may be archived immediately */ @@ -5418,13 +5459,14 @@ pg_stop_backup(PG_FUNCTION_ARGS) startpoint.xrecoff % XLogSegSize); XLogArchiveNotify(histfilepath); } + /* * We're done. As a convenience, return the ending WAL offset. */ snprintf(stopxlogfilename, sizeof(stopxlogfilename), "%X/%X", stoppoint.xlogid, stoppoint.xrecoff); result = DatumGetTextP(DirectFunctionCall1(textin, - CStringGetDatum(stopxlogfilename))); + CStringGetDatum(stopxlogfilename))); PG_RETURN_TEXT_P(result); } @@ -5433,7 +5475,7 @@ pg_stop_backup(PG_FUNCTION_ARGS) * * If we see a backup_label during recovery, we assume that we are recovering * from a backup dump file, and we therefore roll forward from the checkpoint - * identified by the label file, NOT what pg_control says. This avoids the + * identified by the label file, NOT what pg_control says. This avoids the * problem that pg_control might have been archived one or more checkpoints * later than the start of the dump, and so if we rely on it as the start * point, we will fail to restore a consistent database state. @@ -5476,10 +5518,11 @@ read_backup_label(XLogRecPtr *checkPointLoc) labelfilepath))); return false; /* it's not there, all is fine */ } + /* - * Read and parse the START WAL LOCATION and CHECKPOINT lines (this code - * is pretty crude, but we are not expecting any variability in the file - * format). + * Read and parse the START WAL LOCATION and CHECKPOINT lines (this + * code is pretty crude, but we are not expecting any variability in + * the file format). */ if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c", &startpoint.xlogid, &startpoint.xrecoff, &tli, @@ -5498,6 +5541,7 @@ read_backup_label(XLogRecPtr *checkPointLoc) (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", labelfilepath))); + /* * Try to retrieve the backup history file (no error if we can't) */ @@ -5511,24 +5555,24 @@ read_backup_label(XLogRecPtr *checkPointLoc) BackupHistoryFilePath(histfilepath, tli, _logId, _logSeg, startpoint.xrecoff % XLogSegSize); - fp = AllocateFile(histfilepath, "r"); + fp = AllocateFile(histfilepath, "r"); if (fp) { /* * Parse history file to identify stop point. 
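The label and history files parsed here use a fixed one-line-per-field layout, for example START WAL LOCATION: %X/%X (file <24-character segment name>), which is why the fscanf calls above and below can be this rigid. A standalone sketch of that parse, with an invented location and segment name:

#include <stdio.h>

int
main(void)
{
	/* a line in the style written by pg_start_backup (values invented) */
	const char *line = "START WAL LOCATION: 0/A000020 (file 00000001000000000000000A)";
	unsigned int xlogid;
	unsigned int xrecoff;
	char		fname[25];

	if (sscanf(line, "START WAL LOCATION: %X/%X (file %24s)",
			   &xlogid, &xrecoff, fname) != 3)
	{
		fprintf(stderr, "invalid data in label line\n");
		return 1;
	}

	printf("xlogid = %X, xrecoff = %X, file = %s\n", xlogid, xrecoff, fname);
	return 0;
}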
*/ if (fscanf(fp, "START WAL LOCATION: %X/%X (file %24s)%c", - &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, + &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename, &ch) != 4 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", histfilename))); + errmsg("invalid data in file \"%s\"", histfilename))); if (fscanf(fp, "STOP WAL LOCATION: %X/%X (file %24s)%c", - &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename, + &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename, &ch) != 4 || ch != '\n') ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("invalid data in file \"%s\"", histfilename))); + errmsg("invalid data in file \"%s\"", histfilename))); recoveryMinXlogOffset = stoppoint; if (ferror(fp) || FreeFile(fp)) ereport(FATAL, diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index 1791068d7a5..4f1ac8dde58 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -11,7 +11,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.33 2004/08/29 04:12:23 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.34 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -212,11 +212,11 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode) res->reldata.rd_node = rnode; /* - * We set up the lockRelId in case anything tries to lock the dummy - * relation. Note that this is fairly bogus since relNode may be - * different from the relation's OID. It shouldn't really matter - * though, since we are presumably running by ourselves and can't - * have any lock conflicts ... + * We set up the lockRelId in case anything tries to lock the + * dummy relation. Note that this is fairly bogus since relNode + * may be different from the relation's OID. It shouldn't really + * matter though, since we are presumably running by ourselves and + * can't have any lock conflicts ... */ res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode; res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode; @@ -234,14 +234,15 @@ XLogOpenRelation(bool redo, RmgrId rmid, RelFileNode rnode) res->reldata.rd_targblock = InvalidBlockNumber; res->reldata.rd_smgr = smgropen(res->reldata.rd_node); + /* * Create the target file if it doesn't already exist. This lets * us cope if the replay sequence contains writes to a relation * that is later deleted. (The original coding of this routine * would instead return NULL, causing the writes to be suppressed. - * But that seems like it risks losing valuable data if the filesystem - * loses an inode during a crash. Better to write the data until we - * are actually told to delete the file.) + * But that seems like it risks losing valuable data if the + * filesystem loses an inode during a crash. Better to write the + * data until we are actually told to delete the file.) 
*/ smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true); } diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c index 9e401e7764d..0b63df18012 100644 --- a/src/backend/bootstrap/bootstrap.c +++ b/src/backend/bootstrap/bootstrap.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.192 2004/08/29 04:12:25 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.193 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -111,46 +111,46 @@ struct typinfo static const struct typinfo TypInfo[] = { {"bool", BOOLOID, 0, 1, true, 'c', 'p', - F_BOOLIN, F_BOOLOUT}, + F_BOOLIN, F_BOOLOUT}, {"bytea", BYTEAOID, 0, -1, false, 'i', 'x', - F_BYTEAIN, F_BYTEAOUT}, + F_BYTEAIN, F_BYTEAOUT}, {"char", CHAROID, 0, 1, true, 'c', 'p', - F_CHARIN, F_CHAROUT}, + F_CHARIN, F_CHAROUT}, {"name", NAMEOID, CHAROID, NAMEDATALEN, false, 'i', 'p', - F_NAMEIN, F_NAMEOUT}, + F_NAMEIN, F_NAMEOUT}, {"int2", INT2OID, 0, 2, true, 's', 'p', - F_INT2IN, F_INT2OUT}, + F_INT2IN, F_INT2OUT}, {"int4", INT4OID, 0, 4, true, 'i', 'p', - F_INT4IN, F_INT4OUT}, + F_INT4IN, F_INT4OUT}, {"regproc", REGPROCOID, 0, 4, true, 'i', 'p', - F_REGPROCIN, F_REGPROCOUT}, + F_REGPROCIN, F_REGPROCOUT}, {"regclass", REGCLASSOID, 0, 4, true, 'i', 'p', - F_REGCLASSIN, F_REGCLASSOUT}, + F_REGCLASSIN, F_REGCLASSOUT}, {"regtype", REGTYPEOID, 0, 4, true, 'i', 'p', - F_REGTYPEIN, F_REGTYPEOUT}, + F_REGTYPEIN, F_REGTYPEOUT}, {"text", TEXTOID, 0, -1, false, 'i', 'x', - F_TEXTIN, F_TEXTOUT}, + F_TEXTIN, F_TEXTOUT}, {"oid", OIDOID, 0, 4, true, 'i', 'p', - F_OIDIN, F_OIDOUT}, + F_OIDIN, F_OIDOUT}, {"tid", TIDOID, 0, 6, false, 's', 'p', - F_TIDIN, F_TIDOUT}, + F_TIDIN, F_TIDOUT}, {"xid", XIDOID, 0, 4, true, 'i', 'p', - F_XIDIN, F_XIDOUT}, + F_XIDIN, F_XIDOUT}, {"cid", CIDOID, 0, 4, true, 'i', 'p', - F_CIDIN, F_CIDOUT}, + F_CIDIN, F_CIDOUT}, {"int2vector", INT2VECTOROID, INT2OID, INDEX_MAX_KEYS * 2, false, 's', 'p', - F_INT2VECTORIN, F_INT2VECTOROUT}, + F_INT2VECTORIN, F_INT2VECTOROUT}, {"oidvector", OIDVECTOROID, OIDOID, INDEX_MAX_KEYS * 4, false, 'i', 'p', - F_OIDVECTORIN, F_OIDVECTOROUT}, + F_OIDVECTORIN, F_OIDVECTOROUT}, {"_int4", INT4ARRAYOID, INT4OID, -1, false, 'i', 'x', - F_ARRAY_IN, F_ARRAY_OUT}, + F_ARRAY_IN, F_ARRAY_OUT}, {"_text", 1009, TEXTOID, -1, false, 'i', 'x', - F_ARRAY_IN, F_ARRAY_OUT}, + F_ARRAY_IN, F_ARRAY_OUT}, {"_aclitem", 1034, ACLITEMOID, -1, false, 'i', 'x', - F_ARRAY_IN, F_ARRAY_OUT} + F_ARRAY_IN, F_ARRAY_OUT} }; -static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo); +static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo); struct typmap { /* a hack */ @@ -498,13 +498,13 @@ static void usage(void) { write_stderr("Usage:\n" - " postgres -boot [OPTION]... DBNAME\n" - " -c NAME=VALUE set run-time parameter\n" - " -d 1-5 debug level\n" - " -D datadir data directory\n" - " -F turn off fsync\n" - " -o file send debug output to file\n" - " -x num internal use\n"); + " postgres -boot [OPTION]... 
DBNAME\n" + " -c NAME=VALUE set run-time parameter\n" + " -d 1-5 debug level\n" + " -D datadir data directory\n" + " -F turn off fsync\n" + " -o file send debug output to file\n" + " -x num internal use\n"); proc_exit(1); } diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c index 38f8ccfff61..a6ec207a323 100644 --- a/src/backend/catalog/aclchk.c +++ b/src/backend/catalog/aclchk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.106 2004/08/29 04:12:26 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.107 2004/08/29 05:06:41 momjian Exp $ * * NOTES * See acl.h. @@ -73,7 +73,7 @@ dumpacl(Acl *acl) * Determine the effective grantor ID for a GRANT or REVOKE operation. * * Ordinarily this is just the current user, but when a superuser does - * GRANT or REVOKE, we pretend he is the object owner. This ensures that + * GRANT or REVOKE, we pretend he is the object owner. This ensures that * all granted privileges appear to flow from the object owner, and there * are never multiple "original sources" of a privilege. */ @@ -122,25 +122,25 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, foreach(j, grantees) { PrivGrantee *grantee = (PrivGrantee *) lfirst(j); - AclItem aclitem; + AclItem aclitem; uint32 idtype; Acl *newer_acl; if (grantee->username) { - aclitem.ai_grantee = get_usesysid(grantee->username); + aclitem. ai_grantee = get_usesysid(grantee->username); idtype = ACL_IDTYPE_UID; } else if (grantee->groupname) { - aclitem.ai_grantee = get_grosysid(grantee->groupname); + aclitem. ai_grantee = get_grosysid(grantee->groupname); idtype = ACL_IDTYPE_GID; } else { - aclitem.ai_grantee = ACL_ID_WORLD; + aclitem. ai_grantee = ACL_ID_WORLD; idtype = ACL_IDTYPE_WORLD; } @@ -157,18 +157,19 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant, (errcode(ERRCODE_INVALID_GRANT_OPERATION), errmsg("grant options can only be granted to individual users"))); - aclitem.ai_grantor = grantor_uid; + aclitem. ai_grantor = grantor_uid; /* * The asymmetry in the conditions here comes from the spec. In - * GRANT, the grant_option flag signals WITH GRANT OPTION, which means - * to grant both the basic privilege and its grant option. But in - * REVOKE, plain revoke revokes both the basic privilege and its - * grant option, while REVOKE GRANT OPTION revokes only the option. + * GRANT, the grant_option flag signals WITH GRANT OPTION, which + * means to grant both the basic privilege and its grant option. + * But in REVOKE, plain revoke revokes both the basic privilege + * and its grant option, while REVOKE GRANT OPTION revokes only + * the option. */ ACLITEM_SET_PRIVS_IDTYPE(aclitem, - (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS, - (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS, + (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS, + (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS, idtype); newer_acl = aclupdate(new_acl, &aclitem, modechg, owner_uid, behavior); @@ -318,11 +319,11 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. 
(For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -476,11 +477,11 @@ ExecuteGrantStmt_Database(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -630,11 +631,11 @@ ExecuteGrantStmt_Function(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -761,7 +762,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("language \"%s\" is not trusted", langname), - errhint("Only superusers may use untrusted languages."))); + errhint("Only superusers may use untrusted languages."))); /* * Note: for now, languages are treated as owned by the bootstrap @@ -793,11 +794,11 @@ ExecuteGrantStmt_Language(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -946,11 +947,11 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. 
(For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -1039,8 +1040,8 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) if (priv & ~((AclMode) ACL_ALL_RIGHTS_TABLESPACE)) ereport(ERROR, (errcode(ERRCODE_INVALID_GRANT_OPERATION), - errmsg("invalid privilege type %s for tablespace", - privilege_to_string(priv)))); + errmsg("invalid privilege type %s for tablespace", + privilege_to_string(priv)))); privileges |= priv; } } @@ -1076,7 +1077,7 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", spcname))); + errmsg("tablespace \"%s\" does not exist", spcname))); pg_tablespace_tuple = (Form_pg_tablespace) GETSTRUCT(tuple); ownerId = pg_tablespace_tuple->spcowner; @@ -1105,11 +1106,11 @@ ExecuteGrantStmt_Tablespace(GrantStmt *stmt) /* * Restrict the operation to what we can actually grant or revoke, - * and issue a warning if appropriate. (For REVOKE this isn't quite - * what the spec says to do: the spec seems to want a warning only - * if no privilege bits actually change in the ACL. In practice - * that behavior seems much too noisy, as well as inconsistent with - * the GRANT case.) + * and issue a warning if appropriate. (For REVOKE this isn't + * quite what the spec says to do: the spec seems to want a + * warning only if no privilege bits actually change in the ACL. + * In practice that behavior seems much too noisy, as well as + * inconsistent with the GRANT case.) */ this_privileges = privileges & my_goptions; if (stmt->is_grant) @@ -1389,11 +1390,12 @@ pg_class_aclmask(Oid table_oid, AclId userid, /* * Deny anyone permission to update a system catalog unless * pg_shadow.usecatupd is set. (This is to let superusers protect - * themselves from themselves.) Also allow it if allowSystemTableMods. + * themselves from themselves.) Also allow it if + * allowSystemTableMods. * - * As of 7.4 we have some updatable system views; those shouldn't - * be protected in this way. Assume the view rules can take care - * of themselves. + * As of 7.4 we have some updatable system views; those shouldn't be + * protected in this way. Assume the view rules can take care of + * themselves. */ if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) && IsSystemClass(classForm) && @@ -1648,23 +1650,23 @@ pg_namespace_aclmask(Oid nsp_oid, AclId userid, return mask; /* - * If we have been assigned this namespace as a temp namespace, - * check to make sure we have CREATE TEMP permission on the database, - * and if so act as though we have all standard (but not GRANT OPTION) + * If we have been assigned this namespace as a temp namespace, check + * to make sure we have CREATE TEMP permission on the database, and if + * so act as though we have all standard (but not GRANT OPTION) * permissions on the namespace. If we don't have CREATE TEMP, act as * though we have only USAGE (and not CREATE) rights. * - * This may seem redundant given the check in InitTempTableNamespace, - * but it really isn't since current user ID may have changed since then. + * This may seem redundant given the check in InitTempTableNamespace, but + * it really isn't since current user ID may have changed since then. 
* The upshot of this behavior is that a SECURITY DEFINER function can - * create temp tables that can then be accessed (if permission is granted) - * by code in the same session that doesn't have permissions to create - * temp tables. + * create temp tables that can then be accessed (if permission is + * granted) by code in the same session that doesn't have permissions + * to create temp tables. * * XXX Would it be safe to ereport a special error message as * InitTempTableNamespace does? Returning zero here means we'll get a - * generic "permission denied for schema pg_temp_N" message, which is not - * remarkably user-friendly. + * generic "permission denied for schema pg_temp_N" message, which is + * not remarkably user-friendly. */ if (isTempNamespace(nsp_oid)) { @@ -1731,8 +1733,8 @@ pg_tablespace_aclmask(Oid spc_oid, AclId userid, AclId ownerId; /* - * Only shared relations can be stored in global space; don't let - * even superusers override this + * Only shared relations can be stored in global space; don't let even + * superusers override this */ if (spc_oid == GLOBALTABLESPACE_OID && !IsBootstrapProcessingMode()) return 0; @@ -1756,7 +1758,7 @@ pg_tablespace_aclmask(Oid spc_oid, AclId userid, if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace with OID %u does not exist", spc_oid))); + errmsg("tablespace with OID %u does not exist", spc_oid))); ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner; @@ -2034,7 +2036,7 @@ pg_tablespace_ownercheck(Oid spc_oid, AclId userid) if (!HeapTupleIsValid(spctuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace with OID %u does not exist", spc_oid))); + errmsg("tablespace with OID %u does not exist", spc_oid))); spcowner = ((Form_pg_tablespace) GETSTRUCT(spctuple))->spcowner; @@ -2131,7 +2133,7 @@ pg_conversion_ownercheck(Oid conv_oid, AclId userid) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("conversion with OID %u does not exist", conv_oid))); + errmsg("conversion with OID %u does not exist", conv_oid))); owner_id = ((Form_pg_conversion) GETSTRUCT(tuple))->conowner; diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 72002d84614..586be553a85 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -8,7 +8,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.38 2004/08/29 04:12:27 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.39 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -970,6 +970,7 @@ find_expr_references_walker(Node *node, if (var->varno <= 0 || var->varno > list_length(rtable)) elog(ERROR, "invalid varno %d", var->varno); rte = rt_fetch(var->varno, rtable); + /* * A whole-row Var references no specific columns, so adds no new * dependency. 
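Looking back at the merge_acl_with_grant hunk earlier in aclchk.c: the asymmetry its comment describes reduces to the two conditionals on is_grant and grant_option. The sketch below restates that rule with plain bit masks and invented privilege names, purely as an illustration of the behavior (GRANT ... WITH GRANT OPTION sets both fields, plain REVOKE clears both, REVOKE GRANT OPTION FOR clears only the option).

#include <stdio.h>

#define PRIV_SELECT		(1 << 0)
#define PRIV_INSERT		(1 << 1)
#define NO_RIGHTS		0

/*
 * Decide which bits land in the "privileges" field and which in the
 * "grant option" field, following the (is_grant || !grant_option) and
 * (!is_grant || grant_option) conditions shown in merge_acl_with_grant.
 */
static void
split_request(int is_grant, int grant_option, int privileges,
			  int *priv_bits, int *goption_bits)
{
	*priv_bits = (is_grant || !grant_option) ? privileges : NO_RIGHTS;
	*goption_bits = (!is_grant || grant_option) ? privileges : NO_RIGHTS;
}

int
main(void)
{
	struct
	{
		const char *desc;
		int			is_grant;
		int			grant_option;
	}			cases[] = {
		{"GRANT", 1, 0},
		{"GRANT ... WITH GRANT OPTION", 1, 1},
		{"REVOKE", 0, 0},
		{"REVOKE GRANT OPTION FOR", 0, 1}
	};
	int			i;

	for (i = 0; i < 4; i++)
	{
		int			p;
		int			g;

		split_request(cases[i].is_grant, cases[i].grant_option,
					  PRIV_SELECT | PRIV_INSERT, &p, &g);
		printf("%-30s privileges=%d grant options=%d\n",
			   cases[i].desc, p, g);
	}
	return 0;
}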
@@ -995,7 +996,7 @@ find_expr_references_walker(Node *node, var->varattno > list_length(rte->joinaliasvars)) elog(ERROR, "invalid varattno %d", var->varattno); find_expr_references_walker((Node *) list_nth(rte->joinaliasvars, - var->varattno - 1), + var->varattno - 1), context); list_free(context->rtables); context->rtables = save_rtables; @@ -1424,8 +1425,8 @@ getObjectDescription(const ObjectAddress *object) getRelationDescription(&buffer, object->objectId); if (object->objectSubId != 0) appendStringInfo(&buffer, gettext(" column %s"), - get_relid_attribute_name(object->objectId, - object->objectSubId)); + get_relid_attribute_name(object->objectId, + object->objectSubId)); break; case OCLASS_PROC: @@ -1624,7 +1625,7 @@ getObjectDescription(const ObjectAddress *object) appendStringInfo(&buffer, gettext("operator class %s for %s"), quote_qualified_identifier(nspname, - NameStr(opcForm->opcname)), + NameStr(opcForm->opcname)), NameStr(amForm->amname)); ReleaseSysCache(amTup); diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 057bd7fb80c..5cf6c5fa6d5 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.274 2004/08/29 04:12:27 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.275 2004/08/29 05:06:41 momjian Exp $ * * * INTERFACE ROUTINES @@ -265,10 +265,10 @@ heap_create(const char *relname, /* * Never allow a pg_class entry to explicitly specify the database's - * default tablespace in reltablespace; force it to zero instead. - * This ensures that if the database is cloned with a different - * default tablespace, the pg_class entry will still match where - * CREATE DATABASE will put the physically copied relation. + * default tablespace in reltablespace; force it to zero instead. This + * ensures that if the database is cloned with a different default + * tablespace, the pg_class entry will still match where CREATE + * DATABASE will put the physically copied relation. * * Yes, this is a bit of a hack. */ @@ -294,7 +294,8 @@ heap_create(const char *relname, nailme); /* - * have the storage manager create the relation's disk file, if needed. + * have the storage manager create the relation's disk file, if + * needed. */ if (create_storage) { @@ -980,12 +981,12 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) /* * Set the type OID to invalid. A dropped attribute's type link - * cannot be relied on (once the attribute is dropped, the type might - * be too). Fortunately we do not need the type row --- the only - * really essential information is the type's typlen and typalign, - * which are preserved in the attribute's attlen and attalign. We set - * atttypid to zero here as a means of catching code that incorrectly - * expects it to be valid. + * cannot be relied on (once the attribute is dropped, the type + * might be too). Fortunately we do not need the type row --- the + * only really essential information is the type's typlen and + * typalign, which are preserved in the attribute's attlen and + * attalign. We set atttypid to zero here as a means of catching + * code that incorrectly expects it to be valid. 
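One of the comments reflowed above records a subtle invariant in heap_create(): the database's default tablespace is never stored explicitly in reltablespace; zero is stored instead, so a database cloned into a different default tablespace still resolves to the right place. A tiny illustrative sketch of that normalization (the OID value used below is only an example):

#include <stdio.h>

typedef unsigned int Oid;

#define InvalidOid ((Oid) 0)

/*
 * Record zero rather than the database's default tablespace, so the entry
 * stays correct if the database is copied with a different default.
 */
static Oid
normalize_reltablespace(Oid requested, Oid database_default)
{
    return (requested == database_default) ? InvalidOid : requested;
}

int
main(void)
{
    Oid         dbdefault = 1663;   /* example default-tablespace OID */

    printf("%u\n", normalize_reltablespace(1663, dbdefault));  /* prints 0    */
    printf("%u\n", normalize_reltablespace(1664, dbdefault));  /* prints 1664 */
    return 0;
}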
*/ attStruct->atttypid = InvalidOid; @@ -995,7 +996,10 @@ RemoveAttributeById(Oid relid, AttrNumber attnum) /* We don't want to keep stats for it anymore */ attStruct->attstattarget = 0; - /* Change the column name to something that isn't likely to conflict */ + /* + * Change the column name to something that isn't likely to + * conflict + */ snprintf(newattname, sizeof(newattname), "........pg.dropped.%d........", attnum); namestrcpy(&(attStruct->attname), newattname); @@ -1199,7 +1203,7 @@ heap_drop_with_catalog(Oid relid) /* * Flush the relation from the relcache. We want to do this before * starting to remove catalog entries, just to be certain that no - * relcache entry rebuild will happen partway through. (That should + * relcache entry rebuild will happen partway through. (That should * not really matter, since we don't do CommandCounterIncrement here, * but let's be safe.) */ @@ -1584,11 +1588,11 @@ AddRelationRawConstraints(Relation rel, if (pstate->p_hasSubLinks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in check constraint"))); + errmsg("cannot use subquery in check constraint"))); if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in check constraint"))); + errmsg("cannot use aggregate function in check constraint"))); /* * Check name uniqueness, or generate a name if none was given. @@ -1614,8 +1618,8 @@ AddRelationRawConstraints(Relation rel, if (strcmp((char *) lfirst(cell2), ccname) == 0) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("check constraint \"%s\" already exists", - ccname))); + errmsg("check constraint \"%s\" already exists", + ccname))); } } else @@ -1623,18 +1627,18 @@ AddRelationRawConstraints(Relation rel, /* * When generating a name, we want to create "tab_col_check" * for a column constraint and "tab_check" for a table - * constraint. We no longer have any info about the - * syntactic positioning of the constraint phrase, so we - * approximate this by seeing whether the expression references - * more than one column. (If the user played by the rules, - * the result is the same...) + * constraint. We no longer have any info about the syntactic + * positioning of the constraint phrase, so we approximate + * this by seeing whether the expression references more than + * one column. (If the user played by the rules, the result + * is the same...) * - * Note: pull_var_clause() doesn't descend into sublinks, - * but we eliminated those above; and anyway this only needs - * to be an approximate answer. + * Note: pull_var_clause() doesn't descend into sublinks, but we + * eliminated those above; and anyway this only needs to be an + * approximate answer. */ - List *vars; - char *colname; + List *vars; + char *colname; vars = pull_var_clause(expr, false); @@ -1763,7 +1767,7 @@ cookDefault(ParseState *pstate, if (contain_var_clause(expr)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("cannot use column references in default expression"))); + errmsg("cannot use column references in default expression"))); /* * It can't return a set either. @@ -1783,7 +1787,7 @@ cookDefault(ParseState *pstate, if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in default expression"))); + errmsg("cannot use aggregate function in default expression"))); /* * Coerce the expression to the correct type and typmod, if given. 
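The AddRelationRawConstraints() comment above explains the naming heuristic for generated check constraints: an expression that references exactly one column yields "tab_col_check", anything else yields "tab_check". A small sketch of that rule, assuming uniqueness against existing constraint names is checked elsewhere, as the real code does:

#include <stdio.h>

/*
 * Build a default check-constraint name: "<table>_<column>_check" when the
 * expression touches exactly one column, "<table>_check" otherwise.
 */
static void
default_constraint_name(char *buf, size_t buflen,
                        const char *relname,
                        const char *only_colname)   /* NULL if 0 or >1 columns */
{
    if (only_colname)
        snprintf(buf, buflen, "%s_%s_check", relname, only_colname);
    else
        snprintf(buf, buflen, "%s_check", relname);
}

int
main(void)
{
    char        name[128];

    default_constraint_name(name, sizeof(name), "orders", "amount");
    printf("%s\n", name);           /* orders_amount_check */

    default_constraint_name(name, sizeof(name), "orders", NULL);
    printf("%s\n", name);           /* orders_check */
    return 0;
}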
@@ -2047,7 +2051,7 @@ heap_truncate_check_FKs(Relation rel) return; /* - * Otherwise, must scan pg_constraint. Right now, this is a seqscan + * Otherwise, must scan pg_constraint. Right now, this is a seqscan * because there is no available index on confrelid. */ fkeyRel = heap_openr(ConstraintRelationName, AccessShareLock); diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 78bbe3ecf58..bed06fc5383 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.237 2004/08/29 04:12:27 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.238 2004/08/29 05:06:41 momjian Exp $ * * * INTERFACE ROUTINES @@ -511,9 +511,10 @@ index_create(Oid heapRelationId, * We cannot allow indexing a shared relation after initdb (because * there's no way to make the entry in other databases' pg_class). * Unfortunately we can't distinguish initdb from a manually started - * standalone backend (toasting of shared rels happens after the bootstrap - * phase, so checking IsBootstrapProcessingMode() won't work). However, - * we can at least prevent this mistake under normal multi-user operation. + * standalone backend (toasting of shared rels happens after the + * bootstrap phase, so checking IsBootstrapProcessingMode() won't + * work). However, we can at least prevent this mistake under normal + * multi-user operation. */ if (shared_relation && IsUnderPostmaster) ereport(ERROR, @@ -800,8 +801,8 @@ index_drop(Oid indexId) /* * Close and flush the index's relcache entry, to ensure relcache - * doesn't try to rebuild it while we're deleting catalog entries. - * We keep the lock though. + * doesn't try to rebuild it while we're deleting catalog entries. We + * keep the lock though. */ index_close(userIndexRelation); @@ -826,8 +827,8 @@ index_drop(Oid indexId) heap_close(indexRelation, RowExclusiveLock); /* - * if it has any expression columns, we might have stored - * statistics about them. + * if it has any expression columns, we might have stored statistics + * about them. */ if (hasexprs) RemoveStatistics(indexId, 0); @@ -1008,7 +1009,7 @@ setRelhasindex(Oid relid, bool hasindex, bool isprimary, Oid reltoastidxid) /* * Find the tuple to update in pg_class. In bootstrap mode we can't - * use heap_update, so cheat and overwrite the tuple in-place. In + * use heap_update, so cheat and overwrite the tuple in-place. In * normal processing, make a copy to scribble on. */ pg_class = heap_openr(RelationRelationName, RowExclusiveLock); @@ -1122,13 +1123,13 @@ setNewRelfilenode(Relation relation) newrelfilenode = newoid(); /* - * Find the pg_class tuple for the given relation. This is not used + * Find the pg_class tuple for the given relation. This is not used * during bootstrap, so okay to use heap_update always. */ pg_class = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, - ObjectIdGetDatum(RelationGetRelid(relation)), + ObjectIdGetDatum(RelationGetRelid(relation)), 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "could not find tuple for relation %u", @@ -1206,15 +1207,15 @@ UpdateStats(Oid relid, double reltuples) /* * Find the tuple to update in pg_class. Normally we make a copy of - * the tuple using the syscache, modify it, and apply heap_update. - * But in bootstrap mode we can't use heap_update, so we cheat and + * the tuple using the syscache, modify it, and apply heap_update. 
But + * in bootstrap mode we can't use heap_update, so we cheat and * overwrite the tuple in-place. * - * We also must cheat if reindexing pg_class itself, because the - * target index may presently not be part of the set of indexes that + * We also must cheat if reindexing pg_class itself, because the target + * index may presently not be part of the set of indexes that * CatalogUpdateIndexes would update (see reindex_relation). In this * case the stats updates will not be WAL-logged and so could be lost - * in a crash. This seems OK considering VACUUM does the same thing. + * in a crash. This seems OK considering VACUUM does the same thing. */ pg_class = heap_openr(RelationRelationName, RowExclusiveLock); @@ -1454,7 +1455,7 @@ IndexBuildHeapScan(Relation heapRelation, scan = heap_beginscan(heapRelation, /* relation */ snapshot, /* seeself */ 0, /* number of keys */ - NULL); /* scan key */ + NULL); /* scan key */ reltuples = 0; @@ -1513,7 +1514,7 @@ IndexBuildHeapScan(Relation heapRelation, * system catalogs before committing. */ if (!TransactionIdIsCurrentTransactionId( - HeapTupleHeaderGetXmin(heapTuple->t_data)) + HeapTupleHeaderGetXmin(heapTuple->t_data)) && !IsSystemRelation(heapRelation)) elog(ERROR, "concurrent insert in progress"); indexIt = true; @@ -1531,7 +1532,7 @@ IndexBuildHeapScan(Relation heapRelation, * system catalogs before committing. */ if (!TransactionIdIsCurrentTransactionId( - HeapTupleHeaderGetXmax(heapTuple->t_data)) + HeapTupleHeaderGetXmax(heapTuple->t_data)) && !IsSystemRelation(heapRelation)) elog(ERROR, "concurrent delete in progress"); indexIt = true; @@ -1659,11 +1660,11 @@ reindex_index(Oid indexId) * Note: for REINDEX INDEX, doing this before opening the parent heap * relation means there's a possibility for deadlock failure against * another xact that is doing normal accesses to the heap and index. - * However, it's not real clear why you'd be wanting to do REINDEX INDEX - * on a table that's in active use, so I'd rather have the protection of - * making sure the index is locked down. In the REINDEX TABLE and - * REINDEX DATABASE cases, there is no problem because caller already - * holds exclusive lock on the parent table. + * However, it's not real clear why you'd be wanting to do REINDEX + * INDEX on a table that's in active use, so I'd rather have the + * protection of making sure the index is locked down. In the REINDEX + * TABLE and REINDEX DATABASE cases, there is no problem because + * caller already holds exclusive lock on the parent table. */ iRel = index_open(indexId); LockRelation(iRel, AccessExclusiveLock); @@ -1680,8 +1681,8 @@ reindex_index(Oid indexId) * we can do it the normal transaction-safe way. * * Since inplace processing isn't crash-safe, we only allow it in a - * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases, - * the caller should have detected this.) + * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE + * cases, the caller should have detected this.) */ inplace = iRel->rd_rel->relisshared; @@ -1705,7 +1706,8 @@ reindex_index(Oid indexId) { /* * Release any buffers associated with this index. If they're - * dirty, they're just dropped without bothering to flush to disk. + * dirty, they're just dropped without bothering to flush to + * disk. */ DropRelationBuffers(iRel); @@ -1724,8 +1726,8 @@ reindex_index(Oid indexId) index_build(heapRelation, iRel, indexInfo); /* - * index_build will close both the heap and index relations (but not - * give up the locks we hold on them). So we're done. 
+ * index_build will close both the heap and index relations (but + * not give up the locks we hold on them). So we're done. */ } PG_CATCH(); @@ -1774,13 +1776,13 @@ reindex_relation(Oid relid, bool toast_too) /* * reindex_index will attempt to update the pg_class rows for the - * relation and index. If we are processing pg_class itself, we - * want to make sure that the updates do not try to insert index - * entries into indexes we have not processed yet. (When we are - * trying to recover from corrupted indexes, that could easily - * cause a crash.) We can accomplish this because CatalogUpdateIndexes - * will use the relcache's index list to know which indexes to update. - * We just force the index list to be only the stuff we've processed. + * relation and index. If we are processing pg_class itself, we want + * to make sure that the updates do not try to insert index entries + * into indexes we have not processed yet. (When we are trying to + * recover from corrupted indexes, that could easily cause a crash.) + * We can accomplish this because CatalogUpdateIndexes will use the + * relcache's index list to know which indexes to update. We just + * force the index list to be only the stuff we've processed. * * It is okay to not insert entries into the indexes we have not * processed yet because all of this is transaction-safe. If we fail @@ -1795,7 +1797,7 @@ reindex_relation(Oid relid, bool toast_too) /* Reindex all the indexes. */ foreach(indexId, indexIds) { - Oid indexOid = lfirst_oid(indexId); + Oid indexOid = lfirst_oid(indexId); if (is_pg_class) RelationSetIndexList(rel, doneIndexes); @@ -1819,8 +1821,8 @@ reindex_relation(Oid relid, bool toast_too) result = (indexIds != NIL); /* - * If the relation has a secondary toast rel, reindex that too while we - * still hold the lock on the master table. + * If the relation has a secondary toast rel, reindex that too while + * we still hold the lock on the master table. 
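The reindex_relation() comment rewrapped above describes the pg_class special case: keep a running list of indexes already rebuilt and let only those be seen by the catalog-update machinery while the loop advances. A generic sketch of that done-list pattern with plain arrays; the relcache calls themselves are not reproduced here.

#include <stdio.h>

typedef unsigned int Oid;

/* stand-in: pretend catalog updates may touch only the indexes listed here */
static void
set_visible_index_list(const Oid *visible, int nvisible)
{
    int         i;

    printf("catalog updates may now touch %d index(es):", nvisible);
    for (i = 0; i < nvisible; i++)
        printf(" %u", visible[i]);
    printf("\n");
}

static void
rebuild_index(Oid indexOid)
{
    printf("rebuilding index %u\n", indexOid);
}

int
main(void)
{
    Oid         indexIds[] = {16384, 16385, 16386};
    Oid         doneIndexes[3];
    int         ndone = 0;
    int         i;

    for (i = 0; i < 3; i++)
    {
        /* expose only the indexes already rebuilt ... */
        set_visible_index_list(doneIndexes, ndone);
        /* ... then rebuild the next one and append it to the done list */
        rebuild_index(indexIds[i]);
        doneIndexes[ndone++] = indexIds[i];
    }
    return 0;
}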
*/ if (toast_too && OidIsValid(toast_relid)) result |= reindex_relation(toast_relid, false); diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c index 2ff0070536c..001e02ba7f6 100644 --- a/src/backend/catalog/namespace.c +++ b/src/backend/catalog/namespace.c @@ -13,7 +13,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.69 2004/08/29 04:12:28 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.70 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -170,9 +170,9 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK) if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: \"%s.%s.%s\"", - relation->catalogname, relation->schemaname, - relation->relname))); + errmsg("cross-database references are not implemented: \"%s.%s.%s\"", + relation->catalogname, relation->schemaname, + relation->relname))); } if (relation->schemaname) @@ -225,9 +225,9 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: \"%s.%s.%s\"", - newRelation->catalogname, newRelation->schemaname, - newRelation->relname))); + errmsg("cross-database references are not implemented: \"%s.%s.%s\"", + newRelation->catalogname, newRelation->schemaname, + newRelation->relname))); } if (newRelation->istemp) @@ -236,7 +236,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation) if (newRelation->schemaname) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("temporary tables may not specify a schema name"))); + errmsg("temporary tables may not specify a schema name"))); /* Initialize temp namespace if first time through */ if (!OidIsValid(myTempNamespace)) InitTempTableNamespace(); @@ -699,12 +699,13 @@ OpernameGetCandidates(List *names, char oprkind) /* * In typical scenarios, most if not all of the operators found by the - * catcache search will end up getting returned; and there can be quite - * a few, for common operator names such as '=' or '+'. To reduce the - * time spent in palloc, we allocate the result space as an array large - * enough to hold all the operators. The original coding of this routine - * did a separate palloc for each operator, but profiling revealed that - * the pallocs used an unreasonably large fraction of parsing time. + * catcache search will end up getting returned; and there can be + * quite a few, for common operator names such as '=' or '+'. To + * reduce the time spent in palloc, we allocate the result space as an + * array large enough to hold all the operators. The original coding + * of this routine did a separate palloc for each operator, but + * profiling revealed that the pallocs used an unreasonably large + * fraction of parsing time. 
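OpernameGetCandidates' comment above records why the candidate list is carved out of a single allocation sized for the worst case rather than one palloc per operator: profiling showed the per-item allocations dominating parse time. A stand-alone sketch of the same arena idea using malloc; SPACE_PER_OP below is an invented stand-in for the macro in the patch.

#include <stdio.h>
#include <stdlib.h>

struct candidate
{
    unsigned int oid;
    struct candidate *next;
};

#define SPACE_PER_OP sizeof(struct candidate)

int
main(void)
{
    int         ncandidates = 1000;
    char       *space;
    struct candidate *head = NULL;
    int         i;

    /* one arena big enough for every possible candidate ... */
    space = (char *) malloc(ncandidates * SPACE_PER_OP);
    if (space == NULL)
        return 1;

    /* ... then carve list cells out of it instead of allocating per item */
    for (i = 0; i < ncandidates; i++)
    {
        struct candidate *cell = (struct candidate *) (space + i * SPACE_PER_OP);

        cell->oid = 16384 + i;
        cell->next = head;
        head = cell;
    }

    printf("first candidate oid: %u\n", head->oid);
    free(space);
    return 0;
}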
*/ #define SPACE_PER_OP MAXALIGN(sizeof(struct _FuncCandidateList) + sizeof(Oid)) @@ -1191,8 +1192,8 @@ DeconstructQualifiedName(List *names, if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cross-database references are not implemented: %s", - NameListToString(names)))); + errmsg("cross-database references are not implemented: %s", + NameListToString(names)))); break; default: ereport(ERROR, @@ -1645,10 +1646,11 @@ InitTempTableNamespace(void) * tables. We use a nonstandard error message here since * "databasename: permission denied" might be a tad cryptic. * - * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask; - * that's necessary since current user ID could change during the session. - * But there's no need to make the namespace in the first place until a - * temp table creation request is made by someone with appropriate rights. + * Note that ACL_CREATE_TEMP rights are rechecked in + * pg_namespace_aclmask; that's necessary since current user ID could + * change during the session. But there's no need to make the + * namespace in the first place until a temp table creation request is + * made by someone with appropriate rights. */ if (pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE_TEMP) != ACLCHECK_OK) @@ -1847,7 +1849,8 @@ assign_search_path(const char *newval, bool doit, GucSource source) * ALTER DATABASE SET or ALTER USER SET command. It could be that * the intended use of the search path is for some other database, * so we should not error out if it mentions schemas not present - * in the current database. We reduce the message to NOTICE instead. + * in the current database. We reduce the message to NOTICE + * instead. */ foreach(l, namelist) { diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c index 928d27ae16a..2882eeff06a 100644 --- a/src/backend/catalog/pg_aggregate.c +++ b/src/backend/catalog/pg_aggregate.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.67 2004/08/29 04:12:28 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.68 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -78,8 +78,8 @@ AggregateCreate(const char *aggName, ereport(ERROR, (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION), errmsg("cannot determine transition data type"), - errdetail("An aggregate using \"anyarray\" or \"anyelement\" as " - "transition type must have one of them as its base type."))); + errdetail("An aggregate using \"anyarray\" or \"anyelement\" as " + "transition type must have one of them as its base type."))); /* handle transfn */ MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid)); @@ -163,8 +163,8 @@ AggregateCreate(const char *aggName, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot determine result data type"), - errdetail("An aggregate returning \"anyarray\" or \"anyelement\" " - "must have one of them as its base type."))); + errdetail("An aggregate returning \"anyarray\" or \"anyelement\" " + "must have one of them as its base type."))); /* * Everything looks okay. 
Try to create the pg_proc entry for the @@ -190,8 +190,8 @@ AggregateCreate(const char *aggName, PROVOLATILE_IMMUTABLE, /* volatility (not * needed for agg) */ 1, /* parameterCount */ - fnArgs, /* parameterTypes */ - NULL); /* parameterNames */ + fnArgs, /* parameterTypes */ + NULL); /* parameterNames */ /* * Okay to create the pg_aggregate entry. diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c index 0f74814f176..1b658c9ad26 100644 --- a/src/backend/catalog/pg_proc.c +++ b/src/backend/catalog/pg_proc.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.118 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.119 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -44,12 +44,12 @@ Datum fmgr_c_validator(PG_FUNCTION_ARGS); Datum fmgr_sql_validator(PG_FUNCTION_ARGS); static Datum create_parameternames_array(int parameterCount, - const char *parameterNames[]); + const char *parameterNames[]); static void sql_function_parse_error_callback(void *arg); -static int match_prosrc_to_query(const char *prosrc, const char *queryText, - int cursorpos); +static int match_prosrc_to_query(const char *prosrc, const char *queryText, + int cursorpos); static bool match_prosrc_to_literal(const char *prosrc, const char *literal, - int cursorpos, int *newcursorpos); + int cursorpos, int *newcursorpos); /* ---------------------------------------------------------------- @@ -173,7 +173,7 @@ ProcedureCreate(const char *procedureName, values[i++] = UInt16GetDatum(parameterCount); /* pronargs */ values[i++] = ObjectIdGetDatum(returnType); /* prorettype */ values[i++] = PointerGetDatum(typev); /* proargtypes */ - values[i++] = namesarray; /* proargnames */ + values[i++] = namesarray; /* proargnames */ if (namesarray == PointerGetDatum(NULL)) nulls[Anum_pg_proc_proargnames - 1] = 'n'; values[i++] = DirectFunctionCall1(textin, /* prosrc */ @@ -329,7 +329,7 @@ create_parameternames_array(int parameterCount, const char *parameterNames[]) if (!parameterNames) return PointerGetDatum(NULL); - for (i=0; i<parameterCount; i++) + for (i = 0; i < parameterCount; i++) { const char *s = parameterNames[i]; @@ -562,8 +562,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList) } /* - * Otherwise assume we are returning the whole tuple. Crosschecking - * against what the caller expects will happen at runtime. + * Otherwise assume we are returning the whole tuple. + * Crosschecking against what the caller expects will happen at + * runtime. */ return true; } @@ -652,9 +653,10 @@ fmgr_c_validator(PG_FUNCTION_ARGS) char *probin; /* - * It'd be most consistent to skip the check if !check_function_bodies, - * but the purpose of that switch is to be helpful for pg_dump loading, - * and for pg_dump loading it's much better if we *do* check. + * It'd be most consistent to skip the check if + * !check_function_bodies, but the purpose of that switch is to be + * helpful for pg_dump loading, and for pg_dump loading it's much + * better if we *do* check. */ tuple = SearchSysCache(PROCOID, @@ -760,10 +762,10 @@ fmgr_sql_validator(PG_FUNCTION_ARGS) error_context_stack = &sqlerrcontext; /* - * We can't do full prechecking of the function definition if there - * are any polymorphic input types, because actual datatypes of - * expression results will be unresolvable. The check will be done - * at runtime instead. 
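The errdetail strings rewrapped above state the rule AggregateCreate() enforces: a polymorphic transition or result type ("anyarray"/"anyelement") is only legal when the aggregate's base type is itself polymorphic, since otherwise the actual types could never be resolved at runtime. A minimal sketch of that validation; the OID constants are the well-known ones for these types and are used here purely for illustration.

#include <stdio.h>

typedef unsigned int Oid;

#define ANYARRAYOID   2277
#define ANYELEMENTOID 2283

static int
is_polymorphic(Oid typid)
{
    return typid == ANYARRAYOID || typid == ANYELEMENTOID;
}

/*
 * A polymorphic transition type or result type requires a polymorphic
 * base type, mirroring the two error cases quoted in the patch.
 */
static int
aggregate_types_ok(Oid basetype, Oid transtype, Oid rettype)
{
    if (is_polymorphic(transtype) && !is_polymorphic(basetype))
        return 0;               /* "cannot determine transition data type" */
    if (is_polymorphic(rettype) && !is_polymorphic(basetype))
        return 0;               /* "cannot determine result data type" */
    return 1;
}

int
main(void)
{
    printf("%d\n", aggregate_types_ok(ANYELEMENTOID, ANYARRAYOID, ANYARRAYOID)); /* 1 */
    printf("%d\n", aggregate_types_ok(23 /* int4 */, ANYARRAYOID, 23));          /* 0 */
    return 0;
}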
+ * We can't do full prechecking of the function definition if + * there are any polymorphic input types, because actual datatypes + * of expression results will be unresolvable. The check will be + * done at runtime instead. * * We can run the text through the raw parser though; this will at * least catch silly syntactic errors. @@ -817,7 +819,7 @@ sql_function_parse_error_callback(void *arg) /* * Adjust a syntax error occurring inside the function body of a CREATE * FUNCTION command. This can be used by any function validator, not only - * for SQL-language functions. It is assumed that the syntax error position + * for SQL-language functions. It is assumed that the syntax error position * is initially relative to the function body string (as passed in). If * possible, we adjust the position to reference the original CREATE command; * if we can't manage that, we set up an "internal query" syntax error instead. @@ -832,11 +834,11 @@ function_parse_error_transpose(const char *prosrc) const char *queryText; /* - * Nothing to do unless we are dealing with a syntax error that has - * a cursor position. + * Nothing to do unless we are dealing with a syntax error that has a + * cursor position. * - * Some PLs may prefer to report the error position as an internal - * error to begin with, so check that too. + * Some PLs may prefer to report the error position as an internal error + * to begin with, so check that too. */ origerrposition = geterrposition(); if (origerrposition <= 0) @@ -891,17 +893,17 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, * (though not in any very probable scenarios), so fail if we find * more than one match. */ - int prosrclen = strlen(prosrc); - int querylen = strlen(queryText); - int matchpos = 0; - int curpos; - int newcursorpos; + int prosrclen = strlen(prosrc); + int querylen = strlen(queryText); + int matchpos = 0; + int curpos; + int newcursorpos; - for (curpos = 0; curpos < querylen-prosrclen; curpos++) + for (curpos = 0; curpos < querylen - prosrclen; curpos++) { if (queryText[curpos] == '$' && - strncmp(prosrc, &queryText[curpos+1], prosrclen) == 0 && - queryText[curpos+1+prosrclen] == '$') + strncmp(prosrc, &queryText[curpos + 1], prosrclen) == 0 && + queryText[curpos + 1 + prosrclen] == '$') { /* * Found a $foo$ match. Since there are no embedded quoting @@ -910,20 +912,21 @@ match_prosrc_to_query(const char *prosrc, const char *queryText, */ if (matchpos) return 0; /* multiple matches, fail */ - matchpos = pg_mbstrlen_with_len(queryText, curpos+1) + matchpos = pg_mbstrlen_with_len(queryText, curpos + 1) + cursorpos; } else if (queryText[curpos] == '\'' && - match_prosrc_to_literal(prosrc, &queryText[curpos+1], + match_prosrc_to_literal(prosrc, &queryText[curpos + 1], cursorpos, &newcursorpos)) { /* - * Found a 'foo' match. match_prosrc_to_literal() has adjusted - * for any quotes or backslashes embedded in the literal. + * Found a 'foo' match. match_prosrc_to_literal() has + * adjusted for any quotes or backslashes embedded in the + * literal. */ if (matchpos) return 0; /* multiple matches, fail */ - matchpos = pg_mbstrlen_with_len(queryText, curpos+1) + matchpos = pg_mbstrlen_with_len(queryText, curpos + 1) + newcursorpos; } } @@ -948,15 +951,16 @@ match_prosrc_to_literal(const char *prosrc, const char *literal, /* * This implementation handles backslashes and doubled quotes in the - * string literal. It does not handle the SQL syntax for literals + * string literal. 
It does not handle the SQL syntax for literals * continued across line boundaries. * - * We do the comparison a character at a time, not a byte at a time, - * so that we can do the correct cursorpos math. + * We do the comparison a character at a time, not a byte at a time, so + * that we can do the correct cursorpos math. */ while (*prosrc) { cursorpos--; /* characters left before cursor */ + /* * Check for backslashes and doubled quotes in the literal; adjust * newcp when one is found before the cursor. diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c index 2326f25330c..2ba6ea0ff95 100644 --- a/src/backend/catalog/pg_type.c +++ b/src/backend/catalog/pg_type.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.95 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.96 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -201,8 +201,8 @@ TypeCreate(const char *typeName, (internalSize <= 0 || internalSize > (int16) sizeof(Datum))) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("internal size %d is invalid for passed-by-value type", - internalSize))); + errmsg("internal size %d is invalid for passed-by-value type", + internalSize))); /* Only varlena types can be toasted */ if (storage != 'p' && internalSize != -1) diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c index bc3affcf4bf..fcbd1df98dc 100644 --- a/src/backend/commands/aggregatecmds.c +++ b/src/backend/commands/aggregatecmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.20 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.21 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -223,9 +223,9 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname) /* * if a basetype is passed in, then attempt to find an aggregate for - * that specific type; else attempt to find an aggregate with a basetype - * of ANYOID. This means that the aggregate applies to all basetypes - * (eg, COUNT). + * that specific type; else attempt to find an aggregate with a + * basetype of ANYOID. This means that the aggregate applies to all + * basetypes (eg, COUNT). */ if (basetype) basetypeOid = typenameTypeId(basetype); @@ -302,9 +302,9 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId) /* * if a basetype is passed in, then attempt to find an aggregate for - * that specific type; else attempt to find an aggregate with a basetype - * of ANYOID. This means that the aggregate applies to all basetypes - * (eg, COUNT). + * that specific type; else attempt to find an aggregate with a + * basetype of ANYOID. This means that the aggregate applies to all + * basetypes (eg, COUNT). */ if (basetype) basetypeOid = typenameTypeId(basetype); @@ -322,7 +322,7 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId) elog(ERROR, "cache lookup failed for function %u", procOid); procForm = (Form_pg_proc) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
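function_parse_error_transpose() and match_prosrc_to_query(), adjusted above, map a syntax-error cursor inside a function body back onto the original CREATE FUNCTION text by locating the body as a dollar-quoted string in the query. A simplified, single-byte-encoding sketch of that search; the real code also handles ordinary quoted literals, multibyte cursor arithmetic, and the multiple-match case.

#include <stdio.h>
#include <string.h>

/*
 * Return a 1-based cursor position in queryText corresponding to
 * 'cursorpos' within prosrc, assuming prosrc appears exactly once as a
 * dollar-quoted body ($tag$<prosrc>$tag$).  Returns 0 if there is no
 * unique match.
 */
static int
transpose_cursor(const char *prosrc, const char *queryText, int cursorpos)
{
    size_t      prosrclen = strlen(prosrc);
    size_t      querylen = strlen(queryText);
    int         matchpos = 0;
    size_t      curpos;

    for (curpos = 0; curpos + prosrclen + 1 < querylen; curpos++)
    {
        if (queryText[curpos] == '$' &&
            strncmp(prosrc, &queryText[curpos + 1], prosrclen) == 0 &&
            queryText[curpos + 1 + prosrclen] == '$')
        {
            if (matchpos)
                return 0;       /* more than one match: give up */
            matchpos = (int) (curpos + 1) + cursorpos;
        }
    }
    return matchpos;
}

int
main(void)
{
    const char *query = "CREATE FUNCTION f() RETURNS int AS $$ selct 1 $$ LANGUAGE sql";
    const char *body = " selct 1 ";

    /* error at position 2 of the body -> position within the full command */
    printf("cursor in original command: %d\n", transpose_cursor(body, query, 2));
    return 0;
}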
*/ @@ -334,7 +334,10 @@ AlterAggregateOwner(List *name, TypeName *basetype, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ procForm->proowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c index 11d9c828f69..3e08a551f65 100644 --- a/src/backend/commands/alter.c +++ b/src/backend/commands/alter.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.10 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.11 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,7 +38,7 @@ /* - * Executes an ALTER OBJECT / RENAME TO statement. Based on the object + * Executes an ALTER OBJECT / RENAME TO statement. Based on the object * type, the function appropriate to that type is executed. */ void @@ -153,7 +153,7 @@ ExecRenameStmt(RenameStmt *stmt) void ExecAlterOwnerStmt(AlterOwnerStmt *stmt) { - AclId newowner = get_usesysid(stmt->newowner); + AclId newowner = get_usesysid(stmt->newowner); switch (stmt->objectType) { diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 45aff53796d..ce7db272113 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.75 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.76 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -42,9 +42,9 @@ /* Data structure for Algorithm S from Knuth 3.4.2 */ typedef struct { - BlockNumber N; /* number of blocks, known in advance */ + BlockNumber N; /* number of blocks, known in advance */ int n; /* desired sample size */ - BlockNumber t; /* current block number */ + BlockNumber t; /* current block number */ int m; /* blocks selected so far */ } BlockSamplerData; typedef BlockSamplerData *BlockSampler; @@ -68,13 +68,13 @@ static MemoryContext anl_context = NULL; static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, - int samplesize); + int samplesize); static bool BlockSampler_HasMore(BlockSampler bs); static BlockNumber BlockSampler_Next(BlockSampler bs); static void compute_index_stats(Relation onerel, double totalrows, - AnlIndexData *indexdata, int nindexes, - HeapTuple *rows, int numrows, - MemoryContext col_context); + AnlIndexData *indexdata, int nindexes, + HeapTuple *rows, int numrows, + MemoryContext col_context); static VacAttrStats *examine_attribute(Relation onerel, int attnum); static int acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, double *totalrows); @@ -157,9 +157,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Check that it's a plain table; we used to do this in - * get_rel_oids() but seems safer to check after we've locked the - * relation. + * Check that it's a plain table; we used to do this in get_rel_oids() + * but seems safer to check after we've locked the relation. 
*/ if (onerel->rd_rel->relkind != RELKIND_RELATION) { @@ -239,9 +238,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) } /* - * Open all indexes of the relation, and see if there are any analyzable - * columns in the indexes. We do not analyze index columns if there was - * an explicit column list in the ANALYZE command, however. + * Open all indexes of the relation, and see if there are any + * analyzable columns in the indexes. We do not analyze index columns + * if there was an explicit column list in the ANALYZE command, + * however. */ vac_open_indexes(onerel, &nindexes, &Irel); hasindex = (nindexes > 0); @@ -253,10 +253,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) for (ind = 0; ind < nindexes; ind++) { AnlIndexData *thisdata = &indexdata[ind]; - IndexInfo *indexInfo; + IndexInfo *indexInfo; thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]); - thisdata->tupleFract = 1.0; /* fix later if partial */ + thisdata->tupleFract = 1.0; /* fix later if partial */ if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL) { ListCell *indexpr_item = list_head(indexInfo->ii_Expressions); @@ -273,25 +273,26 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* Found an index expression */ Node *indexkey; - if (indexpr_item == NULL) /* shouldn't happen */ + if (indexpr_item == NULL) /* shouldn't happen */ elog(ERROR, "too few entries in indexprs list"); indexkey = (Node *) lfirst(indexpr_item); indexpr_item = lnext(indexpr_item); /* - * Can't analyze if the opclass uses a storage type - * different from the expression result type. We'd - * get confused because the type shown in pg_attribute - * for the index column doesn't match what we are - * getting from the expression. Perhaps this can be - * fixed someday, but for now, punt. + * Can't analyze if the opclass uses a storage + * type different from the expression result type. + * We'd get confused because the type shown in + * pg_attribute for the index column doesn't match + * what we are getting from the expression. + * Perhaps this can be fixed someday, but for now, + * punt. */ if (exprType(indexkey) != Irel[ind]->rd_att->attrs[i]->atttypid) continue; thisdata->vacattrstats[tcnt] = - examine_attribute(Irel[ind], i+1); + examine_attribute(Irel[ind], i + 1); if (thisdata->vacattrstats[tcnt] != NULL) { tcnt++; @@ -401,10 +402,10 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt) /* * If we are running a standalone ANALYZE, update pages/tuples stats - * in pg_class. We know the accurate page count from the smgr, - * but only an approximate number of tuples; therefore, if we are part - * of VACUUM ANALYZE do *not* overwrite the accurate count already - * inserted by VACUUM. The same consideration applies to indexes. + * in pg_class. We know the accurate page count from the smgr, but + * only an approximate number of tuples; therefore, if we are part of + * VACUUM ANALYZE do *not* overwrite the accurate count already + * inserted by VACUUM. The same consideration applies to indexes. 
*/ if (!vacstmt->vacuum) { @@ -446,7 +447,7 @@ compute_index_stats(Relation onerel, double totalrows, MemoryContext col_context) { MemoryContext ind_context, - old_context; + old_context; TupleDesc heapDescriptor; Datum attdata[INDEX_MAX_KEYS]; char nulls[INDEX_MAX_KEYS]; @@ -465,7 +466,7 @@ compute_index_stats(Relation onerel, double totalrows, for (ind = 0; ind < nindexes; ind++) { AnlIndexData *thisdata = &indexdata[ind]; - IndexInfo *indexInfo = thisdata->indexInfo; + IndexInfo *indexInfo = thisdata->indexInfo; int attr_cnt = thisdata->attr_cnt; TupleTable tupleTable; TupleTableSlot *slot; @@ -526,8 +527,9 @@ compute_index_stats(Relation onerel, double totalrows, if (attr_cnt > 0) { /* - * Evaluate the index row to compute expression values. - * We could do this by hand, but FormIndexDatum is convenient. + * Evaluate the index row to compute expression values. We + * could do this by hand, but FormIndexDatum is + * convenient. */ FormIndexDatum(indexInfo, heapTuple, @@ -535,16 +537,17 @@ compute_index_stats(Relation onerel, double totalrows, estate, attdata, nulls); + /* * Save just the columns we care about. */ for (i = 0; i < attr_cnt; i++) { VacAttrStats *stats = thisdata->vacattrstats[i]; - int attnum = stats->attr->attnum; + int attnum = stats->attr->attnum; - exprvals[tcnt] = attdata[attnum-1]; - exprnulls[tcnt] = (nulls[attnum-1] == 'n'); + exprvals[tcnt] = attdata[attnum - 1]; + exprnulls[tcnt] = (nulls[attnum - 1] == 'n'); tcnt++; } } @@ -552,7 +555,8 @@ compute_index_stats(Relation onerel, double totalrows, /* * Having counted the number of rows that pass the predicate in - * the sample, we can estimate the total number of rows in the index. + * the sample, we can estimate the total number of rows in the + * index. */ thisdata->tupleFract = (double) numindexrows / (double) numrows; totalindexrows = ceil(thisdata->tupleFract * totalrows); @@ -630,7 +634,7 @@ examine_attribute(Relation onerel, int attnum) stats->tupattnum = attnum; /* - * Call the type-specific typanalyze function. If none is specified, + * Call the type-specific typanalyze function. If none is specified, * use std_typanalyze(). */ if (OidIsValid(stats->attrtype->typanalyze)) @@ -667,10 +671,10 @@ static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize) { bs->N = nblocks; /* measured table size */ + /* - * If we decide to reduce samplesize for tables that have less or - * not much more than samplesize blocks, here is the place to do - * it. + * If we decide to reduce samplesize for tables that have less or not + * much more than samplesize blocks, here is the place to do it. */ bs->n = samplesize; bs->t = 0; /* blocks scanned so far */ @@ -686,10 +690,10 @@ BlockSampler_HasMore(BlockSampler bs) static BlockNumber BlockSampler_Next(BlockSampler bs) { - BlockNumber K = bs->N - bs->t; /* remaining blocks */ + BlockNumber K = bs->N - bs->t; /* remaining blocks */ int k = bs->n - bs->m; /* blocks still to sample */ - double p; /* probability to skip block */ - double V; /* random */ + double p; /* probability to skip block */ + double V; /* random */ Assert(BlockSampler_HasMore(bs)); /* hence K > 0 and k > 0 */ @@ -706,7 +710,7 @@ BlockSampler_Next(BlockSampler bs) * If we are to skip, we should advance t (hence decrease K), and * repeat the same probabilistic test for the next block. The naive * implementation thus requires a random_fract() call for each block - * number. But we can reduce this to one random_fract() call per + * number. 
But we can reduce this to one random_fract() call per * selected block, by noting that each time the while-test succeeds, * we can reinterpret V as a uniform random number in the range 0 to p. * Therefore, instead of choosing a new V, we just adjust p to be @@ -770,11 +774,11 @@ static int acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, double *totalrows) { - int numrows = 0; /* # rows collected */ - double liverows = 0; /* # rows seen */ + int numrows = 0; /* # rows collected */ + double liverows = 0; /* # rows seen */ double deadrows = 0; - double rowstoskip = -1; /* -1 means not set yet */ - BlockNumber totalblocks; + double rowstoskip = -1; /* -1 means not set yet */ + BlockNumber totalblocks; BlockSamplerData bs; double rstate; @@ -826,14 +830,13 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, { /* * The first targrows live rows are simply copied into the - * reservoir. - * Then we start replacing tuples in the sample until - * we reach the end of the relation. This algorithm is - * from Jeff Vitter's paper (see full citation below). + * reservoir. Then we start replacing tuples in the sample + * until we reach the end of the relation. This algorithm + * is from Jeff Vitter's paper (see full citation below). * It works by repeatedly computing the number of tuples * to skip before selecting a tuple, which replaces a - * randomly chosen element of the reservoir (current - * set of tuples). At all times the reservoir is a true + * randomly chosen element of the reservoir (current set + * of tuples). At all times the reservoir is a true * random sample of the tuples we've passed over so far, * so when we fall off the end of the relation we're done. */ @@ -842,10 +845,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, else { /* - * t in Vitter's paper is the number of records already - * processed. If we need to compute a new S value, we - * must use the not-yet-incremented value of liverows - * as t. + * t in Vitter's paper is the number of records + * already processed. If we need to compute a new S + * value, we must use the not-yet-incremented value of + * liverows as t. */ if (rowstoskip < 0) rowstoskip = get_next_S(liverows, targrows, &rstate); @@ -853,10 +856,10 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, if (rowstoskip <= 0) { /* - * Found a suitable tuple, so save it, - * replacing one old tuple at random + * Found a suitable tuple, so save it, replacing + * one old tuple at random */ - int k = (int) (targrows * random_fract()); + int k = (int) (targrows * random_fract()); Assert(k >= 0 && k < targrows); heap_freetuple(rows[k]); @@ -874,9 +877,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, else { /* - * Count dead rows, but not empty slots. This information is - * currently not used, but it seems likely we'll want it - * someday. + * Count dead rows, but not empty slots. This information + * is currently not used, but it seems likely we'll want + * it someday. */ if (targtuple.t_data != NULL) deadrows += 1; @@ -888,12 +891,12 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, } /* - * If we didn't find as many tuples as we wanted then we're done. - * No sort is needed, since they're already in order. + * If we didn't find as many tuples as we wanted then we're done. No + * sort is needed, since they're already in order. * * Otherwise we need to sort the collected tuples by position - * (itempointer). 
It's not worth worrying about corner cases - * where the tuples are already sorted. + * (itempointer). It's not worth worrying about corner cases where + * the tuples are already sorted. */ if (numrows == targrows) qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows); @@ -907,7 +910,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows, *totalrows = 0.0; /* - * Emit some interesting relation info + * Emit some interesting relation info */ ereport(elevel, (errmsg("\"%s\": scanned %d of %u pages, " @@ -1128,10 +1131,10 @@ update_attstats(Oid relid, int natts, VacAttrStats **vacattrstats) i = 0; values[i++] = ObjectIdGetDatum(relid); /* starelid */ - values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */ - values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */ + values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */ + values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */ values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */ - values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */ + values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */ for (k = 0; k < STATISTIC_NUM_SLOTS; k++) { values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */ @@ -1305,13 +1308,13 @@ static int *datumCmpTupnoLink; static void compute_minimal_stats(VacAttrStatsP stats, - AnalyzeAttrFetchFunc fetchfunc, - int samplerows, - double totalrows); + AnalyzeAttrFetchFunc fetchfunc, + int samplerows, + double totalrows); static void compute_scalar_stats(VacAttrStatsP stats, - AnalyzeAttrFetchFunc fetchfunc, - int samplerows, - double totalrows); + AnalyzeAttrFetchFunc fetchfunc, + int samplerows, + double totalrows); static int compare_scalars(const void *a, const void *b); static int compare_mcvs(const void *a, const void *b); diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c index a0cb1cc393a..f9d257d1a11 100644 --- a/src/backend/commands/async.c +++ b/src/backend/commands/async.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.114 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.115 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,7 +106,8 @@ */ static List *pendingNotifies = NIL; -static List *upperPendingNotifies = NIL; /* list of upper-xact lists */ +static List *upperPendingNotifies = NIL; /* list of upper-xact + * lists */ /* * State for inbound notifies consists of two flags: one saying whether @@ -524,25 +525,27 @@ AtCommit_Notify(void) rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl); + /* * We cannot use simple_heap_update here because the tuple * could have been modified by an uncommitted transaction; * specifically, since UNLISTEN releases exclusive lock on - * the table before commit, the other guy could already have - * tried to unlisten. There are no other cases where we - * should be able to see an uncommitted update or delete. - * Therefore, our response to a HeapTupleBeingUpdated result - * is just to ignore it. We do *not* wait for the other - * guy to commit --- that would risk deadlock, and we don't - * want to block while holding the table lock anyway for - * performance reasons. We also ignore HeapTupleUpdated, - * which could occur if the other guy commits between our - * heap_getnext and heap_update calls. 
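The analyze.c hunks above rewrap the description of row sampling: the first targrows live rows fill the reservoir, after which each later row replaces a randomly chosen slot so the sample stays uniform (the backend computes Vitter-style skip counts to avoid one random draw per row). A self-contained sketch of the underlying reservoir idea in plain C, using the simple per-row variant rather than the skip optimization:

#include <stdio.h>
#include <stdlib.h>

#define TARGROWS 5

int
main(void)
{
    int         reservoir[TARGROWS];
    long        t = 0;              /* rows seen so far */
    int         row;

    srand(12345);                   /* fixed seed for repeatability */

    /* pretend rows 0..99 stream past, as heapscan tuples would */
    for (row = 0; row < 100; row++)
    {
        if (t < TARGROWS)
        {
            /* the first TARGROWS rows are simply copied into the reservoir */
            reservoir[t] = row;
        }
        else
        {
            /*
             * Keep the current row with probability TARGROWS/(t+1); if
             * kept, it replaces a uniformly random reservoir slot, so the
             * reservoir remains a uniform sample at every step.
             */
            long        keep = rand() % (t + 1);

            if (keep < TARGROWS)
                reservoir[keep] = row;
        }
        t++;
    }

    for (row = 0; row < TARGROWS; row++)
        printf("sampled row %d\n", reservoir[row]);
    return 0;
}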
+ * the table before commit, the other guy could already + * have tried to unlisten. There are no other cases where + * we should be able to see an uncommitted update or + * delete. Therefore, our response to a + * HeapTupleBeingUpdated result is just to ignore it. We + * do *not* wait for the other guy to commit --- that + * would risk deadlock, and we don't want to block while + * holding the table lock anyway for performance reasons. + * We also ignore HeapTupleUpdated, which could occur if + * the other guy commits between our heap_getnext and + * heap_update calls. */ result = heap_update(lRel, &lTuple->t_self, rTuple, &ctid, GetCurrentCommandId(), SnapshotAny, - false /* no wait for commit */); + false /* no wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: @@ -620,7 +623,7 @@ AtAbort_Notify(void) void AtSubStart_Notify(void) { - MemoryContext old_cxt; + MemoryContext old_cxt; /* Keep the list-of-lists in TopTransactionContext for simplicity */ old_cxt = MemoryContextSwitchTo(TopTransactionContext); @@ -640,13 +643,14 @@ AtSubStart_Notify(void) void AtSubCommit_Notify(void) { - List *parentPendingNotifies; + List *parentPendingNotifies; parentPendingNotifies = (List *) linitial(upperPendingNotifies); upperPendingNotifies = list_delete_first(upperPendingNotifies); /* - * We could try to eliminate duplicates here, but it seems not worthwhile. + * We could try to eliminate duplicates here, but it seems not + * worthwhile. */ pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies); } @@ -836,7 +840,7 @@ EnableNotifyInterrupt(void) bool DisableNotifyInterrupt(void) { - bool result = (notifyInterruptEnabled != 0); + bool result = (notifyInterruptEnabled != 0); notifyInterruptEnabled = 0; @@ -914,11 +918,12 @@ ProcessIncomingNotify(void) relname, (int) sourcePID); NotifyMyFrontEnd(relname, sourcePID); + /* * Rewrite the tuple with 0 in notification column. * - * simple_heap_update is safe here because no one else would - * have tried to UNLISTEN us, so there can be no uncommitted + * simple_heap_update is safe here because no one else would have + * tried to UNLISTEN us, so there can be no uncommitted * changes. */ rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl); diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 7b618db0727..0bce21ffb96 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -11,7 +11,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.128 2004/08/29 04:12:29 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.129 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -286,8 +286,8 @@ cluster_rel(RelToCluster *rvtc, bool recheck) /* * We grab exclusive access to the target rel and index for the * duration of the transaction. (This is redundant for the single- - * transaction case, since cluster() already did it.) The index - * lock is taken inside check_index_is_clusterable. + * transaction case, since cluster() already did it.) The index lock + * is taken inside check_index_is_clusterable. 
*/ OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock); @@ -391,7 +391,7 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid) if (isOtherTempNamespace(RelationGetNamespace(OldHeap))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot cluster temporary tables of other sessions"))); + errmsg("cannot cluster temporary tables of other sessions"))); /* Drop relcache refcnt on OldIndex, but keep lock */ index_close(OldIndex); @@ -438,7 +438,7 @@ mark_index_clustered(Relation rel, Oid indexOid) foreach(index, RelationGetIndexList(rel)) { - Oid thisIndexOid = lfirst_oid(index); + Oid thisIndexOid = lfirst_oid(index); indexTuple = SearchSysCacheCopy(INDEXRELID, ObjectIdGetDatum(thisIndexOid), @@ -540,8 +540,8 @@ rebuild_relation(Relation OldHeap, Oid indexOid) /* performDeletion does CommandCounterIncrement at end */ /* - * Rebuild each index on the relation (but not the toast table, - * which is all-new at this point). We do not need + * Rebuild each index on the relation (but not the toast table, which + * is all-new at this point). We do not need * CommandCounterIncrement() because reindex_relation does it. */ reindex_relation(tableOid, false); @@ -569,7 +569,7 @@ make_new_heap(Oid OIDOldHeap, const char *NewName, Oid NewTableSpace) OIDNewHeap = heap_create_with_catalog(NewName, RelationGetNamespace(OldHeap), - NewTableSpace, + NewTableSpace, tupdesc, OldHeap->rd_rel->relkind, OldHeap->rd_rel->relisshared, @@ -745,8 +745,8 @@ swap_relation_files(Oid r1, Oid r2) * their new owning relations. Otherwise the wrong one will get * dropped ... * - * NOTE: it is possible that only one table has a toast table; this - * can happen in CLUSTER if there were dropped columns in the old table, + * NOTE: it is possible that only one table has a toast table; this can + * happen in CLUSTER if there were dropped columns in the old table, * and in ALTER TABLE when adding or changing type of columns. * * NOTE: at present, a TOAST table's only dependency is the one on its @@ -802,15 +802,15 @@ swap_relation_files(Oid r1, Oid r2) /* * Blow away the old relcache entries now. We need this kluge because * relcache.c keeps a link to the smgr relation for the physical file, - * and that will be out of date as soon as we do CommandCounterIncrement. - * Whichever of the rels is the second to be cleared during cache - * invalidation will have a dangling reference to an already-deleted smgr - * relation. Rather than trying to avoid this by ordering operations - * just so, it's easiest to not have the relcache entries there at all. - * (Fortunately, since one of the entries is local in our transaction, - * it's sufficient to clear out our own relcache this way; the problem - * cannot arise for other backends when they see our update on the - * non-local relation.) + * and that will be out of date as soon as we do + * CommandCounterIncrement. Whichever of the rels is the second to be + * cleared during cache invalidation will have a dangling reference to + * an already-deleted smgr relation. Rather than trying to avoid this + * by ordering operations just so, it's easiest to not have the + * relcache entries there at all. (Fortunately, since one of the + * entries is local in our transaction, it's sufficient to clear out + * our own relcache this way; the problem cannot arise for other + * backends when they see our update on the non-local relation.) 
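mark_index_clustered(), whose loop appears above, walks the table's entire index list so that exactly one pg_index entry ends up with indisclustered set. A generic sketch of that invariant-preserving pass, with plain structs standing in for catalog tuples:

#include <stdio.h>

typedef unsigned int Oid;

struct index_entry
{
    Oid         oid;
    int         indisclustered;
};

/* make 'target' the only index flagged as clustered */
static void
mark_only_index_clustered(struct index_entry *indexes, int nindexes, Oid target)
{
    int         i;

    for (i = 0; i < nindexes; i++)
        indexes[i].indisclustered = (indexes[i].oid == target);
}

int
main(void)
{
    struct index_entry idx[] = {{16390, 1}, {16391, 0}, {16392, 0}};
    int         i;

    mark_only_index_clustered(idx, 3, 16392);
    for (i = 0; i < 3; i++)
        printf("index %u clustered=%d\n", idx[i].oid, idx[i].indisclustered);
    return 0;
}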
*/ RelationForgetRelation(r1); RelationForgetRelation(r2); diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c index 8e3e3a8cafe..8a1b2e0c4c8 100644 --- a/src/backend/commands/comment.c +++ b/src/backend/commands/comment.c @@ -7,7 +7,7 @@ * Copyright (c) 1996-2004, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.78 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.79 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -123,10 +123,10 @@ CommentObject(CommentStmt *stmt) CommentOpClass(stmt->objname, stmt->objargs, stmt->comment); break; case OBJECT_LARGEOBJECT: - CommentLargeObject(stmt->objname, stmt->comment); + CommentLargeObject(stmt->objname, stmt->comment); break; case OBJECT_CAST: - CommentCast(stmt->objname, stmt->objargs, stmt->comment); + CommentCast(stmt->objname, stmt->objargs, stmt->comment); break; default: elog(ERROR, "unrecognized object type: %d", @@ -401,8 +401,8 @@ CommentAttribute(List *qualname, char *comment) if (attnum == InvalidAttrNumber) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - attrname, RelationGetRelationName(relation)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + attrname, RelationGetRelationName(relation)))); /* Create the comment using the relation's oid */ @@ -462,7 +462,8 @@ CommentDatabase(List *qualname, char *comment) /* Only allow comments on the current database */ if (oid != MyDatabaseId) { - ereport(WARNING, /* throw just a warning so pg_restore doesn't fail */ + ereport(WARNING, /* throw just a warning so pg_restore + * doesn't fail */ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("database comments may only be applied to the current database"))); return; @@ -586,7 +587,7 @@ CommentRule(List *qualname, char *comment) ForwardScanDirection))) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("there are multiple rules named \"%s\"", rulename), + errmsg("there are multiple rules named \"%s\"", rulename), errhint("Specify a relation name as well as a rule name."))); heap_endscan(scanDesc); @@ -615,8 +616,8 @@ CommentRule(List *qualname, char *comment) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("rule \"%s\" for relation \"%s\" does not exist", - rulename, RelationGetRelationName(relation)))); + errmsg("rule \"%s\" for relation \"%s\" does not exist", + rulename, RelationGetRelationName(relation)))); Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class); ruleoid = HeapTupleGetOid(tuple); ReleaseSysCache(tuple); @@ -832,8 +833,8 @@ CommentTrigger(List *qualname, char *comment) if (!HeapTupleIsValid(triggertuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("trigger \"%s\" for table \"%s\" does not exist", - trigname, RelationGetRelationName(relation)))); + errmsg("trigger \"%s\" for table \"%s\" does not exist", + trigname, RelationGetRelationName(relation)))); oid = HeapTupleGetOid(triggertuple); @@ -924,8 +925,8 @@ CommentConstraint(List *qualname, char *comment) if (!OidIsValid(conOid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("constraint \"%s\" for table \"%s\" does not exist", - conName, RelationGetRelationName(relation)))); + errmsg("constraint \"%s\" for table \"%s\" does not exist", + conName, RelationGetRelationName(relation)))); /* Create the comment with the 
pg_constraint oid */ CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment); @@ -1003,7 +1004,7 @@ CommentLanguage(List *qualname, char *comment) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to comment on procedural language"))); + errmsg("must be superuser to comment on procedural language"))); /* pg_language doesn't have a hard-coded OID, so must look it up */ classoid = get_system_catalog_relid(LanguageRelationName); @@ -1084,7 +1085,7 @@ CommentOpClass(List *qualname, List *arguments, char *comment) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("operator class \"%s\" does not exist for access method \"%s\"", - NameListToString(qualname), amname))); + NameListToString(qualname), amname))); opcID = HeapTupleGetOid(tuple); @@ -1116,7 +1117,7 @@ CommentLargeObject(List *qualname, char *comment) { Oid loid; Oid classoid; - Node *node; + Node *node; Assert(list_length(qualname) == 1); node = (Node *) linitial(qualname); @@ -1127,19 +1128,20 @@ CommentLargeObject(List *qualname, char *comment) loid = intVal(node); break; case T_Float: + /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid - * OID strings. + * constants by the lexer. Accept these if they are valid OID + * strings. */ loid = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(strVal(node)))); + CStringGetDatum(strVal(node)))); break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); /* keep compiler quiet */ - loid = InvalidOid; + loid = InvalidOid; } /* check that the large object exists */ @@ -1152,7 +1154,7 @@ CommentLargeObject(List *qualname, char *comment) classoid = get_system_catalog_relid(LargeObjectRelationName); /* Call CreateComments() to create/drop the comments */ - CreateComments(loid, classoid, 0, comment); + CreateComments(loid, classoid, 0, comment); } /* @@ -1182,7 +1184,7 @@ CommentCast(List *qualname, List *arguments, char *comment) Assert(list_length(arguments) == 1); targettype = (TypeName *) linitial(arguments); Assert(IsA(targettype, TypeName)); - + sourcetypeid = typenameTypeId(sourcetype); if (!OidIsValid(sourcetypeid)) ereport(ERROR, @@ -1210,7 +1212,7 @@ CommentCast(List *qualname, List *arguments, char *comment) /* Get the OID of the cast */ castOid = HeapTupleGetOid(tuple); - + /* Permission check */ if (!pg_type_ownercheck(sourcetypeid, GetUserId()) && !pg_type_ownercheck(targettypeid, GetUserId())) @@ -1226,5 +1228,5 @@ CommentCast(List *qualname, List *arguments, char *comment) classoid = get_system_catalog_relid(CastRelationName); /* Call CreateComments() to create/drop the comments */ - CreateComments(castOid, classoid, 0, comment); + CreateComments(castOid, classoid, 0, comment); } diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c index 44b2ef266f7..751e0b9152e 100644 --- a/src/backend/commands/conversioncmds.c +++ b/src/backend/commands/conversioncmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.14 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.15 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -181,7 +181,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId) Oid conversionOid; HeapTuple tup; Relation rel; - Form_pg_conversion convForm; + Form_pg_conversion convForm; rel = 
heap_openr(ConversionRelationName, RowExclusiveLock); @@ -200,7 +200,7 @@ AlterConversionOwner(List *name, AclId newOwnerSysId) convForm = (Form_pg_conversion) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -212,7 +212,10 @@ AlterConversionOwner(List *name, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ convForm->conowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index d060785d8d6..5793c0b2bbb 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.229 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.230 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -133,22 +133,22 @@ static void DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids, char *delim, char *null_print, bool csv_mode, char *quote, char *escape, List *force_quote_atts, bool fe_copy); static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, - char *delim, char *null_print, bool csv_mode, char *quote, char *escape, + char *delim, char *null_print, bool csv_mode, char *quote, char *escape, List *force_quote_atts); static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, - char *delim, char *null_print, bool csv_mode, char *quote, char *escape, + char *delim, char *null_print, bool csv_mode, char *quote, char *escape, List *force_notnull_atts); static bool CopyReadLine(void); static char *CopyReadAttribute(const char *delim, const char *null_print, - CopyReadResult *result, bool *isnull); + CopyReadResult *result, bool *isnull); static char *CopyReadAttributeCSV(const char *delim, const char *null_print, - char *quote, char *escape, - CopyReadResult *result, bool *isnull); + char *quote, char *escape, + CopyReadResult *result, bool *isnull); static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo, Oid typioparam, bool *isnull); static void CopyAttributeOut(char *string, char *delim); static void CopyAttributeOutCSV(char *string, char *delim, char *quote, - char *escape, bool force_quote); + char *escape, bool force_quote); static List *CopyGetAttnums(Relation rel, List *attnamelist); static void limit_printout_length(StringInfo buf); @@ -413,7 +413,7 @@ CopyGetData(void *databuf, int datasize) /* Try to receive another message */ int mtype; - readmessage: + readmessage: mtype = pq_getbyte(); if (mtype == EOF) ereport(ERROR, @@ -439,11 +439,12 @@ CopyGetData(void *databuf, int datasize) break; case 'H': /* Flush */ case 'S': /* Sync */ + /* * Ignore Flush/Sync for the convenience of * client libraries (such as libpq) that may - * send those without noticing that the command - * they just sent was COPY. + * send those without noticing that the + * command they just sent was COPY. 
*/ goto readmessage; default: @@ -693,7 +694,7 @@ DoCopy(const CopyStmt *stmt) bool fe_copy = false; bool binary = false; bool oids = false; - bool csv_mode = false; + bool csv_mode = false; char *delim = NULL; char *quote = NULL; char *escape = NULL; @@ -773,7 +774,7 @@ DoCopy(const CopyStmt *stmt) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); - force_quote = (List *)defel->arg; + force_quote = (List *) defel->arg; } else if (strcmp(defel->defname, "force_notnull") == 0) { @@ -781,7 +782,7 @@ DoCopy(const CopyStmt *stmt) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("conflicting or redundant options"))); - force_notnull = (List *)defel->arg; + force_notnull = (List *) defel->arg; } else elog(ERROR, "option \"%s\" not recognized", @@ -806,7 +807,7 @@ DoCopy(const CopyStmt *stmt) /* Set defaults */ if (!delim) delim = csv_mode ? "," : "\t"; - + if (!null_print) null_print = csv_mode ? "" : "\\N"; @@ -817,7 +818,7 @@ DoCopy(const CopyStmt *stmt) if (!escape) escape = quote; } - + /* * Only single-character delimiter strings are supported. */ @@ -862,7 +863,7 @@ DoCopy(const CopyStmt *stmt) if (force_quote != NIL && is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force quote only available using COPY TO"))); + errmsg("COPY force quote only available using COPY TO"))); /* * Check force_notnull @@ -870,11 +871,11 @@ DoCopy(const CopyStmt *stmt) if (!csv_mode && force_notnull != NIL) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force not null available only in CSV mode"))); + errmsg("COPY force not null available only in CSV mode"))); if (force_notnull != NIL && !is_from) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("COPY force not null only available using COPY FROM"))); + errmsg("COPY force not null only available using COPY FROM"))); /* * Don't allow the delimiter to appear in the null string. @@ -948,11 +949,11 @@ DoCopy(const CopyStmt *stmt) if (!list_member_int(attnumlist, attnum)) ereport(ERROR, (errcode(ERRCODE_INVALID_COLUMN_REFERENCE), - errmsg("FORCE QUOTE column \"%s\" not referenced by COPY", - NameStr(attr[attnum - 1]->attname)))); + errmsg("FORCE QUOTE column \"%s\" not referenced by COPY", + NameStr(attr[attnum - 1]->attname)))); } } - + /* * Check that FORCE NOT NULL references valid COPY columns */ @@ -975,7 +976,7 @@ DoCopy(const CopyStmt *stmt) NameStr(attr[attnum - 1]->attname)))); } } - + /* * Set up variables to avoid per-attribute overhead. */ @@ -1152,9 +1153,9 @@ DoCopyTo(Relation rel, List *attnumlist, bool binary, bool oids, PG_CATCH(); { /* - * Make sure we turn off old-style COPY OUT mode upon error. - * It is okay to do this in all cases, since it does nothing - * if the mode is not on. + * Make sure we turn off old-style COPY OUT mode upon error. It is + * okay to do this in all cases, since it does nothing if the mode + * is not on. 
*/ pq_endcopyout(true); PG_RE_THROW(); @@ -1202,10 +1203,10 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, { int attnum = lfirst_int(cur); Oid out_func_oid; - + if (binary) getTypeBinaryOutputInfo(attr[attnum - 1]->atttypid, - &out_func_oid, &typioparams[attnum - 1], + &out_func_oid, &typioparams[attnum - 1], &isvarlena[attnum - 1]); else getTypeOutputInfo(attr[attnum - 1]->atttypid, @@ -1266,6 +1267,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL) { bool need_delim = false; + CHECK_FOR_INTERRUPTS(); MemoryContextReset(mycontext); @@ -1325,13 +1327,13 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, { string = DatumGetCString(FunctionCall3(&out_functions[attnum - 1], value, - ObjectIdGetDatum(typioparams[attnum - 1]), + ObjectIdGetDatum(typioparams[attnum - 1]), Int32GetDatum(attr[attnum - 1]->atttypmod))); if (csv_mode) { CopyAttributeOutCSV(string, delim, quote, escape, - (strcmp(string, null_print) == 0 || - force_quote[attnum - 1])); + (strcmp(string, null_print) == 0 || + force_quote[attnum - 1])); } else CopyAttributeOut(string, delim); @@ -1343,7 +1345,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids, outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1], value, - ObjectIdGetDatum(typioparams[attnum - 1]))); + ObjectIdGetDatum(typioparams[attnum - 1]))); /* We assume the result will not have been toasted */ CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ); CopySendData(VARDATA(outputbytes), @@ -1444,7 +1446,7 @@ limit_printout_length(StringInfo buf) { #define MAX_COPY_DATA_DISPLAY 100 - int len; + int len; /* Fast path if definitely okay */ if (buf->len <= MAX_COPY_DATA_DISPLAY) @@ -1551,7 +1553,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, /* Fetch the input function and typioparam info */ if (binary) getTypeBinaryInputInfo(attr[attnum - 1]->atttypid, - &in_func_oid, &typioparams[attnum - 1]); + &in_func_oid, &typioparams[attnum - 1]); else getTypeInputInfo(attr[attnum - 1]->atttypid, &in_func_oid, &typioparams[attnum - 1]); @@ -1561,7 +1563,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, force_notnull[attnum - 1] = true; else force_notnull[attnum - 1] = false; - + /* Get default info if needed */ if (!list_member_int(attnumlist, attnum)) { @@ -1603,7 +1605,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, COERCE_IMPLICIT_CAST, false); constraintexprs[attnum - 1] = ExecPrepareExpr((Expr *) node, - estate); + estate); hasConstraints = true; } } @@ -1718,10 +1720,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, done = CopyReadLine(); /* - * EOF at start of line means we're done. If we see EOF - * after some characters, we act as though it was newline - * followed by EOF, ie, process the line and then exit loop - * on next iteration. + * EOF at start of line means we're done. If we see EOF after + * some characters, we act as though it was newline followed + * by EOF, ie, process the line and then exit loop on next + * iteration. 
*/ if (done && line_buf.len == 0) break; @@ -1770,29 +1772,29 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, if (csv_mode) { string = CopyReadAttributeCSV(delim, null_print, quote, - escape, &result, &isnull); + escape, &result, &isnull); if (result == UNTERMINATED_FIELD) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("unterminated CSV quoted field"))); + errmsg("unterminated CSV quoted field"))); } else - string = CopyReadAttribute(delim, null_print, + string = CopyReadAttribute(delim, null_print, &result, &isnull); if (csv_mode && isnull && force_notnull[m]) { - string = null_print; /* set to NULL string */ + string = null_print; /* set to NULL string */ isnull = false; } - /* we read an SQL NULL, no need to do anything */ + /* we read an SQL NULL, no need to do anything */ if (!isnull) { copy_attname = NameStr(attr[m]->attname); values[m] = FunctionCall3(&in_functions[m], CStringGetDatum(string), - ObjectIdGetDatum(typioparams[m]), + ObjectIdGetDatum(typioparams[m]), Int32GetDatum(attr[m]->atttypmod)); nulls[m] = ' '; copy_attname = NULL; @@ -1809,7 +1811,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, if (result == NORMAL_ATTR && line_buf.len != 0) ereport(ERROR, (errcode(ERRCODE_BAD_COPY_FILE_FORMAT), - errmsg("extra data after last expected column"))); + errmsg("extra data after last expected column"))); } else { @@ -1835,8 +1837,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids, copy_attname = "oid"; loaded_oid = DatumGetObjectId(CopyReadBinaryAttribute(0, - &oid_in_function, - oid_typioparam, + &oid_in_function, + oid_typioparam, &isnull)); if (isnull || loaded_oid == InvalidOid) ereport(ERROR, @@ -2022,14 +2024,14 @@ CopyReadLine(void) result = false; /* - * In this loop we only care for detecting newlines (\r and/or \n) - * and the end-of-copy marker (\.). For backwards compatibility - * we allow backslashes to escape newline characters. Backslashes - * other than the end marker get put into the line_buf, since - * CopyReadAttribute does its own escape processing. These four - * characters, and only these four, are assumed the same in frontend - * and backend encodings. We do not assume that second and later bytes - * of a frontend multibyte character couldn't look like ASCII characters. + * In this loop we only care for detecting newlines (\r and/or \n) and + * the end-of-copy marker (\.). For backwards compatibility we allow + * backslashes to escape newline characters. Backslashes other than + * the end marker get put into the line_buf, since CopyReadAttribute + * does its own escape processing. These four characters, and only + * these four, are assumed the same in frontend and backend encodings. + * We do not assume that second and later bytes of a frontend + * multibyte character couldn't look like ASCII characters. */ for (;;) { @@ -2120,9 +2122,9 @@ CopyReadLine(void) errmsg("end-of-copy marker does not match previous newline style"))); /* - * In protocol version 3, we should ignore anything - * after \. up to the protocol end of copy data. (XXX - * maybe better not to treat \. as special?) + * In protocol version 3, we should ignore anything after + * \. up to the protocol end of copy data. (XXX maybe + * better not to treat \. as special?) 
*/ if (copy_dest == COPY_NEW_FE) { @@ -2140,10 +2142,10 @@ CopyReadLine(void) /* * When client encoding != server, must be careful to read the - * extra bytes of a multibyte character exactly, since the encoding - * might not ensure they don't look like ASCII. When the encodings - * are the same, we need not do this, since no server encoding we - * use has ASCII-like following bytes. + * extra bytes of a multibyte character exactly, since the + * encoding might not ensure they don't look like ASCII. When the + * encodings are the same, we need not do this, since no server + * encoding we use has ASCII-like following bytes. */ if (change_encoding) { @@ -2162,7 +2164,7 @@ CopyReadLine(void) if (result) break; /* out of outer loop */ } - } /* end of outer loop */ + } /* end of outer loop */ /* * Done reading the line. Convert it to server encoding. @@ -2170,8 +2172,9 @@ CopyReadLine(void) * Note: set line_buf_converted to true *before* attempting conversion; * this prevents infinite recursion during error reporting should * pg_client_to_server() issue an error, due to copy_in_error_callback - * again attempting the same conversion. We'll end up issuing the message - * without conversion, which is bad but better than nothing ... + * again attempting the same conversion. We'll end up issuing the + * message without conversion, which is bad but better than nothing + * ... */ line_buf_converted = true; @@ -2295,9 +2298,11 @@ CopyReadAttribute(const char *delim, const char *null_print, case 'v': c = '\v'; break; - /* - * in all other cases, take the char after '\' literally - */ + + /* + * in all other cases, take the char after '\' + * literally + */ } } appendStringInfoCharMacro(&attribute_buf, c); @@ -2316,7 +2321,7 @@ CopyReadAttribute(const char *delim, const char *null_print, /* - * Read the value of a single attribute in CSV mode, + * Read the value of a single attribute in CSV mode, * performing de-escaping as needed. Escaping does not follow the normal * PostgreSQL text mode, but instead "standard" (i.e. common) CSV usage. * @@ -2329,7 +2334,7 @@ CopyReadAttribute(const char *delim, const char *null_print, * *result is set to indicate what terminated the read: * NORMAL_ATTR: column delimiter * END_OF_LINE: end of line - * UNTERMINATED_FIELD no quote detected at end of a quoted field + * UNTERMINATED_FIELD no quote detected at end of a quoted field * * In any case, the string read up to the terminator (or end of file) * is returned. 
@@ -2345,15 +2350,15 @@ static char * CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, char *escape, CopyReadResult *result, bool *isnull) { - char delimc = delim[0]; - char quotec = quote[0]; - char escapec = escape[0]; + char delimc = delim[0]; + char quotec = quote[0]; + char escapec = escape[0]; char c; int start_cursor = line_buf.cursor; int end_cursor = start_cursor; int input_len; - bool in_quote = false; - bool saw_quote = false; + bool in_quote = false; + bool saw_quote = false; /* reset attribute_buf to empty */ attribute_buf.len = 0; @@ -2367,18 +2372,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, /* handle multiline quoted fields */ if (in_quote && line_buf.cursor >= line_buf.len) { - bool done; + bool done; - switch(eol_type) + switch (eol_type) { case EOL_NL: - appendStringInfoString(&attribute_buf,"\n"); + appendStringInfoString(&attribute_buf, "\n"); break; case EOL_CR: - appendStringInfoString(&attribute_buf,"\r"); + appendStringInfoString(&attribute_buf, "\r"); break; case EOL_CRNL: - appendStringInfoString(&attribute_buf,"\r\n"); + appendStringInfoString(&attribute_buf, "\r\n"); break; case EOL_UNKNOWN: /* shouldn't happen - just keep going */ @@ -2396,16 +2401,18 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, if (line_buf.cursor >= line_buf.len) break; c = line_buf.data[line_buf.cursor++]; - /* - * unquoted field delimiter + + /* + * unquoted field delimiter */ if (!in_quote && c == delimc) { *result = NORMAL_ATTR; break; } - /* - * start of quoted field (or part of field) + + /* + * start of quoted field (or part of field) */ if (!in_quote && c == quotec) { @@ -2413,18 +2420,20 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, in_quote = true; continue; } - /* + + /* * escape within a quoted field */ if (in_quote && c == escapec) { - /* - * peek at the next char if available, and escape it if it - * is an escape char or a quote char + /* + * peek at the next char if available, and escape it if it is + * an escape char or a quote char */ if (line_buf.cursor <= line_buf.len) { - char nextc = line_buf.data[line_buf.cursor]; + char nextc = line_buf.data[line_buf.cursor]; + if (nextc == escapec || nextc == quotec) { appendStringInfoCharMacro(&attribute_buf, nextc); @@ -2433,10 +2442,11 @@ CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote, } } } + /* - * end of quoted field. - * Must do this test after testing for escape in case quote char - * and escape char are the same (which is the common case). + * end of quoted field. Must do this test after testing for escape + * in case quote char and escape char are the same (which is the + * common case). 
*/ if (in_quote && c == quotec) { @@ -2586,7 +2596,7 @@ CopyAttributeOut(char *server_string, char *delim) } /* - * Send CSV representation of one attribute, with conversion and + * Send CSV representation of one attribute, with conversion and * CSV type escaping */ static void @@ -2596,9 +2606,9 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote, char *string; char c; char delimc = delim[0]; - char quotec = quote[0]; - char escapec = escape[0]; - char *test_string; + char quotec = quote[0]; + char escapec = escape[0]; + char *test_string; bool same_encoding; int mblen; int i; @@ -2610,13 +2620,14 @@ CopyAttributeOutCSV(char *server_string, char *delim, char *quote, else string = server_string; - /* have to run through the string twice, - * first time to see if it needs quoting, second to actually send it + /* + * have to run through the string twice, first time to see if it needs + * quoting, second to actually send it */ - for(test_string = string; - !use_quote && (c = *test_string) != '\0'; - test_string += mblen) + for (test_string = string; + !use_quote && (c = *test_string) != '\0'; + test_string += mblen) { if (c == delimc || c == quotec || c == '\n' || c == '\r') use_quote = true; @@ -2695,8 +2706,8 @@ CopyGetAttnums(Relation rel, List *attnamelist) if (list_member_int(attnums, attnum)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" specified more than once", - name))); + errmsg("column \"%s\" specified more than once", + name))); attnums = lappend_int(attnums, attnum); } } diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 055b7be7eb3..f7ef440b02e 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.140 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.141 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -78,7 +78,7 @@ createdb(const CreatedbStmt *stmt) Oid dboid; AclId datdba; ListCell *option; - DefElem *dtablespacename = NULL; + DefElem *dtablespacename = NULL; DefElem *downer = NULL; DefElem *dtemplate = NULL; DefElem *dencoding = NULL; @@ -86,6 +86,7 @@ createdb(const CreatedbStmt *stmt) char *dbowner = NULL; char *dbtemplate = NULL; int encoding = -1; + #ifndef WIN32 char buf[2 * MAXPGPATH + 100]; #endif @@ -224,7 +225,7 @@ createdb(const CreatedbStmt *stmt) &src_vacuumxid, &src_frozenxid, &src_deftablespace)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_DATABASE), - errmsg("template database \"%s\" does not exist", dbtemplate))); + errmsg("template database \"%s\" does not exist", dbtemplate))); /* * Permission check: to copy a DB that's not marked datistemplate, you @@ -265,7 +266,7 @@ createdb(const CreatedbStmt *stmt) if (dtablespacename && dtablespacename->arg) { char *tablespacename; - AclResult aclresult; + AclResult aclresult; tablespacename = strVal(dtablespacename->arg); dst_deftablespace = get_tablespace_oid(tablespacename); @@ -275,11 +276,11 @@ createdb(const CreatedbStmt *stmt) errmsg("tablespace \"%s\" does not exist", tablespacename))); /* check permissions */ - aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(), + aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(), ACL_CREATE); - if (aclresult != ACLCHECK_OK) - aclcheck_error(aclresult, ACL_KIND_TABLESPACE, - tablespacename); + if (aclresult != ACLCHECK_OK) + 
aclcheck_error(aclresult, ACL_KIND_TABLESPACE, + tablespacename); } else { @@ -308,22 +309,22 @@ createdb(const CreatedbStmt *stmt) closeAllVfds(); /* - * Iterate through all tablespaces of the template database, and - * copy each one to the new database. + * Iterate through all tablespaces of the template database, and copy + * each one to the new database. * - * If we are trying to change the default tablespace of the template, - * we require that the template not have any files in the new default - * tablespace. This avoids the need to merge two subdirectories. - * This could probably be improved later. + * If we are trying to change the default tablespace of the template, we + * require that the template not have any files in the new default + * tablespace. This avoids the need to merge two subdirectories. This + * could probably be improved later. */ rel = heap_openr(TableSpaceRelationName, AccessShareLock); scan = heap_beginscan(rel, SnapshotNow, 0, NULL); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid srctablespace = HeapTupleGetOid(tuple); - Oid dsttablespace; - char *srcpath; - char *dstpath; + Oid srctablespace = HeapTupleGetOid(tuple); + Oid dsttablespace; + char *srcpath; + char *dstpath; struct stat st; /* No need to copy global tablespace */ @@ -351,10 +352,11 @@ createdb(const CreatedbStmt *stmt) remove_dbtablespaces(dboid); ereport(ERROR, (errmsg("could not initialize database directory"), - errdetail("Directory \"%s\" already exists.", dstpath))); + errdetail("Directory \"%s\" already exists.", dstpath))); } #ifndef WIN32 + /* * Copy this subdirectory to the new location * @@ -374,7 +376,7 @@ createdb(const CreatedbStmt *stmt) errdetail("Failing system command was: %s", buf), errhint("Look in the postmaster's stderr log for more information."))); } -#else /* WIN32 */ +#else /* WIN32 */ if (copydir(srcpath, dstpath) != 0) { /* copydir should already have given details of its troubles */ @@ -382,7 +384,7 @@ createdb(const CreatedbStmt *stmt) ereport(ERROR, (errmsg("could not initialize database directory"))); } -#endif /* WIN32 */ +#endif /* WIN32 */ } heap_endscan(scan); heap_close(rel, AccessShareLock); @@ -772,7 +774,7 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId) Relation rel; ScanKeyData scankey; SysScanDesc scan; - Form_pg_database datForm; + Form_pg_database datForm; rel = heap_openr(DatabaseRelationName, RowExclusiveLock); ScanKeyInit(&scankey, @@ -789,16 +791,17 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId) datForm = (Form_pg_database) GETSTRUCT(tuple); - /* + /* * If the new owner is the same as the existing owner, consider the - * command to have succeeded. This is to be consistent with other objects. + * command to have succeeded. This is to be consistent with other + * objects. */ if (datForm->datdba != newOwnerSysId) { Datum repl_val[Natts_pg_database]; char repl_null[Natts_pg_database]; char repl_repl[Natts_pg_database]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -821,9 +824,9 @@ AlterDatabaseOwner(const char *dbname, AclId newOwnerSysId) * necessary when the ACL is non-null. 
*/ aclDatum = heap_getattr(tuple, - Anum_pg_database_datacl, - RelationGetDescr(rel), - &isNull); + Anum_pg_database_datacl, + RelationGetDescr(rel), + &isNull); if (!isNull) { newAcl = aclnewowner(DatumGetAclP(aclDatum), @@ -941,16 +944,16 @@ have_createdb_privilege(void) static void remove_dbtablespaces(Oid db_id) { - Relation rel; + Relation rel; HeapScanDesc scan; - HeapTuple tuple; + HeapTuple tuple; rel = heap_openr(TableSpaceRelationName, AccessShareLock); scan = heap_beginscan(rel, SnapshotNow, 0, NULL); while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { - Oid dsttablespace = HeapTupleGetOid(tuple); - char *dstpath; + Oid dsttablespace = HeapTupleGetOid(tuple); + char *dstpath; struct stat st; /* Don't mess with the global tablespace */ @@ -969,9 +972,9 @@ remove_dbtablespaces(Oid db_id) if (!rmtree(dstpath, true)) { ereport(WARNING, - (errmsg("could not remove database directory \"%s\"", - dstpath), - errhint("Look in the postmaster's stderr log for more information."))); + (errmsg("could not remove database directory \"%s\"", + dstpath), + errhint("Look in the postmaster's stderr log for more information."))); } pfree(dstpath); diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c index cc2643d6373..dc2ea2974a7 100644 --- a/src/backend/commands/define.c +++ b/src/backend/commands/define.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.90 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.91 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -126,8 +126,8 @@ bool defGetBoolean(DefElem *def) { /* - * Presently, boolean flags must simply be present or absent. - * Later we could allow 'flag = t', 'flag = f', etc. + * Presently, boolean flags must simply be present or absent. Later we + * could allow 'flag = t', 'flag = f', etc. */ if (def->arg == NULL) return true; @@ -265,7 +265,7 @@ defGetTypeLength(DefElem *def) case T_TypeName: /* cope if grammar chooses to believe "variable" is a typename */ if (pg_strcasecmp(TypeNameToString((TypeName *) def->arg), - "variable") == 0) + "variable") == 0) return -1; /* variable length */ break; case T_List: diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 29b4f30fce7..7ad3596fac6 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994-5, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.123 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.124 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -317,7 +317,7 @@ explain_outNode(StringInfo str, Plan *outer_plan, int indent, ExplainState *es) { - ListCell *l; + ListCell *l; char *pname; int i; diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index 8a139e56012..7bce0b9b9d2 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -3,14 +3,14 @@ * functioncmds.c * * Routines for CREATE and DROP FUNCTION commands and CREATE and DROP - * CAST commands. + * CAST commands. 
* * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.51 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.52 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * These routines take the parse tree and pick out the @@ -449,14 +449,14 @@ CreateFunction(CreateFunctionStmt *stmt) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("language \"%s\" does not exist", languageName), - (strcmp(languageName, "plperl") == 0 || - strcmp(languageName, "plperlu") == 0 || - strcmp(languageName, "plpgsql") == 0 || - strcmp(languageName, "plpythonu") == 0 || - strcmp(languageName, "pltcl") == 0 || - strcmp(languageName, "pltclu") == 0) ? + (strcmp(languageName, "plperl") == 0 || + strcmp(languageName, "plperlu") == 0 || + strcmp(languageName, "plpgsql") == 0 || + strcmp(languageName, "plpythonu") == 0 || + strcmp(languageName, "pltcl") == 0 || + strcmp(languageName, "pltclu") == 0) ? errhint("You need to use \"createlang\" to load the language into the database.") : 0)); - + languageOid = HeapTupleGetOid(languageTuple); languageStruct = (Form_pg_language) GETSTRUCT(languageTuple); @@ -490,7 +490,7 @@ CreateFunction(CreateFunctionStmt *stmt) &prorettype, &returnsSet); parameterCount = examine_parameter_list(stmt->parameters, languageOid, - parameterTypes, parameterNames); + parameterTypes, parameterNames); compute_attributes_with_style(stmt->withClause, &isStrict, &volatility); @@ -739,8 +739,8 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId) procOid = LookupFuncNameTypeNames(name, argtypes, false); tup = SearchSysCache(PROCOID, - ObjectIdGetDatum(procOid), - 0, 0, 0); + ObjectIdGetDatum(procOid), + 0, 0, 0); if (!HeapTupleIsValid(tup)) /* should not happen */ elog(ERROR, "cache lookup failed for function %u", procOid); procForm = (Form_pg_proc) GETSTRUCT(tup); @@ -750,9 +750,9 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("\"%s\" is an aggregate function", NameListToString(name)), - errhint("Use ALTER AGGREGATE to change owner of aggregate functions."))); + errhint("Use ALTER AGGREGATE to change owner of aggregate functions."))); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
*/ @@ -761,7 +761,7 @@ AlterFunctionOwner(List *name, List *argtypes, AclId newOwnerSysId) Datum repl_val[Natts_pg_proc]; char repl_null[Natts_pg_proc]; char repl_repl[Natts_pg_proc]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -968,7 +968,7 @@ CreateCast(CreateCastStmt *stmt) if (nargs < 1 || nargs > 3) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("cast function must take one to three arguments"))); + errmsg("cast function must take one to three arguments"))); if (procstruct->proargtypes[0] != sourcetypeid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index 04761fac688..6e550e67c68 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.124 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.125 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -47,10 +47,10 @@ /* non-export function prototypes */ static void CheckPredicate(Expr *predicate); static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP, - List *attList, - Oid relId, - char *accessMethodName, Oid accessMethodId, - bool isconstraint); + List *attList, + Oid relId, + char *accessMethodName, Oid accessMethodId, + bool isconstraint); static Oid GetIndexOpClass(List *opclass, Oid attrType, char *accessMethodName, Oid accessMethodId); static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId); @@ -143,7 +143,8 @@ DefineIndex(RangeVar *heapRelation, * Verify we (still) have CREATE rights in the rel's namespace. * (Presumably we did when the rel was created, but maybe not * anymore.) Skip check if caller doesn't want it. Also skip check - * if bootstrapping, since permissions machinery may not be working yet. + * if bootstrapping, since permissions machinery may not be working + * yet. */ if (check_rights && !IsBootstrapProcessingMode()) { @@ -159,7 +160,7 @@ DefineIndex(RangeVar *heapRelation, /* Determine tablespace to use */ if (tableSpaceName) { - AclResult aclresult; + AclResult aclresult; tablespaceId = get_tablespace_oid(tableSpaceName); if (!OidIsValid(tablespaceId)) @@ -173,7 +174,9 @@ DefineIndex(RangeVar *heapRelation, if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, tableSpaceName); - } else { + } + else + { /* Use the parent rel's tablespace */ tablespaceId = get_rel_tablespace(relationId); /* Note there is no additional permission check in this path */ @@ -256,9 +259,9 @@ DefineIndex(RangeVar *heapRelation, /* * If ALTER TABLE, check that there isn't already a PRIMARY KEY. - * In CREATE TABLE, we have faith that the parser rejected multiple - * pkey clauses; and CREATE INDEX doesn't have a way to say - * PRIMARY KEY, so it's no problem either. + * In CREATE TABLE, we have faith that the parser rejected + * multiple pkey clauses; and CREATE INDEX doesn't have a way to + * say PRIMARY KEY, so it's no problem either. */ if (is_alter_table && relationHasPrimaryKey(rel)) @@ -270,8 +273,8 @@ DefineIndex(RangeVar *heapRelation, } /* - * Check that all of the attributes in a primary key are marked as not - * null, otherwise attempt to ALTER TABLE .. SET NOT NULL + * Check that all of the attributes in a primary key are marked as + * not null, otherwise attempt to ALTER TABLE .. 
SET NOT NULL */ cmds = NIL; foreach(keys, attributeList) @@ -294,7 +297,7 @@ DefineIndex(RangeVar *heapRelation, if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull) { /* Add a subcommand to make this one NOT NULL */ - AlterTableCmd *cmd = makeNode(AlterTableCmd); + AlterTableCmd *cmd = makeNode(AlterTableCmd); cmd->subtype = AT_SetNotNull; cmd->name = key->name; @@ -318,15 +321,15 @@ DefineIndex(RangeVar *heapRelation, } /* - * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade - * to child tables? Currently, since the PRIMARY KEY - * itself doesn't cascade, we don't cascade the - * notnull constraint(s) either; but this is pretty debatable. + * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child + * tables? Currently, since the PRIMARY KEY itself doesn't + * cascade, we don't cascade the notnull constraint(s) either; but + * this is pretty debatable. * - * XXX: possible future improvement: when being called from - * ALTER TABLE, it would be more efficient to merge this with - * the outer ALTER TABLE, so as to avoid two scans. But that - * seems to complicate DefineIndex's API unduly. + * XXX: possible future improvement: when being called from ALTER + * TABLE, it would be more efficient to merge this with the outer + * ALTER TABLE, so as to avoid two scans. But that seems to + * complicate DefineIndex's API unduly. */ if (cmds) AlterTableInternal(relationId, cmds, false); @@ -352,15 +355,15 @@ DefineIndex(RangeVar *heapRelation, heap_close(rel, NoLock); /* - * Report index creation if appropriate (delay this till after most - * of the error checks) + * Report index creation if appropriate (delay this till after most of + * the error checks) */ if (isconstraint && !quiet) ereport(NOTICE, (errmsg("%s %s will create implicit index \"%s\" for table \"%s\"", - is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /", + is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /", primary ? "PRIMARY KEY" : "UNIQUE", - indexRelationName, RelationGetRelationName(rel)))); + indexRelationName, RelationGetRelationName(rel)))); index_create(relationId, indexRelationName, indexInfo, accessMethodId, tablespaceId, classObjectId, @@ -450,8 +453,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo, if (isconstraint) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" named in key does not exist", - attribute->name))); + errmsg("column \"%s\" named in key does not exist", + attribute->name))); else ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), @@ -488,11 +491,11 @@ ComputeIndexAttrs(IndexInfo *indexInfo, if (contain_subplans(attribute->expr)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in index expression"))); + errmsg("cannot use subquery in index expression"))); if (contain_agg_clause(attribute->expr)) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), - errmsg("cannot use aggregate function in index expression"))); + errmsg("cannot use aggregate function in index expression"))); /* * A expression using mutable functions is probably wrong, @@ -647,7 +650,7 @@ GetDefaultOpClass(Oid attrType, Oid accessMethodId) * than one exact match, then someone put bogus entries in pg_opclass. * * The initial search is done by namespace.c so that we only consider - * opclasses visible in the current namespace search path. (See also + * opclasses visible in the current namespace search path. (See also * typcache.c, which applies the same logic, but over all opclasses.) 
*/ for (opclass = OpclassGetCandidates(accessMethodId); @@ -962,16 +965,16 @@ ReindexTable(RangeVar *relation, bool force /* currently unused */ ) * separate transaction, so we can release the lock on it right away. */ void -ReindexDatabase(const char *dbname, bool force /* currently unused */, +ReindexDatabase(const char *dbname, bool force /* currently unused */ , bool all) { - Relation relationRelation; + Relation relationRelation; HeapScanDesc scan; - HeapTuple tuple; + HeapTuple tuple; MemoryContext private_context; MemoryContext old; - List *relids = NIL; - ListCell *l; + List *relids = NIL; + ListCell *l; AssertArg(dbname); @@ -1006,7 +1009,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */, /* * We always want to reindex pg_class first. This ensures that if * there is any corruption in pg_class' indexes, they will be fixed - * before we process any other tables. This is critical because + * before we process any other tables. This is critical because * reindexing itself will try to update pg_class. */ old = MemoryContextSwitchTo(private_context); @@ -1054,7 +1057,7 @@ ReindexDatabase(const char *dbname, bool force /* currently unused */, CommitTransactionCommand(); foreach(l, relids) { - Oid relid = lfirst_oid(l); + Oid relid = lfirst_oid(l); StartTransactionCommand(); SetQuerySnapshot(); /* might be needed for functions in diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c index 0b2bc391f1b..e0f58d9ab2b 100644 --- a/src/backend/commands/opclasscmds.c +++ b/src/backend/commands/opclasscmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.27 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.28 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -300,8 +300,8 @@ DefineOpClass(CreateOpClassStmt *stmt) errmsg("could not make operator class \"%s\" be default for type %s", opcname, TypeNameToString(stmt->datatype)), - errdetail("Operator class \"%s\" already is the default.", - NameStr(opclass->opcname)))); + errdetail("Operator class \"%s\" already is the default.", + NameStr(opclass->opcname)))); } systable_endscan(scan); @@ -419,6 +419,7 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid) if (optup == NULL) elog(ERROR, "cache lookup failed for operator %u", operOid); opform = (Form_pg_operator) GETSTRUCT(optup); + /* * btree operators must be binary ops returning boolean, and the * left-side input type must match the operator class' input type. @@ -434,10 +435,11 @@ assignOperSubtype(Oid amoid, Oid typeoid, Oid operOid) if (opform->oprleft != typeoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree operators must have index type as left input"))); + errmsg("btree operators must have index type as left input"))); + /* - * The subtype is "default" (0) if oprright matches the operator class, - * otherwise it is oprright. + * The subtype is "default" (0) if oprright matches the operator + * class, otherwise it is oprright. */ if (opform->oprright == typeoid) subtype = InvalidOid; @@ -471,6 +473,7 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid) if (proctup == NULL) elog(ERROR, "cache lookup failed for function %u", procOid); procform = (Form_pg_proc) GETSTRUCT(proctup); + /* * btree support procs must be 2-arg procs returning int4, and the * first input type must match the operator class' input type. 
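/*
 * A standalone sketch of the subtype rule described here and in the
 * surrounding hunks: an operator class member whose other input type
 * matches the operator class' input type gets the "default" subtype
 * (InvalidOid), while a cross-type member records the other input type as
 * its subtype.  This is not the backend code; Oid is stood in for by an
 * unsigned int, the helper name is illustrative, and the checks that the
 * member really is a binary boolean operator (or a two-argument int4
 * procedure) with a matching left/first input type are omitted.
 */
#include <stdio.h>

typedef unsigned int Oid;

#define InvalidOid  ((Oid) 0)

static Oid
assign_subtype(Oid opclass_input_type, Oid other_input_type)
{
    /* same-type comparison: "default" subtype */
    if (other_input_type == opclass_input_type)
        return InvalidOid;

    /* cross-type comparison: the subtype is the other side's type */
    return other_input_type;
}

int
main(void)
{
    Oid         int4_oid = 23;  /* int4's pg_type OID, for illustration */
    Oid         int8_oid = 20;  /* int8's pg_type OID, for illustration */

    /* int4 vs int4 member of an int4 opclass: default subtype 0 */
    printf("int4 member -> subtype %u\n",
           assign_subtype(int4_oid, int4_oid));

    /* int4 vs int8 member of an int4 opclass: subtype is int8's OID */
    printf("int8 member -> subtype %u\n",
           assign_subtype(int4_oid, int8_oid));
    return 0;
}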
@@ -486,10 +489,11 @@ assignProcSubtype(Oid amoid, Oid typeoid, Oid procOid) if (procform->proargtypes[0] != typeoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("btree procedures must have index type as first input"))); + errmsg("btree procedures must have index type as first input"))); + /* - * The subtype is "default" (0) if second input type matches the operator - * class, otherwise it is the second input type. + * The subtype is "default" (0) if second input type matches the + * operator class, otherwise it is the second input type. */ if (procform->proargtypes[1] == typeoid) subtype = InvalidOid; @@ -518,13 +522,13 @@ addClassMember(List **list, OpClassMember *member, bool isProc) if (isProc) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("procedure number %d appears more than once", - member->number))); + errmsg("procedure number %d appears more than once", + member->number))); else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("operator number %d appears more than once", - member->number))); + errmsg("operator number %d appears more than once", + member->number))); } } *list = lappend(*list, member); @@ -885,7 +889,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId) char *opcname; HeapTuple tup; Relation rel; - Form_pg_opclass opcForm; + Form_pg_opclass opcForm; amOid = GetSysCacheOid(AMNAME, CStringGetDatum(access_method), @@ -937,7 +941,7 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId) } opcForm = (Form_pg_opclass) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -949,7 +953,10 @@ AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ opcForm->opcowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c index 0605b75b3ed..280404ceb7e 100644 --- a/src/backend/commands/operatorcmds.c +++ b/src/backend/commands/operatorcmds.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.18 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.19 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -275,7 +275,7 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, Oid operOid; HeapTuple tup; Relation rel; - Form_pg_operator oprForm; + Form_pg_operator oprForm; rel = heap_openr(OperatorRelationName, RowExclusiveLock); @@ -283,14 +283,14 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, false); tup = SearchSysCacheCopy(OPEROID, - ObjectIdGetDatum(operOid), - 0, 0, 0); + ObjectIdGetDatum(operOid), + 0, 0, 0); if (!HeapTupleIsValid(tup)) /* should not happen */ elog(ERROR, "cache lookup failed for operator %u", operOid); oprForm = (Form_pg_operator) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
*/ @@ -302,7 +302,10 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on tup because it's a copy */ + /* + * Modify the owner --- okay to scribble on tup because it's a + * copy + */ oprForm->oprowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); @@ -314,5 +317,3 @@ AlterOperatorOwner(List *name, TypeName *typeName1, TypeName *typeName2, heap_freetuple(tup); } - - diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c index a8356e5dcf3..08b14013547 100644 --- a/src/backend/commands/portalcmds.c +++ b/src/backend/commands/portalcmds.c @@ -14,7 +14,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.32 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.33 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,10 +106,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt, ParamListInfo params) /* * Also copy the outer portal's parameter list into the inner portal's - * memory context. We want to pass down the parameter values in case - * we had a command like - * DECLARE c CURSOR FOR SELECT ... WHERE foo = $1 - * This will have been parsed using the outer parameter set and the + * memory context. We want to pass down the parameter values in case + * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo = + * $1 This will have been parsed using the outer parameter set and the * parameter value needs to be preserved for use when the cursor is * executed. */ @@ -180,8 +179,8 @@ PerformPortalFetch(FetchStmt *stmt, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), - errmsg("cursor \"%s\" does not exist", stmt->portalname))); - return; /* keep compiler happy */ + errmsg("cursor \"%s\" does not exist", stmt->portalname))); + return; /* keep compiler happy */ } /* Adjust dest if needed. MOVE wants destination None */ @@ -228,7 +227,7 @@ PerformPortalClose(const char *name) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_CURSOR), errmsg("cursor \"%s\" does not exist", name))); - return; /* keep compiler happy */ + return; /* keep compiler happy */ } /* @@ -354,8 +353,9 @@ PersistHoldablePortal(Portal portal) MemoryContextSwitchTo(PortalContext); /* - * Rewind the executor: we need to store the entire result set in the - * tuplestore, so that subsequent backward FETCHs can be processed. + * Rewind the executor: we need to store the entire result set in + * the tuplestore, so that subsequent backward FETCHs can be + * processed. */ ExecutorRewind(queryDesc); @@ -371,15 +371,15 @@ PersistHoldablePortal(Portal portal) /* * Now shut down the inner executor. */ - portal->queryDesc = NULL; /* prevent double shutdown */ + portal->queryDesc = NULL; /* prevent double shutdown */ ExecutorEnd(queryDesc); /* * Reset the position in the result set: ideally, this could be - * implemented by just skipping straight to the tuple # that we need - * to be at, but the tuplestore API doesn't support that. So we start - * at the beginning of the tuplestore and iterate through it until we - * reach where we need to be. FIXME someday? + * implemented by just skipping straight to the tuple # that we + * need to be at, but the tuplestore API doesn't support that. So + * we start at the beginning of the tuplestore and iterate through + * it until we reach where we need to be. 
FIXME someday? */ MemoryContextSwitchTo(portal->holdContext); @@ -389,8 +389,8 @@ PersistHoldablePortal(Portal portal) if (portal->posOverflow) /* oops, cannot trust portalPos */ ereport(ERROR, - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("could not reposition held cursor"))); + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not reposition held cursor"))); tuplestore_rescan(portal->holdStore); diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c index 31de3e839f9..032fe4acbcd 100644 --- a/src/backend/commands/prepare.c +++ b/src/backend/commands/prepare.c @@ -10,7 +10,7 @@ * Copyright (c) 2002-2004, PostgreSQL Global Development Group * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.30 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.31 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -211,7 +211,8 @@ EvaluateParams(EState *estate, List *params, List *argtypes) int nargs = list_length(argtypes); ParamListInfo paramLI; List *exprstates; - ListCell *le, *la; + ListCell *le, + *la; int i = 0; /* Parser should have caught this error, but check for safety */ @@ -510,7 +511,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate) } /* Explain each query */ - forboth (q, query_list, p, plan_list) + forboth(q, query_list, p, plan_list) { Query *query = (Query *) lfirst(q); Plan *plan = (Plan *) lfirst(p); diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c index 94de3f1235e..404436e8c0e 100644 --- a/src/backend/commands/proclang.c +++ b/src/backend/commands/proclang.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.54 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.55 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -101,8 +101,8 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) else ereport(ERROR, (errcode(ERRCODE_WRONG_OBJECT_TYPE), - errmsg("function %s must return type \"language_handler\"", - NameListToString(stmt->plhandler)))); + errmsg("function %s must return type \"language_handler\"", + NameListToString(stmt->plhandler)))); } /* validate the validator function */ @@ -126,12 +126,12 @@ CreateProceduralLanguage(CreatePLangStmt *stmt) i = 0; namestrcpy(&langname, languageName); - values[i++] = NameGetDatum(&langname); /* lanname */ - values[i++] = BoolGetDatum(true); /* lanispl */ - values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */ - values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */ - values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */ - nulls[i] = 'n'; /* lanacl */ + values[i++] = NameGetDatum(&langname); /* lanname */ + values[i++] = BoolGetDatum(true); /* lanispl */ + values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */ + values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */ + values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */ + nulls[i] = 'n'; /* lanacl */ rel = heap_openr(LanguageRelationName, RowExclusiveLock); diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c index df565d46e8e..8a3d02d100b 100644 --- a/src/backend/commands/schemacmds.c +++ b/src/backend/commands/schemacmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * 
$PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.23 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.24 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -103,12 +103,12 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) errdetail("The prefix \"pg_\" is reserved for system schemas."))); /* - * Select default tablespace for schema. If not given, use zero - * which implies the database's default tablespace. + * Select default tablespace for schema. If not given, use zero which + * implies the database's default tablespace. */ if (stmt->tablespacename) { - AclResult aclresult; + AclResult aclresult; tablespaceId = get_tablespace_oid(stmt->tablespacename); if (!OidIsValid(tablespaceId)) @@ -122,7 +122,9 @@ CreateSchemaCommand(CreateSchemaStmt *stmt) if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, stmt->tablespacename); - } else { + } + else + { tablespaceId = InvalidOid; /* note there is no permission check in this path */ } @@ -316,20 +318,20 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId) { HeapTuple tup; Relation rel; - Form_pg_namespace nspForm; + Form_pg_namespace nspForm; rel = heap_openr(NamespaceRelationName, RowExclusiveLock); tup = SearchSysCache(NAMESPACENAME, - CStringGetDatum(name), - 0, 0, 0); + CStringGetDatum(name), + 0, 0, 0); if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_SCHEMA), errmsg("schema \"%s\" does not exist", name))); nspForm = (Form_pg_namespace) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -338,7 +340,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId) Datum repl_val[Natts_pg_namespace]; char repl_null[Natts_pg_namespace]; char repl_repl[Natts_pg_namespace]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -377,7 +379,7 @@ AlterSchemaOwner(const char *name, AclId newOwnerSysId) heap_freetuple(newtuple); } - + ReleaseSysCache(tup); heap_close(rel, NoLock); } diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c index d9852ed9d9f..53ec53e39fd 100644 --- a/src/backend/commands/sequence.c +++ b/src/backend/commands/sequence.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.115 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.116 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -326,7 +326,7 @@ AlterSequence(AlterSeqStmt *stmt) memcpy(seq, &new, sizeof(FormData_pg_sequence)); /* Clear local cache so that we don't think we have cached numbers */ - elm->last = new.last_value; /* last returned number */ + elm->last = new.last_value; /* last returned number */ elm->cached = new.last_value; /* last cached number (forget * cached values) */ @@ -950,26 +950,22 @@ init_params(List *options, Form_pg_sequence new, bool isInit) /* MAXVALUE (null arg means NO MAXVALUE) */ if (max_value != NULL && max_value->arg) - { new->max_value = defGetInt64(max_value); - } else if (isInit || max_value != NULL) { if (new->increment_by > 0) new->max_value = SEQ_MAXVALUE; /* ascending seq */ else - new->max_value = -1; /* descending seq */ + new->max_value = -1; /* descending seq */ } /* MINVALUE (null arg means NO MINVALUE) */ if (min_value != NULL && min_value->arg) - { 
new->min_value = defGetInt64(min_value); - } else if (isInit || min_value != NULL) { if (new->increment_by > 0) - new->min_value = 1; /* ascending seq */ + new->min_value = 1; /* ascending seq */ else new->min_value = SEQ_MINVALUE; /* descending seq */ } @@ -1073,7 +1069,7 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record) buffer = XLogReadBuffer(true, reln, 0); if (!BufferIsValid(buffer)) elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u", - xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); + xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode); page = (Page) BufferGetPage(buffer); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 73a51c2da9f..ab0d659dc5b 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.128 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.129 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -77,8 +77,8 @@ typedef struct OnCommitItem * entries in the list until commit so that we can roll back if * needed. */ - TransactionId creating_xid; - TransactionId deleting_xid; + TransactionId creating_xid; + TransactionId deleting_xid; } OnCommitItem; static List *on_commits = NIL; @@ -117,7 +117,7 @@ typedef struct AlteredTableInfo char relkind; /* Its relkind */ TupleDesc oldDesc; /* Pre-modification tuple descriptor */ /* Information saved by Phase 1 for Phase 2: */ - List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */ + List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */ /* Information saved by Phases 1/2 for Phase 3: */ List *constraints; /* List of NewConstraint */ List *newvals; /* List of NewColumnValue */ @@ -125,8 +125,8 @@ typedef struct AlteredTableInfo /* Objects to rebuild after completing ALTER TYPE operations */ List *changedConstraintOids; /* OIDs of constraints to rebuild */ List *changedConstraintDefs; /* string definitions of same */ - List *changedIndexOids; /* OIDs of indexes to rebuild */ - List *changedIndexDefs; /* string definitions of same */ + List *changedIndexOids; /* OIDs of indexes to rebuild */ + List *changedIndexDefs; /* string definitions of same */ } AlteredTableInfo; /* Struct describing one new constraint to check in Phase 3 scan */ @@ -171,12 +171,12 @@ static bool needs_toast_table(Relation rel); static int transformColumnNameList(Oid relId, List *colList, int16 *attnums, Oid *atttypids); static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, - List **attnamelist, - int16 *attnums, Oid *atttypids, - Oid *opclasses); + List **attnamelist, + int16 *attnums, Oid *atttypids, + Oid *opclasses); static Oid transformFkeyCheckAttrs(Relation pkrel, - int numattrs, int16 *attnums, - Oid *opclasses); + int numattrs, int16 *attnums, + Oid *opclasses); static void validateForeignKeyConstraint(FkConstraint *fkconstraint, Relation rel, Relation pkrel); static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, @@ -184,7 +184,7 @@ static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint, static char *fkMatchTypeToString(char match_type); static void ATController(Relation rel, List *cmds, bool recurse); static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, - bool recurse, bool recursing); + bool recurse, bool recursing); static void ATRewriteCatalogs(List **wqueue); 
static void ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd); static void ATRewriteTables(List **wqueue); @@ -192,55 +192,55 @@ static void ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap); static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel); static void ATSimplePermissions(Relation rel, bool allowView); static void ATSimpleRecursion(List **wqueue, Relation rel, - AlterTableCmd *cmd, bool recurse); + AlterTableCmd *cmd, bool recurse); static void ATOneLevelRecursion(List **wqueue, Relation rel, - AlterTableCmd *cmd); + AlterTableCmd *cmd); static void find_composite_type_dependencies(Oid typeOid, - const char *origTblName); + const char *origTblName); static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, - AlterTableCmd *cmd); + AlterTableCmd *cmd); static void ATExecAddColumn(AlteredTableInfo *tab, Relation rel, - ColumnDef *colDef); + ColumnDef *colDef); static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid); static void add_column_support_dependency(Oid relid, int32 attnum, - RangeVar *support); + RangeVar *support); static void ATExecDropNotNull(Relation rel, const char *colName); static void ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, - const char *colName); + const char *colName); static void ATExecColumnDefault(Relation rel, const char *colName, - Node *newDefault); + Node *newDefault); static void ATPrepSetStatistics(Relation rel, const char *colName, - Node *flagValue); + Node *flagValue); static void ATExecSetStatistics(Relation rel, const char *colName, - Node *newValue); + Node *newValue); static void ATExecSetStorage(Relation rel, const char *colName, - Node *newValue); + Node *newValue); static void ATExecDropColumn(Relation rel, const char *colName, - DropBehavior behavior, - bool recurse, bool recursing); + DropBehavior behavior, + bool recurse, bool recursing); static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel, - IndexStmt *stmt, bool is_rebuild); + IndexStmt *stmt, bool is_rebuild); static void ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, - Node *newConstraint); + Node *newConstraint); static void ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, - FkConstraint *fkconstraint); + FkConstraint *fkconstraint); static void ATPrepDropConstraint(List **wqueue, Relation rel, - bool recurse, AlterTableCmd *cmd); + bool recurse, AlterTableCmd *cmd); static void ATExecDropConstraint(Relation rel, const char *constrName, - DropBehavior behavior, bool quiet); + DropBehavior behavior, bool quiet); static void ATPrepAlterColumnType(List **wqueue, - AlteredTableInfo *tab, Relation rel, - bool recurse, bool recursing, - AlterTableCmd *cmd); + AlteredTableInfo *tab, Relation rel, + bool recurse, bool recursing, + AlterTableCmd *cmd); static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, - const char *colName, TypeName *typename); + const char *colName, TypeName *typename); static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab); static void ATPostAlterTypeParse(char *cmd, List **wqueue); static void ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId); static void ATExecClusterOn(Relation rel, const char *indexName); static void ATExecDropCluster(Relation rel); static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, - char *tablespacename); + char *tablespacename); static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace); static void copy_relation_data(Relation rel, SMgrRelation dst); static int 
ri_trigger_type(Oid tgfoid); @@ -289,7 +289,7 @@ DefineRelation(CreateStmt *stmt, char relkind) if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("ON COMMIT can only be used on temporary tables"))); + errmsg("ON COMMIT can only be used on temporary tables"))); /* * Look up the namespace in which we are supposed to create the @@ -310,12 +310,13 @@ DefineRelation(CreateStmt *stmt, char relkind) } /* - * Select tablespace to use. If not specified, use containing schema's - * default tablespace (which may in turn default to database's default). + * Select tablespace to use. If not specified, use containing + * schema's default tablespace (which may in turn default to + * database's default). */ if (stmt->tablespacename) { - AclResult aclresult; + AclResult aclresult; tablespaceId = get_tablespace_oid(stmt->tablespacename); if (!OidIsValid(tablespaceId)) @@ -329,7 +330,9 @@ DefineRelation(CreateStmt *stmt, char relkind) if (aclresult != ACLCHECK_OK) aclcheck_error(aclresult, ACL_KIND_TABLESPACE, stmt->tablespacename); - } else { + } + else + { tablespaceId = get_namespace_tablespace(namespaceId); /* note no permission check on tablespace in this case */ } @@ -340,7 +343,7 @@ DefineRelation(CreateStmt *stmt, char relkind) */ schema = MergeAttributes(schema, stmt->inhRelations, stmt->relation->istemp, - &inheritOids, &old_constraints, &parentOidCount); + &inheritOids, &old_constraints, &parentOidCount); /* * Create a relation descriptor from the relation schema and create @@ -357,23 +360,25 @@ DefineRelation(CreateStmt *stmt, char relkind) if (old_constraints != NIL) { ConstrCheck *check = (ConstrCheck *) - palloc0(list_length(old_constraints) * sizeof(ConstrCheck)); + palloc0(list_length(old_constraints) * sizeof(ConstrCheck)); int ncheck = 0; foreach(listptr, old_constraints) { Constraint *cdef = (Constraint *) lfirst(listptr); - bool dup = false; + bool dup = false; if (cdef->contype != CONSTR_CHECK) continue; Assert(cdef->name != NULL); Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL); + /* - * In multiple-inheritance situations, it's possible to inherit - * the same grandparent constraint through multiple parents. - * Hence, discard inherited constraints that match as to both - * name and expression. Otherwise, gripe if the names conflict. + * In multiple-inheritance situations, it's possible to + * inherit the same grandparent constraint through multiple + * parents. Hence, discard inherited constraints that match as + * to both name and expression. Otherwise, gripe if the names + * conflict. */ for (i = 0; i < ncheck; i++) { @@ -546,8 +551,9 @@ TruncateRelation(const RangeVar *relation) RelationGetRelationName(rel)))); /* - * We can never allow truncation of shared or nailed-in-cache relations, - * because we can't support changing their relfilenode values. + * We can never allow truncation of shared or nailed-in-cache + * relations, because we can't support changing their relfilenode + * values. 
*/ if (rel->rd_rel->relisshared || rel->rd_isnailed) ereport(ERROR, @@ -562,7 +568,7 @@ TruncateRelation(const RangeVar *relation) if (isOtherTempNamespace(RelationGetNamespace(rel))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot truncate temporary tables of other sessions"))); + errmsg("cannot truncate temporary tables of other sessions"))); /* * Don't allow truncate on tables which are referenced by foreign keys @@ -571,7 +577,7 @@ TruncateRelation(const RangeVar *relation) /* * Okay, here we go: create a new empty storage file for the relation, - * and assign it as the relfilenode value. The old storage file is + * and assign it as the relfilenode value. The old storage file is * scheduled for deletion at commit. */ setNewRelfilenode(rel); @@ -797,8 +803,8 @@ MergeAttributes(List *schema, List *supers, bool istemp, def->typename->typmod != attribute->atttypmod) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("inherited column \"%s\" has a type conflict", - attributeName), + errmsg("inherited column \"%s\" has a type conflict", + attributeName), errdetail("%s versus %s", TypeNameToString(def->typename), format_type_be(attribute->atttypid)))); @@ -935,15 +941,15 @@ MergeAttributes(List *schema, List *supers, bool istemp, * have the same type and typmod. */ ereport(NOTICE, - (errmsg("merging column \"%s\" with inherited definition", - attributeName))); + (errmsg("merging column \"%s\" with inherited definition", + attributeName))); def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1); if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) || def->typename->typmod != newdef->typename->typmod) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("column \"%s\" has a type conflict", - attributeName), + errmsg("column \"%s\" has a type conflict", + attributeName), errdetail("%s versus %s", TypeNameToString(def->typename), TypeNameToString(newdef->typename)))); @@ -1061,12 +1067,12 @@ StoreCatalogInheritance(Oid relationId, List *supers) /* * Store INHERITS information in pg_inherits using direct ancestors - * only. Also enter dependencies on the direct ancestors, and make sure - * they are marked with relhassubclass = true. + * only. Also enter dependencies on the direct ancestors, and make + * sure they are marked with relhassubclass = true. * * (Once upon a time, both direct and indirect ancestors were found here - * and then entered into pg_ipl. Since that catalog doesn't exist anymore, - * there's no need to look for indirect ancestors.) + * and then entered into pg_ipl. Since that catalog doesn't exist + * anymore, there's no need to look for indirect ancestors.) */ relation = heap_openr(InheritsRelationName, RowExclusiveLock); desc = RelationGetDescr(relation); @@ -1081,7 +1087,7 @@ StoreCatalogInheritance(Oid relationId, List *supers) parentobject; datum[0] = ObjectIdGetDatum(relationId); /* inhrel */ - datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */ + datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */ datum[2] = Int16GetDatum(seqNumber); /* inhseqno */ nullarr[0] = ' '; @@ -1156,9 +1162,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass) /* * Fetch a modifiable copy of the tuple, modify it, update pg_class. * - * If the tuple already has the right relhassubclass setting, we - * don't need to update it, but we still need to issue an SI inval - * message. 
+ * If the tuple already has the right relhassubclass setting, we don't + * need to update it, but we still need to issue an SI inval message. */ relationRelation = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, @@ -1318,7 +1323,7 @@ renameatt(Oid myrelid, 0, 0)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" of relation \"%s\" already exists", + errmsg("column \"%s\" of relation \"%s\" already exists", newattname, RelationGetRelationName(targetrelation)))); namestrcpy(&(attform->attname), newattname); @@ -1712,9 +1717,9 @@ update_ri_trigger_args(Oid relid, * rebuild relcache entries. (Ideally this should happen * automatically...) * - * We can skip this for triggers on relid itself, since that - * relcache flush will happen anyway due to the table or column - * rename. We just need to catch the far ends of RI relationships. + * We can skip this for triggers on relid itself, since that relcache + * flush will happen anyway due to the table or column rename. We + * just need to catch the far ends of RI relationships. */ pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple); if (pg_trigger->tgrelid != relid) @@ -1747,11 +1752,11 @@ update_ri_trigger_args(Oid relid, * 3. Scan table(s) to check new constraints, and optionally recopy * the data into new table(s). * Phase 3 is not performed unless one or more of the subcommands requires - * it. The intention of this design is to allow multiple independent + * it. The intention of this design is to allow multiple independent * updates of the table schema to be performed with only one pass over the * data. * - * ATPrepCmd performs phase 1. A "work queue" entry is created for + * ATPrepCmd performs phase 1. A "work queue" entry is created for * each table to be affected (there may be multiple affected tables if the * commands traverse a table inheritance hierarchy). Also we do preliminary * validation of the subcommands, including parse transformation of those @@ -1762,7 +1767,7 @@ update_ri_trigger_args(Oid relid, * phases 2 and 3 do no explicit recursion, since phase 1 already did it). * Certain subcommands need to be performed before others to avoid * unnecessary conflicts; for example, DROP COLUMN should come before - * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple + * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple * lists, one for each logical "pass" of phase 2. * * ATRewriteTables performs phase 3 for those tables that need it. @@ -1843,8 +1848,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, cmd = copyObject(cmd); /* - * Do permissions checking, recursion to child tables if needed, - * and any additional phase-1 processing needed. + * Do permissions checking, recursion to child tables if needed, and + * any additional phase-1 processing needed. */ switch (cmd->subtype) { @@ -1855,9 +1860,10 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, pass = AT_PASS_ADD_COL; break; case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */ + /* - * We allow defaults on views so that INSERT into a view can have - * default-ish behavior. This works because the rewriter + * We allow defaults on views so that INSERT into a view can + * have default-ish behavior. This works because the rewriter * substitutes default values into INSERTs before it expands * rules. 
*/ @@ -1906,6 +1912,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, break; case AT_AddConstraint: /* ADD CONSTRAINT */ ATSimplePermissions(rel, false); + /* * Currently we recurse only for CHECK constraints, never for * foreign-key constraints. UNIQUE/PKEY constraints won't be @@ -1928,13 +1935,13 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* No command-specific prep needed */ pass = AT_PASS_DROP; break; - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ + case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATSimplePermissions(rel, false); /* Performs own recursion */ ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd); pass = AT_PASS_ALTER_TYPE; break; - case AT_ToastTable: /* CREATE TOAST TABLE */ + case AT_ToastTable: /* CREATE TOAST TABLE */ ATSimplePermissions(rel, false); /* This command never recurses */ /* No command-specific prep needed */ @@ -1945,14 +1952,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* No command-specific prep needed */ pass = AT_PASS_MISC; break; - case AT_ClusterOn: /* CLUSTER ON */ + case AT_ClusterOn: /* CLUSTER ON */ case AT_DropCluster: /* SET WITHOUT CLUSTER */ ATSimplePermissions(rel, false); /* These commands never recurse */ /* No command-specific prep needed */ pass = AT_PASS_MISC; break; - case AT_DropOids: /* SET WITHOUT OIDS */ + case AT_DropOids: /* SET WITHOUT OIDS */ ATSimplePermissions(rel, false); /* Performs own recursion */ if (rel->rd_rel->relhasoids) @@ -1969,9 +1976,9 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, case AT_SetTableSpace: /* SET TABLESPACE */ /* This command never recurses */ ATPrepSetTableSpace(tab, rel, cmd->name); - pass = AT_PASS_MISC; /* doesn't actually matter */ + pass = AT_PASS_MISC; /* doesn't actually matter */ break; - default: /* oops */ + default: /* oops */ elog(ERROR, "unrecognized alter table type: %d", (int) cmd->subtype); pass = 0; /* keep compiler quiet */ @@ -1985,7 +1992,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd, /* * ATRewriteCatalogs * - * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are + * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are * dispatched in a "safe" execution order (designed to avoid unnecessary * conflicts). */ @@ -1997,10 +2004,10 @@ ATRewriteCatalogs(List **wqueue) /* * We process all the tables "in parallel", one pass at a time. This - * is needed because we may have to propagate work from one table - * to another (specifically, ALTER TYPE on a foreign key's PK has to + * is needed because we may have to propagate work from one table to + * another (specifically, ALTER TYPE on a foreign key's PK has to * dispatch the re-adding of the foreign key constraint to the other - * table). Work can only be propagated into later passes, however. + * table). Work can only be propagated into later passes, however. */ for (pass = 0; pass < AT_NUM_PASSES; pass++) { @@ -2015,18 +2022,19 @@ ATRewriteCatalogs(List **wqueue) if (subcmds == NIL) continue; - /* Exclusive lock was obtained by phase 1, needn't get it again */ + /* + * Exclusive lock was obtained by phase 1, needn't get it + * again + */ rel = relation_open(tab->relid, NoLock); foreach(lcmd, subcmds) - { ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd)); - } /* - * After the ALTER TYPE pass, do cleanup work (this is not done in - * ATExecAlterColumnType since it should be done only once if - * multiple columns of a table are altered). 
+ * After the ALTER TYPE pass, do cleanup work (this is not + * done in ATExecAlterColumnType since it should be done only + * once if multiple columns of a table are altered). */ if (pass == AT_PASS_ALTER_TYPE) ATPostAlterTypeCleanup(wqueue, tab); @@ -2047,9 +2055,7 @@ ATRewriteCatalogs(List **wqueue) (tab->subcmds[AT_PASS_ADD_COL] || tab->subcmds[AT_PASS_ALTER_TYPE] || tab->subcmds[AT_PASS_COL_ATTRS])) - { AlterTableCreateToastTable(tab->relid, true); - } } } @@ -2082,7 +2088,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_DropColumn: /* DROP COLUMN */ ATExecDropColumn(rel, cmd->name, cmd->behavior, false, false); break; - case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ + case AT_DropColumnRecurse: /* DROP COLUMN with recursion */ ATExecDropColumn(rel, cmd->name, cmd->behavior, true, false); break; case AT_AddIndex: /* ADD INDEX */ @@ -2100,7 +2106,7 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_DropConstraintQuietly: /* DROP CONSTRAINT for child */ ATExecDropConstraint(rel, cmd->name, cmd->behavior, true); break; - case AT_AlterColumnType: /* ALTER COLUMN TYPE */ + case AT_AlterColumnType: /* ALTER COLUMN TYPE */ ATExecAlterColumnType(tab, rel, cmd->name, (TypeName *) cmd->def); break; case AT_ToastTable: /* CREATE TOAST TABLE */ @@ -2113,29 +2119,31 @@ ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd) case AT_ClusterOn: /* CLUSTER ON */ ATExecClusterOn(rel, cmd->name); break; - case AT_DropCluster: /* SET WITHOUT CLUSTER */ + case AT_DropCluster: /* SET WITHOUT CLUSTER */ ATExecDropCluster(rel); break; case AT_DropOids: /* SET WITHOUT OIDS */ + /* - * Nothing to do here; we'll have generated a DropColumn subcommand - * to do the real work + * Nothing to do here; we'll have generated a DropColumn + * subcommand to do the real work */ break; - case AT_SetTableSpace: /* SET TABLESPACE */ + case AT_SetTableSpace: /* SET TABLESPACE */ + /* * Nothing to do here; Phase 3 does the work */ break; - default: /* oops */ + default: /* oops */ elog(ERROR, "unrecognized alter table type: %d", (int) cmd->subtype); break; } /* - * Bump the command counter to ensure the next subcommand in the sequence - * can see the changes so far + * Bump the command counter to ensure the next subcommand in the + * sequence can see the changes so far */ CommandCounterIncrement(); } @@ -2164,14 +2172,14 @@ ATRewriteTables(List **wqueue) char NewHeapName[NAMEDATALEN]; Oid NewTableSpace; Relation OldHeap; - ObjectAddress object; + ObjectAddress object; OldHeap = heap_open(tab->relid, NoLock); /* * We can never allow rewriting of shared or nailed-in-cache - * relations, because we can't support changing their relfilenode - * values. + * relations, because we can't support changing their + * relfilenode values. */ if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed) ereport(ERROR, @@ -2180,8 +2188,8 @@ ATRewriteTables(List **wqueue) RelationGetRelationName(OldHeap)))); /* - * Don't allow rewrite on temp tables of other backends ... their - * local buffer manager is not going to cope. + * Don't allow rewrite on temp tables of other backends ... + * their local buffer manager is not going to cope. */ if (isOtherTempNamespace(RelationGetNamespace(OldHeap))) ereport(ERROR, @@ -2201,11 +2209,12 @@ ATRewriteTables(List **wqueue) /* * Create the new heap, using a temporary name in the same - * namespace as the existing table. NOTE: there is some risk of - * collision with user relnames. 
Working around this seems more - * trouble than it's worth; in particular, we can't create the new - * heap in a different namespace from the old, or we will have - * problems with the TEMP status of temp tables. + * namespace as the existing table. NOTE: there is some risk + * of collision with user relnames. Working around this seems + * more trouble than it's worth; in particular, we can't + * create the new heap in a different namespace from the old, + * or we will have problems with the TEMP status of temp + * tables. */ snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tab->relid); @@ -2230,15 +2239,15 @@ ATRewriteTables(List **wqueue) object.objectSubId = 0; /* - * The new relation is local to our transaction and we know nothing - * depends on it, so DROP_RESTRICT should be OK. + * The new relation is local to our transaction and we know + * nothing depends on it, so DROP_RESTRICT should be OK. */ performDeletion(&object, DROP_RESTRICT); /* performDeletion does CommandCounterIncrement at end */ /* - * Rebuild each index on the relation (but not the toast table, - * which is all-new anyway). We do not need + * Rebuild each index on the relation (but not the toast + * table, which is all-new anyway). We do not need * CommandCounterIncrement() because reindex_relation does it. */ reindex_relation(tab->relid, false); @@ -2246,14 +2255,16 @@ ATRewriteTables(List **wqueue) else { /* - * Test the current data within the table against new constraints - * generated by ALTER TABLE commands, but don't rebuild data. + * Test the current data within the table against new + * constraints generated by ALTER TABLE commands, but don't + * rebuild data. */ if (tab->constraints != NIL) ATRewriteTable(tab, InvalidOid); + /* - * If we had SET TABLESPACE but no reason to reconstruct tuples, - * just do a block-by-block copy. + * If we had SET TABLESPACE but no reason to reconstruct + * tuples, just do a block-by-block copy. */ if (tab->newTableSpace) ATExecSetTableSpace(tab->relid, tab->newTableSpace); @@ -2261,17 +2272,17 @@ ATRewriteTables(List **wqueue) } /* - * Foreign key constraints are checked in a final pass, since - * (a) it's generally best to examine each one separately, and - * (b) it's at least theoretically possible that we have changed - * both relations of the foreign key, and we'd better have finished - * both rewrites before we try to read the tables. + * Foreign key constraints are checked in a final pass, since (a) it's + * generally best to examine each one separately, and (b) it's at + * least theoretically possible that we have changed both relations of + * the foreign key, and we'd better have finished both rewrites before + * we try to read the tables. */ foreach(ltab, *wqueue) { - AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab); - Relation rel = NULL; - ListCell *lcon; + AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab); + Relation rel = NULL; + ListCell *lcon; foreach(lcon, tab->constraints) { @@ -2324,7 +2335,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) */ oldrel = heap_open(tab->relid, NoLock); oldTupDesc = tab->oldDesc; - newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ + newTupDesc = RelationGetDescr(oldrel); /* includes all mods */ if (OidIsValid(OIDNewHeap)) newrel = heap_open(OIDNewHeap, AccessExclusiveLock); @@ -2335,9 +2346,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) * If we need to rewrite the table, the operation has to be propagated * to tables that use this table's rowtype as a column type. 
* - * (Eventually this will probably become true for scans as well, but - * at the moment a composite type does not enforce any constraints, - * so it's not necessary/appropriate to enforce them just during ALTER.) + * (Eventually this will probably become true for scans as well, but at + * the moment a composite type does not enforce any constraints, so + * it's not necessary/appropriate to enforce them just during ALTER.) */ if (newrel) find_composite_type_dependencies(oldrel->rd_rel->reltype, @@ -2375,7 +2386,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) foreach(l, tab->newvals) { - NewColumnValue *ex = lfirst(l); + NewColumnValue *ex = lfirst(l); needscan = true; @@ -2384,12 +2395,12 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) if (needscan) { - ExprContext *econtext; + ExprContext *econtext; Datum *values; char *nulls; TupleTableSlot *oldslot; TupleTableSlot *newslot; - HeapScanDesc scan; + HeapScanDesc scan; HeapTuple tuple; econtext = GetPerTupleExprContext(estate); @@ -2425,7 +2436,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) * Extract data from old tuple. We can force to null any * columns that are deleted according to the new tuple. */ - int natts = newTupDesc->natts; + int natts = newTupDesc->natts; heap_deformtuple(tuple, oldTupDesc, values, nulls); @@ -2436,16 +2447,16 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) } /* - * Process supplied expressions to replace selected columns. - * Expression inputs come from the old tuple. + * Process supplied expressions to replace selected + * columns. Expression inputs come from the old tuple. */ ExecStoreTuple(tuple, oldslot, InvalidBuffer, false); econtext->ecxt_scantuple = oldslot; foreach(l, tab->newvals) { - NewColumnValue *ex = lfirst(l); - bool isNull; + NewColumnValue *ex = lfirst(l); + bool isNull; values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate, econtext, @@ -2478,20 +2489,20 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap) con->name))); break; case CONSTR_NOTNULL: - { - Datum d; - bool isnull; + { + Datum d; + bool isnull; - d = heap_getattr(tuple, con->attnum, newTupDesc, - &isnull); - if (isnull) - ereport(ERROR, + d = heap_getattr(tuple, con->attnum, newTupDesc, + &isnull); + if (isnull) + ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" contains null values", get_attname(tab->relid, con->attnum)))); - } - break; + } + break; case CONSTR_FOREIGN: /* Nothing to do here */ break; @@ -2733,8 +2744,9 @@ find_composite_type_dependencies(Oid typeOid, const char *origTblName) else if (OidIsValid(rel->rd_rel->reltype)) { /* - * A view or composite type itself isn't a problem, but we must - * recursively check for indirect dependencies via its rowtype. + * A view or composite type itself isn't a problem, but we + * must recursively check for indirect dependencies via its + * rowtype. */ find_composite_type_dependencies(rel->rd_rel->reltype, origTblName); @@ -2790,7 +2802,7 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, if (find_inheritance_children(RelationGetRelid(rel)) != NIL) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("column must be added to child tables too"))); + errmsg("column must be added to child tables too"))); } } @@ -2815,8 +2827,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, attrdesc = heap_openr(AttributeRelationName, RowExclusiveLock); /* - * Are we adding the column to a recursion child? If so, check whether - * to merge with an existing definition for the column. 
+ * Are we adding the column to a recursion child? If so, check + * whether to merge with an existing definition for the column. */ if (colDef->inhcount > 0) { @@ -2834,7 +2846,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("child table \"%s\" has different type for column \"%s\"", - RelationGetRelationName(rel), colDef->colname))); + RelationGetRelationName(rel), colDef->colname))); /* Bump the existing child att's inhcount */ childatt->attinhcount++; @@ -2846,7 +2858,7 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, /* Inform the user about the merge */ ereport(NOTICE, (errmsg("merging definition of column \"%s\" for child \"%s\"", - colDef->colname, RelationGetRelationName(rel)))); + colDef->colname, RelationGetRelationName(rel)))); heap_close(attrdesc, RowExclusiveLock); return; @@ -2872,8 +2884,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, 0, 0)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_COLUMN), - errmsg("column \"%s\" of relation \"%s\" already exists", - colDef->colname, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" already exists", + colDef->colname, RelationGetRelationName(rel)))); minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts; maxatts = minattnum + 1; @@ -2965,21 +2977,20 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel, /* * Tell Phase 3 to fill in the default expression, if there is one. * - * If there is no default, Phase 3 doesn't have to do anything, - * because that effectively means that the default is NULL. The - * heap tuple access routines always check for attnum > # of attributes - * in tuple, and return NULL if so, so without any modification of - * the tuple data we will get the effect of NULL values in the new - * column. + * If there is no default, Phase 3 doesn't have to do anything, because + * that effectively means that the default is NULL. The heap tuple + * access routines always check for attnum > # of attributes in tuple, + * and return NULL if so, so without any modification of the tuple + * data we will get the effect of NULL values in the new column. * * Note: we use build_column_default, and not just the cooked default - * returned by AddRelationRawConstraints, so that the right thing happens - * when a datatype's default applies. + * returned by AddRelationRawConstraints, so that the right thing + * happens when a datatype's default applies. */ defval = (Expr *) build_column_default(rel, attribute->attnum); if (defval) { - NewColumnValue *newval; + NewColumnValue *newval; newval = (NewColumnValue *) palloc0(sizeof(NewColumnValue)); newval->attnum = attribute->attnum; @@ -3099,8 +3110,8 @@ ATExecDropNotNull(Relation rel, const char *colName) if (indexStruct->indkey[i] == attnum) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("column \"%s\" is in a primary key", - colName))); + errmsg("column \"%s\" is in a primary key", + colName))); } } @@ -3162,7 +3173,7 @@ ATExecSetNotNull(AlteredTableInfo *tab, Relation rel, /* * Okay, actually perform the catalog change ... if needed */ - if (! 
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull) + if (!((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull) { ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE; @@ -3199,8 +3210,8 @@ ATExecColumnDefault(Relation rel, const char *colName, if (attnum == InvalidAttrNumber) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); /* Prevent them from altering a system attribute */ if (attnum <= 0) @@ -3240,10 +3251,10 @@ static void ATPrepSetStatistics(Relation rel, const char *colName, Node *flagValue) { /* - * We do our own permission checking because (a) we want to allow - * SET STATISTICS on indexes (for expressional index columns), and - * (b) we want to allow SET STATISTICS on system catalogs without - * requiring allowSystemTableMods to be turned on. + * We do our own permission checking because (a) we want to allow SET + * STATISTICS on indexes (for expressional index columns), and (b) we + * want to allow SET STATISTICS on system catalogs without requiring + * allowSystemTableMods to be turned on. */ if (rel->rd_rel->relkind != RELKIND_RELATION && rel->rd_rel->relkind != RELKIND_INDEX) @@ -3295,8 +3306,8 @@ ATExecSetStatistics(Relation rel, const char *colName, Node *newValue) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); if (attrtuple->attnum <= 0) @@ -3356,8 +3367,8 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue) if (!HeapTupleIsValid(tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); attrtuple = (Form_pg_attribute) GETSTRUCT(tuple); if (attrtuple->attnum <= 0) @@ -3394,9 +3405,9 @@ ATExecSetStorage(Relation rel, const char *colName, Node *newValue) * * DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism, * because we have to decide at runtime whether to recurse or not depending - * on whether attinhcount goes to zero or not. (We can't check this in a + * on whether attinhcount goes to zero or not. (We can't check this in a * static pre-pass because it won't handle multiple inheritance situations - * correctly.) Since DROP COLUMN doesn't need to create any work queue + * correctly.) Since DROP COLUMN doesn't need to create any work queue * entries for Phase 3, it's okay to recurse internally in this routine * without considering the work queue. */ @@ -3479,8 +3490,8 @@ ATExecDropColumn(Relation rel, const char *colName, { /* * If the child column has other definition sources, just - * decrement its inheritance count; if not, recurse to delete - * it. + * decrement its inheritance count; if not, recurse to + * delete it. */ if (childatt->attinhcount == 1 && !childatt->attislocal) { @@ -3504,9 +3515,9 @@ ATExecDropColumn(Relation rel, const char *colName, else { /* - * If we were told to drop ONLY in this table (no recursion), - * we need to mark the inheritors' attribute as locally - * defined rather than inherited. 
+ * If we were told to drop ONLY in this table (no + * recursion), we need to mark the inheritors' attribute + * as locally defined rather than inherited. */ childatt->attinhcount--; childatt->attislocal = true; @@ -3547,7 +3558,7 @@ ATExecDropColumn(Relation rel, const char *colName, class_rel = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCacheCopy(RELOID, - ObjectIdGetDatum(RelationGetRelid(rel)), + ObjectIdGetDatum(RelationGetRelid(rel)), 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", @@ -3575,9 +3586,9 @@ static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel, IndexStmt *stmt, bool is_rebuild) { - bool check_rights; - bool skip_build; - bool quiet; + bool check_rights; + bool skip_build; + bool quiet; Assert(IsA(stmt, IndexStmt)); @@ -3588,17 +3599,17 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel, /* suppress notices when rebuilding existing index */ quiet = is_rebuild; - DefineIndex(stmt->relation, /* relation */ - stmt->idxname, /* index name */ - stmt->accessMethod, /* am name */ + DefineIndex(stmt->relation, /* relation */ + stmt->idxname, /* index name */ + stmt->accessMethod, /* am name */ stmt->tableSpace, - stmt->indexParams, /* parameters */ + stmt->indexParams, /* parameters */ (Expr *) stmt->whereClause, stmt->rangetable, stmt->unique, stmt->primary, stmt->isconstraint, - true, /* is_alter_table */ + true, /* is_alter_table */ check_rights, skip_build, quiet); @@ -3613,84 +3624,85 @@ ATExecAddConstraint(AlteredTableInfo *tab, Relation rel, Node *newConstraint) switch (nodeTag(newConstraint)) { case T_Constraint: - { - Constraint *constr = (Constraint *) newConstraint; - - /* - * Currently, we only expect to see CONSTR_CHECK nodes - * arriving here (see the preprocessing done in - * parser/analyze.c). Use a switch anyway to make it - * easier to add more code later. - */ - switch (constr->contype) { - case CONSTR_CHECK: - { - List *newcons; - ListCell *lcon; + Constraint *constr = (Constraint *) newConstraint; - /* - * Call AddRelationRawConstraints to do the work. - * It returns a list of cooked constraints. - */ - newcons = AddRelationRawConstraints(rel, NIL, - list_make1(constr)); - /* Add each constraint to Phase 3's queue */ - foreach(lcon, newcons) - { - CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon); - NewConstraint *newcon; - - newcon = (NewConstraint *) palloc0(sizeof(NewConstraint)); - newcon->name = ccon->name; - newcon->contype = ccon->contype; - newcon->attnum = ccon->attnum; - /* ExecQual wants implicit-AND format */ - newcon->qual = (Node *) - make_ands_implicit((Expr *) ccon->expr); - - tab->constraints = lappend(tab->constraints, - newcon); - } - break; + /* + * Currently, we only expect to see CONSTR_CHECK nodes + * arriving here (see the preprocessing done in + * parser/analyze.c). Use a switch anyway to make it + * easier to add more code later. + */ + switch (constr->contype) + { + case CONSTR_CHECK: + { + List *newcons; + ListCell *lcon; + + /* + * Call AddRelationRawConstraints to do the + * work. It returns a list of cooked + * constraints. 
+ */ + newcons = AddRelationRawConstraints(rel, NIL, + list_make1(constr)); + /* Add each constraint to Phase 3's queue */ + foreach(lcon, newcons) + { + CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon); + NewConstraint *newcon; + + newcon = (NewConstraint *) palloc0(sizeof(NewConstraint)); + newcon->name = ccon->name; + newcon->contype = ccon->contype; + newcon->attnum = ccon->attnum; + /* ExecQual wants implicit-AND format */ + newcon->qual = (Node *) + make_ands_implicit((Expr *) ccon->expr); + + tab->constraints = lappend(tab->constraints, + newcon); + } + break; + } + default: + elog(ERROR, "unrecognized constraint type: %d", + (int) constr->contype); } - default: - elog(ERROR, "unrecognized constraint type: %d", - (int) constr->contype); + break; } - break; - } case T_FkConstraint: - { - FkConstraint *fkconstraint = (FkConstraint *) newConstraint; - - /* - * Assign or validate constraint name - */ - if (fkconstraint->constr_name) { - if (ConstraintNameIsUsed(CONSTRAINT_RELATION, - RelationGetRelid(rel), - RelationGetNamespace(rel), - fkconstraint->constr_name)) - ereport(ERROR, - (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("constraint \"%s\" for relation \"%s\" already exists", - fkconstraint->constr_name, - RelationGetRelationName(rel)))); - } - else - fkconstraint->constr_name = - ChooseConstraintName(RelationGetRelationName(rel), - strVal(linitial(fkconstraint->fk_attrs)), - "fkey", - RelationGetNamespace(rel), - NIL); + FkConstraint *fkconstraint = (FkConstraint *) newConstraint; - ATAddForeignKeyConstraint(tab, rel, fkconstraint); + /* + * Assign or validate constraint name + */ + if (fkconstraint->constr_name) + { + if (ConstraintNameIsUsed(CONSTRAINT_RELATION, + RelationGetRelid(rel), + RelationGetNamespace(rel), + fkconstraint->constr_name)) + ereport(ERROR, + (errcode(ERRCODE_DUPLICATE_OBJECT), + errmsg("constraint \"%s\" for relation \"%s\" already exists", + fkconstraint->constr_name, + RelationGetRelationName(rel)))); + } + else + fkconstraint->constr_name = + ChooseConstraintName(RelationGetRelationName(rel), + strVal(linitial(fkconstraint->fk_attrs)), + "fkey", + RelationGetNamespace(rel), + NIL); - break; - } + ATAddForeignKeyConstraint(tab, rel, fkconstraint); + + break; + } default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(newConstraint)); @@ -3761,12 +3773,12 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, RelationGetRelationName(rel)); /* - * Disallow reference from permanent table to temp table or vice versa. - * (The ban on perm->temp is for fairly obvious reasons. The ban on - * temp->perm is because other backends might need to run the RI triggers - * on the perm table, but they can't reliably see tuples the owning - * backend has created in the temp table, because non-shared buffers - * are used for temp tables.) + * Disallow reference from permanent table to temp table or vice + * versa. (The ban on perm->temp is for fairly obvious reasons. The + * ban on temp->perm is because other backends might need to run the + * RI triggers on the perm table, but they can't reliably see tuples + * the owning backend has created in the temp table, because + * non-shared buffers are used for temp tables.) 
*/ if (isTempNamespace(RelationGetNamespace(pkrel))) { @@ -3834,11 +3846,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, * fktypoid[i] is the foreign key table's i'th key's type * * Note that we look for an operator with the PK type on the left; - * when the types are different this is critical because the PK index - * will need operators with the indexkey on the left. (Ordinarily - * both commutator operators will exist if either does, but we won't - * get the right answer from the test below on opclass membership - * unless we select the proper operator.) + * when the types are different this is critical because the PK + * index will need operators with the indexkey on the left. + * (Ordinarily both commutator operators will exist if either + * does, but we won't get the right answer from the test below on + * opclass membership unless we select the proper operator.) */ Operator o = oper(list_make1(makeString("=")), pktypoid[i], fktypoid[i], true); @@ -3851,8 +3863,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->constr_name), errdetail("Key columns \"%s\" and \"%s\" " "are of incompatible types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), + strVal(list_nth(fkconstraint->fk_attrs, i)), + strVal(list_nth(fkconstraint->pk_attrs, i)), format_type_be(fktypoid[i]), format_type_be(pktypoid[i])))); @@ -3868,8 +3880,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, fkconstraint->constr_name), errdetail("Key columns \"%s\" and \"%s\" " "are of different types: %s and %s.", - strVal(list_nth(fkconstraint->fk_attrs, i)), - strVal(list_nth(fkconstraint->pk_attrs, i)), + strVal(list_nth(fkconstraint->fk_attrs, i)), + strVal(list_nth(fkconstraint->pk_attrs, i)), format_type_be(fktypoid[i]), format_type_be(pktypoid[i])))); @@ -3877,8 +3889,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel, } /* - * Tell Phase 3 to check that the constraint is satisfied by existing rows - * (we can skip this during table creation). + * Tell Phase 3 to check that the constraint is satisfied by existing + * rows (we can skip this during table creation). */ if (!fkconstraint->skip_validation) { @@ -3971,10 +3983,10 @@ transformColumnNameList(Oid relId, List *colList, * transformFkeyGetPrimaryKey - * * Look up the names, attnums, and types of the primary key attributes - * for the pkrel. Also return the index OID and index opclasses of the + * for the pkrel. Also return the index OID and index opclasses of the * index supporting the primary key. * - * All parameters except pkrel are output parameters. Also, the function + * All parameters except pkrel are output parameters. Also, the function * return value is the number of attributes in the primary key. * * Used when the column list in the REFERENCES specification is omitted. 
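
The ATAddForeignKeyConstraint hunks above reflow the comment explaining why the "=" operator is looked up with the primary-key type on the left: the PK index supplies operators with the indexkey on the left, so the later opclass-membership test only gives the right answer for that form of the operator. As a minimal sketch of that lookup, assuming it sits inside tablecmds.c where the usual backend headers are already included (oprid() and ReleaseSysCache() are assumed here, and the errcode is illustrative; oper(), list_make1(), makeString(), ereport(), errdetail(), and format_type_be() all appear in the hunks themselves):

/*
 * Sketch only, not part of this commit: resolve "=" taking (pktype, fktype)
 * and fail cleanly when the key columns are not comparable.
 */
static Oid
lookup_fk_equality_op(Oid pktypoid, Oid fktypoid, const char *conname)
{
	Operator	o;
	Oid			opoid;

	/* last argument true = return NULL instead of raising an error */
	o = oper(list_make1(makeString("=")), pktypoid, fktypoid, true);
	if (o == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_DATATYPE_MISMATCH),
				 errmsg("foreign key constraint \"%s\" cannot be implemented",
						conname),
				 errdetail("Key columns are of incompatible types: %s and %s.",
						   format_type_be(fktypoid),
						   format_type_be(pktypoid))));

	opoid = oprid(o);			/* operator OID, tested against the PK opclass */
	ReleaseSysCache(o);
	return opoid;
}

The real code then tests the returned operator for membership in the PK index column's operator class, which is exactly why the reflowed comment insists on selecting the operator with the PK type on the left.
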
@@ -4060,7 +4072,7 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid, static Oid transformFkeyCheckAttrs(Relation pkrel, int numattrs, int16 *attnums, - Oid *opclasses) /* output parameter */ + Oid *opclasses) /* output parameter */ { Oid indexoid = InvalidOid; bool found = false; @@ -4190,8 +4202,8 @@ validateForeignKeyConstraint(FkConstraint *fkconstraint, trig.tginitdeferred = FALSE; trig.tgargs = (char **) palloc(sizeof(char *) * - (4 + list_length(fkconstraint->fk_attrs) - + list_length(fkconstraint->pk_attrs))); + (4 + list_length(fkconstraint->fk_attrs) + + list_length(fkconstraint->pk_attrs))); trig.tgargs[0] = trig.tgname; trig.tgargs[1] = RelationGetRelationName(rel); @@ -4518,8 +4530,8 @@ ATExecDropConstraint(Relation rel, const char *constrName, /* Otherwise if more than one constraint deleted, notify */ else if (deleted > 1) ereport(NOTICE, - (errmsg("multiple constraints named \"%s\" were dropped", - constrName))); + (errmsg("multiple constraints named \"%s\" were dropped", + constrName))); } } @@ -4578,12 +4590,12 @@ ATPrepAlterColumnType(List **wqueue, CheckAttributeType(colName, targettype); /* - * Set up an expression to transform the old data value to the new type. - * If a USING option was given, transform and use that expression, else - * just take the old value and try to coerce it. We do this first so - * that type incompatibility can be detected before we waste effort, - * and because we need the expression to be parsed against the original - * table rowtype. + * Set up an expression to transform the old data value to the new + * type. If a USING option was given, transform and use that + * expression, else just take the old value and try to coerce it. We + * do this first so that type incompatibility can be detected before + * we waste effort, and because we need the expression to be parsed + * against the original table rowtype. */ if (cmd->transform) { @@ -4592,7 +4604,7 @@ ATPrepAlterColumnType(List **wqueue, /* Expression must be able to access vars of old table */ rte = addRangeTableEntryForRelation(pstate, RelationGetRelid(rel), - makeAlias(RelationGetRelationName(rel), NIL), + makeAlias(RelationGetRelationName(rel), NIL), false, true); addRTEtoQuery(pstate, rte, false, true); @@ -4603,13 +4615,13 @@ ATPrepAlterColumnType(List **wqueue, if (expression_returns_set(transform)) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("transform expression must not return a set"))); + errmsg("transform expression must not return a set"))); /* No subplans or aggregates, either... */ if (pstate->p_hasSubLinks) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot use subquery in transform expression"))); + errmsg("cannot use subquery in transform expression"))); if (pstate->p_hasAggs) ereport(ERROR, (errcode(ERRCODE_GROUPING_ERROR), @@ -4646,9 +4658,9 @@ ATPrepAlterColumnType(List **wqueue, ReleaseSysCache(tuple); /* - * The recursion case is handled by ATSimpleRecursion. However, - * if we are told not to recurse, there had better not be any - * child tables; else the alter would put them out of step. + * The recursion case is handled by ATSimpleRecursion. However, if we + * are told not to recurse, there had better not be any child tables; + * else the alter would put them out of step. 
*/ if (recurse) ATSimpleRecursion(wqueue, rel, cmd, recurse); @@ -4683,15 +4695,15 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName); if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */ ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_COLUMN), - errmsg("column \"%s\" of relation \"%s\" does not exist", - colName, RelationGetRelationName(rel)))); + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("column \"%s\" of relation \"%s\" does not exist", + colName, RelationGetRelationName(rel)))); attTup = (Form_pg_attribute) GETSTRUCT(heapTup); attnum = attTup->attnum; /* Check for multiple ALTER TYPE on same column --- can't cope */ - if (attTup->atttypid != tab->oldDesc->attrs[attnum-1]->atttypid || - attTup->atttypmod != tab->oldDesc->attrs[attnum-1]->atttypmod) + if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1]->atttypid || + attTup->atttypmod != tab->oldDesc->attrs[attnum - 1]->atttypmod) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot alter type of column \"%s\" twice", @@ -4713,8 +4725,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, { defaultexpr = build_column_default(rel, attnum); Assert(defaultexpr); - defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */ - defaultexpr, exprType(defaultexpr), + defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */ + defaultexpr, exprType(defaultexpr), targettype, typename->typmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST); @@ -4728,18 +4740,18 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, defaultexpr = NULL; /* - * Find everything that depends on the column (constraints, indexes, etc), - * and record enough information to let us recreate the objects. + * Find everything that depends on the column (constraints, indexes, + * etc), and record enough information to let us recreate the objects. * * The actual recreation does not happen here, but only after we have - * performed all the individual ALTER TYPE operations. We have to save - * the info before executing ALTER TYPE, though, else the deparser will - * get confused. + * performed all the individual ALTER TYPE operations. We have to + * save the info before executing ALTER TYPE, though, else the + * deparser will get confused. * * There could be multiple entries for the same object, so we must check - * to ensure we process each one only once. Note: we assume that an index - * that implements a constraint will not show a direct dependency on the - * column. + * to ensure we process each one only once. Note: we assume that an + * index that implements a constraint will not show a direct + * dependency on the column. 
*/ depRel = heap_openr(DependRelationName, RowExclusiveLock); @@ -4761,8 +4773,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, while (HeapTupleIsValid(depTup = systable_getnext(scan))) { - Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); - ObjectAddress foundObject; + Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); + ObjectAddress foundObject; /* We don't expect any PIN dependencies on columns */ if (foundDep->deptype == DEPENDENCY_PIN) @@ -4775,45 +4787,45 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, switch (getObjectClass(&foundObject)) { case OCLASS_CLASS: - { - char relKind = get_rel_relkind(foundObject.objectId); - - if (relKind == RELKIND_INDEX) { - Assert(foundObject.objectSubId == 0); - if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) + char relKind = get_rel_relkind(foundObject.objectId); + + if (relKind == RELKIND_INDEX) { - tab->changedIndexOids = lappend_oid(tab->changedIndexOids, - foundObject.objectId); - tab->changedIndexDefs = lappend(tab->changedIndexDefs, - pg_get_indexdef_string(foundObject.objectId)); + Assert(foundObject.objectSubId == 0); + if (!list_member_oid(tab->changedIndexOids, foundObject.objectId)) + { + tab->changedIndexOids = lappend_oid(tab->changedIndexOids, + foundObject.objectId); + tab->changedIndexDefs = lappend(tab->changedIndexDefs, + pg_get_indexdef_string(foundObject.objectId)); + } } + else if (relKind == RELKIND_SEQUENCE) + { + /* + * This must be a SERIAL column's sequence. We + * need not do anything to it. + */ + Assert(foundObject.objectSubId == 0); + } + else + { + /* Not expecting any other direct dependencies... */ + elog(ERROR, "unexpected object depending on column: %s", + getObjectDescription(&foundObject)); + } + break; } - else if (relKind == RELKIND_SEQUENCE) - { - /* - * This must be a SERIAL column's sequence. We need not - * do anything to it. - */ - Assert(foundObject.objectSubId == 0); - } - else - { - /* Not expecting any other direct dependencies... */ - elog(ERROR, "unexpected object depending on column: %s", - getObjectDescription(&foundObject)); - } - break; - } case OCLASS_CONSTRAINT: Assert(foundObject.objectSubId == 0); if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId)) { tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids, - foundObject.objectId); + foundObject.objectId); tab->changedConstraintDefs = lappend(tab->changedConstraintDefs, - pg_get_constraintdef_string(foundObject.objectId)); + pg_get_constraintdef_string(foundObject.objectId)); } break; @@ -4828,9 +4840,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, break; case OCLASS_DEFAULT: + /* - * Ignore the column's default expression, since we will fix - * it below. + * Ignore the column's default expression, since we will + * fix it below. */ Assert(defaultexpr); break; @@ -4844,6 +4857,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, case OCLASS_OPCLASS: case OCLASS_TRIGGER: case OCLASS_SCHEMA: + /* * We don't expect any of these sorts of objects to depend * on a column. 
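
The re-indented dependency walk above records each index and constraint that depends on the column exactly once, keeping the OID alongside a deparsed definition string so the object can be dropped and re-created after every ALTER TYPE subcommand has run. A minimal sketch of that bookkeeping, written as a hypothetical helper next to this code (the function name is invented; list_member_oid(), lappend_oid(), lappend(), and pg_get_indexdef_string() all appear in the hunk above):

/*
 * Sketch only (hypothetical helper, not part of this commit): remember a
 * dependent index once, keyed by OID, together with its deparsed
 * definition for later re-parsing.
 */
static void
remember_changed_index(AlteredTableInfo *tab, Oid indexOid)
{
	if (!list_member_oid(tab->changedIndexOids, indexOid))
	{
		tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
											indexOid);
		tab->changedIndexDefs = lappend(tab->changedIndexDefs,
										pg_get_indexdef_string(indexOid));
	}
}

ATPostAlterTypeCleanup later feeds each saved definition back through raw_parser() and parse_analyze(), turning the text into fresh IndexStmt and AlterTableStmt subcommands queued in the AT_PASS_OLD_INDEX and AT_PASS_OLD_CONSTR passes, as the hunks further down show.
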
@@ -4883,7 +4897,7 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, while (HeapTupleIsValid(depTup = systable_getnext(scan))) { - Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); + Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup); if (foundDep->deptype != DEPENDENCY_NORMAL) elog(ERROR, "found unexpected dependency type '%c'", @@ -4900,8 +4914,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, heap_close(depRel, RowExclusiveLock); /* - * Here we go --- change the recorded column type. (Note heapTup is - * a copy of the syscache entry, so okay to scribble on.) + * Here we go --- change the recorded column type. (Note heapTup is a + * copy of the syscache entry, so okay to scribble on.) */ attTup->atttypid = targettype; attTup->atttypmod = typename->typmod; @@ -4923,15 +4937,18 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, /* Install dependency on new datatype */ add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype); - /* Drop any pg_statistic entry for the column, since it's now wrong type */ + /* + * Drop any pg_statistic entry for the column, since it's now wrong + * type + */ RemoveStatistics(RelationGetRelid(rel), attnum); /* - * Update the default, if present, by brute force --- remove and re-add - * the default. Probably unsafe to take shortcuts, since the new version - * may well have additional dependencies. (It's okay to do this now, - * rather than after other ALTER TYPE commands, since the default won't - * depend on other column types.) + * Update the default, if present, by brute force --- remove and + * re-add the default. Probably unsafe to take shortcuts, since the + * new version may well have additional dependencies. (It's okay to + * do this now, rather than after other ALTER TYPE commands, since the + * default won't depend on other column types.) */ if (defaultexpr) { @@ -4939,8 +4956,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel, CommandCounterIncrement(); /* - * We use RESTRICT here for safety, but at present we do not expect - * anything to depend on the default. + * We use RESTRICT here for safety, but at present we do not + * expect anything to depend on the default. */ RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true); @@ -4960,31 +4977,26 @@ static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab) { ObjectAddress obj; - ListCell *l; + ListCell *l; /* * Re-parse the index and constraint definitions, and attach them to - * the appropriate work queue entries. We do this before dropping + * the appropriate work queue entries. We do this before dropping * because in the case of a FOREIGN KEY constraint, we might not yet - * have exclusive lock on the table the constraint is attached to, - * and we need to get that before dropping. It's safe because the - * parser won't actually look at the catalogs to detect the existing - * entry. + * have exclusive lock on the table the constraint is attached to, and + * we need to get that before dropping. It's safe because the parser + * won't actually look at the catalogs to detect the existing entry. */ foreach(l, tab->changedIndexDefs) - { ATPostAlterTypeParse((char *) lfirst(l), wqueue); - } foreach(l, tab->changedConstraintDefs) - { ATPostAlterTypeParse((char *) lfirst(l), wqueue); - } /* - * Now we can drop the existing constraints and indexes --- constraints - * first, since some of them might depend on the indexes. 
It should be - * okay to use DROP_RESTRICT here, since nothing else should be depending - * on these objects. + * Now we can drop the existing constraints and indexes --- + * constraints first, since some of them might depend on the indexes. + * It should be okay to use DROP_RESTRICT here, since nothing else + * should be depending on these objects. */ if (tab->changedConstraintOids) obj.classId = get_system_catalog_relid(ConstraintRelationName); @@ -5017,8 +5029,8 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) ListCell *list_item; /* - * We expect that we only have to do raw parsing and parse analysis, not - * any rule rewriting, since these will all be utility statements. + * We expect that we only have to do raw parsing and parse analysis, + * not any rule rewriting, since these will all be utility statements. */ raw_parsetree_list = raw_parser(cmd); querytree_list = NIL; @@ -5027,12 +5039,13 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) Node *parsetree = (Node *) lfirst(list_item); querytree_list = list_concat(querytree_list, - parse_analyze(parsetree, NULL, 0)); + parse_analyze(parsetree, NULL, 0)); } /* - * Attach each generated command to the proper place in the work queue. - * Note this could result in creation of entirely new work-queue entries. + * Attach each generated command to the proper place in the work + * queue. Note this could result in creation of entirely new + * work-queue entries. */ foreach(list_item, querytree_list) { @@ -5045,50 +5058,50 @@ ATPostAlterTypeParse(char *cmd, List **wqueue) switch (nodeTag(query->utilityStmt)) { case T_IndexStmt: - { - IndexStmt *stmt = (IndexStmt *) query->utilityStmt; - AlterTableCmd *newcmd; - - rel = relation_openrv(stmt->relation, AccessExclusiveLock); - tab = ATGetQueueEntry(wqueue, rel); - newcmd = makeNode(AlterTableCmd); - newcmd->subtype = AT_ReAddIndex; - newcmd->def = (Node *) stmt; - tab->subcmds[AT_PASS_OLD_INDEX] = - lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); - relation_close(rel, NoLock); - break; - } + { + IndexStmt *stmt = (IndexStmt *) query->utilityStmt; + AlterTableCmd *newcmd; + + rel = relation_openrv(stmt->relation, AccessExclusiveLock); + tab = ATGetQueueEntry(wqueue, rel); + newcmd = makeNode(AlterTableCmd); + newcmd->subtype = AT_ReAddIndex; + newcmd->def = (Node *) stmt; + tab->subcmds[AT_PASS_OLD_INDEX] = + lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); + relation_close(rel, NoLock); + break; + } case T_AlterTableStmt: - { - AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; - ListCell *lcmd; - - rel = relation_openrv(stmt->relation, AccessExclusiveLock); - tab = ATGetQueueEntry(wqueue, rel); - foreach(lcmd, stmt->cmds) { - AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt; + ListCell *lcmd; - switch (cmd->subtype) + rel = relation_openrv(stmt->relation, AccessExclusiveLock); + tab = ATGetQueueEntry(wqueue, rel); + foreach(lcmd, stmt->cmds) { - case AT_AddIndex: - cmd->subtype = AT_ReAddIndex; - tab->subcmds[AT_PASS_OLD_INDEX] = - lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); - break; - case AT_AddConstraint: - tab->subcmds[AT_PASS_OLD_CONSTR] = - lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); - break; - default: - elog(ERROR, "unexpected statement type: %d", - (int) cmd->subtype); + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + + switch (cmd->subtype) + { + case AT_AddIndex: + cmd->subtype = AT_ReAddIndex; + tab->subcmds[AT_PASS_OLD_INDEX] = + lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); + break; + 
case AT_AddConstraint: + tab->subcmds[AT_PASS_OLD_CONSTR] = + lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); + break; + default: + elog(ERROR, "unexpected statement type: %d", + (int) cmd->subtype); + } } + relation_close(rel, NoLock); + break; } - relation_close(rel, NoLock); - break; - } default: elog(ERROR, "unexpected statement type: %d", (int) nodeTag(query->utilityStmt)); @@ -5116,8 +5129,8 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) class_rel = heap_openr(RelationRelationName, RowExclusiveLock); tuple = SearchSysCache(RELOID, - ObjectIdGetDatum(relationOid), - 0, 0, 0); + ObjectIdGetDatum(relationOid), + 0, 0, 0); if (!HeapTupleIsValid(tuple)) elog(ERROR, "cache lookup failed for relation %u", relationOid); tuple_class = (Form_pg_class) GETSTRUCT(tuple); @@ -5139,7 +5152,7 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) NameStr(tuple_class->relname)))); } - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -5148,7 +5161,7 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) Datum repl_val[Natts_pg_class]; char repl_null[Natts_pg_class]; char repl_repl[Natts_pg_class]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -5156,8 +5169,8 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) /* Otherwise, check that we are the superuser */ if (!superuser()) ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to change owner"))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("must be superuser to change owner"))); memset(repl_null, ' ', sizeof(repl_null)); memset(repl_repl, ' ', sizeof(repl_repl)); @@ -5188,9 +5201,9 @@ ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId) heap_freetuple(newtuple); /* - * If we are operating on a table, also change the ownership of any - * indexes that belong to the table, as well as the table's toast - * table (if it has one) + * If we are operating on a table, also change the ownership of + * any indexes that belong to the table, as well as the table's + * toast table (if it has one) */ if (tuple_class->relkind == RELKIND_RELATION || tuple_class->relkind == RELKIND_TOASTVALUE) @@ -5265,7 +5278,7 @@ static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) { Oid tablespaceId; - AclResult aclresult; + AclResult aclresult; /* * We do our own permission checking because we want to allow this on @@ -5294,7 +5307,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) if (!OidIsValid(tablespaceId)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablespace \"%s\" does not exist", tablespacename))); + errmsg("tablespace \"%s\" does not exist", tablespacename))); /* Check its permissions */ aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE); @@ -5305,7 +5318,7 @@ ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename) if (OidIsValid(tab->newTableSpace)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("multiple SET TABLESPACE subcommands are not valid"))); + errmsg("multiple SET TABLESPACE subcommands are not valid"))); tab->newTableSpace = tablespaceId; } @@ -5339,13 +5352,13 @@ ATExecSetTableSpace(Oid tableOid, Oid newTableSpace) RelationGetRelationName(rel)))); /* - * Don't allow moving temp tables of other backends ... their - * local buffer manager is not going to cope. 
+ * Don't allow moving temp tables of other backends ... their local + * buffer manager is not going to cope. */ if (isOtherTempNamespace(RelationGetNamespace(rel))) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot move temporary tables of other sessions"))); + errmsg("cannot move temporary tables of other sessions"))); /* * No work if no change in tablespace. @@ -5425,14 +5438,15 @@ copy_relation_data(Relation rel, SMgrRelation dst) bool use_wal; BlockNumber nblocks; BlockNumber blkno; - char buf[BLCKSZ]; + char buf[BLCKSZ]; Page page = (Page) buf; /* - * Since we copy the data directly without looking at the shared buffers, - * we'd better first flush out any pages of the source relation that are - * in shared buffers. We assume no new pages will get loaded into - * buffers while we are holding exclusive lock on the rel. + * Since we copy the data directly without looking at the shared + * buffers, we'd better first flush out any pages of the source + * relation that are in shared buffers. We assume no new pages will + * get loaded into buffers while we are holding exclusive lock on the + * rel. */ FlushRelationBuffers(rel, 0); @@ -5479,7 +5493,7 @@ copy_relation_data(Relation rel, SMgrRelation dst) } /* - * Now write the page. We say isTemp = true even if it's not a + * Now write the page. We say isTemp = true even if it's not a * temp rel, because there's no need for smgr to schedule an fsync * for this write; we'll do it ourselves below. */ @@ -5488,18 +5502,18 @@ copy_relation_data(Relation rel, SMgrRelation dst) /* * If the rel isn't temp, we must fsync it down to disk before it's - * safe to commit the transaction. (For a temp rel we don't care + * safe to commit the transaction. (For a temp rel we don't care * since the rel will be uninteresting after a crash anyway.) * - * It's obvious that we must do this when not WAL-logging the copy. - * It's less obvious that we have to do it even if we did WAL-log the + * It's obvious that we must do this when not WAL-logging the copy. It's + * less obvious that we have to do it even if we did WAL-log the * copied pages. The reason is that since we're copying outside * shared buffers, a CHECKPOINT occurring during the copy has no way * to flush the previously written data to disk (indeed it won't know - * the new rel even exists). A crash later on would replay WAL from the - * checkpoint, therefore it wouldn't replay our earlier WAL entries. - * If we do not fsync those pages here, they might still not be on disk - * when the crash occurs. + * the new rel even exists). A crash later on would replay WAL from + * the checkpoint, therefore it wouldn't replay our earlier WAL + * entries. If we do not fsync those pages here, they might still not + * be on disk when the crash occurs. */ if (!rel->rd_istemp) smgrimmedsync(dst); @@ -5510,7 +5524,7 @@ copy_relation_data(Relation rel, SMgrRelation dst) * * Note: this is also invoked from outside this module; in such cases we * expect the caller to have verified that the relation is a table and we - * have all the right permissions. Callers expect this function + * have all the right permissions. Callers expect this function * to end with CommandCounterIncrement if it makes any changes. */ void @@ -5532,8 +5546,8 @@ AlterTableCreateToastTable(Oid relOid, bool silent) /* * Grab an exclusive lock on the target table, which we will NOT - * release until end of transaction. (This is probably redundant - * in all present uses...) + * release until end of transaction. 
(This is probably redundant in + * all present uses...) */ rel = heap_open(relOid, AccessExclusiveLock); @@ -5543,15 +5557,15 @@ AlterTableCreateToastTable(Oid relOid, bool silent) * We cannot allow toasting a shared relation after initdb (because * there's no way to mark it toasted in other databases' pg_class). * Unfortunately we can't distinguish initdb from a manually started - * standalone backend (toasting happens after the bootstrap phase, - * so checking IsBootstrapProcessingMode() won't work). However, we can + * standalone backend (toasting happens after the bootstrap phase, so + * checking IsBootstrapProcessingMode() won't work). However, we can * at least prevent this mistake under normal multi-user operation. */ shared_relation = rel->rd_rel->relisshared; if (shared_relation && IsUnderPostmaster) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("shared tables cannot be toasted after initdb"))); + errmsg("shared tables cannot be toasted after initdb"))); /* * Is it already toasted? @@ -5894,8 +5908,8 @@ PreCommit_on_commit_actions(void) void AtEOXact_on_commit_actions(bool isCommit, TransactionId xid) { - ListCell *cur_item; - ListCell *prev_item; + ListCell *cur_item; + ListCell *prev_item; prev_item = NULL; cur_item = list_head(on_commits); @@ -5930,15 +5944,15 @@ AtEOXact_on_commit_actions(bool isCommit, TransactionId xid) * Post-subcommit or post-subabort cleanup for ON COMMIT management. * * During subabort, we can immediately remove entries created during this - * subtransaction. During subcommit, just relabel entries marked during + * subtransaction. During subcommit, just relabel entries marked during * this subtransaction as being the parent's responsibility. */ void AtEOSubXact_on_commit_actions(bool isCommit, TransactionId childXid, TransactionId parentXid) { - ListCell *cur_item; - ListCell *prev_item; + ListCell *cur_item; + ListCell *prev_item; prev_item = NULL; cur_item = list_head(on_commits); diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c index 05a13315a1f..15fe8392882 100644 --- a/src/backend/commands/tablespace.c +++ b/src/backend/commands/tablespace.c @@ -35,7 +35,7 @@ * To allow CREATE DATABASE to give a new database a default tablespace * that's different from the template database's default, we make the * provision that a zero in pg_class.reltablespace means the database's - * default tablespace. Without this, CREATE DATABASE would have to go in + * default tablespace. Without this, CREATE DATABASE would have to go in * and munge the system catalogs of the new database. This special meaning * of zero also applies in pg_namespace.nsptablespace. * @@ -45,7 +45,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.8 2004/08/08 01:31:11 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.9 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -95,11 +95,11 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) { #ifdef HAVE_SYMLINK struct stat st; - char *dir; + char *dir; /* - * The global tablespace doesn't have per-database subdirectories, - * so nothing to do for it. + * The global tablespace doesn't have per-database subdirectories, so + * nothing to do for it. */ if (spcNode == GLOBALTABLESPACE_OID) return; @@ -118,7 +118,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) * DROP TABLESPACE or TablespaceCreateDbspace is running * concurrently. 
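The copy_relation_data() hunk just above carries the key argument: the pages are written outside shared buffers, so a checkpoint cannot flush them, and the new file must be fsync'd before commit even when the copy was WAL-logged. A rough POSIX analogue of "copy outside the cache, then make it durable yourself" (copy_and_sync() and the 8 kB buffer size are assumptions of the sketch, not backend code; error handling is minimal):

#include <stdio.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>

#define BUFSZ 8192

/*
 * Copy src to dst block by block through plain read()/write(), then fsync
 * the destination before the caller is allowed to treat the copy as durable.
 */
static int
copy_and_sync(const char *src, const char *dst)
{
    char    buf[BUFSZ];
    ssize_t nread;
    int     srcfd = open(src, O_RDONLY);
    int     dstfd = open(dst, O_WRONLY | O_CREAT | O_TRUNC, 0600);

    if (srcfd < 0 || dstfd < 0)
    {
        perror("open");
        return -1;
    }

    while ((nread = read(srcfd, buf, BUFSZ)) > 0)
    {
        if (write(dstfd, buf, (size_t) nread) != nread)
        {
            perror("write");
            return -1;
        }
    }

    /*
     * Without this, a crash after "commit" could leave blocks only in the
     * kernel page cache and never on disk.
     */
    if (fsync(dstfd) != 0)
    {
        perror("fsync");
        return -1;
    }

    close(srcfd);
    close(dstfd);
    return 0;
}

int
main(int argc, char **argv)
{
    if (argc != 3)
    {
        fprintf(stderr, "usage: %s src dst\n", argv[0]);
        return 1;
    }
    return copy_and_sync(argv[1], argv[2]) == 0 ? 0 : 1;
}

The same reasoning is why the hunk skips smgrimmedsync() for temp relations: their contents are uninteresting after a crash anyway.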
Simple reads from pg_tablespace are OK. */ - Relation rel; + Relation rel; if (!isRedo) rel = heap_openr(TableSpaceRelationName, ExclusiveLock); @@ -126,8 +126,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) rel = NULL; /* - * Recheck to see if someone created the directory while - * we were waiting for lock. + * Recheck to see if someone created the directory while we + * were waiting for lock. */ if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode)) { @@ -139,8 +139,8 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) if (mkdir(dir, S_IRWXU) < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not create directory \"%s\": %m", - dir))); + errmsg("could not create directory \"%s\": %m", + dir))); } /* OK to drop the exclusive lock */ @@ -165,7 +165,7 @@ TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo) } pfree(dir); -#endif /* HAVE_SYMLINK */ +#endif /* HAVE_SYMLINK */ } /* @@ -179,13 +179,13 @@ void CreateTableSpace(CreateTableSpaceStmt *stmt) { #ifdef HAVE_SYMLINK - Relation rel; - Datum values[Natts_pg_tablespace]; + Relation rel; + Datum values[Natts_pg_tablespace]; char nulls[Natts_pg_tablespace]; HeapTuple tuple; Oid tablespaceoid; - char *location; - char *linkloc; + char *location; + char *linkloc; AclId ownerid; /* validate */ @@ -196,10 +196,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) /* Must be super user */ if (!superuser()) ereport(ERROR, - (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("permission denied to create tablespace \"%s\"", - stmt->tablespacename), - errhint("Must be superuser to create a tablespace."))); + (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), + errmsg("permission denied to create tablespace \"%s\"", + stmt->tablespacename), + errhint("Must be superuser to create a tablespace."))); /* However, the eventual owner of the tablespace need not be */ if (stmt->owner) @@ -218,7 +218,7 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) if (strchr(location, '\'')) ereport(ERROR, (errcode(ERRCODE_INVALID_NAME), - errmsg("tablespace location may not contain single quotes"))); + errmsg("tablespace location may not contain single quotes"))); /* * Allowing relative paths seems risky @@ -231,9 +231,9 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) errmsg("tablespace location must be an absolute path"))); /* - * Check that location isn't too long. Remember that we're going to append - * '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path - * explicitly? This may be overly conservative.) + * Check that location isn't too long. Remember that we're going to + * append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole + * path explicitly? This may be overly conservative.) */ if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10)) ereport(ERROR, @@ -250,12 +250,12 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable tablespace name \"%s\"", stmt->tablespacename), - errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); + errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); /* - * Check that there is no other tablespace by this name. (The - * unique index would catch this anyway, but might as well give - * a friendlier message.) + * Check that there is no other tablespace by this name. (The unique + * index would catch this anyway, but might as well give a friendlier + * message.) 
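TablespaceCreateDbspace() in the hunks above stats the per-database directory, takes ExclusiveLock on pg_tablespace only if the directory seems to be missing, rechecks under the lock, and only then calls mkdir(). The same check / lock / recheck / create shape in plain POSIX terms, with a pthread mutex standing in for the catalog lock (ensure_directory() and the sample path are invented; compile with -lpthread):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <pthread.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

/* Create "dir" if it does not already exist, tolerating concurrent callers. */
static int
ensure_directory(const char *dir)
{
    struct stat st;

    /* Fast path: somebody may already have created it. */
    if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
        return 0;

    pthread_mutex_lock(&dir_lock);

    /* Recheck under the lock: another caller may have won the race. */
    if (!(stat(dir, &st) == 0 && S_ISDIR(st.st_mode)))
    {
        if (mkdir(dir, S_IRWXU) < 0 && errno != EEXIST)
        {
            pthread_mutex_unlock(&dir_lock);
            fprintf(stderr, "could not create directory \"%s\": %s\n",
                    dir, strerror(errno));
            return -1;
        }
    }

    pthread_mutex_unlock(&dir_lock);
    return 0;
}

int
main(void)
{
    return ensure_directory("/tmp/pg_sketch_dbspace");
}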
*/ if (OidIsValid(get_tablespace_oid(stmt->tablespacename))) ereport(ERROR, @@ -293,14 +293,14 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) heap_freetuple(tuple); /* - * Attempt to coerce target directory to safe permissions. If this + * Attempt to coerce target directory to safe permissions. If this * fails, it doesn't exist or has the wrong owner. */ if (chmod(location, 0700) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not set permissions on directory \"%s\": %m", - location))); + errmsg("could not set permissions on directory \"%s\": %m", + location))); /* * Check the target directory is empty. @@ -312,10 +312,10 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) location))); /* - * Create the PG_VERSION file in the target directory. This has several - * purposes: to make sure we can write in the directory, to prevent - * someone from creating another tablespace pointing at the same - * directory (the emptiness check above will fail), and to label + * Create the PG_VERSION file in the target directory. This has + * several purposes: to make sure we can write in the directory, to + * prevent someone from creating another tablespace pointing at the + * same directory (the emptiness check above will fail), and to label * tablespace directories by PG version. */ set_short_version(location); @@ -337,11 +337,11 @@ CreateTableSpace(CreateTableSpaceStmt *stmt) heap_close(rel, RowExclusiveLock); -#else /* !HAVE_SYMLINK */ +#else /* !HAVE_SYMLINK */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("tablespaces are not supported on this platform"))); -#endif /* HAVE_SYMLINK */ +#endif /* HAVE_SYMLINK */ } /* @@ -353,23 +353,24 @@ void DropTableSpace(DropTableSpaceStmt *stmt) { #ifdef HAVE_SYMLINK - char *tablespacename = stmt->tablespacename; - HeapScanDesc scandesc; - Relation rel; - HeapTuple tuple; - ScanKeyData entry[1]; - char *location; - Oid tablespaceoid; - DIR *dirdesc; + char *tablespacename = stmt->tablespacename; + HeapScanDesc scandesc; + Relation rel; + HeapTuple tuple; + ScanKeyData entry[1]; + char *location; + Oid tablespaceoid; + DIR *dirdesc; struct dirent *de; - char *subfile; + char *subfile; /* don't call this in a transaction block */ PreventTransactionChain((void *) stmt, "DROP TABLESPACE"); /* * Acquire ExclusiveLock on pg_tablespace to ensure that no one else - * is trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently. + * is trying to do DROP TABLESPACE or TablespaceCreateDbspace + * concurrently. */ rel = heap_openr(TableSpaceRelationName, ExclusiveLock); @@ -409,15 +410,15 @@ DropTableSpace(DropTableSpaceStmt *stmt) /* * Check if the tablespace still contains any files. We try to rmdir * each per-database directory we find in it. rmdir failure implies - * there are still files in that subdirectory, so give up. (We do not - * have to worry about undoing any already completed rmdirs, since - * the next attempt to use the tablespace from that database will simply + * there are still files in that subdirectory, so give up. (We do not + * have to worry about undoing any already completed rmdirs, since the + * next attempt to use the tablespace from that database will simply * recreate the subdirectory via TablespaceCreateDbspace.) * - * Since we hold exclusive lock, no one else should be creating any - * fresh subdirectories in parallel. It is possible that new files - * are being created within subdirectories, though, so the rmdir - * call could fail. Worst consequence is a less friendly error message. 
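The DropTableSpace() comment above leans on rmdir() semantics: removing each per-database subdirectory fails if anything is still inside it, and that failure is taken as "tablespace not empty". A standalone sketch of that probe using only dirent.h and rmdir() (remove_subdirs_or_fail() and the sample path are invented; the backend's AllocateDir/ReadDir wrap the same calls):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <dirent.h>
#include <unistd.h>

/* Try to remove every subdirectory of "location"; stop if one is non-empty. */
static int
remove_subdirs_or_fail(const char *location)
{
    DIR *dirdesc = opendir(location);
    struct dirent *de;
    char subpath[4096];

    if (dirdesc == NULL)
    {
        fprintf(stderr, "could not open \"%s\": %s\n", location, strerror(errno));
        return -1;
    }

    while ((de = readdir(dirdesc)) != NULL)
    {
        if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
            continue;

        snprintf(subpath, sizeof(subpath), "%s/%s", location, de->d_name);
        if (rmdir(subpath) != 0)
        {
            /* Most likely ENOTEMPTY: some database still has relations here. */
            fprintf(stderr, "\"%s\" is not empty (%s); giving up\n",
                    subpath, strerror(errno));
            closedir(dirdesc);
            return -1;
        }
    }
    closedir(dirdesc);
    return 0;
}

int
main(void)
{
    return remove_subdirs_or_fail("/tmp/pg_sketch_tablespace");
}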
+ * Since we hold exclusive lock, no one else should be creating any fresh + * subdirectories in parallel. It is possible that new files are + * being created within subdirectories, though, so the rmdir call + * could fail. Worst consequence is a less friendly error message. */ dirdesc = AllocateDir(location); if (dirdesc == NULL) @@ -458,8 +459,11 @@ DropTableSpace(DropTableSpaceStmt *stmt) pfree(subfile); } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif @@ -494,15 +498,15 @@ DropTableSpace(DropTableSpaceStmt *stmt) ereport(ERROR, (errcode_for_file_access(), errmsg("could not remove junction dir \"%s\": %m", - location))); + location))); #endif pfree(subfile); pfree(location); /* - * We have successfully destroyed the infrastructure ... there is - * now no way to roll back the DROP ... so proceed to remove the + * We have successfully destroyed the infrastructure ... there is now + * no way to roll back the DROP ... so proceed to remove the * pg_tablespace tuple. */ simple_heap_delete(rel, &tuple->t_self); @@ -511,11 +515,11 @@ DropTableSpace(DropTableSpaceStmt *stmt) heap_close(rel, ExclusiveLock); -#else /* !HAVE_SYMLINK */ +#else /* !HAVE_SYMLINK */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("tablespaces are not supported on this platform"))); -#endif /* HAVE_SYMLINK */ +#endif /* HAVE_SYMLINK */ } @@ -579,7 +583,7 @@ set_short_version(const char *path) static bool directory_is_empty(const char *path) { - DIR *dirdesc; + DIR *dirdesc; struct dirent *de; dirdesc = AllocateDir(path); @@ -602,8 +606,11 @@ directory_is_empty(const char *path) return false; } #ifdef WIN32 - /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but - not in released version */ + + /* + * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but + * not in released version + */ if (GetLastError() == ERROR_NO_MORE_FILES) errno = 0; #endif @@ -624,11 +631,11 @@ directory_is_empty(const char *path) Oid get_tablespace_oid(const char *tablespacename) { - Oid result; - Relation rel; + Oid result; + Relation rel; HeapScanDesc scandesc; HeapTuple tuple; - ScanKeyData entry[1]; + ScanKeyData entry[1]; /* Search pg_tablespace */ rel = heap_openr(TableSpaceRelationName, AccessShareLock); @@ -645,8 +652,8 @@ get_tablespace_oid(const char *tablespacename) else result = InvalidOid; - heap_endscan(scandesc); - heap_close(rel, AccessShareLock); + heap_endscan(scandesc); + heap_close(rel, AccessShareLock); return result; } @@ -659,11 +666,11 @@ get_tablespace_oid(const char *tablespacename) char * get_tablespace_name(Oid spc_oid) { - char *result; - Relation rel; + char *result; + Relation rel; HeapScanDesc scandesc; HeapTuple tuple; - ScanKeyData entry[1]; + ScanKeyData entry[1]; /* Search pg_tablespace */ rel = heap_openr(TableSpaceRelationName, AccessShareLock); @@ -681,8 +688,8 @@ get_tablespace_name(Oid spc_oid) else result = NULL; - heap_endscan(scandesc); - heap_close(rel, AccessShareLock); + heap_endscan(scandesc); + heap_close(rel, AccessShareLock); return result; } @@ -693,8 +700,8 @@ get_tablespace_name(Oid spc_oid) void RenameTableSpace(const char *oldname, const char *newname) { - Relation rel; - ScanKeyData entry[1]; + Relation rel; + ScanKeyData entry[1]; HeapScanDesc scan; HeapTuple tup; HeapTuple newtuple; @@ -729,7 +736,7 @@ 
RenameTableSpace(const char *oldname, const char *newname) ereport(ERROR, (errcode(ERRCODE_RESERVED_NAME), errmsg("unacceptable tablespace name \"%s\"", newname), - errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); + errdetail("The prefix \"pg_\" is reserved for system tablespaces."))); /* Make sure the new name doesn't exist */ ScanKeyInit(&entry[0], @@ -743,7 +750,7 @@ RenameTableSpace(const char *oldname, const char *newname) (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("tablespace \"%s\" already exists", newname))); - + heap_endscan(scan); /* OK, update the entry */ @@ -761,8 +768,8 @@ RenameTableSpace(const char *oldname, const char *newname) void AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) { - Relation rel; - ScanKeyData entry[1]; + Relation rel; + ScanKeyData entry[1]; HeapScanDesc scandesc; Form_pg_tablespace spcForm; HeapTuple tup; @@ -783,7 +790,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) spcForm = (Form_pg_tablespace) GETSTRUCT(tup); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. */ @@ -792,7 +799,7 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) Datum repl_val[Natts_pg_tablespace]; char repl_null[Natts_pg_tablespace]; char repl_repl[Natts_pg_tablespace]; - Acl *newAcl; + Acl *newAcl; Datum aclDatum; bool isNull; HeapTuple newtuple; @@ -814,9 +821,9 @@ AlterTableSpaceOwner(const char *name, AclId newOwnerSysId) * necessary when the ACL is non-null. */ aclDatum = heap_getattr(tup, - Anum_pg_tablespace_spcacl, - RelationGetDescr(rel), - &isNull); + Anum_pg_tablespace_spcacl, + RelationGetDescr(rel), + &isNull); if (!isNull) { newAcl = aclnewowner(DatumGetAclP(aclDatum), diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index dfc8098782d..7e73f6b000f 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.167 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.168 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -480,8 +480,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior) if (!HeapTupleIsValid(tup)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("trigger \"%s\" for table \"%s\" does not exist", - trigname, get_rel_name(relid)))); + errmsg("trigger \"%s\" for table \"%s\" does not exist", + trigname, get_rel_name(relid)))); if (!pg_class_ownercheck(relid, GetUserId())) aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, @@ -694,8 +694,8 @@ renametrig(Oid relid, { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("trigger \"%s\" for table \"%s\" does not exist", - oldname, RelationGetRelationName(targetrel)))); + errmsg("trigger \"%s\" for table \"%s\" does not exist", + oldname, RelationGetRelationName(targetrel)))); } systable_endscan(tgscan); @@ -1638,7 +1638,7 @@ ltrmark:; * Deferred trigger stuff * * The DeferredTriggersData struct holds data about pending deferred - * trigger events during the current transaction tree. The struct and + * trigger events during the current transaction tree. 
The struct and * most of its subsidiary data are kept in TopTransactionContext; however * the individual event records are kept in CurTransactionContext, so that * they will easily go away during subtransaction abort. @@ -1670,7 +1670,7 @@ ltrmark:; * saves a copy, which we use to restore the state if we abort. * * numpushed and numalloc keep control of allocation and storage in the above - * stacks. numpushed is essentially the current subtransaction nesting depth. + * stacks. numpushed is essentially the current subtransaction nesting depth. * * XXX We need to be able to save the per-event data in a file if it grows too * large. @@ -1723,11 +1723,11 @@ typedef struct DeferredTriggerStatusData *DeferredTriggerStatus; */ typedef struct DeferredTriggerStateData { - bool all_isset; - bool all_isdeferred; - int numstates; /* number of trigstates[] entries in use */ - int numalloc; /* allocated size of trigstates[] */ - DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */ + bool all_isset; + bool all_isdeferred; + int numstates; /* number of trigstates[] entries in use */ + int numalloc; /* allocated size of trigstates[] */ + DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */ } DeferredTriggerStateData; typedef DeferredTriggerStateData *DeferredTriggerState; @@ -1735,15 +1735,15 @@ typedef DeferredTriggerStateData *DeferredTriggerState; /* Per-transaction data */ typedef struct DeferredTriggersData { - DeferredTriggerState state; - DeferredTriggerEvent events; - DeferredTriggerEvent tail_thisxact; - DeferredTriggerEvent events_imm; - DeferredTriggerEvent *tail_stack; - DeferredTriggerEvent *imm_stack; - DeferredTriggerState *state_stack; - int numpushed; - int numalloc; + DeferredTriggerState state; + DeferredTriggerEvent events; + DeferredTriggerEvent tail_thisxact; + DeferredTriggerEvent events_imm; + DeferredTriggerEvent *tail_stack; + DeferredTriggerEvent *imm_stack; + DeferredTriggerState *state_stack; + int numpushed; + int numalloc; } DeferredTriggersData; typedef DeferredTriggersData *DeferredTriggers; @@ -1757,7 +1757,7 @@ static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno, static DeferredTriggerState DeferredTriggerStateCreate(int numalloc); static DeferredTriggerState DeferredTriggerStateCopy(DeferredTriggerState state); static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState state, - Oid tgoid, bool tgisdeferred); + Oid tgoid, bool tgisdeferred); /* ---------- @@ -1770,8 +1770,8 @@ static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState sta static bool deferredTriggerCheckState(Oid tgoid, int32 itemstate) { - bool tgisdeferred; - int i; + bool tgisdeferred; + int i; /* * For not-deferrable triggers (i.e. normal AFTER ROW triggers and @@ -1798,7 +1798,8 @@ deferredTriggerCheckState(Oid tgoid, int32 itemstate) /* * No ALL state known either, remember the default state as the - * current and return that. (XXX why do we bother making a state entry?) + * current and return that. (XXX why do we bother making a state + * entry?) */ tgisdeferred = ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0); deferredTriggers->state = @@ -1982,8 +1983,8 @@ deferredTriggerInvokeEvents(bool immediate_only) /* * If immediate_only is true, then the only events that could need - * firing are those since events_imm. (But if - * events_imm is NULL, we must scan the entire list.) + * firing are those since events_imm. (But if events_imm is NULL, we + * must scan the entire list.) 
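The comment that closes the hunk above describes the events_imm optimization: when only immediately-firable events can be pending, the scan resumes from a remembered list position instead of walking the whole event queue again. The same resume-pointer idea over a toy singly linked queue (Event, add_event(), and fire_new_events() are placeholders for the sketch):

#include <stdio.h>
#include <stdlib.h>

typedef struct Event
{
    int           id;
    struct Event *next;
} Event;

static Event *head = NULL;
static Event *tail = NULL;
static Event *resume_from = NULL;   /* analogue of events_imm: last event
                                     * already examined by an immediate pass */

static void
add_event(int id)
{
    Event *e = malloc(sizeof(Event));

    e->id = id;
    e->next = NULL;
    if (tail)
        tail->next = e;
    else
        head = e;
    tail = e;
}

/* Fire only events queued since the previous call; remember where we stopped. */
static void
fire_new_events(void)
{
    Event *start = resume_from ? resume_from->next : head;

    for (Event *e = start; e != NULL; e = e->next)
        printf("firing event %d\n", e->id);
    resume_from = tail;
}

int
main(void)
{
    add_event(1);
    add_event(2);
    fire_new_events();          /* fires 1 and 2 */
    add_event(3);
    fire_new_events();          /* fires only 3 */
    return 0;
}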
*/ if (immediate_only && deferredTriggers->events_imm != NULL) { @@ -2003,13 +2004,13 @@ deferredTriggerInvokeEvents(bool immediate_only) int i; /* - * Skip executing cancelled events, and events done by transactions - * that are not aborted. + * Skip executing cancelled events, and events done by + * transactions that are not aborted. */ if (!(event->dte_event & TRIGGER_DEFERRED_CANCELED) || - (event->dte_event & TRIGGER_DEFERRED_DONE && - TransactionIdIsValid(event->dte_done_xid) && - !TransactionIdDidAbort(event->dte_done_xid))) + (event->dte_event & TRIGGER_DEFERRED_DONE && + TransactionIdIsValid(event->dte_done_xid) && + !TransactionIdDidAbort(event->dte_done_xid))) { MemoryContextReset(per_tuple_context); @@ -2019,8 +2020,8 @@ deferredTriggerInvokeEvents(bool immediate_only) for (i = 0; i < event->dte_n_items; i++) { if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE && - TransactionIdIsValid(event->dte_item[i].dti_done_xid) && - !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid))) + TransactionIdIsValid(event->dte_item[i].dti_done_xid) && + !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid))) continue; /* @@ -2097,8 +2098,8 @@ deferredTriggerInvokeEvents(bool immediate_only) { /* * We can drop an item if it's done, but only if we're not - * inside a subtransaction because it could abort later on. - * We will want to check the item again if it does. + * inside a subtransaction because it could abort later on. We + * will want to check the item again if it does. */ if (immediate_only && !IsSubTransaction()) { @@ -2209,8 +2210,8 @@ DeferredTriggerEndXact(void) /* * Forget everything we know about deferred triggers. * - * Since all the info is in TopTransactionContext or children thereof, - * we need do nothing special to reclaim memory. + * Since all the info is in TopTransactionContext or children thereof, we + * need do nothing special to reclaim memory. */ deferredTriggers = NULL; } @@ -2236,8 +2237,8 @@ DeferredTriggerAbortXact(void) /* * Forget everything we know about deferred triggers. * - * Since all the info is in TopTransactionContext or children thereof, - * we need do nothing special to reclaim memory. + * Since all the info is in TopTransactionContext or children thereof, we + * need do nothing special to reclaim memory. */ deferredTriggers = NULL; } @@ -2285,13 +2286,13 @@ DeferredTriggerBeginSubXact(void) deferredTriggers->tail_stack = (DeferredTriggerEvent *) repalloc(deferredTriggers->tail_stack, - deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); + deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); deferredTriggers->imm_stack = (DeferredTriggerEvent *) repalloc(deferredTriggers->imm_stack, - deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); + deferredTriggers->numalloc * sizeof(DeferredTriggerEvent)); deferredTriggers->state_stack = (DeferredTriggerState *) repalloc(deferredTriggers->state_stack, - deferredTriggers->numalloc * sizeof(DeferredTriggerState)); + deferredTriggers->numalloc * sizeof(DeferredTriggerState)); } } @@ -2358,8 +2359,8 @@ DeferredTriggerEndSubXact(bool isCommit) deferredTriggers->tail_thisxact->dte_next = NULL; /* - * We don't need to free the items, since the CurTransactionContext - * will be reset shortly. + * We don't need to free the items, since the + * CurTransactionContext will be reset shortly. 
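Several of the rewrapped comments above rely on memory-context semantics: per-event data lives in CurTransactionContext, so ending the subtransaction reclaims it wholesale and nothing needs to be freed item by item. A toy fixed-size region with a single reset operation shows the idea (Region, region_alloc(), and region_reset() are inventions of the sketch; real MemoryContexts grow dynamically and do far more):

#include <stdio.h>
#include <stddef.h>

#define REGION_SIZE 4096

typedef struct Region
{
    char   buf[REGION_SIZE];
    size_t used;
} Region;

/* Bump-allocate from the region; there is no per-object free. */
static void *
region_alloc(Region *r, size_t size)
{
    size = (size + 7) & ~(size_t) 7;    /* keep 8-byte alignment */
    if (r->used + size > REGION_SIZE)
        return NULL;
    void *p = r->buf + r->used;
    r->used += size;
    return p;
}

/* Throw away everything allocated so far in one step. */
static void
region_reset(Region *r)
{
    r->used = 0;
}

int
main(void)
{
    Region subxact_ctx = {.used = 0};

    for (int i = 0; i < 32; i++)
        (void) region_alloc(&subxact_ctx, 64);     /* "event records" */
    printf("used before reset: %zu bytes\n", subxact_ctx.used);

    region_reset(&subxact_ctx);                    /* subtransaction ends */
    printf("used after reset: %zu bytes\n", subxact_ctx.used);
    return 0;
}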
*/ /* @@ -2393,7 +2394,7 @@ DeferredTriggerStateCreate(int numalloc) state = (DeferredTriggerState) MemoryContextAllocZero(TopTransactionContext, sizeof(DeferredTriggerStateData) + - (numalloc - 1) * sizeof(DeferredTriggerStatusData)); + (numalloc - 1) *sizeof(DeferredTriggerStatusData)); state->numalloc = numalloc; @@ -2429,13 +2430,13 @@ DeferredTriggerStateAddItem(DeferredTriggerState state, { if (state->numstates >= state->numalloc) { - int newalloc = state->numalloc * 2; + int newalloc = state->numalloc * 2; - newalloc = Max(newalloc, 8); /* in case original has size 0 */ + newalloc = Max(newalloc, 8); /* in case original has size 0 */ state = (DeferredTriggerState) repalloc(state, sizeof(DeferredTriggerStateData) + - (newalloc - 1) * sizeof(DeferredTriggerStatusData)); + (newalloc - 1) *sizeof(DeferredTriggerStatusData)); state->numalloc = newalloc; Assert(state->numstates < state->numalloc); } @@ -2463,8 +2464,9 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt) return; /* - * If in a subtransaction, and we didn't save the current state already, - * save it so it can be restored if the subtransaction aborts. + * If in a subtransaction, and we didn't save the current state + * already, save it so it can be restored if the subtransaction + * aborts. */ if (deferredTriggers->numpushed > 0 && deferredTriggers->state_stack[deferredTriggers->numpushed - 1] == NULL) @@ -2686,7 +2688,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger, return; /* - * Create a new event. We use the CurTransactionContext so the event + * Create a new event. We use the CurTransactionContext so the event * will automatically go away if the subtransaction aborts. */ oldcxt = MemoryContextSwitchTo(CurTransactionContext); diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c index 8fd16fdb58d..6a438093298 100644 --- a/src/backend/commands/typecmds.c +++ b/src/backend/commands/typecmds.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.62 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.63 2004/08/29 05:06:41 momjian Exp $ * * DESCRIPTION * The "DefineFoo" routines take the parse tree and pick out the @@ -302,8 +302,8 @@ DefineType(List *names, List *parameters) else ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type output function %s must return type \"cstring\"", - NameListToString(outputName)))); + errmsg("type output function %s must return type \"cstring\"", + NameListToString(outputName)))); } if (receiveOid) { @@ -311,8 +311,8 @@ DefineType(List *names, List *parameters) if (resulttype != typoid) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type receive function %s must return type %s", - NameListToString(receiveName), typeName))); + errmsg("type receive function %s must return type %s", + NameListToString(receiveName), typeName))); } if (sendOid) { @@ -320,13 +320,14 @@ DefineType(List *names, List *parameters) if (resulttype != BYTEAOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type send function %s must return type \"bytea\"", - NameListToString(sendName)))); + errmsg("type send function %s must return type \"bytea\"", + NameListToString(sendName)))); } /* - * Convert analysis function proc name to an OID. If no analysis function - * is specified, we'll use zero to select the built-in default algorithm. + * Convert analysis function proc name to an OID. 
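DeferredTriggerStateAddItem() in the hunk above grows trigstates[] by doubling, with a floor of 8 entries, and repallocs the whole variable-length struct. A standalone sketch of the same growth rule using malloc/realloc and a C99 flexible array member instead of the backend's one-element-array-minus-one arithmetic (TrigStatus/TrigState and the OID values are simplified stand-ins; allocation-failure checks are omitted to keep it short):

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

typedef struct TrigStatus
{
    unsigned int tgoid;
    bool         tgisdeferred;
} TrigStatus;

typedef struct TrigState
{
    int        numstates;       /* entries in use */
    int        numalloc;        /* allocated entries */
    TrigStatus trigstates[];    /* flexible array member */
} TrigState;

static TrigState *
state_create(int numalloc)
{
    TrigState *s = calloc(1, sizeof(TrigState) + numalloc * sizeof(TrigStatus));

    s->numalloc = numalloc;
    return s;
}

static TrigState *
state_add_item(TrigState *s, unsigned int tgoid, bool deferred)
{
    if (s->numstates >= s->numalloc)
    {
        int newalloc = s->numalloc * 2;

        if (newalloc < 8)
            newalloc = 8;       /* in case the original had size 0 */
        s = realloc(s, sizeof(TrigState) + newalloc * sizeof(TrigStatus));
        s->numalloc = newalloc;
    }
    s->trigstates[s->numstates].tgoid = tgoid;
    s->trigstates[s->numstates].tgisdeferred = deferred;
    s->numstates++;
    return s;
}

int
main(void)
{
    TrigState *s = state_create(0);

    for (unsigned int oid = 1; oid <= 20; oid++)
        s = state_add_item(s, oid, oid % 2 == 0);
    printf("%d items, %d allocated\n", s->numstates, s->numalloc);
    free(s);
    return 0;
}

Doubling keeps the amortized cost of an append constant, which is why the same pattern appears again in the subtransaction stack repalloc above.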
If no analysis + * function is specified, we'll use zero to select the built-in + * default algorithm. */ if (analyzeName) analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid); @@ -691,7 +692,7 @@ DefineDomain(CreateDomainStmt *stmt) case CONSTR_UNIQUE: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unique constraints not possible for domains"))); + errmsg("unique constraints not possible for domains"))); break; case CONSTR_PRIMARY: @@ -932,8 +933,8 @@ findTypeOutputFunction(List *procname, Oid typeOid) * arguments (data value, element OID). * * For backwards compatibility we allow OPAQUE in place of the actual - * type name; if we see this, we issue a warning and fix up the pg_proc - * entry. + * type name; if we see this, we issue a warning and fix up the + * pg_proc entry. */ MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid)); @@ -967,8 +968,8 @@ findTypeOutputFunction(List *procname, Oid typeOid) { /* Found, but must complain and fix the pg_proc entry */ ereport(WARNING, - (errmsg("changing argument type of function %s from \"opaque\" to %s", - NameListToString(procname), format_type_be(typeOid)))); + (errmsg("changing argument type of function %s from \"opaque\" to %s", + NameListToString(procname), format_type_be(typeOid)))); SetFunctionArgType(procOid, 0, typeOid); /* @@ -1062,7 +1063,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) Oid procOid; /* - * Analyze functions always take one INTERNAL argument and return bool. + * Analyze functions always take one INTERNAL argument and return + * bool. */ MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid)); @@ -1078,8 +1080,8 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid) if (get_func_rettype(procOid) != BOOLOID) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("type analyze function %s must return type \"boolean\"", - NameListToString(procname)))); + errmsg("type analyze function %s must return type \"boolean\"", + NameListToString(procname)))); return procOid; } @@ -1110,8 +1112,8 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist) errmsg("composite type must have at least one attribute"))); /* - * now set the parameters for keys/inheritance etc. All of these - * are uninteresting for composite types... + * now set the parameters for keys/inheritance etc. All of these are + * uninteresting for composite types... 
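findTypeOutputFunction() in the hunk above first looks for the function under its real argument type and only then falls back to the legacy "opaque" signature, emitting a WARNING and patching the pg_proc entry so later lookups succeed directly. The lookup-with-fallback-and-fixup shape, reduced to a small in-memory table (FuncEntry, the catalog[] contents, and the OPAQUE sentinel are all inventions of the sketch):

#include <stdio.h>
#include <string.h>

#define OPAQUE 0u    /* legacy "any type" placeholder, sketch only */

typedef struct FuncEntry
{
    const char  *name;
    unsigned int argtype;        /* declared argument type OID */
} FuncEntry;

static FuncEntry catalog[] = {
    {"mytype_out", OPAQUE},      /* old-style declaration */
    {"othertype_out", 16401u},
};

/* Find name(argtype); if only name(OPAQUE) exists, warn and repair it. */
static FuncEntry *
find_output_function(const char *name, unsigned int typeoid)
{
    FuncEntry *fallback = NULL;

    for (size_t i = 0; i < sizeof(catalog) / sizeof(catalog[0]); i++)
    {
        if (strcmp(catalog[i].name, name) != 0)
            continue;
        if (catalog[i].argtype == typeoid)
            return &catalog[i];          /* exact signature */
        if (catalog[i].argtype == OPAQUE)
            fallback = &catalog[i];
    }
    if (fallback)
    {
        fprintf(stderr,
                "warning: changing argument type of %s from \"opaque\" to %u\n",
                name, typeoid);
        fallback->argtype = typeoid;     /* fix up the stored signature */
        return fallback;
    }
    return NULL;
}

int
main(void)
{
    FuncEntry *f = find_output_function("mytype_out", 16400u);

    if (f)
        printf("using %s(%u)\n", f->name, f->argtype);
    return 0;
}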
*/ createStmt->relation = (RangeVar *) typevar; createStmt->tableElts = coldeflist; @@ -1337,8 +1339,8 @@ AlterDomainNotNull(List *names, bool notNull) ereport(ERROR, (errcode(ERRCODE_NOT_NULL_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains null values", - NameStr(tupdesc->attrs[attnum - 1]->attname), - RelationGetRelationName(testrel)))); + NameStr(tupdesc->attrs[attnum - 1]->attname), + RelationGetRelationName(testrel)))); } } heap_endscan(scan); @@ -1499,7 +1501,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) if (IsA(newConstraint, FkConstraint)) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("foreign key constraints not possible for domains"))); + errmsg("foreign key constraints not possible for domains"))); /* otherwise it should be a plain Constraint */ if (!IsA(newConstraint, Constraint)) @@ -1517,13 +1519,13 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) case CONSTR_UNIQUE: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("unique constraints not possible for domains"))); + errmsg("unique constraints not possible for domains"))); break; case CONSTR_PRIMARY: ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("primary key constraints not possible for domains"))); + errmsg("primary key constraints not possible for domains"))); break; case CONSTR_ATTR_DEFERRABLE: @@ -1604,7 +1606,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint) ereport(ERROR, (errcode(ERRCODE_CHECK_VIOLATION), errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint", - NameStr(tupdesc->attrs[attnum - 1]->attname), + NameStr(tupdesc->attrs[attnum - 1]->attname), RelationGetRelationName(testrel)))); } @@ -2078,9 +2080,9 @@ AlterTypeOwner(List *names, AclId newOwnerSysId) typTup = (Form_pg_type) GETSTRUCT(tup); /* - * If it's a composite type, we need to check that it really is a - * free-standing composite type, and not a table's underlying type. - * We want people to use ALTER TABLE not ALTER TYPE for that case. + * If it's a composite type, we need to check that it really is a + * free-standing composite type, and not a table's underlying type. We + * want people to use ALTER TABLE not ALTER TYPE for that case. */ if (typTup->typtype == 'c' && get_rel_relkind(typTup->typrelid) != 'c') ereport(ERROR, @@ -2088,7 +2090,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId) errmsg("\"%s\" is a table's row type", TypeNameToString(typename)))); - /* + /* * If the new owner is the same as the existing owner, consider the * command to have succeeded. This is for dump restoration purposes. 
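Both AlterTypeOwner() here and ATExecChangeOwner() earlier treat "new owner equals current owner" as an immediate success, so a dump restore that replays the command does not need superuser rights. That control flow compressed into a few lines (is_superuser(), alter_owner(), and the numeric ids are placeholders of the sketch):

#include <stdio.h>
#include <stdbool.h>

static bool
is_superuser(void)
{
    return false;               /* pretend the caller is an ordinary user */
}

/* Return 0 on success, -1 on permission failure. */
static int
alter_owner(int *current_owner, int new_owner)
{
    /*
     * No-op changes succeed unconditionally; this is what lets a dump
     * restore replay the command without special privileges.
     */
    if (*current_owner == new_owner)
        return 0;

    if (!is_superuser())
    {
        fprintf(stderr, "must be superuser to change owner\n");
        return -1;
    }

    *current_owner = new_owner;
    return 0;
}

int
main(void)
{
    int owner = 42;

    printf("same owner: %d\n", alter_owner(&owner, 42));   /* succeeds */
    printf("new owner:  %d\n", alter_owner(&owner, 7));    /* refused */
    return 0;
}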
*/ @@ -2100,7 +2102,10 @@ AlterTypeOwner(List *names, AclId newOwnerSysId) (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to change owner"))); - /* Modify the owner --- okay to scribble on typTup because it's a copy */ + /* + * Modify the owner --- okay to scribble on typTup because it's a + * copy + */ typTup->typowner = newOwnerSysId; simple_heap_update(rel, &tup->t_self, tup); diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 8e637367b37..e365f946b17 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.143 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.144 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -46,10 +46,10 @@ extern bool Password_encryption; /* * The need-to-update-files flags are a pair of TransactionIds that show what - * level of the transaction tree requested the update. To register an update, + * level of the transaction tree requested the update. To register an update, * the transaction saves its own TransactionId in the flag, unless the value * was already set to a valid TransactionId. If it aborts and the value is its - * TransactionId, it resets the value to InvalidTransactionId. If it commits, + * TransactionId, it resets the value to InvalidTransactionId. If it commits, * it changes the value to its parent's TransactionId. This way the value is * propagated up to the topmost transaction, which will update the files if a * valid TransactionId is detected. @@ -169,7 +169,7 @@ write_group_file(Relation grel) if (fp == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to temporary file \"%s\": %m", tempname))); + errmsg("could not write to temporary file \"%s\": %m", tempname))); /* * Read pg_group and write the file. Note we use SnapshotSelf to @@ -316,7 +316,7 @@ write_user_file(Relation urel) if (fp == NULL) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to temporary file \"%s\": %m", tempname))); + errmsg("could not write to temporary file \"%s\": %m", tempname))); /* * Read pg_shadow and write the file. 
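The user.c header comment above spells out a small protocol for the flat-file-update flag: a subtransaction stamps its own TransactionId unless one is already set, clears the flag on abort if it still holds its own xid, and rewrites it to the parent's xid on commit, so the request bubbles up to the top-level transaction. The same protocol as a standalone sketch (Xid, InvalidXid, and the three functions are invented names):

#include <stdio.h>

#define InvalidXid 0u

typedef unsigned int Xid;

static Xid user_file_update_xid = InvalidXid;

static void
register_update(Xid my_xid)
{
    /* Only the first requester in the tree stamps the flag. */
    if (user_file_update_xid == InvalidXid)
        user_file_update_xid = my_xid;
}

static void
on_subxact_abort(Xid my_xid)
{
    /* The request dies with the subtransaction that made it. */
    if (user_file_update_xid == my_xid)
        user_file_update_xid = InvalidXid;
}

static void
on_subxact_commit(Xid my_xid, Xid parent_xid)
{
    /* Hand the request up to the parent, which now owns it. */
    if (user_file_update_xid == my_xid)
        user_file_update_xid = parent_xid;
}

int
main(void)
{
    register_update(501);           /* subxact 501 alters a user */
    on_subxact_commit(501, 500);    /* 501 commits into parent 500 */
    on_subxact_abort(502);          /* unrelated abort changes nothing */
    printf("flag now owned by xid %u\n", user_file_update_xid);
    return 0;
}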
Note we use SnapshotSelf to @@ -1009,7 +1009,7 @@ AlterUserSet(AlterUserSetStmt *stmt) errmsg("user \"%s\" does not exist", stmt->user))); if (!(superuser() || - ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId())) + ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId())) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("permission denied"))); @@ -1216,14 +1216,14 @@ RenameUser(const char *oldname, const char *newname) char repl_null[Natts_pg_shadow]; char repl_repl[Natts_pg_shadow]; int i; - + /* ExclusiveLock because we need to update the password file */ rel = heap_openr(ShadowRelationName, ExclusiveLock); dsc = RelationGetDescr(rel); oldtuple = SearchSysCache(SHADOWNAME, - CStringGetDatum(oldname), - 0, 0, 0); + CStringGetDatum(oldname), + 0, 0, 0); if (!HeapTupleIsValid(oldtuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), @@ -1259,7 +1259,7 @@ RenameUser(const char *oldname, const char *newname) repl_repl[Anum_pg_shadow_usename - 1] = 'r'; repl_val[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein, - CStringGetDatum(newname)); + CStringGetDatum(newname)); repl_null[Anum_pg_shadow_usename - 1] = ' '; datum = heap_getattr(oldtuple, Anum_pg_shadow_passwd, dsc, &isnull); @@ -1269,14 +1269,14 @@ RenameUser(const char *oldname, const char *newname) /* MD5 uses the username as salt, so just clear it on a rename */ repl_repl[Anum_pg_shadow_passwd - 1] = 'r'; repl_null[Anum_pg_shadow_passwd - 1] = 'n'; - + ereport(NOTICE, - (errmsg("MD5 password cleared because of user rename"))); + (errmsg("MD5 password cleared because of user rename"))); } - + newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl); simple_heap_update(rel, &oldtuple->t_self, newtuple); - + CatalogUpdateIndexes(rel, newtuple); ReleaseSysCache(oldtuple); diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index 1a1cb2393f6..67c1c02b6d2 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -13,7 +13,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.288 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.289 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -106,7 +106,7 @@ typedef struct VRelStats * As these variables always appear together, we put them into one struct * and pull initialization and cleanup into separate routines. * ExecContext is used by repair_frag() and move_xxx_tuple(). More - * accurately: It is *used* only in move_xxx_tuple(), but because this + * accurately: It is *used* only in move_xxx_tuple(), but because this * routine is called many times, we initialize the struct just once in * repair_frag() and pass it on to move_xxx_tuple(). 
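The ExecContext comment above explains that the struct exists so repair_frag() can do the expensive executor setup once and hand the result to move_chain_tuple()/move_plain_tuple(), which run in a tight loop. The shape of that init-once / reuse-many pattern with all executor detail stripped out (WorkContext and the arithmetic are invented for the sketch):

#include <stdio.h>

typedef struct WorkContext
{
    int  scale;          /* stands in for estate, resultRelInfo, slot, ... */
    long calls;
} WorkContext;

static void
context_init(WorkContext *ctx, int scale)
{
    ctx->scale = scale;          /* expensive setup happens once, here */
    ctx->calls = 0;
}

/* Hot helper: gets everything it needs from the prebuilt context. */
static long
move_one(WorkContext *ctx, long value)
{
    ctx->calls++;
    return value * ctx->scale;
}

static void
context_finish(WorkContext *ctx)
{
    printf("helper was called %ld times\n", ctx->calls);
}

int
main(void)
{
    WorkContext ctx;
    long        sum = 0;

    context_init(&ctx, 3);
    for (long i = 0; i < 1000; i++)
        sum += move_one(&ctx, i);
    context_finish(&ctx);
    printf("sum = %ld\n", sum);
    return 0;
}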
*/ @@ -131,9 +131,9 @@ ExecContext_Init(ExecContext ec, Relation rel) ec->estate = CreateExecutorState(); ec->resultRelInfo = makeNode(ResultRelInfo); - ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ + ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */ ec->resultRelInfo->ri_RelationDesc = rel; - ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */ + ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */ ExecOpenIndices(ec->resultRelInfo); @@ -154,6 +154,7 @@ ExecContext_Finish(ExecContext ec) ExecCloseIndices(ec->resultRelInfo); FreeExecutorState(ec->estate); } + /* * End of ExecContext Implementation *---------------------------------------------------------------------- @@ -182,16 +183,16 @@ static void repair_frag(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages, VacPageList fraged_pages, int nindexes, Relation *Irel); static void move_chain_tuple(Relation rel, - Buffer old_buf, Page old_page, HeapTuple old_tup, - Buffer dst_buf, Page dst_page, VacPage dst_vacpage, - ExecContext ec, ItemPointer ctid, bool cleanVpd); + Buffer old_buf, Page old_page, HeapTuple old_tup, + Buffer dst_buf, Page dst_page, VacPage dst_vacpage, + ExecContext ec, ItemPointer ctid, bool cleanVpd); static void move_plain_tuple(Relation rel, - Buffer old_buf, Page old_page, HeapTuple old_tup, - Buffer dst_buf, Page dst_page, VacPage dst_vacpage, - ExecContext ec); + Buffer old_buf, Page old_page, HeapTuple old_tup, + Buffer dst_buf, Page dst_page, VacPage dst_vacpage, + ExecContext ec); static void update_hint_bits(Relation rel, VacPageList fraged_pages, - int num_fraged_pages, BlockNumber last_move_dest_block, - int num_moved); + int num_fraged_pages, BlockNumber last_move_dest_block, + int num_moved); static void vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacpagelist); static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage); @@ -248,11 +249,11 @@ vacuum(VacuumStmt *vacstmt) * Furthermore, the forced commit that occurs before truncating the * relation's file would have the effect of committing the rest of the * user's transaction too, which would certainly not be the desired - * behavior. (This only applies to VACUUM FULL, though. We could - * in theory run lazy VACUUM inside a transaction block, but we choose - * to disallow that case because we'd rather commit as soon as possible - * after finishing the vacuum. This is mainly so that we can let go the - * AccessExclusiveLock that we may be holding.) + * behavior. (This only applies to VACUUM FULL, though. We could in + * theory run lazy VACUUM inside a transaction block, but we choose to + * disallow that case because we'd rather commit as soon as possible + * after finishing the vacuum. This is mainly so that we can let go + * the AccessExclusiveLock that we may be holding.) * * ANALYZE (without VACUUM) can run either way. */ @@ -262,9 +263,7 @@ vacuum(VacuumStmt *vacstmt) in_outer_xact = false; } else - { in_outer_xact = IsInTransactionChain((void *) vacstmt); - } /* * Send info about dead objects to the statistics collector @@ -296,22 +295,21 @@ vacuum(VacuumStmt *vacstmt) /* * It's a database-wide VACUUM. * - * Compute the initially applicable OldestXmin and FreezeLimit - * XIDs, so that we can record these values at the end of the - * VACUUM. Note that individual tables may well be processed - * with newer values, but we can guarantee that no - * (non-shared) relations are processed with older ones. 
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs, + * so that we can record these values at the end of the VACUUM. + * Note that individual tables may well be processed with newer + * values, but we can guarantee that no (non-shared) relations are + * processed with older ones. * - * It is okay to record non-shared values in pg_database, even - * though we may vacuum shared relations with older cutoffs, - * because only the minimum of the values present in - * pg_database matters. We can be sure that shared relations - * have at some time been vacuumed with cutoffs no worse than - * the global minimum; for, if there is a backend in some - * other DB with xmin = OLDXMIN that's determining the cutoff - * with which we vacuum shared relations, it is not possible - * for that database to have a cutoff newer than OLDXMIN - * recorded in pg_database. + * It is okay to record non-shared values in pg_database, even though + * we may vacuum shared relations with older cutoffs, because only + * the minimum of the values present in pg_database matters. We + * can be sure that shared relations have at some time been + * vacuumed with cutoffs no worse than the global minimum; for, if + * there is a backend in some other DB with xmin = OLDXMIN that's + * determining the cutoff with which we vacuum shared relations, + * it is not possible for that database to have a cutoff newer + * than OLDXMIN recorded in pg_database. */ vacuum_set_xid_limits(vacstmt, false, &initialOldestXmin, @@ -321,8 +319,8 @@ vacuum(VacuumStmt *vacstmt) /* * Decide whether we need to start/commit our own transactions. * - * For VACUUM (with or without ANALYZE): always do so, so that we - * can release locks as soon as possible. (We could possibly use the + * For VACUUM (with or without ANALYZE): always do so, so that we can + * release locks as soon as possible. (We could possibly use the * outer transaction for a one-table VACUUM, but handling TOAST tables * would be problematic.) * @@ -333,9 +331,7 @@ vacuum(VacuumStmt *vacstmt) * locks sooner. */ if (vacstmt->vacuum) - { use_own_xacts = true; - } else { Assert(vacstmt->analyze); @@ -359,10 +355,10 @@ vacuum(VacuumStmt *vacstmt) ALLOCSET_DEFAULT_MAXSIZE); /* - * vacuum_rel expects to be entered with no transaction active; it will - * start and commit its own transaction. But we are called by an SQL - * command, and so we are executing inside a transaction already. We - * commit the transaction started in PostgresMain() here, and start + * vacuum_rel expects to be entered with no transaction active; it + * will start and commit its own transaction. But we are called by an + * SQL command, and so we are executing inside a transaction already. + * We commit the transaction started in PostgresMain() here, and start * another one before exiting to match the commit waiting for us back * in PostgresMain(). */ @@ -390,24 +386,24 @@ vacuum(VacuumStmt *vacstmt) if (vacstmt->vacuum) { if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION)) - all_rels = false; /* forget about updating dbstats */ + all_rels = false; /* forget about updating dbstats */ } if (vacstmt->analyze) { MemoryContext old_context = NULL; /* - * If using separate xacts, start one for analyze. Otherwise, - * we can use the outer transaction, but we still need to call - * analyze_rel in a memory context that will be cleaned up on - * return (else we leak memory while processing multiple - * tables). + * If using separate xacts, start one for analyze. 
+ * Otherwise, we can use the outer transaction, but we + * still need to call analyze_rel in a memory context that + * will be cleaned up on return (else we leak memory while + * processing multiple tables). */ if (use_own_xacts) { StartTransactionCommand(); - SetQuerySnapshot(); /* might be needed for functions - * in indexes */ + SetQuerySnapshot(); /* might be needed for functions + * in indexes */ } else old_context = MemoryContextSwitchTo(anl_context); @@ -873,8 +869,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) * indexes */ /* - * Tell the cache replacement strategy that vacuum is causing - * all following IO + * Tell the cache replacement strategy that vacuum is causing all + * following IO */ StrategyHintVacuum(true); @@ -932,9 +928,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) } /* - * Check that it's a plain table; we used to do this in - * get_rel_oids() but seems safer to check after we've locked the - * relation. + * Check that it's a plain table; we used to do this in get_rel_oids() + * but seems safer to check after we've locked the relation. */ if (onerel->rd_rel->relkind != expected_relkind) { @@ -1201,7 +1196,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, if (PageIsNew(page)) { - VacPage vacpagecopy; + VacPage vacpagecopy; ereport(WARNING, (errmsg("relation \"%s\" page %u is uninitialized --- fixing", @@ -1220,7 +1215,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, if (PageIsEmpty(page)) { - VacPage vacpagecopy; + VacPage vacpagecopy; vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower; free_space += vacpage->free; @@ -1424,7 +1419,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, if (do_reap || do_frag) { - VacPage vacpagecopy = copy_vac_page(vacpage); + VacPage vacpagecopy = copy_vac_page(vacpage); + if (do_reap) vpage_insert(vacuum_pages, vacpagecopy); if (do_frag) @@ -1504,9 +1500,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, RelationGetRelationName(onerel), tups_vacuumed, num_tuples, nblocks), errdetail("%.0f dead row versions cannot be removed yet.\n" - "Nonremovable row versions range from %lu to %lu bytes long.\n" + "Nonremovable row versions range from %lu to %lu bytes long.\n" "There were %.0f unused item pointers.\n" - "Total free space (including removable row versions) is %.0f bytes.\n" + "Total free space (including removable row versions) is %.0f bytes.\n" "%u pages are or will become empty, including %u at the end of the table.\n" "%u pages containing %.0f free bytes are potential move destinations.\n" "%s", @@ -1544,7 +1540,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, BlockNumber last_move_dest_block = 0, last_vacuum_block; Page dst_page = NULL; - ExecContextData ec; + ExecContextData ec; VacPageListData Nvacpagelist; VacPage dst_vacpage = NULL, last_vacuum_page, @@ -1595,13 +1591,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, blkno > last_move_dest_block; blkno--) { - Buffer buf; - Page page; - OffsetNumber offnum, - maxoff; - bool isempty, - dowrite, - chain_tuple_moved; + Buffer buf; + Page page; + OffsetNumber offnum, + maxoff; + bool isempty, + dowrite, + chain_tuple_moved; vacuum_delay_point(); @@ -1678,9 +1674,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - Size tuple_len; - HeapTupleData tuple; - ItemId itemid = PageGetItemId(page, offnum); + Size tuple_len; + HeapTupleData tuple; + ItemId itemid = PageGetItemId(page, offnum); if 
(!ItemIdIsUsed(itemid)) continue; @@ -1693,29 +1689,29 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* * VACUUM FULL has an exclusive lock on the relation. So * normally no other transaction can have pending INSERTs or - * DELETEs in this relation. A tuple is either - * (a) a tuple in a system catalog, inserted or deleted by - * a not yet committed transaction or - * (b) dead (XMIN_INVALID or XMAX_COMMITTED) or - * (c) inserted by a committed xact (XMIN_COMMITTED) or - * (d) moved by the currently running VACUUM. - * In case (a) we wouldn't be in repair_frag() at all. + * DELETEs in this relation. A tuple is either (a) a tuple in + * a system catalog, inserted or deleted by a not yet + * committed transaction or (b) dead (XMIN_INVALID or + * XMAX_COMMITTED) or (c) inserted by a committed xact + * (XMIN_COMMITTED) or (d) moved by the currently running + * VACUUM. In case (a) we wouldn't be in repair_frag() at all. * In case (b) we cannot be here, because scan_heap() has - * already marked the item as unused, see continue above. - * Case (c) is what normally is to be expected. - * Case (d) is only possible, if a whole tuple chain has been - * moved while processing this or a higher numbered block. + * already marked the item as unused, see continue above. Case + * (c) is what normally is to be expected. Case (d) is only + * possible, if a whole tuple chain has been moved while + * processing this or a higher numbered block. */ if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)) { /* - * There cannot be another concurrently running VACUUM. If - * the tuple had been moved in by a previous VACUUM, the - * visibility check would have set XMIN_COMMITTED. If the - * tuple had been moved in by the currently running VACUUM, - * the loop would have been terminated. We had + * There cannot be another concurrently running VACUUM. + * If the tuple had been moved in by a previous VACUUM, + * the visibility check would have set XMIN_COMMITTED. If + * the tuple had been moved in by the currently running + * VACUUM, the loop would have been terminated. We had * elog(ERROR, ...) here, but as we are testing for a - * can't-happen condition, Assert() seems more appropriate. + * can't-happen condition, Assert() seems more + * appropriate. */ Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN)); @@ -1725,6 +1721,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, * moved while cleaning this page or some previous one. */ Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF); + /* * MOVED_OFF by another VACUUM would have caused the * visibility check to set XMIN_COMMITTED or XMIN_INVALID. @@ -1734,16 +1731,15 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, /* Can't we Assert(keep_tuples > 0) here? 
*/ if (keep_tuples == 0) continue; - if (chain_tuple_moved) /* some chains was moved - * while */ - { /* cleaning this page */ + if (chain_tuple_moved) /* some chains was moved while */ + { /* cleaning this page */ Assert(vacpage->offsets_free > 0); for (i = 0; i < vacpage->offsets_free; i++) { if (vacpage->offsets[i] == offnum) break; } - if (i >= vacpage->offsets_free) /* not found */ + if (i >= vacpage->offsets_free) /* not found */ { vacpage->offsets[vacpage->offsets_free++] = offnum; keep_tuples--; @@ -2128,18 +2124,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, off <= maxoff; off = OffsetNumberNext(off)) { - ItemId itemid = PageGetItemId(page, off); - HeapTupleHeader htup; + ItemId itemid = PageGetItemId(page, off); + HeapTupleHeader htup; if (!ItemIdIsUsed(itemid)) continue; htup = (HeapTupleHeader) PageGetItem(page, itemid); if (htup->t_infomask & HEAP_XMIN_COMMITTED) continue; + /* - ** See comments in the walk-along-page loop above, why we - ** have Asserts here instead of if (...) elog(ERROR). - */ + * * See comments in the walk-along-page loop above, why + * we * have Asserts here instead of if (...) elog(ERROR). + */ Assert(!(htup->t_infomask & HEAP_MOVED_IN)); Assert(htup->t_infomask & HEAP_MOVED_OFF); Assert(HeapTupleHeaderGetXvac(htup) == myXID); @@ -2152,7 +2149,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, if (vacpage->offsets[i] == off) break; } - if (i >= vacpage->offsets_free) /* not found */ + if (i >= vacpage->offsets_free) /* not found */ { vacpage->offsets[vacpage->offsets_free++] = off; Assert(keep_tuples > 0); @@ -2247,7 +2244,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, */ update_hint_bits(onerel, fraged_pages, num_fraged_pages, last_move_dest_block, num_moved); - + /* * It'd be cleaner to make this report at the bottom of this routine, * but then the rusage would double-count the second pass of index @@ -2255,11 +2252,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, * processing that occurs below. */ ereport(elevel, - (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", - RelationGetRelationName(onerel), - num_moved, nblocks, blkno), - errdetail("%s", - vac_show_rusage(&ru0)))); + (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", + RelationGetRelationName(onerel), + num_moved, nblocks, blkno), + errdetail("%s", + vac_show_rusage(&ru0)))); /* * Reflect the motion of system tuples to catalog cache here. 
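The repair_frag() hunks above revolve around testing and updating t_infomask flag bits (HEAP_XMIN_COMMITTED, HEAP_MOVED_OFF, HEAP_MOVED_IN) and asserting the expected state instead of raising an error. A self-contained sketch of that style of flag handling, with invented flag values rather than the real heapam definitions.

    #include <stdio.h>
    #include <stdint.h>

    /* Invented stand-ins for the real HEAP_* infomask bits. */
    #define XMIN_COMMITTED  0x0001
    #define XMIN_INVALID    0x0002
    #define MOVED_OFF       0x0004
    #define MOVED_IN        0x0008

    int
    main(void)
    {
        uint16_t    infomask = XMIN_COMMITTED;

        /*
         * Mark the tuple as moved off by the current vacuum: clear the
         * visibility hint bits and set MOVED_OFF, mirroring the pattern
         * used by the tuple-moving routines in the diff.
         */
        infomask &= ~(XMIN_COMMITTED | XMIN_INVALID | MOVED_IN);
        infomask |= MOVED_OFF;

        /*
         * The second-pass loops then treat anything else as a
         * can't-happen condition, hence Asserts rather than elog(ERROR).
         */
        if (!(infomask & MOVED_IN) && (infomask & MOVED_OFF))
            printf("tuple flagged as moved off (infomask = 0x%04x)\n",
                   (unsigned) infomask);
        return 0;
    }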
@@ -2284,6 +2281,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, *vpleft = *vpright; *vpright = vpsave; } + /* * keep_tuples is the number of tuples that have been moved * off a page during chain moves but not been scanned over @@ -2301,13 +2299,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, if (vacpage->blkno == (blkno - 1) && vacpage->offsets_free > 0) { - Buffer buf; - Page page; - OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)]; - OffsetNumber offnum, - maxoff; - int uncnt; - int num_tuples = 0; + Buffer buf; + Page page; + OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)]; + OffsetNumber offnum, + maxoff; + int uncnt; + int num_tuples = 0; buf = ReadBuffer(onerel, vacpage->blkno); LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE); @@ -2317,7 +2315,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, offnum <= maxoff; offnum = OffsetNumberNext(offnum)) { - ItemId itemid = PageGetItemId(page, offnum); + ItemId itemid = PageGetItemId(page, offnum); HeapTupleHeader htup; if (!ItemIdIsUsed(itemid)) @@ -2327,9 +2325,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, continue; /* - ** See comments in the walk-along-page loop above, why we - ** have Asserts here instead of if (...) elog(ERROR). - */ + * * See comments in the walk-along-page loop above, why + * we * have Asserts here instead of if (...) elog(ERROR). + */ Assert(!(htup->t_infomask & HEAP_MOVED_IN)); Assert(htup->t_infomask & HEAP_MOVED_OFF); Assert(HeapTupleHeaderGetXvac(htup) == myXID); @@ -2418,10 +2416,10 @@ move_chain_tuple(Relation rel, ExecContext ec, ItemPointer ctid, bool cleanVpd) { TransactionId myXID = GetCurrentTransactionId(); - HeapTupleData newtup; - OffsetNumber newoff; - ItemId newitemid; - Size tuple_len = old_tup->t_len; + HeapTupleData newtup; + OffsetNumber newoff; + ItemId newitemid; + Size tuple_len = old_tup->t_len; heap_copytuple_with_tuple(old_tup, &newtup); @@ -2434,36 +2432,32 @@ move_chain_tuple(Relation rel, START_CRIT_SECTION(); old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | - HEAP_XMIN_INVALID | - HEAP_MOVED_IN); + HEAP_XMIN_INVALID | + HEAP_MOVED_IN); old_tup->t_data->t_infomask |= HEAP_MOVED_OFF; HeapTupleHeaderSetXvac(old_tup->t_data, myXID); /* * If this page was not used before - clean it. * - * NOTE: a nasty bug used to lurk here. It is possible - * for the source and destination pages to be the same - * (since this tuple-chain member can be on a page - * lower than the one we're currently processing in - * the outer loop). If that's true, then after - * vacuum_page() the source tuple will have been - * moved, and tuple.t_data will be pointing at - * garbage. Therefore we must do everything that uses + * NOTE: a nasty bug used to lurk here. It is possible for the source + * and destination pages to be the same (since this tuple-chain member + * can be on a page lower than the one we're currently processing in + * the outer loop). If that's true, then after vacuum_page() the + * source tuple will have been moved, and tuple.t_data will be + * pointing at garbage. Therefore we must do everything that uses * old_tup->t_data BEFORE this step!! * - * This path is different from the other callers of - * vacuum_page, because we have already incremented - * the vacpage's offsets_used field to account for the - * tuple(s) we expect to move onto the page. Therefore - * vacuum_page's check for offsets_used == 0 is wrong. - * But since that's a good debugging check for all - * other callers, we work around it here rather than - * remove it. 
+ * This path is different from the other callers of vacuum_page, because + * we have already incremented the vacpage's offsets_used field to + * account for the tuple(s) we expect to move onto the page. Therefore + * vacuum_page's check for offsets_used == 0 is wrong. But since + * that's a good debugging check for all other callers, we work around + * it here rather than remove it. */ if (!PageIsEmpty(dst_page) && cleanVpd) { - int sv_offsets_used = dst_vacpage->offsets_used; + int sv_offsets_used = dst_vacpage->offsets_used; dst_vacpage->offsets_used = 0; vacuum_page(rel, dst_buf, dst_vacpage); @@ -2471,8 +2465,8 @@ move_chain_tuple(Relation rel, } /* - * Update the state of the copied tuple, and store it - * on the destination page. + * Update the state of the copied tuple, and store it on the + * destination page. */ newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | @@ -2484,7 +2478,7 @@ move_chain_tuple(Relation rel, if (newoff == InvalidOffsetNumber) { elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain", - (unsigned long) tuple_len, dst_vacpage->blkno); + (unsigned long) tuple_len, dst_vacpage->blkno); } newitemid = PageGetItemId(dst_page, newoff); pfree(newtup.t_data); @@ -2509,8 +2503,7 @@ move_chain_tuple(Relation rel, else { /* - * No XLOG record, but still need to flag that XID - * exists on disk + * No XLOG record, but still need to flag that XID exists on disk */ MyXactMadeTempRelUpdate = true; } @@ -2518,9 +2511,8 @@ move_chain_tuple(Relation rel, END_CRIT_SECTION(); /* - * Set new tuple's t_ctid pointing to itself for last - * tuple in chain, and to next tuple in chain - * otherwise. + * Set new tuple's t_ctid pointing to itself for last tuple in chain, + * and to next tuple in chain otherwise. */ /* Is this ok after log_heap_move() and END_CRIT_SECTION()? */ if (!ItemPointerIsValid(ctid)) @@ -2559,10 +2551,10 @@ move_plain_tuple(Relation rel, ExecContext ec) { TransactionId myXID = GetCurrentTransactionId(); - HeapTupleData newtup; - OffsetNumber newoff; - ItemId newitemid; - Size tuple_len = old_tup->t_len; + HeapTupleData newtup; + OffsetNumber newoff; + ItemId newitemid; + Size tuple_len = old_tup->t_len; /* copy tuple */ heap_copytuple_with_tuple(old_tup, &newtup); @@ -2570,9 +2562,9 @@ move_plain_tuple(Relation rel, /* * register invalidation of source tuple in catcaches. * - * (Note: we do not need to register the copied tuple, because we - * are not changing the tuple contents and so there cannot be - * any need to flush negative catcache entries.) + * (Note: we do not need to register the copied tuple, because we are not + * changing the tuple contents and so there cannot be any need to + * flush negative catcache entries.) */ CacheInvalidateHeapTuple(rel, old_tup); @@ -2609,8 +2601,8 @@ move_plain_tuple(Relation rel, * Mark old tuple as MOVED_OFF by me. 
*/ old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | - HEAP_XMIN_INVALID | - HEAP_MOVED_IN); + HEAP_XMIN_INVALID | + HEAP_MOVED_IN); old_tup->t_data->t_infomask |= HEAP_MOVED_OFF; HeapTupleHeaderSetXvac(old_tup->t_data, myXID); @@ -2628,8 +2620,7 @@ move_plain_tuple(Relation rel, else { /* - * No XLOG record, but still need to flag that XID exists - * on disk + * No XLOG record, but still need to flag that XID exists on disk */ MyXactMadeTempRelUpdate = true; } @@ -2637,7 +2628,7 @@ move_plain_tuple(Relation rel, END_CRIT_SECTION(); dst_vacpage->free = ((PageHeader) dst_page)->pd_upper - - ((PageHeader) dst_page)->pd_lower; + ((PageHeader) dst_page)->pd_lower; LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK); LockBuffer(old_buf, BUFFER_LOCK_UNLOCK); @@ -2670,17 +2661,17 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages, { int checked_moved = 0; int i; - VacPage *curpage; + VacPage *curpage; for (i = 0, curpage = fraged_pages->pagedesc; i < num_fraged_pages; i++, curpage++) { - Buffer buf; - Page page; - OffsetNumber max_offset; - OffsetNumber off; - int num_tuples = 0; + Buffer buf; + Page page; + OffsetNumber max_offset; + OffsetNumber off; + int num_tuples = 0; vacuum_delay_point(); @@ -2696,17 +2687,18 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages, off <= max_offset; off = OffsetNumberNext(off)) { - ItemId itemid = PageGetItemId(page, off); - HeapTupleHeader htup; + ItemId itemid = PageGetItemId(page, off); + HeapTupleHeader htup; if (!ItemIdIsUsed(itemid)) continue; htup = (HeapTupleHeader) PageGetItem(page, itemid); if (htup->t_infomask & HEAP_XMIN_COMMITTED) continue; + /* - * See comments in the walk-along-page loop above, why we - * have Asserts here instead of if (...) elog(ERROR). The + * See comments in the walk-along-page loop above, why we have + * Asserts here instead of if (...) elog(ERROR). The * difference here is that we may see MOVED_IN. */ Assert(htup->t_infomask & HEAP_MOVED); @@ -2865,14 +2857,14 @@ scan_index(Relation indrel, double num_tuples) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%u index pages have been deleted, %u are currently reusable.\n" + "%s", + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); /* * Check for tuple count mismatch. 
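Several hunks in this excerpt recompute a page's free space as pd_upper - pd_lower (for example, the dst_vacpage->free assignment just above). A minimal sketch of that bookkeeping with a simplified page header; the field names come from the diff, the sizes are made up.

    #include <stdio.h>
    #include <stdint.h>

    #define BLOCK_SIZE 8192

    /*
     * Simplified page header: line pointers grow up from pd_lower,
     * tuple data grows down from pd_upper, and the gap between them
     * is the page's free space.
     */
    typedef struct
    {
        uint16_t    pd_lower;   /* end of line-pointer array */
        uint16_t    pd_upper;   /* start of tuple data */
    } ToyPageHeader;

    int
    main(void)
    {
        ToyPageHeader page;
        unsigned int free_space;

        page.pd_lower = 64;
        page.pd_upper = 7200;

        /* same arithmetic as "free = pd_upper - pd_lower" in the hunks */
        free_space = page.pd_upper - page.pd_lower;

        printf("free space on page: %u of %u bytes\n", free_space, BLOCK_SIZE);
        return 0;
    }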
If the index is partial, then it's @@ -2932,16 +2924,16 @@ vacuum_index(VacPageList vacpagelist, Relation indrel, false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%.0f index row versions were removed.\n" + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%.0f index row versions were removed.\n" "%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->tuples_removed, - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + "%s", + stats->tuples_removed, + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); /* * Check for tuple count mismatch. If the index is partial, then it's @@ -3370,7 +3362,7 @@ vacuum_delay_point(void) if (VacuumCostActive && !InterruptPending && VacuumCostBalance >= VacuumCostLimit) { - int msec; + int msec; msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit; if (msec > VacuumCostDelay * 4) diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index bfd41beec55..f19001d6796 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -31,7 +31,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.44 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.45 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -594,14 +594,14 @@ lazy_scan_index(Relation indrel, LVRelStats *vacrelstats) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%u index pages have been deleted, %u are currently reusable.\n" + "%s", + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); pfree(stats); } @@ -654,16 +654,16 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats) false); ereport(elevel, - (errmsg("index \"%s\" now contains %.0f row versions in %u pages", - RelationGetRelationName(indrel), - stats->num_index_tuples, - stats->num_pages), - errdetail("%.0f index row versions were removed.\n" + (errmsg("index \"%s\" now contains %.0f row versions in %u pages", + RelationGetRelationName(indrel), + stats->num_index_tuples, + stats->num_pages), + errdetail("%.0f index row versions were removed.\n" "%u index pages have been deleted, %u are currently reusable.\n" - "%s", - stats->tuples_removed, - stats->pages_deleted, stats->pages_free, - vac_show_rusage(&ru0)))); + "%s", + stats->tuples_removed, + stats->pages_deleted, stats->pages_free, + vac_show_rusage(&ru0)))); pfree(stats); } diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c index 8c962c5206c..cb4a3cde717 100644 --- a/src/backend/commands/variable.c +++ b/src/backend/commands/variable.c @@ -9,7 +9,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.100 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: 
pgsql/src/backend/commands/variable.c,v 1.101 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,7 +62,7 @@ assign_datestyle(const char *value, bool doit, GucSource source) if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid list syntax for parameter \"datestyle\""))); + errmsg("invalid list syntax for parameter \"datestyle\""))); return NULL; } @@ -148,8 +148,8 @@ assign_datestyle(const char *value, bool doit, GucSource source) if (source >= PGC_S_INTERACTIVE) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("unrecognized \"datestyle\" key word: \"%s\"", - tok))); + errmsg("unrecognized \"datestyle\" key word: \"%s\"", + tok))); ok = false; break; } @@ -314,9 +314,10 @@ assign_timezone(const char *value, bool doit, GucSource source) * * During GUC initialization, since the timezone library isn't * set up yet, pg_get_current_timezone will return NULL and we - * will leave the setting as UNKNOWN. If this isn't overridden - * from the config file then pg_timezone_initialize() will - * eventually select a default value from the environment. + * will leave the setting as UNKNOWN. If this isn't + * overridden from the config file then + * pg_timezone_initialize() will eventually select a default + * value from the environment. */ const char *curzone = pg_get_current_timezone(); @@ -329,13 +330,14 @@ assign_timezone(const char *value, bool doit, GucSource source) * Otherwise assume it is a timezone name. * * We have to actually apply the change before we can have any - * hope of checking it. So, save the old value in case we have - * to back out. We have to copy since pg_get_current_timezone - * returns a pointer to its static state. + * hope of checking it. So, save the old value in case we + * have to back out. We have to copy since + * pg_get_current_timezone returns a pointer to its static + * state. * - * This would all get a lot simpler if the TZ library had a better - * API that would let us look up and test a timezone name without - * making it the default. + * This would all get a lot simpler if the TZ library had a + * better API that would let us look up and test a timezone + * name without making it the default. */ const char *cur_tz; char *save_tz; @@ -368,22 +370,23 @@ assign_timezone(const char *value, bool doit, GucSource source) else { /* - * TZ library wasn't initialized yet. Annoyingly, we will - * come here during startup because guc-file.l checks - * the value with doit = false before actually applying. - * The best approach seems to be as follows: + * TZ library wasn't initialized yet. Annoyingly, we + * will come here during startup because guc-file.l + * checks the value with doit = false before actually + * applying. The best approach seems to be as follows: * * 1. known && acceptable: leave the setting in place, * since we'll apply it soon anyway. This is mainly - * so that any log messages printed during this interval - * are timestamped with the user's requested timezone. + * so that any log messages printed during this + * interval are timestamped with the user's requested + * timezone. * - * 2. known && !acceptable: revert to GMT for lack of - * any better idea. (select_default_timezone() may get + * 2. known && !acceptable: revert to GMT for lack of any + * better idea. (select_default_timezone() may get * called later to undo this.) * - * 3. !known: no need to do anything since TZ library - * did not change its state. 
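The vacuum_delay_point() hunk from vacuum.c earlier in this excerpt naps for VacuumCostDelay * VacuumCostBalance / VacuumCostLimit milliseconds, capped at four times the base delay. A standalone sketch of that arithmetic, with the variable names copied from the diff and the values invented; it is an illustration, not the backend's actual code path.

    #include <stdio.h>

    /* Hypothetical settings mirroring the cost-delay GUC variables. */
    static int  VacuumCostDelay = 10;    /* base delay in ms */
    static int  VacuumCostLimit = 200;
    static int  VacuumCostBalance = 0;

    /*
     * Milliseconds to sleep once the accumulated cost balance reaches
     * the limit, clamped as in vacuum_delay_point().
     */
    static int
    cost_based_nap(void)
    {
        int         msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit;

        if (msec > VacuumCostDelay * 4)
            msec = VacuumCostDelay * 4;
        return msec;
    }

    int
    main(void)
    {
        VacuumCostBalance = 1300;   /* pretend a lot of I/O has been charged */
        printf("nap for %d ms\n", cost_based_nap());    /* prints 40, the clamp */
        return 0;
    }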
+ * 3. !known: no need to do anything since TZ library did + * not change its state. * * Again, this should all go away sometime soon. */ @@ -441,7 +444,7 @@ assign_timezone(const char *value, bool doit, GucSource source) const char * show_timezone(void) { - const char *tzn; + const char *tzn; if (HasCTZSet) { @@ -472,14 +475,14 @@ assign_XactIsoLevel(const char *value, bool doit, GucSource source) { if (doit && source >= PGC_S_INTERACTIVE) { - if (SerializableSnapshot != NULL) - ereport(ERROR, - (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query"))); - if (IsSubTransaction()) - ereport(ERROR, - (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction"))); + if (SerializableSnapshot != NULL) + ereport(ERROR, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query"))); + if (IsSubTransaction()) + ereport(ERROR, + (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), + errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction"))); } if (strcmp(value, "serializable") == 0) @@ -596,7 +599,7 @@ assign_client_encoding(const char *value, bool doit, GucSource source) * limit on names, so we can tell whether we're being passed an initial * username or a saved/restored value. */ -extern char *session_authorization_string; /* in guc.c */ +extern char *session_authorization_string; /* in guc.c */ const char * assign_session_authorization(const char *value, bool doit, GucSource source) diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c index aafc42b1d40..abc37fcc8f0 100644 --- a/src/backend/commands/view.c +++ b/src/backend/commands/view.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.84 2004/08/29 04:12:30 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.85 2004/08/29 05:06:41 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -191,8 +191,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc) newattr->atttypmod != oldattr->atttypmod) ereport(ERROR, (errcode(ERRCODE_INVALID_TABLE_DEFINITION), - errmsg("cannot change data type of view column \"%s\"", - NameStr(oldattr->attname)))); + errmsg("cannot change data type of view column \"%s\"", + NameStr(oldattr->attname)))); /* We can ignore the remaining attributes of an attribute... */ } diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index ad1b5817c4e..6cbad491eda 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -6,7 +6,7 @@ * Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * - * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.80 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.81 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -51,7 +51,7 @@ * needs access to variables of the current outer tuple. (The handling of * this parameter is currently pretty inconsistent: some callers pass NULL * and some pass down their parent's value; so don't rely on it in other - * situations. It'd probably be better to remove the whole thing and use + * situations. It'd probably be better to remove the whole thing and use * the generalized parameter mechanism instead.) 
*/ void @@ -64,7 +64,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt) /* If we have changed parameters, propagate that info */ if (node->chgParam != NULL) { - ListCell *l; + ListCell *l; foreach(l, node->initPlan) { @@ -365,19 +365,19 @@ ExecMayReturnRawTuples(PlanState *node) { /* * At a table scan node, we check whether ExecAssignScanProjectionInfo - * decided to do projection or not. Most non-scan nodes always project - * and so we can return "false" immediately. For nodes that don't - * project but just pass up input tuples, we have to recursively + * decided to do projection or not. Most non-scan nodes always + * project and so we can return "false" immediately. For nodes that + * don't project but just pass up input tuples, we have to recursively * examine the input plan node. * - * Note: Hash and Material are listed here because they sometimes - * return an original input tuple, not a copy. But Sort and SetOp - * never return an original tuple, so they can be treated like - * projecting nodes. + * Note: Hash and Material are listed here because they sometimes return + * an original input tuple, not a copy. But Sort and SetOp never + * return an original tuple, so they can be treated like projecting + * nodes. */ switch (nodeTag(node)) { - /* Table scan nodes */ + /* Table scan nodes */ case T_SeqScanState: case T_IndexScanState: case T_TidScanState: @@ -387,7 +387,7 @@ ExecMayReturnRawTuples(PlanState *node) return true; break; - /* Non-projecting nodes */ + /* Non-projecting nodes */ case T_HashState: case T_MaterialState: case T_UniqueState: @@ -395,19 +395,19 @@ ExecMayReturnRawTuples(PlanState *node) return ExecMayReturnRawTuples(node->lefttree); case T_AppendState: - { - AppendState *appendstate = (AppendState *) node; - int j; - - for (j = 0; j < appendstate->as_nplans; j++) { - if (ExecMayReturnRawTuples(appendstate->appendplans[j])) - return true; + AppendState *appendstate = (AppendState *) node; + int j; + + for (j = 0; j < appendstate->as_nplans; j++) + { + if (ExecMayReturnRawTuples(appendstate->appendplans[j])) + return true; + } + break; } - break; - } - /* All projecting node types come here */ + /* All projecting node types come here */ default: break; } diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c index 44157fc6869..e31dc7dfcbe 100644 --- a/src/backend/executor/execGrouping.c +++ b/src/backend/executor/execGrouping.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.10 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.11 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -26,8 +26,8 @@ static TupleHashTable CurTupleHashTable = NULL; static uint32 TupleHashTableHash(const void *key, Size keysize); -static int TupleHashTableMatch(const void *key1, const void *key2, - Size keysize); +static int TupleHashTableMatch(const void *key1, const void *key2, + Size keysize); /***************************************************************************** @@ -303,7 +303,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, Assert(entrysize >= sizeof(TupleHashEntryData)); hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt, - sizeof(TupleHashTableData)); + sizeof(TupleHashTableData)); hashtable->numCols = numCols; hashtable->keyColIdx = keyColIdx; @@ -321,7 +321,7 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx, hash_ctl.hcxt = tablecxt; 
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets, &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); + HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT); if (hashtable->hashtab == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), @@ -359,8 +359,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* * Set up data needed by hash and match functions * - * We save and restore CurTupleHashTable just in case someone manages - * to invoke this code re-entrantly. + * We save and restore CurTupleHashTable just in case someone manages to + * invoke this code re-entrantly. */ hashtable->tupdesc = tupdesc; saveCurHT = CurTupleHashTable; @@ -389,8 +389,8 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, /* * Zero any caller-requested space in the entry. (This zaps - * the "key data" dynahash.c copied into the new entry, but - * we don't care since we're about to overwrite it anyway.) + * the "key data" dynahash.c copied into the new entry, but we + * don't care since we're about to overwrite it anyway.) */ MemSet(entry, 0, hashtable->entrysize); @@ -414,13 +414,13 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot, * * The passed-in key is a pointer to a HeapTuple pointer -- this is either * the firstTuple field of a TupleHashEntry struct, or the key value passed - * to hash_search. We ignore the keysize. + * to hash_search. We ignore the keysize. * * CurTupleHashTable must be set before calling this, since dynahash.c * doesn't provide any API that would let us get at the hashtable otherwise. * * Also, the caller must select an appropriate memory context for running - * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.) + * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.) */ static uint32 TupleHashTableHash(const void *key, Size keysize) diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c index 43f58e036ea..c797c343d35 100644 --- a/src/backend/executor/execJunk.c +++ b/src/backend/executor/execJunk.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.42 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.43 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -274,9 +274,9 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot) * dealing with a small number of attributes. for large tuples we just * use palloc. * - * Note: we could use just one set of arrays if we were willing to - * assume that the resno mapping is monotonic... I think it is, but - * won't take the risk of breaking things right now. + * Note: we could use just one set of arrays if we were willing to assume + * that the resno mapping is monotonic... I think it is, but won't + * take the risk of breaking things right now. 
*/ if (cleanLength > 64) { @@ -309,7 +309,7 @@ ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot) */ for (i = 0; i < cleanLength; i++) { - int j = cleanMap[i] - 1; + int j = cleanMap[i] - 1; values[i] = old_values[j]; nulls[i] = old_nulls[j]; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 6e386d25292..d77bc7054a3 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -26,7 +26,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.235 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.236 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -521,8 +521,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) * Multiple result relations (due to inheritance) * parseTree->resultRelations identifies them all */ - ResultRelInfo *resultRelInfo; - ListCell *l; + ResultRelInfo *resultRelInfo; + ListCell *l; numResultRelations = list_length(resultRelations); resultRelInfos = (ResultRelInfo *) @@ -644,10 +644,10 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly) /* * Initialize the junk filter if needed. SELECT and INSERT queries * need a filter if there are any junk attrs in the tlist. INSERT and - * SELECT INTO also need a filter if the plan may return raw disk tuples - * (else heap_insert will be scribbling on the source relation!). - * UPDATE and DELETE always need a filter, since there's always a junk - * 'ctid' attribute present --- no need to look first. + * SELECT INTO also need a filter if the plan may return raw disk + * tuples (else heap_insert will be scribbling on the source + * relation!). UPDATE and DELETE always need a filter, since there's + * always a junk 'ctid' attribute present --- no need to look first. 
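ExecRemoveJunk(), shown earlier in this hunk sequence, copies only the non-junk columns into the output arrays by walking a precomputed cleanMap of 1-based source attribute numbers; the InitPlan comment above explains when such a junk filter is needed at all. A toy version of that remapping loop, with invented array contents.

    #include <stdio.h>

    int
    main(void)
    {
        /* source row; column 3 plays the role of a junk "ctid" attribute */
        const char *old_values[] = {"42", "hello", "(0,7)"};

        /*
         * 1-based resnos of the columns that survive junk filtering, in
         * output order, the analogue of the junk filter's cleanMap.
         */
        int         cleanMap[] = {1, 2};
        int         cleanLength = 2;
        const char *values[2];
        int         i;

        for (i = 0; i < cleanLength; i++)
        {
            int         j = cleanMap[i] - 1;    /* same conversion as in the hunk */

            values[i] = old_values[j];
        }

        for (i = 0; i < cleanLength; i++)
            printf("clean column %d = %s\n", i + 1, values[i]);
        return 0;
    }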
*/ { bool junk_filter_needed = false; @@ -1460,7 +1460,7 @@ ldelete:; &ctid, estate->es_snapshot->curcid, estate->es_crosscheck_snapshot, - true /* wait for commit */); + true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: @@ -1596,7 +1596,7 @@ lreplace:; &ctid, estate->es_snapshot->curcid, estate->es_crosscheck_snapshot, - true /* wait for commit */); + true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 65721e0863d..2ad0423810b 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.167 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.168 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -59,51 +59,51 @@ static Datum ExecEvalArrayRef(ArrayRefExprState *astate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalAggref(AggrefExprState *aggref, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo, List *argList, ExprContext *econtext); static Datum ExecMakeFunctionResultNoSets(FuncExprState *fcache, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCaseTestExpr(ExprState *exprstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalArray(ArrayExprState *astate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalRow(RowExprState 
*rstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalNullTest(GenericExprState *nstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); @@ -114,14 +114,14 @@ static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalCoerceToDomainValue(ExprState *exprstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalFieldSelect(FieldSelectState *fstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalFieldStore(FieldStoreState *fstate, - ExprContext *econtext, - bool *isNull, ExprDoneCond *isDone); + ExprContext *econtext, + bool *isNull, ExprDoneCond *isDone); static Datum ExecEvalRelabelType(GenericExprState *exprstate, ExprContext *econtext, bool *isNull, ExprDoneCond *isDone); @@ -145,7 +145,7 @@ static Datum ExecEvalRelabelType(GenericExprState *exprstate, * * Note: for notational simplicity we declare these functions as taking the * specific type of ExprState that they work on. This requires casting when - * assigning the function pointer in ExecInitExpr. Be careful that the + * assigning the function pointer in ExecInitExpr. Be careful that the * function signature is declared correctly, because the cast suppresses * automatic checking! * @@ -236,13 +236,13 @@ ExecEvalArrayRef(ArrayRefExprState *astate, isDone)); /* - * If refexpr yields NULL, and it's a fetch, then result is NULL. - * In the assignment case, we'll cons up something below. + * If refexpr yields NULL, and it's a fetch, then result is NULL. In + * the assignment case, we'll cons up something below. */ if (*isNull) { if (isDone && *isDone == ExprEndResult) - return (Datum) NULL; /* end of set result */ + return (Datum) NULL; /* end of set result */ if (!isAssignment) return (Datum) NULL; } @@ -321,10 +321,11 @@ ExecEvalArrayRef(ArrayRefExprState *astate, * * XXX At some point we'll need to look into making the old value of * the array element available via CaseTestExpr, as is done by - * ExecEvalFieldStore. This is not needed now but will be needed - * to support arrays of composite types; in an assignment to a field - * of an array member, the parser would generate a FieldStore that - * expects to fetch its input tuple via CaseTestExpr. + * ExecEvalFieldStore. This is not needed now but will be needed + * to support arrays of composite types; in an assignment to a + * field of an array member, the parser would generate a + * FieldStore that expects to fetch its input tuple via + * CaseTestExpr. */ sourceData = ExecEvalExpr(astate->refassgnexpr, econtext, @@ -339,15 +340,16 @@ ExecEvalArrayRef(ArrayRefExprState *astate, return PointerGetDatum(array_source); /* - * For an assignment, if all the subscripts and the input expression - * are non-null but the original array is null, then substitute an - * empty (zero-dimensional) array and proceed with the assignment. 
- * This only works for varlena arrays, though; for fixed-length - * array types we punt and return the null input array. + * For an assignment, if all the subscripts and the input + * expression are non-null but the original array is null, then + * substitute an empty (zero-dimensional) array and proceed with + * the assignment. This only works for varlena arrays, though; for + * fixed-length array types we punt and return the null input + * array. */ if (*isNull) { - if (astate->refattrlength > 0) /* fixed-length array? */ + if (astate->refattrlength > 0) /* fixed-length array? */ return PointerGetDatum(array_source); array_source = construct_md_array(NULL, 0, NULL, NULL, @@ -444,10 +446,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, /* * Get the slot and attribute number we want * - * The asserts check that references to system attributes only appear - * at the level of a relation scan; at higher levels, system attributes - * must be treated as ordinary variables (since we no longer have access - * to the original tuple). + * The asserts check that references to system attributes only appear at + * the level of a relation scan; at higher levels, system attributes + * must be treated as ordinary variables (since we no longer have + * access to the original tuple). */ attnum = variable->varattno; @@ -476,8 +478,8 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, tuple_type = slot->ttc_tupleDescriptor; /* - * Some checks that are only applied for user attribute numbers - * (bogus system attnums will be caught inside heap_getattr). + * Some checks that are only applied for user attribute numbers (bogus + * system attnums will be caught inside heap_getattr). */ if (attnum > 0) { @@ -488,9 +490,10 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, tuple_type->attrs[attnum - 1] != NULL); /* - * If the attribute's column has been dropped, we force a NULL result. - * This case should not happen in normal use, but it could happen if - * we are executing a plan cached before the column was dropped. + * If the attribute's column has been dropped, we force a NULL + * result. This case should not happen in normal use, but it could + * happen if we are executing a plan cached before the column was + * dropped. */ if (tuple_type->attrs[attnum - 1]->attisdropped) { @@ -499,13 +502,14 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext, } /* - * This assert checks that the datatype the plan expects to get (as - * told by our "variable" argument) is in fact the datatype of the - * attribute being fetched (as seen in the current context, identified - * by our "econtext" argument). Otherwise crashes are likely. + * This assert checks that the datatype the plan expects to get + * (as told by our "variable" argument) is in fact the datatype of + * the attribute being fetched (as seen in the current context, + * identified by our "econtext" argument). Otherwise crashes are + * likely. * - * Note that we can't check dropped columns, since their atttypid - * has been zeroed. + * Note that we can't check dropped columns, since their atttypid has + * been zeroed. */ Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid); } @@ -590,7 +594,8 @@ ExecEvalParam(ExprState *exprstate, ExprContext *econtext, else { /* - * All other parameter types must be sought in ecxt_param_list_info. + * All other parameter types must be sought in + * ecxt_param_list_info. 
*/ ParamListInfo paramInfo; @@ -964,7 +969,7 @@ ExecMakeFunctionResult(FuncExprState *fcache, { RegisterExprContextCallback(econtext, ShutdownFuncExpr, - PointerGetDatum(fcache)); + PointerGetDatum(fcache)); fcache->shutdown_reg = true; } } @@ -1006,8 +1011,8 @@ ExecMakeFunctionResult(FuncExprState *fcache, * * We change the ExprState function pointer to use the simpler * ExecMakeFunctionResultNoSets on subsequent calls. This amounts - * to assuming that no argument can return a set if it didn't do so - * the first time. + * to assuming that no argument can return a set if it didn't do + * so the first time. */ fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets; @@ -1098,7 +1103,7 @@ ExecMakeFunctionResultNoSets(FuncExprState *fcache, } } } - /* fcinfo.isnull = false; */ /* handled by MemSet */ + /* fcinfo.isnull = false; */ /* handled by MemSet */ result = FunctionCallInvoke(&fcinfo); *isNull = fcinfo.isnull; @@ -1273,9 +1278,9 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, break; /* - * Can't do anything useful with NULL rowtype values. Currently - * we raise an error, but another alternative is to just ignore - * the result and "continue" to get another row. + * Can't do anything useful with NULL rowtype values. + * Currently we raise an error, but another alternative is to + * just ignore the result and "continue" to get another row. */ if (returnsTuple && fcinfo.isnull) ereport(ERROR, @@ -1293,13 +1298,14 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, { /* * Use the type info embedded in the rowtype Datum to - * look up the needed tupdesc. Make a copy for the query. + * look up the needed tupdesc. Make a copy for the + * query. */ - HeapTupleHeader td; + HeapTupleHeader td; td = DatumGetHeapTupleHeader(result); tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td), - HeapTupleHeaderGetTypMod(td)); + HeapTupleHeaderGetTypMod(td)); tupdesc = CreateTupleDescCopy(tupdesc); } else @@ -1326,7 +1332,7 @@ ExecMakeTableFunctionResult(ExprState *funcexpr, */ if (returnsTuple) { - HeapTupleHeader td; + HeapTupleHeader td; td = DatumGetHeapTupleHeader(result); @@ -1826,10 +1832,10 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, *isDone = ExprSingleResult; /* - * If there's a test expression, we have to evaluate it and save - * the value where the CaseTestExpr placeholders can find it. - * We must save and restore prior setting of econtext's caseValue fields, - * in case this node is itself within a larger CASE. + * If there's a test expression, we have to evaluate it and save the + * value where the CaseTestExpr placeholders can find it. We must save + * and restore prior setting of econtext's caseValue fields, in case + * this node is itself within a larger CASE. 
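The ExecEvalCase() comment just above describes stashing the test-expression result where CaseTestExpr placeholders can find it, while saving and restoring the previous values so that a CASE nested inside another CASE does not clobber its parent. A sketch of that save/restore discipline on a made-up context struct; only the pattern, not the executor's data structures, is represented.

    #include <stdio.h>
    #include <stdbool.h>

    /* stand-in for the ExprContext fields used to pass the CASE test value */
    typedef struct
    {
        long        caseValue_datum;
        bool        caseValue_isNull;
    } ToyExprContext;

    /* evaluate a pretend CASE whose test expression yields 'testval' */
    static void
    eval_case(ToyExprContext *econtext, long testval, int depth)
    {
        long        save_datum = econtext->caseValue_datum;
        bool        save_isNull = econtext->caseValue_isNull;

        econtext->caseValue_datum = testval;
        econtext->caseValue_isNull = false;

        printf("depth %d sees test value %ld\n", depth, econtext->caseValue_datum);
        if (depth < 2)
            eval_case(econtext, testval * 10, depth + 1);   /* nested CASE */

        /* restore, so the enclosing CASE still sees its own test value */
        econtext->caseValue_datum = save_datum;
        econtext->caseValue_isNull = save_isNull;
        printf("depth %d restored to %ld\n", depth, econtext->caseValue_datum);
    }

    int
    main(void)
    {
        ToyExprContext econtext = {0, true};

        eval_case(&econtext, 7, 1);
        return 0;
    }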
*/ save_datum = econtext->caseValue_datum; save_isNull = econtext->caseValue_isNull; @@ -1838,7 +1844,7 @@ ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext, { econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg, econtext, - &econtext->caseValue_isNull, + &econtext->caseValue_isNull, NULL); } @@ -2009,7 +2015,7 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("cannot merge incompatible arrays"), errdetail("Array with element type %s cannot be " - "included in ARRAY construct with element type %s.", + "included in ARRAY construct with element type %s.", format_type_be(ARR_ELEMTYPE(array)), format_type_be(element_type)))); @@ -2021,8 +2027,8 @@ ExecEvalArray(ArrayExprState *astate, ExprContext *econtext, if (ndims <= 0 || ndims > MAXDIM) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("number of array dimensions (%d) exceeds " \ - "the maximum allowed (%d)", ndims, MAXDIM))); + errmsg("number of array dimensions (%d) exceeds " \ + "the maximum allowed (%d)", ndims, MAXDIM))); elem_dims = (int *) palloc(elem_ndims * sizeof(int)); memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int)); @@ -2600,18 +2606,18 @@ ExecEvalFieldStore(FieldStoreState *fstate, forboth(l1, fstate->newvals, l2, fstore->fieldnums) { - ExprState *newval = (ExprState *) lfirst(l1); - AttrNumber fieldnum = lfirst_int(l2); + ExprState *newval = (ExprState *) lfirst(l1); + AttrNumber fieldnum = lfirst_int(l2); bool eisnull; Assert(fieldnum > 0 && fieldnum <= tupDesc->natts); /* - * Use the CaseTestExpr mechanism to pass down the old value of the - * field being replaced; this is useful in case we have a nested field - * update situation. It's safe to reuse the CASE mechanism because - * there cannot be a CASE between here and where the value would be - * needed. + * Use the CaseTestExpr mechanism to pass down the old value of + * the field being replaced; this is useful in case we have a + * nested field update situation. It's safe to reuse the CASE + * mechanism because there cannot be a CASE between here and where + * the value would be needed. */ econtext->caseValue_datum = values[fieldnum - 1]; econtext->caseValue_isNull = (nulls[fieldnum - 1] == 'n'); @@ -2981,7 +2987,7 @@ ExecInitExpr(Expr *node, PlanState *parent) break; case T_RowExpr: { - RowExpr *rowexpr = (RowExpr *) node; + RowExpr *rowexpr = (RowExpr *) node; RowExprState *rstate = makeNode(RowExprState); Form_pg_attribute *attrs; List *outlist = NIL; @@ -3016,15 +3022,15 @@ ExecInitExpr(Expr *node, PlanState *parent) /* * Guard against ALTER COLUMN TYPE on rowtype * since the RowExpr was created. XXX should we - * check typmod too? Not sure we can be sure it'll - * be the same. + * check typmod too? Not sure we can be sure + * it'll be the same. 
*/ if (exprType((Node *) e) != attrs[i]->atttypid) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("ROW() column has type %s instead of type %s", - format_type_be(exprType((Node *) e)), - format_type_be(attrs[i]->atttypid)))); + format_type_be(exprType((Node *) e)), + format_type_be(attrs[i]->atttypid)))); } else { @@ -3111,7 +3117,7 @@ ExecInitExpr(Expr *node, PlanState *parent) TargetEntry *tle = (TargetEntry *) node; GenericExprState *gstate = makeNode(GenericExprState); - gstate->xprstate.evalfunc = NULL; /* not used */ + gstate->xprstate.evalfunc = NULL; /* not used */ gstate->arg = ExecInitExpr(tle->expr, parent); state = (ExprState *) gstate; } @@ -3546,8 +3552,8 @@ ExecProject(ProjectionInfo *projInfo, ExprDoneCond *isDone) /* * store the tuple in the projection slot and return the slot. */ - return ExecStoreTuple(newTuple, /* tuple to store */ - slot, /* slot to store in */ - InvalidBuffer, /* tuple has no buffer */ + return ExecStoreTuple(newTuple, /* tuple to store */ + slot, /* slot to store in */ + InvalidBuffer, /* tuple has no buffer */ true); } diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c index fd123bbd551..6adefdc2666 100644 --- a/src/backend/executor/execScan.c +++ b/src/backend/executor/execScan.c @@ -12,7 +12,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.32 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.33 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -224,8 +224,8 @@ tlist_matches_tupdesc(PlanState *ps, List *tlist, Index varno, TupleDesc tupdesc return false; /* tlist too long */ /* - * If the plan context requires a particular hasoid setting, then - * that has to match, too. + * If the plan context requires a particular hasoid setting, then that + * has to match, too. */ if (ExecContextForcesOids(ps, &hasoid) && hasoid != tupdesc->tdhasoid) diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c index 98f4d503e8c..92d6cd43743 100644 --- a/src/backend/executor/execTuples.c +++ b/src/backend/executor/execTuples.c @@ -15,7 +15,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.81 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.82 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -117,7 +117,7 @@ static TupleDesc ExecTypeFromTLInternal(List *targetList, - bool hasoid, bool skipjunk); + bool hasoid, bool skipjunk); /* ---------------------------------------------------------------- @@ -149,7 +149,7 @@ ExecCreateTupleTable(int initialSize) /* initial number of slots in /* * Now allocate our new table along with space for the pointers to the - * tuples. Zero out the slots. + * tuples. Zero out the slots. 
*/ newtable = (TupleTable) palloc(sizeof(TupleTableData)); @@ -568,10 +568,10 @@ ExecCleanTypeFromTL(List *targetList, bool hasoid) static TupleDesc ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk) { - TupleDesc typeInfo; - ListCell *l; - int len; - int cur_resno = 1; + TupleDesc typeInfo; + ListCell *l; + int len; + int cur_resno = 1; if (skipjunk) len = ExecCleanTargetListLength(targetList); @@ -581,8 +581,8 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk) foreach(l, targetList) { - TargetEntry *tle = lfirst(l); - Resdom *resdom = tle->resdom; + TargetEntry *tle = lfirst(l); + Resdom *resdom = tle->resdom; if (skipjunk && resdom->resjunk) continue; @@ -605,16 +605,16 @@ ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk) TupleDesc ExecTypeFromExprList(List *exprList) { - TupleDesc typeInfo; - ListCell *l; - int cur_resno = 1; + TupleDesc typeInfo; + ListCell *l; + int cur_resno = 1; char fldname[NAMEDATALEN]; typeInfo = CreateTemplateTupleDesc(list_length(exprList), false); foreach(l, exprList) { - Node *e = lfirst(l); + Node *e = lfirst(l); sprintf(fldname, "f%d", cur_resno); diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 1e5694a9269..79ab787b07a 100644 --- a/src/backend/executor/execUtils.c +++ b/src/backend/executor/execUtils.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.113 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.114 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -179,7 +179,7 @@ CreateExecutorState(void) */ estate->es_direction = ForwardScanDirection; estate->es_snapshot = SnapshotNow; - estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */ + estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */ estate->es_range_table = NIL; estate->es_result_relations = NULL; @@ -248,7 +248,8 @@ FreeExecutorState(EState *estate) */ while (estate->es_exprcontexts) { - /* XXX: seems there ought to be a faster way to implement this + /* + * XXX: seems there ought to be a faster way to implement this * than repeated list_delete(), no? */ FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts)); @@ -364,7 +365,7 @@ FreeExprContext(ExprContext *econtext) * ReScanExprContext * * Reset an expression context in preparation for a rescan of its - * plan node. This requires calling any registered shutdown callbacks, + * plan node. This requires calling any registered shutdown callbacks, * since any partially complete set-returning-functions must be canceled. * * Note we make no assumption about the caller's memory context. 
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index f06fabb5fc8..ea3b12be5f6 100644 --- a/src/backend/executor/functions.c +++ b/src/backend/executor/functions.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.85 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.86 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -58,7 +58,7 @@ typedef struct local_es */ typedef struct { - Oid *argtypes; /* resolved types of arguments */ + Oid *argtypes; /* resolved types of arguments */ Oid rettype; /* actual return type */ int typlen; /* length of the return type */ bool typbyval; /* true if return type is pass by value */ @@ -94,7 +94,7 @@ init_execution_state(List *queryTree_list) { execution_state *firstes = NULL; execution_state *preves = NULL; - ListCell *qtl_item; + ListCell *qtl_item; foreach(qtl_item, queryTree_list) { @@ -180,8 +180,8 @@ init_sql_fcache(FmgrInfo *finfo) typeStruct = (Form_pg_type) GETSTRUCT(typeTuple); /* - * get the type length and by-value flag from the type tuple; also - * do a preliminary check for returnsTuple (this may prove inaccurate, + * get the type length and by-value flag from the type tuple; also do + * a preliminary check for returnsTuple (this may prove inaccurate, * see below). */ fcache->typlen = typeStruct->typlen; @@ -190,8 +190,8 @@ init_sql_fcache(FmgrInfo *finfo) rettype == RECORDOID); /* - * Parse and rewrite the queries. We need the argument type info to pass - * to the parser. + * Parse and rewrite the queries. We need the argument type info to + * pass to the parser. */ nargs = procedureStruct->pronargs; haspolyarg = false; @@ -240,11 +240,11 @@ init_sql_fcache(FmgrInfo *finfo) * If the function has any arguments declared as polymorphic types, * then it wasn't type-checked at definition time; must do so now. * - * Also, force a type-check if the declared return type is a rowtype; - * we need to find out whether we are actually returning the whole - * tuple result, or just regurgitating a rowtype expression result. - * In the latter case we clear returnsTuple because we need not act - * different from the scalar result case. + * Also, force a type-check if the declared return type is a rowtype; we + * need to find out whether we are actually returning the whole tuple + * result, or just regurgitating a rowtype expression result. In the + * latter case we clear returnsTuple because we need not act different + * from the scalar result case. */ if (haspolyarg || fcache->returnsTuple) fcache->returnsTuple = check_sql_fn_retval(rettype, @@ -395,9 +395,9 @@ postquel_execute(execution_state *es, * XXX do we need to remove junk attrs from the result tuple? * Probably OK to leave them, as long as they are at the end. */ - HeapTupleHeader dtup; - Oid dtuptype; - int32 dtuptypmod; + HeapTupleHeader dtup; + Oid dtuptype; + int32 dtuptypmod; dtup = (HeapTupleHeader) palloc(tup->t_len); memcpy((char *) dtup, (char *) tup->t_data, tup->t_len); @@ -433,8 +433,8 @@ postquel_execute(execution_state *es, else { /* - * Returning a scalar, which we have to extract from the - * first column of the SELECT result, and then copy into current + * Returning a scalar, which we have to extract from the first + * column of the SELECT result, and then copy into current * execution context if needed. 
*/ value = heap_getattr(tup, 1, tupDesc, &(fcinfo->isnull)); @@ -635,7 +635,8 @@ sql_exec_error_callback(void *arg) fn_name = NameStr(functup->proname); /* - * If there is a syntax error position, convert to internal syntax error + * If there is a syntax error position, convert to internal syntax + * error */ syntaxerrposition = geterrposition(); if (syntaxerrposition > 0) diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 99173a17a07..b31cd8b0e97 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -45,7 +45,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.124 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.125 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -252,11 +252,11 @@ initialize_aggregates(AggState *aggstate, } /* - * If we are reinitializing after a group boundary, we have to free - * any prior transValue to avoid memory leakage. We must check not - * only the isnull flag but whether the pointer is NULL; since - * pergroupstate is initialized with palloc0, the initial condition - * has isnull = 0 and null pointer. + * If we are reinitializing after a group boundary, we have to + * free any prior transValue to avoid memory leakage. We must + * check not only the isnull flag but whether the pointer is NULL; + * since pergroupstate is initialized with palloc0, the initial + * condition has isnull = 0 and null pointer. */ if (!peraggstate->transtypeByVal && !pergroupstate->transValueIsNull && @@ -811,14 +811,14 @@ agg_retrieve_direct(AggState *aggstate) /* * If we have no first tuple (ie, the outerPlan didn't return * anything), create a dummy all-nulls input tuple for use by - * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles, - * because ordinarily the projected output tuple's targetlist - * cannot contain any direct (non-aggregated) references to input - * columns, so the dummy tuple will not be referenced. However - * there are special cases where this isn't so --- in particular - * an UPDATE involving an aggregate will have a targetlist - * reference to ctid. We need to return a null for ctid in that - * situation, not coredump. + * ExecQual/ExecProject. 99.44% of the time this is a waste of + * cycles, because ordinarily the projected output tuple's + * targetlist cannot contain any direct (non-aggregated) + * references to input columns, so the dummy tuple will not be + * referenced. However there are special cases where this isn't so + * --- in particular an UPDATE involving an aggregate will have a + * targetlist reference to ctid. We need to return a null for + * ctid in that situation, not coredump. * * The values returned for the aggregates will be the initial values * of the transition functions. @@ -865,9 +865,9 @@ agg_retrieve_direct(AggState *aggstate) if (ExecQual(aggstate->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the aggregate results - * and the representative input tuple. Note we do not support - * aggregates returning sets ... + * Form and return a projection tuple using the aggregate + * results and the representative input tuple. Note we do not + * support aggregates returning sets ... 
*/ return ExecProject(projInfo, NULL); } @@ -1009,9 +1009,9 @@ agg_retrieve_hash_table(AggState *aggstate) if (ExecQual(aggstate->ss.ps.qual, econtext, false)) { /* - * Form and return a projection tuple using the aggregate results - * and the representative input tuple. Note we do not support - * aggregates returning sets ... + * Form and return a projection tuple using the aggregate + * results and the representative input tuple. Note we do not + * support aggregates returning sets ... */ return ExecProject(projInfo, NULL); } @@ -1478,7 +1478,10 @@ ExecReScanAgg(AggState *node, ExprContext *exprCtxt) } else { - /* Reset the per-group state (in particular, mark transvalues null) */ + /* + * Reset the per-group state (in particular, mark transvalues + * null) + */ MemSet(node->pergroup, 0, sizeof(AggStatePerGroupData) * node->numaggs); } diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c index f941ec32890..fbc55655714 100644 --- a/src/backend/executor/nodeHashjoin.c +++ b/src/backend/executor/nodeHashjoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.63 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.64 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -559,7 +559,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate, if (nread != sizeof(HeapTupleData)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from hash-join temporary file: %m"))); + errmsg("could not read from hash-join temporary file: %m"))); heapTuple = palloc(HEAPTUPLESIZE + htup.t_len); memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData)); heapTuple->t_datamcxt = CurrentMemoryContext; @@ -569,7 +569,7 @@ ExecHashJoinGetSavedTuple(HashJoinState *hjstate, if (nread != (size_t) htup.t_len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from hash-join temporary file: %m"))); + errmsg("could not read from hash-join temporary file: %m"))); return ExecStoreTuple(heapTuple, tupleSlot, InvalidBuffer, true); } @@ -627,14 +627,14 @@ ExecHashJoinNewBatch(HashJoinState *hjstate) if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); innerFile = hashtable->innerBatchFile[newbatch - 1]; if (BufFileSeek(innerFile, 0, 0L, SEEK_SET)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not rewind hash-join temporary file: %m"))); + errmsg("could not rewind hash-join temporary file: %m"))); /* * Reload the hash table with the new inner batch @@ -685,12 +685,12 @@ ExecHashJoinSaveTuple(HeapTuple heapTuple, if (written != sizeof(HeapTupleData)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to hash-join temporary file: %m"))); + errmsg("could not write to hash-join temporary file: %m"))); written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len); if (written != (size_t) heapTuple->t_len) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to hash-join temporary file: %m"))); + errmsg("could not write to hash-join temporary file: %m"))); } void diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c index eb7b5720359..2ff0121baff 100644 --- a/src/backend/executor/nodeIndexscan.c +++ 
b/src/backend/executor/nodeIndexscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.96 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.97 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -38,7 +38,7 @@ * In a multiple-index plan, we must take care to return any given tuple * only once, even if it matches conditions of several index scans. Our * preferred way to do this is to record already-returned tuples in a hash - * table (using the TID as unique identifier). However, in a very large + * table (using the TID as unique identifier). However, in a very large * scan this could conceivably run out of memory. We limit the hash table * to no more than work_mem KB; if it grows past that, we fall back to the * pre-7.4 technique: evaluate the prior-scan index quals again for each @@ -129,11 +129,11 @@ IndexNext(IndexScanState *node) scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid; /* - * Clear any reference to the previously returned tuple. The idea here - * is to not have the tuple slot be the last holder of a pin on that - * tuple's buffer; if it is, we'll need a separate visit to the bufmgr - * to release the buffer. By clearing here, we get to have the release - * done by ReleaseAndReadBuffer inside index_getnext. + * Clear any reference to the previously returned tuple. The idea + * here is to not have the tuple slot be the last holder of a pin on + * that tuple's buffer; if it is, we'll need a separate visit to the + * bufmgr to release the buffer. By clearing here, we get to have the + * release done by ReleaseAndReadBuffer inside index_getnext. */ ExecClearTuple(slot); @@ -215,8 +215,9 @@ IndexNext(IndexScanState *node) false); /* don't pfree */ /* - * If any of the index operators involved in this scan are lossy, - * recheck them by evaluating the original operator clauses. + * If any of the index operators involved in this scan are + * lossy, recheck them by evaluating the original operator + * clauses. */ if (lossyQual) { @@ -224,15 +225,19 @@ IndexNext(IndexScanState *node) ResetExprContext(econtext); if (!ExecQual(lossyQual, econtext, false)) { - /* Fails lossy op, so drop it and loop back for another */ + /* + * Fails lossy op, so drop it and loop back for + * another + */ ExecClearTuple(slot); continue; } } /* - * If it's a multiple-index scan, make sure not to double-report - * a tuple matched by more than one index. (See notes above.) + * If it's a multiple-index scan, make sure not to + * double-report a tuple matched by more than one index. (See + * notes above.) */ if (numIndices > 1) { @@ -240,7 +245,7 @@ IndexNext(IndexScanState *node) if (node->iss_DupHash) { DupHashTabEntry *entry; - bool found; + bool found; entry = (DupHashTabEntry *) hash_search(node->iss_DupHash, @@ -248,7 +253,7 @@ IndexNext(IndexScanState *node) HASH_ENTER, &found); if (entry == NULL || - node->iss_DupHash->hctl->nentries > node->iss_MaxHash) + node->iss_DupHash->hctl->nentries > node->iss_MaxHash) { /* out of memory (either hard or soft limit) */ /* release hash table and fall thru to old code */ @@ -679,10 +684,11 @@ ExecInitIndexScan(IndexScan *node, EState *estate) * initialize child expressions * * Note: we don't initialize all of the indxqual expression, only the - * sub-parts corresponding to runtime keys (see below). 
The indxqualorig - * expression is always initialized even though it will only be used in - * some uncommon cases --- would be nice to improve that. (Problem is - * that any SubPlans present in the expression must be found now...) + * sub-parts corresponding to runtime keys (see below). The + * indxqualorig expression is always initialized even though it will + * only be used in some uncommon cases --- would be nice to improve + * that. (Problem is that any SubPlans present in the expression must + * be found now...) */ indexstate->ss.ps.targetlist = (List *) ExecInitExpr((Expr *) node->scan.plan.targetlist, @@ -788,14 +794,14 @@ ExecInitIndexScan(IndexScan *node, EState *estate) lossyflag_cell = list_head(lossyflags); for (j = 0; j < n_keys; j++) { - OpExpr *clause; /* one clause of index qual */ - Expr *leftop; /* expr on lhs of operator */ - Expr *rightop; /* expr on rhs ... */ + OpExpr *clause; /* one clause of index qual */ + Expr *leftop; /* expr on lhs of operator */ + Expr *rightop; /* expr on rhs ... */ int flags = 0; AttrNumber varattno; /* att number used in scan */ StrategyNumber strategy; /* op's strategy number */ - Oid subtype; /* op's strategy subtype */ - int lossy; /* op's recheck flag */ + Oid subtype; /* op's strategy subtype */ + int lossy; /* op's recheck flag */ RegProcedure opfuncid; /* operator proc id used in scan */ Datum scanvalue; /* value used in scan (if const) */ @@ -819,15 +825,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate) /* * Here we figure out the contents of the index qual. The * usual case is (var op const) which means we form a scan key - * for the attribute listed in the var node and use the value of - * the const as comparison data. + * for the attribute listed in the var node and use the value + * of the const as comparison data. * * If we don't have a const node, it means our scan key is a - * function of information obtained during the execution of the - * plan, in which case we need to recalculate the index scan key - * at run time. Hence, we set have_runtime_keys to true and place - * the appropriate subexpression in run_keys. The corresponding - * scan key values are recomputed at run time. + * function of information obtained during the execution of + * the plan, in which case we need to recalculate the index + * scan key at run time. Hence, we set have_runtime_keys to + * true and place the appropriate subexpression in run_keys. + * The corresponding scan key values are recomputed at run + * time. */ run_keys[j] = NULL; @@ -892,18 +899,18 @@ ExecInitIndexScan(IndexScan *node, EState *estate) scanvalue); /* constant */ /* - * If this operator is lossy, add its indxqualorig - * expression to the list of quals to recheck. The - * list_nth() calls here could be avoided by chasing the - * lists in parallel to all the other lists, but since - * lossy operators are very uncommon, it's probably a - * waste of time to do so. + * If this operator is lossy, add its indxqualorig expression + * to the list of quals to recheck. The list_nth() calls here + * could be avoided by chasing the lists in parallel to all + * the other lists, but since lossy operators are very + * uncommon, it's probably a waste of time to do so. 
*/ if (lossy) { - List *qualOrig = indexstate->indxqualorig; + List *qualOrig = indexstate->indxqualorig; + lossyQuals[i] = lappend(lossyQuals[i], - list_nth((List *) list_nth(qualOrig, i), j)); + list_nth((List *) list_nth(qualOrig, i), j)); } } @@ -1037,7 +1044,7 @@ create_duphash(IndexScanState *node) node->iss_DupHash = hash_create("DupHashTable", nbuckets, &hash_ctl, - HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); + HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT); if (node->iss_DupHash == NULL) ereport(ERROR, (errcode(ERRCODE_OUT_OF_MEMORY), diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c index 585eee19fe0..e913757d2ee 100644 --- a/src/backend/executor/nodeMergejoin.c +++ b/src/backend/executor/nodeMergejoin.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.67 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.68 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -104,10 +104,10 @@ static void MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals, PlanState *parent) { - List *ltexprs, - *gtexprs; - ListCell *ltcdr, - *gtcdr; + List *ltexprs, + *gtexprs; + ListCell *ltcdr, + *gtcdr; /* * Make modifiable copies of the qualList. diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c index 7a4c0cc80bb..f3976c872a5 100644 --- a/src/backend/executor/nodeSeqscan.c +++ b/src/backend/executor/nodeSeqscan.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.49 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.50 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -62,11 +62,11 @@ SeqNext(SeqScanState *node) slot = node->ss_ScanTupleSlot; /* - * Clear any reference to the previously returned tuple. The idea here - * is to not have the tuple slot be the last holder of a pin on that - * tuple's buffer; if it is, we'll need a separate visit to the bufmgr - * to release the buffer. By clearing here, we get to have the release - * done by ReleaseAndReadBuffer inside heap_getnext. + * Clear any reference to the previously returned tuple. The idea + * here is to not have the tuple slot be the last holder of a pin on + * that tuple's buffer; if it is, we'll need a separate visit to the + * bufmgr to release the buffer. By clearing here, we get to have the + * release done by ReleaseAndReadBuffer inside heap_getnext. 
*/ ExecClearTuple(slot); diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c index 028640c4b91..0a35b111109 100644 --- a/src/backend/executor/nodeSubplan.c +++ b/src/backend/executor/nodeSubplan.c @@ -7,7 +7,7 @@ * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.64 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.65 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -912,7 +912,7 @@ ExecSetParamPlan(SubPlanState *node, ExprContext *econtext) SubLinkType subLinkType = subplan->subLinkType; MemoryContext oldcontext; TupleTableSlot *slot; - ListCell *l; + ListCell *l; bool found = false; ArrayBuildState *astate = NULL; diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c index 183068a3198..3b71629ad50 100644 --- a/src/backend/executor/nodeUnique.c +++ b/src/backend/executor/nodeUnique.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.43 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.44 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -109,8 +109,9 @@ ExecUnique(UniqueState *node) * he next calls us. * * tgl 3/2004: the above concern is no longer valid; junkfilters used to - * modify their input's return slot but don't anymore, and I don't think - * anyplace else does either. Not worth changing this code though. + * modify their input's return slot but don't anymore, and I don't + * think anyplace else does either. Not worth changing this code + * though. */ if (node->priorTuple != NULL) heap_freetuple(node->priorTuple); diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index e2f7800a0bb..4ffb27b0139 100644 --- a/src/backend/executor/spi.c +++ b/src/backend/executor/spi.c @@ -8,7 +8,7 @@ * * * IDENTIFICATION - * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.124 2004/08/29 04:12:31 momjian Exp $ + * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.125 2004/08/29 05:06:42 momjian Exp $ * *------------------------------------------------------------------------- */ @@ -29,17 +29,17 @@ int SPI_result; static _SPI_connection *_SPI_stack = NULL; static _SPI_connection *_SPI_current = NULL; -static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */ +static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */ static int _SPI_connected = -1; static int _SPI_curid = -1; static int _SPI_execute(const char *src, int tcount, _SPI |